1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * ipmi_msghandler.c
4 *
5 * Incoming and outgoing message routing for an IPMI interface.
6 *
7 * Author: MontaVista Software, Inc.
8 *         Corey Minyard <minyard@mvista.com>
9 *         source@mvista.com
10 *
11 * Copyright 2002 MontaVista Software Inc.
12 */
13
14#define pr_fmt(fmt) "IPMI message handler: " fmt
15#define dev_fmt(fmt) pr_fmt(fmt)
16
17#include <linux/module.h>
18#include <linux/errno.h>
19#include <linux/panic_notifier.h>
20#include <linux/poll.h>
21#include <linux/sched.h>
22#include <linux/seq_file.h>
23#include <linux/spinlock.h>
24#include <linux/mutex.h>
25#include <linux/slab.h>
26#include <linux/ipmi.h>
27#include <linux/ipmi_smi.h>
28#include <linux/notifier.h>
29#include <linux/init.h>
30#include <linux/proc_fs.h>
31#include <linux/rcupdate.h>
32#include <linux/interrupt.h>
33#include <linux/moduleparam.h>
34#include <linux/workqueue.h>
35#include <linux/uuid.h>
36#include <linux/nospec.h>
37#include <linux/vmalloc.h>
38#include <linux/delay.h>
39
40#define IPMI_DRIVER_VERSION "39.2"
41
42static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
43static int ipmi_init_msghandler(void);
44static void smi_recv_tasklet(struct tasklet_struct *t);
45static void handle_new_recv_msgs(struct ipmi_smi *intf);
46static void need_waiter(struct ipmi_smi *intf);
47static int handle_one_recv_msg(struct ipmi_smi *intf,
48			       struct ipmi_smi_msg *msg);
49
50static bool initialized;
51static bool drvregistered;
52
53/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
54enum ipmi_panic_event_op {
55	IPMI_SEND_PANIC_EVENT_NONE,
56	IPMI_SEND_PANIC_EVENT,
57	IPMI_SEND_PANIC_EVENT_STRING,
58	IPMI_SEND_PANIC_EVENT_MAX
59};
60
61/* Indices in this array should be mapped to enum ipmi_panic_event_op */
62static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };
63
64#ifdef CONFIG_IPMI_PANIC_STRING
65#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
66#elif defined(CONFIG_IPMI_PANIC_EVENT)
67#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
68#else
69#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
70#endif
71
72static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
73
74static int panic_op_write_handler(const char *val,
75				  const struct kernel_param *kp)
76{
77	char valcp[16];
78	int e;
79
80	strscpy(valcp, val, sizeof(valcp));
81	e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
82	if (e < 0)
83		return e;
84
85	ipmi_send_panic_event = e;
86	return 0;
87}
88
89static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
90{
91	const char *event_str;
92
93	if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
94		event_str = "???";
95	else
96		event_str = ipmi_panic_event_str[ipmi_send_panic_event];
97
98	return sprintf(buffer, "%s\n", event_str);
99}
100
101static const struct kernel_param_ops panic_op_ops = {
102	.set = panic_op_write_handler,
103	.get = panic_op_read_handler
104};
105module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
106MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
107
108
109#define MAX_EVENTS_IN_QUEUE	25
110
111/* Remain in auto-maintenance mode for this amount of time (in ms). */
112static unsigned long maintenance_mode_timeout_ms = 30000;
113module_param(maintenance_mode_timeout_ms, ulong, 0644);
114MODULE_PARM_DESC(maintenance_mode_timeout_ms,
115		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
116
117/*
118 * Don't let a message sit in a queue forever, always time it with at lest
119 * the max message timer.  This is in milliseconds.
120 */
121#define MAX_MSG_TIMEOUT		60000
122
123/*
124 * Timeout times below are in milliseconds, and are done off a 1
125 * second timer.  So setting the value to 1000 would mean anything
126 * between 0 and 1000ms.  So really the only reasonable minimum
127 * setting it 2000ms, which is between 1 and 2 seconds.
128 */
129
130/* The default timeout for message retries. */
131static unsigned long default_retry_ms = 2000;
132module_param(default_retry_ms, ulong, 0644);
133MODULE_PARM_DESC(default_retry_ms,
134		 "The time (milliseconds) between retry sends");
135
136/* The default timeout for maintenance mode message retries. */
137static unsigned long default_maintenance_retry_ms = 3000;
138module_param(default_maintenance_retry_ms, ulong, 0644);
139MODULE_PARM_DESC(default_maintenance_retry_ms,
140		 "The time (milliseconds) between retry sends in maintenance mode");
141
142/* The default maximum number of retries */
143static unsigned int default_max_retries = 4;
144module_param(default_max_retries, uint, 0644);
145MODULE_PARM_DESC(default_max_retries,
146		 "The time (milliseconds) between retry sends in maintenance mode");
147
148/* The default maximum number of users that may register. */
149static unsigned int max_users = 30;
150module_param(max_users, uint, 0644);
151MODULE_PARM_DESC(max_users,
152		 "The most users that may use the IPMI stack at one time.");
153
154/* The default maximum number of message a user may have outstanding. */
155static unsigned int max_msgs_per_user = 100;
156module_param(max_msgs_per_user, uint, 0644);
157MODULE_PARM_DESC(max_msgs_per_user,
158		 "The most message a user may have outstanding.");
159
160/* Call every ~1000 ms. */
161#define IPMI_TIMEOUT_TIME	1000
162
163/* How many jiffies does it take to get to the timeout time. */
164#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
165
166/*
167 * Request events from the queue every second (this is the number of
168 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
169 * future, IPMI will add a way to know immediately if an event is in
170 * the queue and this silliness can go away.
171 */
172#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
173
174/* How long should we cache dynamic device IDs? */
175#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)
176
177/*
178 * The main "user" data structure.
179 */
180struct ipmi_user {
181	struct list_head link;
182
183	/*
184	 * Set to NULL when the user is destroyed, a pointer to myself
185	 * so srcu_dereference can be used on it.
186	 */
187	struct ipmi_user *self;
188	struct srcu_struct release_barrier;
189
190	struct kref refcount;
191
192	/* The upper layer that handles receive messages. */
193	const struct ipmi_user_hndl *handler;
194	void             *handler_data;
195
196	/* The interface this user is bound to. */
197	struct ipmi_smi *intf;
198
199	/* Does this interface receive IPMI events? */
200	bool gets_events;
201
202	atomic_t nr_msgs;
203
204	/* Free must run in process context for RCU cleanup. */
205	struct work_struct remove_work;
206};
207
208static struct workqueue_struct *remove_work_wq;
209
210static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
211	__acquires(user->release_barrier)
212{
213	struct ipmi_user *ruser;
214
215	*index = srcu_read_lock(&user->release_barrier);
216	ruser = srcu_dereference(user->self, &user->release_barrier);
217	if (!ruser)
218		srcu_read_unlock(&user->release_barrier, *index);
219	return ruser;
220}
221
222static void release_ipmi_user(struct ipmi_user *user, int index)
223{
224	srcu_read_unlock(&user->release_barrier, index);
225}
226
227struct cmd_rcvr {
228	struct list_head link;
229
230	struct ipmi_user *user;
231	unsigned char netfn;
232	unsigned char cmd;
233	unsigned int  chans;
234
235	/*
236	 * This is used to form a linked lised during mass deletion.
237	 * Since this is in an RCU list, we cannot use the link above
238	 * or change any data until the RCU period completes.  So we
239	 * use this next variable during mass deletion so we can have
240	 * a list and don't have to wait and restart the search on
241	 * every individual deletion of a command.
242	 */
243	struct cmd_rcvr *next;
244};
245
246struct seq_table {
247	unsigned int         inuse : 1;
248	unsigned int         broadcast : 1;
249
250	unsigned long        timeout;
251	unsigned long        orig_timeout;
252	unsigned int         retries_left;
253
254	/*
255	 * To verify on an incoming send message response that this is
256	 * the message that the response is for, we keep a sequence id
257	 * and increment it every time we send a message.
258	 */
259	long                 seqid;
260
261	/*
262	 * This is held so we can properly respond to the message on a
263	 * timeout, and it is used to hold the temporary data for
264	 * retransmission, too.
265	 */
266	struct ipmi_recv_msg *recv_msg;
267};
268
269/*
270 * Store the information in a msgid (long) to allow us to find a
271 * sequence table entry from the msgid.
272 */
273#define STORE_SEQ_IN_MSGID(seq, seqid) \
274	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
275
276#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
277	do {								\
278		seq = (((msgid) >> 26) & 0x3f);				\
279		seqid = ((msgid) & 0x3ffffff);				\
280	} while (0)
281
282#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
283
284#define IPMI_MAX_CHANNELS       16
285struct ipmi_channel {
286	unsigned char medium;
287	unsigned char protocol;
288};
289
290struct ipmi_channel_set {
291	struct ipmi_channel c[IPMI_MAX_CHANNELS];
292};
293
294struct ipmi_my_addrinfo {
295	/*
296	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
297	 * but may be changed by the user.
298	 */
299	unsigned char address;
300
301	/*
302	 * My LUN.  This should generally stay the SMS LUN, but just in
303	 * case...
304	 */
305	unsigned char lun;
306};
307
308/*
309 * Note that the product id, manufacturer id, guid, and device id are
310 * immutable in this structure, so dyn_mutex is not required for
311 * accessing those.  If those change on a BMC, a new BMC is allocated.
312 */
313struct bmc_device {
314	struct platform_device pdev;
315	struct list_head       intfs; /* Interfaces on this BMC. */
316	struct ipmi_device_id  id;
317	struct ipmi_device_id  fetch_id;
318	int                    dyn_id_set;
319	unsigned long          dyn_id_expiry;
320	struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
321	guid_t                 guid;
322	guid_t                 fetch_guid;
323	int                    dyn_guid_set;
324	struct kref	       usecount;
325	struct work_struct     remove_work;
326	unsigned char	       cc; /* completion code */
327};
328#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
329
330static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
331			     struct ipmi_device_id *id,
332			     bool *guid_set, guid_t *guid);
333
334/*
335 * Various statistics for IPMI, these index stats[] in the ipmi_smi
336 * structure.
337 */
338enum ipmi_stat_indexes {
339	/* Commands we got from the user that were invalid. */
340	IPMI_STAT_sent_invalid_commands = 0,
341
342	/* Commands we sent to the MC. */
343	IPMI_STAT_sent_local_commands,
344
345	/* Responses from the MC that were delivered to a user. */
346	IPMI_STAT_handled_local_responses,
347
348	/* Responses from the MC that were not delivered to a user. */
349	IPMI_STAT_unhandled_local_responses,
350
351	/* Commands we sent out to the IPMB bus. */
352	IPMI_STAT_sent_ipmb_commands,
353
354	/* Commands sent on the IPMB that had errors on the SEND CMD */
355	IPMI_STAT_sent_ipmb_command_errs,
356
357	/* Each retransmit increments this count. */
358	IPMI_STAT_retransmitted_ipmb_commands,
359
360	/*
361	 * When a message times out (runs out of retransmits) this is
362	 * incremented.
363	 */
364	IPMI_STAT_timed_out_ipmb_commands,
365
366	/*
367	 * This is like above, but for broadcasts.  Broadcasts are
368	 * *not* included in the above count (they are expected to
369	 * time out).
370	 */
371	IPMI_STAT_timed_out_ipmb_broadcasts,
372
373	/* Responses I have sent to the IPMB bus. */
374	IPMI_STAT_sent_ipmb_responses,
375
376	/* The response was delivered to the user. */
377	IPMI_STAT_handled_ipmb_responses,
378
379	/* The response had invalid data in it. */
380	IPMI_STAT_invalid_ipmb_responses,
381
382	/* The response didn't have anyone waiting for it. */
383	IPMI_STAT_unhandled_ipmb_responses,
384
385	/* Commands we sent out to the IPMB bus. */
386	IPMI_STAT_sent_lan_commands,
387
388	/* Commands sent on the IPMB that had errors on the SEND CMD */
389	IPMI_STAT_sent_lan_command_errs,
390
391	/* Each retransmit increments this count. */
392	IPMI_STAT_retransmitted_lan_commands,
393
394	/*
395	 * When a message times out (runs out of retransmits) this is
396	 * incremented.
397	 */
398	IPMI_STAT_timed_out_lan_commands,
399
400	/* Responses I have sent to the IPMB bus. */
401	IPMI_STAT_sent_lan_responses,
402
403	/* The response was delivered to the user. */
404	IPMI_STAT_handled_lan_responses,
405
406	/* The response had invalid data in it. */
407	IPMI_STAT_invalid_lan_responses,
408
409	/* The response didn't have anyone waiting for it. */
410	IPMI_STAT_unhandled_lan_responses,
411
412	/* The command was delivered to the user. */
413	IPMI_STAT_handled_commands,
414
415	/* The command had invalid data in it. */
416	IPMI_STAT_invalid_commands,
417
418	/* The command didn't have anyone waiting for it. */
419	IPMI_STAT_unhandled_commands,
420
421	/* Invalid data in an event. */
422	IPMI_STAT_invalid_events,
423
424	/* Events that were received with the proper format. */
425	IPMI_STAT_events,
426
427	/* Retransmissions on IPMB that failed. */
428	IPMI_STAT_dropped_rexmit_ipmb_commands,
429
430	/* Retransmissions on LAN that failed. */
431	IPMI_STAT_dropped_rexmit_lan_commands,
432
433	/* This *must* remain last, add new values above this. */
434	IPMI_NUM_STATS
435};
436
437
438#define IPMI_IPMB_NUM_SEQ	64
439struct ipmi_smi {
440	struct module *owner;
441
442	/* What interface number are we? */
443	int intf_num;
444
445	struct kref refcount;
446
447	/* Set when the interface is being unregistered. */
448	bool in_shutdown;
449
450	/* Used for a list of interfaces. */
451	struct list_head link;
452
453	/*
454	 * The list of upper layers that are using me.  seq_lock write
455	 * protects this.  Read protection is with srcu.
456	 */
457	struct list_head users;
458	struct srcu_struct users_srcu;
459	atomic_t nr_users;
460	struct device_attribute nr_users_devattr;
461	struct device_attribute nr_msgs_devattr;
462
463
464	/* Used for wake ups at startup. */
465	wait_queue_head_t waitq;
466
467	/*
468	 * Prevents the interface from being unregistered when the
469	 * interface is used by being looked up through the BMC
470	 * structure.
471	 */
472	struct mutex bmc_reg_mutex;
473
474	struct bmc_device tmp_bmc;
475	struct bmc_device *bmc;
476	bool bmc_registered;
477	struct list_head bmc_link;
478	char *my_dev_name;
479	bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
480	struct work_struct bmc_reg_work;
481
482	const struct ipmi_smi_handlers *handlers;
483	void                     *send_info;
484
485	/* Driver-model device for the system interface. */
486	struct device          *si_dev;
487
488	/*
489	 * A table of sequence numbers for this interface.  We use the
490	 * sequence numbers for IPMB messages that go out of the
491	 * interface to match them up with their responses.  A routine
492	 * is called periodically to time the items in this list.
493	 */
494	spinlock_t       seq_lock;
495	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
496	int curr_seq;
497
498	/*
499	 * Messages queued for delivery.  If delivery fails (out of memory
500	 * for instance), They will stay in here to be processed later in a
501	 * periodic timer interrupt.  The tasklet is for handling received
502	 * messages directly from the handler.
503	 */
504	spinlock_t       waiting_rcv_msgs_lock;
505	struct list_head waiting_rcv_msgs;
506	atomic_t	 watchdog_pretimeouts_to_deliver;
507	struct tasklet_struct recv_tasklet;
508
509	spinlock_t             xmit_msgs_lock;
510	struct list_head       xmit_msgs;
511	struct ipmi_smi_msg    *curr_msg;
512	struct list_head       hp_xmit_msgs;
513
514	/*
515	 * The list of command receivers that are registered for commands
516	 * on this interface.
517	 */
518	struct mutex     cmd_rcvrs_mutex;
519	struct list_head cmd_rcvrs;
520
521	/*
522	 * Events that were queues because no one was there to receive
523	 * them.
524	 */
525	spinlock_t       events_lock; /* For dealing with event stuff. */
526	struct list_head waiting_events;
527	unsigned int     waiting_events_count; /* How many events in queue? */
528	char             delivering_events;
529	char             event_msg_printed;
530
531	/* How many users are waiting for events? */
532	atomic_t         event_waiters;
533	unsigned int     ticks_to_req_ev;
534
535	spinlock_t       watch_lock; /* For dealing with watch stuff below. */
536
537	/* How many users are waiting for commands? */
538	unsigned int     command_waiters;
539
540	/* How many users are waiting for watchdogs? */
541	unsigned int     watchdog_waiters;
542
543	/* How many users are waiting for message responses? */
544	unsigned int     response_waiters;
545
546	/*
547	 * Tells what the lower layer has last been asked to watch for,
548	 * messages and/or watchdogs.  Protected by watch_lock.
549	 */
550	unsigned int     last_watch_mask;
551
552	/*
553	 * The event receiver for my BMC, only really used at panic
554	 * shutdown as a place to store this.
555	 */
556	unsigned char event_receiver;
557	unsigned char event_receiver_lun;
558	unsigned char local_sel_device;
559	unsigned char local_event_generator;
560
561	/* For handling of maintenance mode. */
562	int maintenance_mode;
563	bool maintenance_mode_enable;
564	int auto_maintenance_timeout;
565	spinlock_t maintenance_mode_lock; /* Used in a timer... */
566
567	/*
568	 * If we are doing maintenance on something on IPMB, extend
569	 * the timeout time to avoid timeouts writing firmware and
570	 * such.
571	 */
572	int ipmb_maintenance_mode_timeout;
573
574	/*
575	 * A cheap hack, if this is non-null and a message to an
576	 * interface comes in with a NULL user, call this routine with
577	 * it.  Note that the message will still be freed by the
578	 * caller.  This only works on the system interface.
579	 *
580	 * Protected by bmc_reg_mutex.
581	 */
582	void (*null_user_handler)(struct ipmi_smi *intf,
583				  struct ipmi_recv_msg *msg);
584
585	/*
586	 * When we are scanning the channels for an SMI, this will
587	 * tell which channel we are scanning.
588	 */
589	int curr_channel;
590
591	/* Channel information */
592	struct ipmi_channel_set *channel_list;
593	unsigned int curr_working_cset; /* First index into the following. */
594	struct ipmi_channel_set wchannels[2];
595	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
596	bool channels_ready;
597
598	atomic_t stats[IPMI_NUM_STATS];
599
600	/*
601	 * run_to_completion duplicate of smb_info, smi_info
602	 * and ipmi_serial_info structures. Used to decrease numbers of
603	 * parameters passed by "low" level IPMI code.
604	 */
605	int run_to_completion;
606};
607#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
608
609static void __get_guid(struct ipmi_smi *intf);
610static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
611static int __ipmi_bmc_register(struct ipmi_smi *intf,
612			       struct ipmi_device_id *id,
613			       bool guid_set, guid_t *guid, int intf_num);
614static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
615
616
617/*
618 * The driver model view of the IPMI messaging driver.
619 */
620static struct platform_driver ipmidriver = {
621	.driver = {
622		.name = "ipmi",
623		.bus = &platform_bus_type
624	}
625};
626/*
627 * This mutex keeps us from adding the same BMC twice.
628 */
629static DEFINE_MUTEX(ipmidriver_mutex);
630
631static LIST_HEAD(ipmi_interfaces);
632static DEFINE_MUTEX(ipmi_interfaces_mutex);
633#define ipmi_interfaces_mutex_held() \
634	lockdep_is_held(&ipmi_interfaces_mutex)
635static struct srcu_struct ipmi_interfaces_srcu;
636
637/*
638 * List of watchers that want to know when smi's are added and deleted.
639 */
640static LIST_HEAD(smi_watchers);
641static DEFINE_MUTEX(smi_watchers_mutex);
642
643#define ipmi_inc_stat(intf, stat) \
644	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
645#define ipmi_get_stat(intf, stat) \
646	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
647
648static const char * const addr_src_to_str[] = {
649	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
650	"device-tree", "platform"
651};
652
653const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
654{
655	if (src >= SI_LAST)
656		src = 0; /* Invalid */
657	return addr_src_to_str[src];
658}
659EXPORT_SYMBOL(ipmi_addr_src_to_str);
660
661static int is_lan_addr(struct ipmi_addr *addr)
662{
663	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
664}
665
666static int is_ipmb_addr(struct ipmi_addr *addr)
667{
668	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
669}
670
671static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
672{
673	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
674}
675
676static int is_ipmb_direct_addr(struct ipmi_addr *addr)
677{
678	return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
679}
680
681static void free_recv_msg_list(struct list_head *q)
682{
683	struct ipmi_recv_msg *msg, *msg2;
684
685	list_for_each_entry_safe(msg, msg2, q, link) {
686		list_del(&msg->link);
687		ipmi_free_recv_msg(msg);
688	}
689}
690
691static void free_smi_msg_list(struct list_head *q)
692{
693	struct ipmi_smi_msg *msg, *msg2;
694
695	list_for_each_entry_safe(msg, msg2, q, link) {
696		list_del(&msg->link);
697		ipmi_free_smi_msg(msg);
698	}
699}
700
701static void clean_up_interface_data(struct ipmi_smi *intf)
702{
703	int              i;
704	struct cmd_rcvr  *rcvr, *rcvr2;
705	struct list_head list;
706
707	tasklet_kill(&intf->recv_tasklet);
708
709	free_smi_msg_list(&intf->waiting_rcv_msgs);
710	free_recv_msg_list(&intf->waiting_events);
711
712	/*
713	 * Wholesale remove all the entries from the list in the
714	 * interface and wait for RCU to know that none are in use.
715	 */
716	mutex_lock(&intf->cmd_rcvrs_mutex);
717	INIT_LIST_HEAD(&list);
718	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
719	mutex_unlock(&intf->cmd_rcvrs_mutex);
720
721	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
722		kfree(rcvr);
723
724	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
725		if ((intf->seq_table[i].inuse)
726					&& (intf->seq_table[i].recv_msg))
727			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
728	}
729}
730
731static void intf_free(struct kref *ref)
732{
733	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
734
735	clean_up_interface_data(intf);
736	kfree(intf);
737}
738
739int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
740{
741	struct ipmi_smi *intf;
742	int index, rv;
743
744	/*
745	 * Make sure the driver is actually initialized, this handles
746	 * problems with initialization order.
747	 */
748	rv = ipmi_init_msghandler();
749	if (rv)
750		return rv;
751
752	mutex_lock(&smi_watchers_mutex);
753
754	list_add(&watcher->link, &smi_watchers);
755
756	index = srcu_read_lock(&ipmi_interfaces_srcu);
757	list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
758			lockdep_is_held(&smi_watchers_mutex)) {
759		int intf_num = READ_ONCE(intf->intf_num);
760
761		if (intf_num == -1)
762			continue;
763		watcher->new_smi(intf_num, intf->si_dev);
764	}
765	srcu_read_unlock(&ipmi_interfaces_srcu, index);
766
767	mutex_unlock(&smi_watchers_mutex);
768
769	return 0;
770}
771EXPORT_SYMBOL(ipmi_smi_watcher_register);
772
773int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
774{
775	mutex_lock(&smi_watchers_mutex);
776	list_del(&watcher->link);
777	mutex_unlock(&smi_watchers_mutex);
778	return 0;
779}
780EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
781
782/*
783 * Must be called with smi_watchers_mutex held.
784 */
785static void
786call_smi_watchers(int i, struct device *dev)
787{
788	struct ipmi_smi_watcher *w;
789
790	mutex_lock(&smi_watchers_mutex);
791	list_for_each_entry(w, &smi_watchers, link) {
792		if (try_module_get(w->owner)) {
793			w->new_smi(i, dev);
794			module_put(w->owner);
795		}
796	}
797	mutex_unlock(&smi_watchers_mutex);
798}
799
800static int
801ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
802{
803	if (addr1->addr_type != addr2->addr_type)
804		return 0;
805
806	if (addr1->channel != addr2->channel)
807		return 0;
808
809	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
810		struct ipmi_system_interface_addr *smi_addr1
811		    = (struct ipmi_system_interface_addr *) addr1;
812		struct ipmi_system_interface_addr *smi_addr2
813		    = (struct ipmi_system_interface_addr *) addr2;
814		return (smi_addr1->lun == smi_addr2->lun);
815	}
816
817	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
818		struct ipmi_ipmb_addr *ipmb_addr1
819		    = (struct ipmi_ipmb_addr *) addr1;
820		struct ipmi_ipmb_addr *ipmb_addr2
821		    = (struct ipmi_ipmb_addr *) addr2;
822
823		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
824			&& (ipmb_addr1->lun == ipmb_addr2->lun));
825	}
826
827	if (is_ipmb_direct_addr(addr1)) {
828		struct ipmi_ipmb_direct_addr *daddr1
829			= (struct ipmi_ipmb_direct_addr *) addr1;
830		struct ipmi_ipmb_direct_addr *daddr2
831			= (struct ipmi_ipmb_direct_addr *) addr2;
832
833		return daddr1->slave_addr == daddr2->slave_addr &&
834			daddr1->rq_lun == daddr2->rq_lun &&
835			daddr1->rs_lun == daddr2->rs_lun;
836	}
837
838	if (is_lan_addr(addr1)) {
839		struct ipmi_lan_addr *lan_addr1
840			= (struct ipmi_lan_addr *) addr1;
841		struct ipmi_lan_addr *lan_addr2
842		    = (struct ipmi_lan_addr *) addr2;
843
844		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
845			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
846			&& (lan_addr1->session_handle
847			    == lan_addr2->session_handle)
848			&& (lan_addr1->lun == lan_addr2->lun));
849	}
850
851	return 1;
852}
853
854int ipmi_validate_addr(struct ipmi_addr *addr, int len)
855{
856	if (len < sizeof(struct ipmi_system_interface_addr))
857		return -EINVAL;
858
859	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
860		if (addr->channel != IPMI_BMC_CHANNEL)
861			return -EINVAL;
862		return 0;
863	}
864
865	if ((addr->channel == IPMI_BMC_CHANNEL)
866	    || (addr->channel >= IPMI_MAX_CHANNELS)
867	    || (addr->channel < 0))
868		return -EINVAL;
869
870	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
871		if (len < sizeof(struct ipmi_ipmb_addr))
872			return -EINVAL;
873		return 0;
874	}
875
876	if (is_ipmb_direct_addr(addr)) {
877		struct ipmi_ipmb_direct_addr *daddr = (void *) addr;
878
879		if (addr->channel != 0)
880			return -EINVAL;
881		if (len < sizeof(struct ipmi_ipmb_direct_addr))
882			return -EINVAL;
883
884		if (daddr->slave_addr & 0x01)
885			return -EINVAL;
886		if (daddr->rq_lun >= 4)
887			return -EINVAL;
888		if (daddr->rs_lun >= 4)
889			return -EINVAL;
890		return 0;
891	}
892
893	if (is_lan_addr(addr)) {
894		if (len < sizeof(struct ipmi_lan_addr))
895			return -EINVAL;
896		return 0;
897	}
898
899	return -EINVAL;
900}
901EXPORT_SYMBOL(ipmi_validate_addr);
902
903unsigned int ipmi_addr_length(int addr_type)
904{
905	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
906		return sizeof(struct ipmi_system_interface_addr);
907
908	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
909			|| (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
910		return sizeof(struct ipmi_ipmb_addr);
911
912	if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
913		return sizeof(struct ipmi_ipmb_direct_addr);
914
915	if (addr_type == IPMI_LAN_ADDR_TYPE)
916		return sizeof(struct ipmi_lan_addr);
917
918	return 0;
919}
920EXPORT_SYMBOL(ipmi_addr_length);
921
922static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
923{
924	int rv = 0;
925
926	if (!msg->user) {
927		/* Special handling for NULL users. */
928		if (intf->null_user_handler) {
929			intf->null_user_handler(intf, msg);
930		} else {
931			/* No handler, so give up. */
932			rv = -EINVAL;
933		}
934		ipmi_free_recv_msg(msg);
935	} else if (oops_in_progress) {
936		/*
937		 * If we are running in the panic context, calling the
938		 * receive handler doesn't much meaning and has a deadlock
939		 * risk.  At this moment, simply skip it in that case.
940		 */
941		ipmi_free_recv_msg(msg);
942		atomic_dec(&msg->user->nr_msgs);
943	} else {
944		int index;
945		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
946
947		if (user) {
948			atomic_dec(&user->nr_msgs);
949			user->handler->ipmi_recv_hndl(msg, user->handler_data);
950			release_ipmi_user(user, index);
951		} else {
952			/* User went away, give up. */
953			ipmi_free_recv_msg(msg);
954			rv = -EINVAL;
955		}
956	}
957
958	return rv;
959}
960
961static void deliver_local_response(struct ipmi_smi *intf,
962				   struct ipmi_recv_msg *msg)
963{
964	if (deliver_response(intf, msg))
965		ipmi_inc_stat(intf, unhandled_local_responses);
966	else
967		ipmi_inc_stat(intf, handled_local_responses);
968}
969
970static void deliver_err_response(struct ipmi_smi *intf,
971				 struct ipmi_recv_msg *msg, int err)
972{
973	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
974	msg->msg_data[0] = err;
975	msg->msg.netfn |= 1; /* Convert to a response. */
976	msg->msg.data_len = 1;
977	msg->msg.data = msg->msg_data;
978	deliver_local_response(intf, msg);
979}
980
981static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
982{
983	unsigned long iflags;
984
985	if (!intf->handlers->set_need_watch)
986		return;
987
988	spin_lock_irqsave(&intf->watch_lock, iflags);
989	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
990		intf->response_waiters++;
991
992	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
993		intf->watchdog_waiters++;
994
995	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
996		intf->command_waiters++;
997
998	if ((intf->last_watch_mask & flags) != flags) {
999		intf->last_watch_mask |= flags;
1000		intf->handlers->set_need_watch(intf->send_info,
1001					       intf->last_watch_mask);
1002	}
1003	spin_unlock_irqrestore(&intf->watch_lock, iflags);
1004}
1005
1006static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
1007{
1008	unsigned long iflags;
1009
1010	if (!intf->handlers->set_need_watch)
1011		return;
1012
1013	spin_lock_irqsave(&intf->watch_lock, iflags);
1014	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
1015		intf->response_waiters--;
1016
1017	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
1018		intf->watchdog_waiters--;
1019
1020	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
1021		intf->command_waiters--;
1022
1023	flags = 0;
1024	if (intf->response_waiters)
1025		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
1026	if (intf->watchdog_waiters)
1027		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
1028	if (intf->command_waiters)
1029		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;
1030
1031	if (intf->last_watch_mask != flags) {
1032		intf->last_watch_mask = flags;
1033		intf->handlers->set_need_watch(intf->send_info,
1034					       intf->last_watch_mask);
1035	}
1036	spin_unlock_irqrestore(&intf->watch_lock, iflags);
1037}
1038
1039/*
1040 * Find the next sequence number not being used and add the given
1041 * message with the given timeout to the sequence table.  This must be
1042 * called with the interface's seq_lock held.
1043 */
1044static int intf_next_seq(struct ipmi_smi      *intf,
1045			 struct ipmi_recv_msg *recv_msg,
1046			 unsigned long        timeout,
1047			 int                  retries,
1048			 int                  broadcast,
1049			 unsigned char        *seq,
1050			 long                 *seqid)
1051{
1052	int          rv = 0;
1053	unsigned int i;
1054
1055	if (timeout == 0)
1056		timeout = default_retry_ms;
1057	if (retries < 0)
1058		retries = default_max_retries;
1059
1060	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
1061					i = (i+1)%IPMI_IPMB_NUM_SEQ) {
1062		if (!intf->seq_table[i].inuse)
1063			break;
1064	}
1065
1066	if (!intf->seq_table[i].inuse) {
1067		intf->seq_table[i].recv_msg = recv_msg;
1068
1069		/*
1070		 * Start with the maximum timeout, when the send response
1071		 * comes in we will start the real timer.
1072		 */
1073		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
1074		intf->seq_table[i].orig_timeout = timeout;
1075		intf->seq_table[i].retries_left = retries;
1076		intf->seq_table[i].broadcast = broadcast;
1077		intf->seq_table[i].inuse = 1;
1078		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
1079		*seq = i;
1080		*seqid = intf->seq_table[i].seqid;
1081		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
1082		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1083		need_waiter(intf);
1084	} else {
1085		rv = -EAGAIN;
1086	}
1087
1088	return rv;
1089}
1090
1091/*
1092 * Return the receive message for the given sequence number and
1093 * release the sequence number so it can be reused.  Some other data
1094 * is passed in to be sure the message matches up correctly (to help
1095 * guard against message coming in after their timeout and the
1096 * sequence number being reused).
1097 */
1098static int intf_find_seq(struct ipmi_smi      *intf,
1099			 unsigned char        seq,
1100			 short                channel,
1101			 unsigned char        cmd,
1102			 unsigned char        netfn,
1103			 struct ipmi_addr     *addr,
1104			 struct ipmi_recv_msg **recv_msg)
1105{
1106	int           rv = -ENODEV;
1107	unsigned long flags;
1108
1109	if (seq >= IPMI_IPMB_NUM_SEQ)
1110		return -EINVAL;
1111
1112	spin_lock_irqsave(&intf->seq_lock, flags);
1113	if (intf->seq_table[seq].inuse) {
1114		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
1115
1116		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
1117				&& (msg->msg.netfn == netfn)
1118				&& (ipmi_addr_equal(addr, &msg->addr))) {
1119			*recv_msg = msg;
1120			intf->seq_table[seq].inuse = 0;
1121			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1122			rv = 0;
1123		}
1124	}
1125	spin_unlock_irqrestore(&intf->seq_lock, flags);
1126
1127	return rv;
1128}
1129
1130
1131/* Start the timer for a specific sequence table entry. */
1132static int intf_start_seq_timer(struct ipmi_smi *intf,
1133				long       msgid)
1134{
1135	int           rv = -ENODEV;
1136	unsigned long flags;
1137	unsigned char seq;
1138	unsigned long seqid;
1139
1140
1141	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1142
1143	spin_lock_irqsave(&intf->seq_lock, flags);
1144	/*
1145	 * We do this verification because the user can be deleted
1146	 * while a message is outstanding.
1147	 */
1148	if ((intf->seq_table[seq].inuse)
1149				&& (intf->seq_table[seq].seqid == seqid)) {
1150		struct seq_table *ent = &intf->seq_table[seq];
1151		ent->timeout = ent->orig_timeout;
1152		rv = 0;
1153	}
1154	spin_unlock_irqrestore(&intf->seq_lock, flags);
1155
1156	return rv;
1157}
1158
1159/* Got an error for the send message for a specific sequence number. */
1160static int intf_err_seq(struct ipmi_smi *intf,
1161			long         msgid,
1162			unsigned int err)
1163{
1164	int                  rv = -ENODEV;
1165	unsigned long        flags;
1166	unsigned char        seq;
1167	unsigned long        seqid;
1168	struct ipmi_recv_msg *msg = NULL;
1169
1170
1171	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1172
1173	spin_lock_irqsave(&intf->seq_lock, flags);
1174	/*
1175	 * We do this verification because the user can be deleted
1176	 * while a message is outstanding.
1177	 */
1178	if ((intf->seq_table[seq].inuse)
1179				&& (intf->seq_table[seq].seqid == seqid)) {
1180		struct seq_table *ent = &intf->seq_table[seq];
1181
1182		ent->inuse = 0;
1183		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1184		msg = ent->recv_msg;
1185		rv = 0;
1186	}
1187	spin_unlock_irqrestore(&intf->seq_lock, flags);
1188
1189	if (msg)
1190		deliver_err_response(intf, msg, err);
1191
1192	return rv;
1193}
1194
1195static void free_user_work(struct work_struct *work)
1196{
1197	struct ipmi_user *user = container_of(work, struct ipmi_user,
1198					      remove_work);
1199
1200	cleanup_srcu_struct(&user->release_barrier);
1201	vfree(user);
1202}
1203
1204int ipmi_create_user(unsigned int          if_num,
1205		     const struct ipmi_user_hndl *handler,
1206		     void                  *handler_data,
1207		     struct ipmi_user      **user)
1208{
1209	unsigned long flags;
1210	struct ipmi_user *new_user;
1211	int           rv, index;
1212	struct ipmi_smi *intf;
1213
1214	/*
1215	 * There is no module usecount here, because it's not
1216	 * required.  Since this can only be used by and called from
1217	 * other modules, they will implicitly use this module, and
1218	 * thus this can't be removed unless the other modules are
1219	 * removed.
1220	 */
1221
1222	if (handler == NULL)
1223		return -EINVAL;
1224
1225	/*
1226	 * Make sure the driver is actually initialized, this handles
1227	 * problems with initialization order.
1228	 */
1229	rv = ipmi_init_msghandler();
1230	if (rv)
1231		return rv;
1232
1233	new_user = vzalloc(sizeof(*new_user));
1234	if (!new_user)
1235		return -ENOMEM;
1236
1237	index = srcu_read_lock(&ipmi_interfaces_srcu);
1238	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1239		if (intf->intf_num == if_num)
1240			goto found;
1241	}
1242	/* Not found, return an error */
1243	rv = -EINVAL;
1244	goto out_kfree;
1245
1246 found:
1247	if (atomic_add_return(1, &intf->nr_users) > max_users) {
1248		rv = -EBUSY;
1249		goto out_kfree;
1250	}
1251
1252	INIT_WORK(&new_user->remove_work, free_user_work);
1253
1254	rv = init_srcu_struct(&new_user->release_barrier);
1255	if (rv)
1256		goto out_kfree;
1257
1258	if (!try_module_get(intf->owner)) {
1259		rv = -ENODEV;
1260		goto out_kfree;
1261	}
1262
1263	/* Note that each existing user holds a refcount to the interface. */
1264	kref_get(&intf->refcount);
1265
1266	atomic_set(&new_user->nr_msgs, 0);
1267	kref_init(&new_user->refcount);
1268	new_user->handler = handler;
1269	new_user->handler_data = handler_data;
1270	new_user->intf = intf;
1271	new_user->gets_events = false;
1272
1273	rcu_assign_pointer(new_user->self, new_user);
1274	spin_lock_irqsave(&intf->seq_lock, flags);
1275	list_add_rcu(&new_user->link, &intf->users);
1276	spin_unlock_irqrestore(&intf->seq_lock, flags);
1277	if (handler->ipmi_watchdog_pretimeout)
1278		/* User wants pretimeouts, so make sure to watch for them. */
1279		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1280	srcu_read_unlock(&ipmi_interfaces_srcu, index);
1281	*user = new_user;
1282	return 0;
1283
1284out_kfree:
1285	atomic_dec(&intf->nr_users);
1286	srcu_read_unlock(&ipmi_interfaces_srcu, index);
1287	vfree(new_user);
1288	return rv;
1289}
1290EXPORT_SYMBOL(ipmi_create_user);
1291
1292int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
1293{
1294	int rv, index;
1295	struct ipmi_smi *intf;
1296
1297	index = srcu_read_lock(&ipmi_interfaces_srcu);
1298	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1299		if (intf->intf_num == if_num)
1300			goto found;
1301	}
1302	srcu_read_unlock(&ipmi_interfaces_srcu, index);
1303
1304	/* Not found, return an error */
1305	return -EINVAL;
1306
1307found:
1308	if (!intf->handlers->get_smi_info)
1309		rv = -ENOTTY;
1310	else
1311		rv = intf->handlers->get_smi_info(intf->send_info, data);
1312	srcu_read_unlock(&ipmi_interfaces_srcu, index);
1313
1314	return rv;
1315}
1316EXPORT_SYMBOL(ipmi_get_smi_info);
1317
1318static void free_user(struct kref *ref)
1319{
1320	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
1321
1322	/* SRCU cleanup must happen in task context. */
1323	queue_work(remove_work_wq, &user->remove_work);
1324}
1325
1326static void _ipmi_destroy_user(struct ipmi_user *user)
1327{
1328	struct ipmi_smi  *intf = user->intf;
1329	int              i;
1330	unsigned long    flags;
1331	struct cmd_rcvr  *rcvr;
1332	struct cmd_rcvr  *rcvrs = NULL;
1333	struct module    *owner;
1334
1335	if (!acquire_ipmi_user(user, &i)) {
1336		/*
1337		 * The user has already been cleaned up, just make sure
1338		 * nothing is using it and return.
1339		 */
1340		synchronize_srcu(&user->release_barrier);
1341		return;
1342	}
1343
1344	rcu_assign_pointer(user->self, NULL);
1345	release_ipmi_user(user, i);
1346
1347	synchronize_srcu(&user->release_barrier);
1348
1349	if (user->handler->shutdown)
1350		user->handler->shutdown(user->handler_data);
1351
1352	if (user->handler->ipmi_watchdog_pretimeout)
1353		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1354
1355	if (user->gets_events)
1356		atomic_dec(&intf->event_waiters);
1357
1358	/* Remove the user from the interface's sequence table. */
1359	spin_lock_irqsave(&intf->seq_lock, flags);
1360	list_del_rcu(&user->link);
1361	atomic_dec(&intf->nr_users);
1362
1363	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1364		if (intf->seq_table[i].inuse
1365		    && (intf->seq_table[i].recv_msg->user == user)) {
1366			intf->seq_table[i].inuse = 0;
1367			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1368			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1369		}
1370	}
1371	spin_unlock_irqrestore(&intf->seq_lock, flags);
1372
1373	/*
1374	 * Remove the user from the command receiver's table.  First
1375	 * we build a list of everything (not using the standard link,
1376	 * since other things may be using it till we do
1377	 * synchronize_srcu()) then free everything in that list.
1378	 */
1379	mutex_lock(&intf->cmd_rcvrs_mutex);
1380	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1381				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1382		if (rcvr->user == user) {
1383			list_del_rcu(&rcvr->link);
1384			rcvr->next = rcvrs;
1385			rcvrs = rcvr;
1386		}
1387	}
1388	mutex_unlock(&intf->cmd_rcvrs_mutex);
1389	synchronize_rcu();
1390	while (rcvrs) {
1391		rcvr = rcvrs;
1392		rcvrs = rcvr->next;
1393		kfree(rcvr);
1394	}
1395
1396	owner = intf->owner;
1397	kref_put(&intf->refcount, intf_free);
1398	module_put(owner);
1399}
1400
1401int ipmi_destroy_user(struct ipmi_user *user)
1402{
1403	_ipmi_destroy_user(user);
1404
1405	kref_put(&user->refcount, free_user);
1406
1407	return 0;
1408}
1409EXPORT_SYMBOL(ipmi_destroy_user);
1410
1411int ipmi_get_version(struct ipmi_user *user,
1412		     unsigned char *major,
1413		     unsigned char *minor)
1414{
1415	struct ipmi_device_id id;
1416	int rv, index;
1417
1418	user = acquire_ipmi_user(user, &index);
1419	if (!user)
1420		return -ENODEV;
1421
1422	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
1423	if (!rv) {
1424		*major = ipmi_version_major(&id);
1425		*minor = ipmi_version_minor(&id);
1426	}
1427	release_ipmi_user(user, index);
1428
1429	return rv;
1430}
1431EXPORT_SYMBOL(ipmi_get_version);
1432
1433int ipmi_set_my_address(struct ipmi_user *user,
1434			unsigned int  channel,
1435			unsigned char address)
1436{
1437	int index, rv = 0;
1438
1439	user = acquire_ipmi_user(user, &index);
1440	if (!user)
1441		return -ENODEV;
1442
1443	if (channel >= IPMI_MAX_CHANNELS) {
1444		rv = -EINVAL;
1445	} else {
1446		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1447		user->intf->addrinfo[channel].address = address;
1448	}
1449	release_ipmi_user(user, index);
1450
1451	return rv;
1452}
1453EXPORT_SYMBOL(ipmi_set_my_address);
1454
1455int ipmi_get_my_address(struct ipmi_user *user,
1456			unsigned int  channel,
1457			unsigned char *address)
1458{
1459	int index, rv = 0;
1460
1461	user = acquire_ipmi_user(user, &index);
1462	if (!user)
1463		return -ENODEV;
1464
1465	if (channel >= IPMI_MAX_CHANNELS) {
1466		rv = -EINVAL;
1467	} else {
1468		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1469		*address = user->intf->addrinfo[channel].address;
1470	}
1471	release_ipmi_user(user, index);
1472
1473	return rv;
1474}
1475EXPORT_SYMBOL(ipmi_get_my_address);
1476
1477int ipmi_set_my_LUN(struct ipmi_user *user,
1478		    unsigned int  channel,
1479		    unsigned char LUN)
1480{
1481	int index, rv = 0;
1482
1483	user = acquire_ipmi_user(user, &index);
1484	if (!user)
1485		return -ENODEV;
1486
1487	if (channel >= IPMI_MAX_CHANNELS) {
1488		rv = -EINVAL;
1489	} else {
1490		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1491		user->intf->addrinfo[channel].lun = LUN & 0x3;
1492	}
1493	release_ipmi_user(user, index);
1494
1495	return rv;
1496}
1497EXPORT_SYMBOL(ipmi_set_my_LUN);
1498
1499int ipmi_get_my_LUN(struct ipmi_user *user,
1500		    unsigned int  channel,
1501		    unsigned char *address)
1502{
1503	int index, rv = 0;
1504
1505	user = acquire_ipmi_user(user, &index);
1506	if (!user)
1507		return -ENODEV;
1508
1509	if (channel >= IPMI_MAX_CHANNELS) {
1510		rv = -EINVAL;
1511	} else {
1512		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1513		*address = user->intf->addrinfo[channel].lun;
1514	}
1515	release_ipmi_user(user, index);
1516
1517	return rv;
1518}
1519EXPORT_SYMBOL(ipmi_get_my_LUN);
1520
1521int ipmi_get_maintenance_mode(struct ipmi_user *user)
1522{
1523	int mode, index;
1524	unsigned long flags;
1525
1526	user = acquire_ipmi_user(user, &index);
1527	if (!user)
1528		return -ENODEV;
1529
1530	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1531	mode = user->intf->maintenance_mode;
1532	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1533	release_ipmi_user(user, index);
1534
1535	return mode;
1536}
1537EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1538
1539static void maintenance_mode_update(struct ipmi_smi *intf)
1540{
1541	if (intf->handlers->set_maintenance_mode)
1542		intf->handlers->set_maintenance_mode(
1543			intf->send_info, intf->maintenance_mode_enable);
1544}
1545
1546int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
1547{
1548	int rv = 0, index;
1549	unsigned long flags;
1550	struct ipmi_smi *intf = user->intf;
1551
1552	user = acquire_ipmi_user(user, &index);
1553	if (!user)
1554		return -ENODEV;
1555
1556	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1557	if (intf->maintenance_mode != mode) {
1558		switch (mode) {
1559		case IPMI_MAINTENANCE_MODE_AUTO:
1560			intf->maintenance_mode_enable
1561				= (intf->auto_maintenance_timeout > 0);
1562			break;
1563
1564		case IPMI_MAINTENANCE_MODE_OFF:
1565			intf->maintenance_mode_enable = false;
1566			break;
1567
1568		case IPMI_MAINTENANCE_MODE_ON:
1569			intf->maintenance_mode_enable = true;
1570			break;
1571
1572		default:
1573			rv = -EINVAL;
1574			goto out_unlock;
1575		}
1576		intf->maintenance_mode = mode;
1577
1578		maintenance_mode_update(intf);
1579	}
1580 out_unlock:
1581	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1582	release_ipmi_user(user, index);
1583
1584	return rv;
1585}
1586EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1587
1588int ipmi_set_gets_events(struct ipmi_user *user, bool val)
1589{
1590	unsigned long        flags;
1591	struct ipmi_smi      *intf = user->intf;
1592	struct ipmi_recv_msg *msg, *msg2;
1593	struct list_head     msgs;
1594	int index;
1595
1596	user = acquire_ipmi_user(user, &index);
1597	if (!user)
1598		return -ENODEV;
1599
1600	INIT_LIST_HEAD(&msgs);
1601
1602	spin_lock_irqsave(&intf->events_lock, flags);
1603	if (user->gets_events == val)
1604		goto out;
1605
1606	user->gets_events = val;
1607
1608	if (val) {
1609		if (atomic_inc_return(&intf->event_waiters) == 1)
1610			need_waiter(intf);
1611	} else {
1612		atomic_dec(&intf->event_waiters);
1613	}
1614
1615	if (intf->delivering_events)
1616		/*
1617		 * Another thread is delivering events for this, so
1618		 * let it handle any new events.
1619		 */
1620		goto out;
1621
1622	/* Deliver any queued events. */
1623	while (user->gets_events && !list_empty(&intf->waiting_events)) {
1624		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1625			list_move_tail(&msg->link, &msgs);
1626		intf->waiting_events_count = 0;
1627		if (intf->event_msg_printed) {
1628			dev_warn(intf->si_dev, "Event queue no longer full\n");
1629			intf->event_msg_printed = 0;
1630		}
1631
1632		intf->delivering_events = 1;
1633		spin_unlock_irqrestore(&intf->events_lock, flags);
1634
1635		list_for_each_entry_safe(msg, msg2, &msgs, link) {
1636			msg->user = user;
1637			kref_get(&user->refcount);
1638			deliver_local_response(intf, msg);
1639		}
1640
1641		spin_lock_irqsave(&intf->events_lock, flags);
1642		intf->delivering_events = 0;
1643	}
1644
1645 out:
1646	spin_unlock_irqrestore(&intf->events_lock, flags);
1647	release_ipmi_user(user, index);
1648
1649	return 0;
1650}
1651EXPORT_SYMBOL(ipmi_set_gets_events);
1652
1653static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
1654				      unsigned char netfn,
1655				      unsigned char cmd,
1656				      unsigned char chan)
1657{
1658	struct cmd_rcvr *rcvr;
1659
1660	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1661				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1662		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1663					&& (rcvr->chans & (1 << chan)))
1664			return rcvr;
1665	}
1666	return NULL;
1667}
1668
1669static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
1670				 unsigned char netfn,
1671				 unsigned char cmd,
1672				 unsigned int  chans)
1673{
1674	struct cmd_rcvr *rcvr;
1675
1676	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1677				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1678		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1679					&& (rcvr->chans & chans))
1680			return 0;
1681	}
1682	return 1;
1683}
1684
1685int ipmi_register_for_cmd(struct ipmi_user *user,
1686			  unsigned char netfn,
1687			  unsigned char cmd,
1688			  unsigned int  chans)
1689{
1690	struct ipmi_smi *intf = user->intf;
1691	struct cmd_rcvr *rcvr;
1692	int rv = 0, index;
1693
1694	user = acquire_ipmi_user(user, &index);
1695	if (!user)
1696		return -ENODEV;
1697
1698	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1699	if (!rcvr) {
1700		rv = -ENOMEM;
1701		goto out_release;
1702	}
1703	rcvr->cmd = cmd;
1704	rcvr->netfn = netfn;
1705	rcvr->chans = chans;
1706	rcvr->user = user;
1707
1708	mutex_lock(&intf->cmd_rcvrs_mutex);
1709	/* Make sure the command/netfn is not already registered. */
1710	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1711		rv = -EBUSY;
1712		goto out_unlock;
1713	}
1714
1715	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1716
1717	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1718
1719out_unlock:
1720	mutex_unlock(&intf->cmd_rcvrs_mutex);
1721	if (rv)
1722		kfree(rcvr);
1723out_release:
1724	release_ipmi_user(user, index);
1725
1726	return rv;
1727}
1728EXPORT_SYMBOL(ipmi_register_for_cmd);
1729
1730int ipmi_unregister_for_cmd(struct ipmi_user *user,
1731			    unsigned char netfn,
1732			    unsigned char cmd,
1733			    unsigned int  chans)
1734{
1735	struct ipmi_smi *intf = user->intf;
1736	struct cmd_rcvr *rcvr;
1737	struct cmd_rcvr *rcvrs = NULL;
1738	int i, rv = -ENOENT, index;
1739
1740	user = acquire_ipmi_user(user, &index);
1741	if (!user)
1742		return -ENODEV;
1743
1744	mutex_lock(&intf->cmd_rcvrs_mutex);
1745	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1746		if (((1 << i) & chans) == 0)
1747			continue;
1748		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1749		if (rcvr == NULL)
1750			continue;
1751		if (rcvr->user == user) {
1752			rv = 0;
1753			rcvr->chans &= ~chans;
1754			if (rcvr->chans == 0) {
1755				list_del_rcu(&rcvr->link);
1756				rcvr->next = rcvrs;
1757				rcvrs = rcvr;
1758			}
1759		}
1760	}
1761	mutex_unlock(&intf->cmd_rcvrs_mutex);
1762	synchronize_rcu();
1763	release_ipmi_user(user, index);
1764	while (rcvrs) {
1765		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1766		rcvr = rcvrs;
1767		rcvrs = rcvr->next;
1768		kfree(rcvr);
1769	}
1770
1771	return rv;
1772}
1773EXPORT_SYMBOL(ipmi_unregister_for_cmd);
1774
1775unsigned char
1776ipmb_checksum(unsigned char *data, int size)
1777{
1778	unsigned char csum = 0;
1779
1780	for (; size > 0; size--, data++)
1781		csum += *data;
1782
1783	return -csum;
1784}
1785EXPORT_SYMBOL(ipmb_checksum);
1786
1787static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
1788				   struct kernel_ipmi_msg *msg,
1789				   struct ipmi_ipmb_addr *ipmb_addr,
1790				   long                  msgid,
1791				   unsigned char         ipmb_seq,
1792				   int                   broadcast,
1793				   unsigned char         source_address,
1794				   unsigned char         source_lun)
1795{
1796	int i = broadcast;
1797
1798	/* Format the IPMB header data. */
1799	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1800	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1801	smi_msg->data[2] = ipmb_addr->channel;
1802	if (broadcast)
1803		smi_msg->data[3] = 0;
1804	smi_msg->data[i+3] = ipmb_addr->slave_addr;
1805	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1806	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
1807	smi_msg->data[i+6] = source_address;
1808	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1809	smi_msg->data[i+8] = msg->cmd;
1810
1811	/* Now tack on the data to the message. */
1812	if (msg->data_len > 0)
1813		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
1814	smi_msg->data_size = msg->data_len + 9;
1815
1816	/* Now calculate the checksum and tack it on. */
1817	smi_msg->data[i+smi_msg->data_size]
1818		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
1819
1820	/*
1821	 * Add on the checksum size and the offset from the
1822	 * broadcast.
1823	 */
1824	smi_msg->data_size += 1 + i;
1825
1826	smi_msg->msgid = msgid;
1827}
1828
1829static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
1830				  struct kernel_ipmi_msg *msg,
1831				  struct ipmi_lan_addr  *lan_addr,
1832				  long                  msgid,
1833				  unsigned char         ipmb_seq,
1834				  unsigned char         source_lun)
1835{
1836	/* Format the IPMB header data. */
1837	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1838	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1839	smi_msg->data[2] = lan_addr->channel;
1840	smi_msg->data[3] = lan_addr->session_handle;
1841	smi_msg->data[4] = lan_addr->remote_SWID;
1842	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1843	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
1844	smi_msg->data[7] = lan_addr->local_SWID;
1845	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1846	smi_msg->data[9] = msg->cmd;
1847
1848	/* Now tack on the data to the message. */
1849	if (msg->data_len > 0)
1850		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
1851	smi_msg->data_size = msg->data_len + 10;
1852
1853	/* Now calculate the checksum and tack it on. */
1854	smi_msg->data[smi_msg->data_size]
1855		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
1856
1857	/*
1858	 * Add on the checksum size and the offset from the
1859	 * broadcast.
1860	 */
1861	smi_msg->data_size += 1;
1862
1863	smi_msg->msgid = msgid;
1864}
1865
1866static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
1867					     struct ipmi_smi_msg *smi_msg,
1868					     int priority)
1869{
1870	if (intf->curr_msg) {
1871		if (priority > 0)
1872			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1873		else
1874			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1875		smi_msg = NULL;
1876	} else {
1877		intf->curr_msg = smi_msg;
1878	}
1879
1880	return smi_msg;
1881}
1882
1883static void smi_send(struct ipmi_smi *intf,
1884		     const struct ipmi_smi_handlers *handlers,
1885		     struct ipmi_smi_msg *smi_msg, int priority)
1886{
1887	int run_to_completion = intf->run_to_completion;
1888	unsigned long flags = 0;
1889
1890	if (!run_to_completion)
1891		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1892	smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1893
1894	if (!run_to_completion)
1895		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1896
1897	if (smi_msg)
1898		handlers->sender(intf->send_info, smi_msg);
1899}
1900
1901static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
1902{
1903	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1904		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
1905		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
1906		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
1907}
1908
1909static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
1910			      struct ipmi_addr       *addr,
1911			      long                   msgid,
1912			      struct kernel_ipmi_msg *msg,
1913			      struct ipmi_smi_msg    *smi_msg,
1914			      struct ipmi_recv_msg   *recv_msg,
1915			      int                    retries,
1916			      unsigned int           retry_time_ms)
1917{
1918	struct ipmi_system_interface_addr *smi_addr;
1919
1920	if (msg->netfn & 1)
1921		/* Responses are not allowed to the SMI. */
1922		return -EINVAL;
1923
1924	smi_addr = (struct ipmi_system_interface_addr *) addr;
1925	if (smi_addr->lun > 3) {
1926		ipmi_inc_stat(intf, sent_invalid_commands);
1927		return -EINVAL;
1928	}
1929
1930	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1931
1932	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1933	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
1934		|| (msg->cmd == IPMI_GET_MSG_CMD)
1935		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
1936		/*
1937		 * We don't let the user do these, since we manage
1938		 * the sequence numbers.
1939		 */
1940		ipmi_inc_stat(intf, sent_invalid_commands);
1941		return -EINVAL;
1942	}
1943
1944	if (is_maintenance_mode_cmd(msg)) {
1945		unsigned long flags;
1946
1947		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1948		intf->auto_maintenance_timeout
1949			= maintenance_mode_timeout_ms;
1950		if (!intf->maintenance_mode
1951		    && !intf->maintenance_mode_enable) {
1952			intf->maintenance_mode_enable = true;
1953			maintenance_mode_update(intf);
1954		}
1955		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1956				       flags);
1957	}
1958
1959	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
1960		ipmi_inc_stat(intf, sent_invalid_commands);
1961		return -EMSGSIZE;
1962	}
1963
1964	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1965	smi_msg->data[1] = msg->cmd;
1966	smi_msg->msgid = msgid;
1967	smi_msg->user_data = recv_msg;
1968	if (msg->data_len > 0)
1969		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
1970	smi_msg->data_size = msg->data_len + 2;
1971	ipmi_inc_stat(intf, sent_local_commands);
1972
1973	return 0;
1974}
1975
1976static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
1977			   struct ipmi_addr       *addr,
1978			   long                   msgid,
1979			   struct kernel_ipmi_msg *msg,
1980			   struct ipmi_smi_msg    *smi_msg,
1981			   struct ipmi_recv_msg   *recv_msg,
1982			   unsigned char          source_address,
1983			   unsigned char          source_lun,
1984			   int                    retries,
1985			   unsigned int           retry_time_ms)
1986{
1987	struct ipmi_ipmb_addr *ipmb_addr;
1988	unsigned char ipmb_seq;
1989	long seqid;
1990	int broadcast = 0;
1991	struct ipmi_channel *chans;
1992	int rv = 0;
1993
1994	if (addr->channel >= IPMI_MAX_CHANNELS) {
1995		ipmi_inc_stat(intf, sent_invalid_commands);
1996		return -EINVAL;
1997	}
1998
1999	chans = READ_ONCE(intf->channel_list)->c;
2000
2001	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
2002		ipmi_inc_stat(intf, sent_invalid_commands);
2003		return -EINVAL;
2004	}
2005
2006	if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
2007		/*
2008		 * Broadcasts add a zero at the beginning of the
2009		 * message, but otherwise is the same as an IPMB
2010		 * address.
2011		 */
2012		addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2013		broadcast = 1;
2014		retries = 0; /* Don't retry broadcasts. */
2015	}
2016
2017	/*
2018	 * 9 for the header and 1 for the checksum, plus
2019	 * possibly one for the broadcast.
2020	 */
2021	if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
2022		ipmi_inc_stat(intf, sent_invalid_commands);
2023		return -EMSGSIZE;
2024	}
2025
2026	ipmb_addr = (struct ipmi_ipmb_addr *) addr;
2027	if (ipmb_addr->lun > 3) {
2028		ipmi_inc_stat(intf, sent_invalid_commands);
2029		return -EINVAL;
2030	}
2031
2032	memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
2033
2034	if (recv_msg->msg.netfn & 0x1) {
2035		/*
2036		 * It's a response, so use the user's sequence
2037		 * from msgid.
2038		 */
2039		ipmi_inc_stat(intf, sent_ipmb_responses);
2040		format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
2041				msgid, broadcast,
2042				source_address, source_lun);
2043
2044		/*
2045		 * Save the receive message so we can use it
2046		 * to deliver the response.
2047		 */
2048		smi_msg->user_data = recv_msg;
2049	} else {
2050		/* It's a command, so get a sequence for it. */
2051		unsigned long flags;
2052
2053		spin_lock_irqsave(&intf->seq_lock, flags);
2054
2055		if (is_maintenance_mode_cmd(msg))
2056			intf->ipmb_maintenance_mode_timeout =
2057				maintenance_mode_timeout_ms;
2058
2059		if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
2060			/* Different default in maintenance mode */
2061			retry_time_ms = default_maintenance_retry_ms;
2062
2063		/*
2064		 * Create a sequence number with a 1 second
2065		 * timeout and 4 retries.
2066		 */
2067		rv = intf_next_seq(intf,
2068				   recv_msg,
2069				   retry_time_ms,
2070				   retries,
2071				   broadcast,
2072				   &ipmb_seq,
2073				   &seqid);
2074		if (rv)
2075			/*
2076			 * We have used up all the sequence numbers,
2077			 * probably, so abort.
2078			 */
2079			goto out_err;
2080
2081		ipmi_inc_stat(intf, sent_ipmb_commands);
2082
2083		/*
2084		 * Store the sequence number in the message,
2085		 * so that when the send message response
2086		 * comes back we can start the timer.
2087		 */
2088		format_ipmb_msg(smi_msg, msg, ipmb_addr,
2089				STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2090				ipmb_seq, broadcast,
2091				source_address, source_lun);
2092
2093		/*
2094		 * Copy the message into the recv message data, so we
2095		 * can retransmit it later if necessary.
2096		 */
2097		memcpy(recv_msg->msg_data, smi_msg->data,
2098		       smi_msg->data_size);
2099		recv_msg->msg.data = recv_msg->msg_data;
2100		recv_msg->msg.data_len = smi_msg->data_size;
2101
2102		/*
2103		 * We don't unlock until here, because we need
2104		 * to copy the completed message into the
2105		 * recv_msg before we release the lock.
2106		 * Otherwise, race conditions may bite us.  I
2107		 * know that's pretty paranoid, but I prefer
2108		 * to be correct.
2109		 */
2110out_err:
2111		spin_unlock_irqrestore(&intf->seq_lock, flags);
2112	}
2113
2114	return rv;
2115}
2116
2117static int i_ipmi_req_ipmb_direct(struct ipmi_smi        *intf,
2118				  struct ipmi_addr       *addr,
2119				  long			 msgid,
2120				  struct kernel_ipmi_msg *msg,
2121				  struct ipmi_smi_msg    *smi_msg,
2122				  struct ipmi_recv_msg   *recv_msg,
2123				  unsigned char          source_lun)
2124{
2125	struct ipmi_ipmb_direct_addr *daddr;
2126	bool is_cmd = !(recv_msg->msg.netfn & 0x1);
2127
2128	if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT))
2129		return -EAFNOSUPPORT;
2130
2131	/* Responses must have a completion code. */
2132	if (!is_cmd && msg->data_len < 1) {
2133		ipmi_inc_stat(intf, sent_invalid_commands);
2134		return -EINVAL;
2135	}
2136
2137	if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) {
2138		ipmi_inc_stat(intf, sent_invalid_commands);
2139		return -EMSGSIZE;
2140	}
2141
2142	daddr = (struct ipmi_ipmb_direct_addr *) addr;
2143	if (daddr->rq_lun > 3 || daddr->rs_lun > 3) {
2144		ipmi_inc_stat(intf, sent_invalid_commands);
2145		return -EINVAL;
2146	}
2147
2148	smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
2149	smi_msg->msgid = msgid;
2150
2151	if (is_cmd) {
2152		smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun;
2153		smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun;
2154	} else {
2155		smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun;
2156		smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun;
2157	}
2158	smi_msg->data[1] = daddr->slave_addr;
2159	smi_msg->data[3] = msg->cmd;
2160
2161	memcpy(smi_msg->data + 4, msg->data, msg->data_len);
2162	smi_msg->data_size = msg->data_len + 4;
2163
2164	smi_msg->user_data = recv_msg;
2165
2166	return 0;
2167}
2168
2169static int i_ipmi_req_lan(struct ipmi_smi        *intf,
2170			  struct ipmi_addr       *addr,
2171			  long                   msgid,
2172			  struct kernel_ipmi_msg *msg,
2173			  struct ipmi_smi_msg    *smi_msg,
2174			  struct ipmi_recv_msg   *recv_msg,
2175			  unsigned char          source_lun,
2176			  int                    retries,
2177			  unsigned int           retry_time_ms)
2178{
2179	struct ipmi_lan_addr  *lan_addr;
2180	unsigned char ipmb_seq;
2181	long seqid;
2182	struct ipmi_channel *chans;
2183	int rv = 0;
2184
2185	if (addr->channel >= IPMI_MAX_CHANNELS) {
2186		ipmi_inc_stat(intf, sent_invalid_commands);
2187		return -EINVAL;
2188	}
2189
2190	chans = READ_ONCE(intf->channel_list)->c;
2191
2192	if ((chans[addr->channel].medium
2193				!= IPMI_CHANNEL_MEDIUM_8023LAN)
2194			&& (chans[addr->channel].medium
2195			    != IPMI_CHANNEL_MEDIUM_ASYNC)) {
2196		ipmi_inc_stat(intf, sent_invalid_commands);
2197		return -EINVAL;
2198	}
2199
2200	/* 11 for the header and 1 for the checksum. */
2201	if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
2202		ipmi_inc_stat(intf, sent_invalid_commands);
2203		return -EMSGSIZE;
2204	}
2205
2206	lan_addr = (struct ipmi_lan_addr *) addr;
2207	if (lan_addr->lun > 3) {
2208		ipmi_inc_stat(intf, sent_invalid_commands);
2209		return -EINVAL;
2210	}
2211
2212	memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
2213
2214	if (recv_msg->msg.netfn & 0x1) {
2215		/*
2216		 * It's a response, so use the user's sequence
2217		 * from msgid.
2218		 */
2219		ipmi_inc_stat(intf, sent_lan_responses);
2220		format_lan_msg(smi_msg, msg, lan_addr, msgid,
2221			       msgid, source_lun);
2222
2223		/*
2224		 * Save the receive message so we can use it
2225		 * to deliver the response.
2226		 */
2227		smi_msg->user_data = recv_msg;
2228	} else {
2229		/* It's a command, so get a sequence for it. */
2230		unsigned long flags;
2231
2232		spin_lock_irqsave(&intf->seq_lock, flags);
2233
2234		/*
2235		 * Create a sequence number with a 1 second
2236		 * timeout and 4 retries.
2237		 */
2238		rv = intf_next_seq(intf,
2239				   recv_msg,
2240				   retry_time_ms,
2241				   retries,
2242				   0,
2243				   &ipmb_seq,
2244				   &seqid);
2245		if (rv)
2246			/*
2247			 * We have used up all the sequence numbers,
2248			 * probably, so abort.
2249			 */
2250			goto out_err;
2251
2252		ipmi_inc_stat(intf, sent_lan_commands);
2253
2254		/*
2255		 * Store the sequence number in the message,
2256		 * so that when the send message response
2257		 * comes back we can start the timer.
2258		 */
2259		format_lan_msg(smi_msg, msg, lan_addr,
2260			       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2261			       ipmb_seq, source_lun);
2262
2263		/*
2264		 * Copy the message into the recv message data, so we
2265		 * can retransmit it later if necessary.
2266		 */
2267		memcpy(recv_msg->msg_data, smi_msg->data,
2268		       smi_msg->data_size);
2269		recv_msg->msg.data = recv_msg->msg_data;
2270		recv_msg->msg.data_len = smi_msg->data_size;
2271
2272		/*
2273		 * We don't unlock until here, because we need
2274		 * to copy the completed message into the
2275		 * recv_msg before we release the lock.
2276		 * Otherwise, race conditions may bite us.  I
2277		 * know that's pretty paranoid, but I prefer
2278		 * to be correct.
2279		 */
2280out_err:
2281		spin_unlock_irqrestore(&intf->seq_lock, flags);
2282	}
2283
2284	return rv;
2285}
2286
2287/*
2288 * Separate from ipmi_request so that the user does not have to be
2289 * supplied in certain circumstances (mainly at panic time).  If
2290 * messages are supplied, they will be freed, even if an error
2291 * occurs.
2292 */
2293static int i_ipmi_request(struct ipmi_user     *user,
2294			  struct ipmi_smi      *intf,
2295			  struct ipmi_addr     *addr,
2296			  long                 msgid,
2297			  struct kernel_ipmi_msg *msg,
2298			  void                 *user_msg_data,
2299			  void                 *supplied_smi,
2300			  struct ipmi_recv_msg *supplied_recv,
2301			  int                  priority,
2302			  unsigned char        source_address,
2303			  unsigned char        source_lun,
2304			  int                  retries,
2305			  unsigned int         retry_time_ms)
2306{
2307	struct ipmi_smi_msg *smi_msg;
2308	struct ipmi_recv_msg *recv_msg;
2309	int rv = 0;
2310
2311	if (user) {
2312		if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
2313			/* Decrement will happen at the end of the routine. */
2314			rv = -EBUSY;
2315			goto out;
2316		}
2317	}
2318
2319	if (supplied_recv)
2320		recv_msg = supplied_recv;
2321	else {
2322		recv_msg = ipmi_alloc_recv_msg();
2323		if (recv_msg == NULL) {
2324			rv = -ENOMEM;
2325			goto out;
2326		}
2327	}
2328	recv_msg->user_msg_data = user_msg_data;
2329
2330	if (supplied_smi)
2331		smi_msg = supplied_smi;
2332	else {
2333		smi_msg = ipmi_alloc_smi_msg();
2334		if (smi_msg == NULL) {
2335			if (!supplied_recv)
2336				ipmi_free_recv_msg(recv_msg);
2337			rv = -ENOMEM;
2338			goto out;
2339		}
2340	}
2341
2342	rcu_read_lock();
2343	if (intf->in_shutdown) {
2344		rv = -ENODEV;
2345		goto out_err;
2346	}
2347
2348	recv_msg->user = user;
2349	if (user)
2350		/* The put happens when the message is freed. */
2351		kref_get(&user->refcount);
2352	recv_msg->msgid = msgid;
2353	/*
2354	 * Store the message to send in the receive message so timeout
2355	 * responses can get the proper response data.
2356	 */
2357	recv_msg->msg = *msg;
2358
2359	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
2360		rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
2361					recv_msg, retries, retry_time_ms);
2362	} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
2363		rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
2364				     source_address, source_lun,
2365				     retries, retry_time_ms);
2366	} else if (is_ipmb_direct_addr(addr)) {
2367		rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg,
2368					    recv_msg, source_lun);
2369	} else if (is_lan_addr(addr)) {
2370		rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
2371				    source_lun, retries, retry_time_ms);
2372	} else {
2373	    /* Unknown address type. */
2374		ipmi_inc_stat(intf, sent_invalid_commands);
2375		rv = -EINVAL;
2376	}
2377
2378	if (rv) {
2379out_err:
2380		ipmi_free_smi_msg(smi_msg);
2381		ipmi_free_recv_msg(recv_msg);
2382	} else {
2383		dev_dbg(intf->si_dev, "Send: %*ph\n",
2384			smi_msg->data_size, smi_msg->data);
2385
2386		smi_send(intf, intf->handlers, smi_msg, priority);
2387	}
2388	rcu_read_unlock();
2389
2390out:
2391	if (rv && user)
2392		atomic_dec(&user->nr_msgs);
2393	return rv;
2394}
2395
2396static int check_addr(struct ipmi_smi  *intf,
2397		      struct ipmi_addr *addr,
2398		      unsigned char    *saddr,
2399		      unsigned char    *lun)
2400{
2401	if (addr->channel >= IPMI_MAX_CHANNELS)
2402		return -EINVAL;
2403	addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
2404	*lun = intf->addrinfo[addr->channel].lun;
2405	*saddr = intf->addrinfo[addr->channel].address;
2406	return 0;
2407}
2408
2409int ipmi_request_settime(struct ipmi_user *user,
2410			 struct ipmi_addr *addr,
2411			 long             msgid,
2412			 struct kernel_ipmi_msg  *msg,
2413			 void             *user_msg_data,
2414			 int              priority,
2415			 int              retries,
2416			 unsigned int     retry_time_ms)
2417{
2418	unsigned char saddr = 0, lun = 0;
2419	int rv, index;
2420
2421	if (!user)
2422		return -EINVAL;
2423
2424	user = acquire_ipmi_user(user, &index);
2425	if (!user)
2426		return -ENODEV;
2427
2428	rv = check_addr(user->intf, addr, &saddr, &lun);
2429	if (!rv)
2430		rv = i_ipmi_request(user,
2431				    user->intf,
2432				    addr,
2433				    msgid,
2434				    msg,
2435				    user_msg_data,
2436				    NULL, NULL,
2437				    priority,
2438				    saddr,
2439				    lun,
2440				    retries,
2441				    retry_time_ms);
2442
2443	release_ipmi_user(user, index);
2444	return rv;
2445}
2446EXPORT_SYMBOL(ipmi_request_settime);
2447
2448int ipmi_request_supply_msgs(struct ipmi_user     *user,
2449			     struct ipmi_addr     *addr,
2450			     long                 msgid,
2451			     struct kernel_ipmi_msg *msg,
2452			     void                 *user_msg_data,
2453			     void                 *supplied_smi,
2454			     struct ipmi_recv_msg *supplied_recv,
2455			     int                  priority)
2456{
2457	unsigned char saddr = 0, lun = 0;
2458	int rv, index;
2459
2460	if (!user)
2461		return -EINVAL;
2462
2463	user = acquire_ipmi_user(user, &index);
2464	if (!user)
2465		return -ENODEV;
2466
2467	rv = check_addr(user->intf, addr, &saddr, &lun);
2468	if (!rv)
2469		rv = i_ipmi_request(user,
2470				    user->intf,
2471				    addr,
2472				    msgid,
2473				    msg,
2474				    user_msg_data,
2475				    supplied_smi,
2476				    supplied_recv,
2477				    priority,
2478				    saddr,
2479				    lun,
2480				    -1, 0);
2481
2482	release_ipmi_user(user, index);
2483	return rv;
2484}
2485EXPORT_SYMBOL(ipmi_request_supply_msgs);
2486
2487static void bmc_device_id_handler(struct ipmi_smi *intf,
2488				  struct ipmi_recv_msg *msg)
2489{
2490	int rv;
2491
2492	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2493			|| (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2494			|| (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
2495		dev_warn(intf->si_dev,
2496			 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
2497			 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
2498		return;
2499	}
2500
2501	if (msg->msg.data[0]) {
2502		dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n",
2503			 msg->msg.data[0]);
2504		intf->bmc->dyn_id_set = 0;
2505		goto out;
2506	}
2507
2508	rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
2509			msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
2510	if (rv) {
2511		dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
2512		/* record completion code when error */
2513		intf->bmc->cc = msg->msg.data[0];
2514		intf->bmc->dyn_id_set = 0;
2515	} else {
2516		/*
2517		 * Make sure the id data is available before setting
2518		 * dyn_id_set.
2519		 */
2520		smp_wmb();
2521		intf->bmc->dyn_id_set = 1;
2522	}
2523out:
2524	wake_up(&intf->waitq);
2525}
2526
2527static int
2528send_get_device_id_cmd(struct ipmi_smi *intf)
2529{
2530	struct ipmi_system_interface_addr si;
2531	struct kernel_ipmi_msg msg;
2532
2533	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2534	si.channel = IPMI_BMC_CHANNEL;
2535	si.lun = 0;
2536
2537	msg.netfn = IPMI_NETFN_APP_REQUEST;
2538	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2539	msg.data = NULL;
2540	msg.data_len = 0;
2541
2542	return i_ipmi_request(NULL,
2543			      intf,
2544			      (struct ipmi_addr *) &si,
2545			      0,
2546			      &msg,
2547			      intf,
2548			      NULL,
2549			      NULL,
2550			      0,
2551			      intf->addrinfo[0].address,
2552			      intf->addrinfo[0].lun,
2553			      -1, 0);
2554}
2555
2556static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2557{
2558	int rv;
2559	unsigned int retry_count = 0;
2560
2561	intf->null_user_handler = bmc_device_id_handler;
2562
2563retry:
2564	bmc->cc = 0;
2565	bmc->dyn_id_set = 2;
2566
2567	rv = send_get_device_id_cmd(intf);
2568	if (rv)
2569		goto out_reset_handler;
2570
2571	wait_event(intf->waitq, bmc->dyn_id_set != 2);
2572
2573	if (!bmc->dyn_id_set) {
2574		if (bmc->cc != IPMI_CC_NO_ERROR &&
2575		    ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
2576			msleep(500);
2577			dev_warn(intf->si_dev,
2578			    "BMC returned 0x%2.2x, retry get bmc device id\n",
2579			    bmc->cc);
2580			goto retry;
2581		}
2582
2583		rv = -EIO; /* Something went wrong in the fetch. */
2584	}
2585
2586	/* dyn_id_set makes the id data available. */
2587	smp_rmb();
2588
2589out_reset_handler:
2590	intf->null_user_handler = NULL;
2591
2592	return rv;
2593}
2594
2595/*
2596 * Fetch the device id for the bmc/interface.  You must pass in either
2597 * bmc or intf, this code will get the other one.  If the data has
2598 * been recently fetched, this will just use the cached data.  Otherwise
2599 * it will run a new fetch.
2600 *
2601 * Except for the first time this is called (in ipmi_add_smi()),
2602 * this will always return good data;
2603 */
2604static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2605			       struct ipmi_device_id *id,
2606			       bool *guid_set, guid_t *guid, int intf_num)
2607{
2608	int rv = 0;
2609	int prev_dyn_id_set, prev_guid_set;
2610	bool intf_set = intf != NULL;
2611
2612	if (!intf) {
2613		mutex_lock(&bmc->dyn_mutex);
2614retry_bmc_lock:
2615		if (list_empty(&bmc->intfs)) {
2616			mutex_unlock(&bmc->dyn_mutex);
2617			return -ENOENT;
2618		}
2619		intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2620					bmc_link);
2621		kref_get(&intf->refcount);
2622		mutex_unlock(&bmc->dyn_mutex);
2623		mutex_lock(&intf->bmc_reg_mutex);
2624		mutex_lock(&bmc->dyn_mutex);
2625		if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2626					     bmc_link)) {
2627			mutex_unlock(&intf->bmc_reg_mutex);
2628			kref_put(&intf->refcount, intf_free);
2629			goto retry_bmc_lock;
2630		}
2631	} else {
2632		mutex_lock(&intf->bmc_reg_mutex);
2633		bmc = intf->bmc;
2634		mutex_lock(&bmc->dyn_mutex);
2635		kref_get(&intf->refcount);
2636	}
2637
2638	/* If we have a valid and current ID, just return that. */
2639	if (intf->in_bmc_register ||
2640	    (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
2641		goto out_noprocessing;
2642
2643	prev_guid_set = bmc->dyn_guid_set;
2644	__get_guid(intf);
2645
2646	prev_dyn_id_set = bmc->dyn_id_set;
2647	rv = __get_device_id(intf, bmc);
2648	if (rv)
2649		goto out;
2650
2651	/*
2652	 * The guid, device id, manufacturer id, and product id should
2653	 * not change on a BMC.  If it does we have to do some dancing.
2654	 */
2655	if (!intf->bmc_registered
2656	    || (!prev_guid_set && bmc->dyn_guid_set)
2657	    || (!prev_dyn_id_set && bmc->dyn_id_set)
2658	    || (prev_guid_set && bmc->dyn_guid_set
2659		&& !guid_equal(&bmc->guid, &bmc->fetch_guid))
2660	    || bmc->id.device_id != bmc->fetch_id.device_id
2661	    || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2662	    || bmc->id.product_id != bmc->fetch_id.product_id) {
2663		struct ipmi_device_id id = bmc->fetch_id;
2664		int guid_set = bmc->dyn_guid_set;
2665		guid_t guid;
2666
2667		guid = bmc->fetch_guid;
2668		mutex_unlock(&bmc->dyn_mutex);
2669
2670		__ipmi_bmc_unregister(intf);
2671		/* Fill in the temporary BMC for good measure. */
2672		intf->bmc->id = id;
2673		intf->bmc->dyn_guid_set = guid_set;
2674		intf->bmc->guid = guid;
2675		if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2676			need_waiter(intf); /* Retry later on an error. */
2677		else
2678			__scan_channels(intf, &id);
2679
2680
2681		if (!intf_set) {
2682			/*
2683			 * We weren't given the interface on the
2684			 * command line, so restart the operation on
2685			 * the next interface for the BMC.
2686			 */
2687			mutex_unlock(&intf->bmc_reg_mutex);
2688			mutex_lock(&bmc->dyn_mutex);
2689			goto retry_bmc_lock;
2690		}
2691
2692		/* We have a new BMC, set it up. */
2693		bmc = intf->bmc;
2694		mutex_lock(&bmc->dyn_mutex);
2695		goto out_noprocessing;
2696	} else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2697		/* Version info changes, scan the channels again. */
2698		__scan_channels(intf, &bmc->fetch_id);
2699
2700	bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2701
2702out:
2703	if (rv && prev_dyn_id_set) {
2704		rv = 0; /* Ignore failures if we have previous data. */
2705		bmc->dyn_id_set = prev_dyn_id_set;
2706	}
2707	if (!rv) {
2708		bmc->id = bmc->fetch_id;
2709		if (bmc->dyn_guid_set)
2710			bmc->guid = bmc->fetch_guid;
2711		else if (prev_guid_set)
2712			/*
2713			 * The guid used to be valid and it failed to fetch,
2714			 * just use the cached value.
2715			 */
2716			bmc->dyn_guid_set = prev_guid_set;
2717	}
2718out_noprocessing:
2719	if (!rv) {
2720		if (id)
2721			*id = bmc->id;
2722
2723		if (guid_set)
2724			*guid_set = bmc->dyn_guid_set;
2725
2726		if (guid && bmc->dyn_guid_set)
2727			*guid =  bmc->guid;
2728	}
2729
2730	mutex_unlock(&bmc->dyn_mutex);
2731	mutex_unlock(&intf->bmc_reg_mutex);
2732
2733	kref_put(&intf->refcount, intf_free);
2734	return rv;
2735}
2736
2737static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2738			     struct ipmi_device_id *id,
2739			     bool *guid_set, guid_t *guid)
2740{
2741	return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
2742}
2743
2744static ssize_t device_id_show(struct device *dev,
2745			      struct device_attribute *attr,
2746			      char *buf)
2747{
2748	struct bmc_device *bmc = to_bmc_device(dev);
2749	struct ipmi_device_id id;
2750	int rv;
2751
2752	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2753	if (rv)
2754		return rv;
2755
2756	return sysfs_emit(buf, "%u\n", id.device_id);
2757}
2758static DEVICE_ATTR_RO(device_id);
2759
2760static ssize_t provides_device_sdrs_show(struct device *dev,
2761					 struct device_attribute *attr,
2762					 char *buf)
2763{
2764	struct bmc_device *bmc = to_bmc_device(dev);
2765	struct ipmi_device_id id;
2766	int rv;
2767
2768	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2769	if (rv)
2770		return rv;
2771
2772	return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7);
2773}
2774static DEVICE_ATTR_RO(provides_device_sdrs);
2775
2776static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2777			     char *buf)
2778{
2779	struct bmc_device *bmc = to_bmc_device(dev);
2780	struct ipmi_device_id id;
2781	int rv;
2782
2783	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2784	if (rv)
2785		return rv;
2786
2787	return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F);
2788}
2789static DEVICE_ATTR_RO(revision);
2790
2791static ssize_t firmware_revision_show(struct device *dev,
2792				      struct device_attribute *attr,
2793				      char *buf)
2794{
2795	struct bmc_device *bmc = to_bmc_device(dev);
2796	struct ipmi_device_id id;
2797	int rv;
2798
2799	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2800	if (rv)
2801		return rv;
2802
2803	return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1,
2804			id.firmware_revision_2);
2805}
2806static DEVICE_ATTR_RO(firmware_revision);
2807
2808static ssize_t ipmi_version_show(struct device *dev,
2809				 struct device_attribute *attr,
2810				 char *buf)
2811{
2812	struct bmc_device *bmc = to_bmc_device(dev);
2813	struct ipmi_device_id id;
2814	int rv;
2815
2816	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2817	if (rv)
2818		return rv;
2819
2820	return sysfs_emit(buf, "%u.%u\n",
2821			ipmi_version_major(&id),
2822			ipmi_version_minor(&id));
2823}
2824static DEVICE_ATTR_RO(ipmi_version);
2825
2826static ssize_t add_dev_support_show(struct device *dev,
2827				    struct device_attribute *attr,
2828				    char *buf)
2829{
2830	struct bmc_device *bmc = to_bmc_device(dev);
2831	struct ipmi_device_id id;
2832	int rv;
2833
2834	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2835	if (rv)
2836		return rv;
2837
2838	return sysfs_emit(buf, "0x%02x\n", id.additional_device_support);
2839}
2840static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2841		   NULL);
2842
2843static ssize_t manufacturer_id_show(struct device *dev,
2844				    struct device_attribute *attr,
2845				    char *buf)
2846{
2847	struct bmc_device *bmc = to_bmc_device(dev);
2848	struct ipmi_device_id id;
2849	int rv;
2850
2851	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2852	if (rv)
2853		return rv;
2854
2855	return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id);
2856}
2857static DEVICE_ATTR_RO(manufacturer_id);
2858
2859static ssize_t product_id_show(struct device *dev,
2860			       struct device_attribute *attr,
2861			       char *buf)
2862{
2863	struct bmc_device *bmc = to_bmc_device(dev);
2864	struct ipmi_device_id id;
2865	int rv;
2866
2867	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2868	if (rv)
2869		return rv;
2870
2871	return sysfs_emit(buf, "0x%4.4x\n", id.product_id);
2872}
2873static DEVICE_ATTR_RO(product_id);
2874
2875static ssize_t aux_firmware_rev_show(struct device *dev,
2876				     struct device_attribute *attr,
2877				     char *buf)
2878{
2879	struct bmc_device *bmc = to_bmc_device(dev);
2880	struct ipmi_device_id id;
2881	int rv;
2882
2883	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2884	if (rv)
2885		return rv;
2886
2887	return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2888			id.aux_firmware_revision[3],
2889			id.aux_firmware_revision[2],
2890			id.aux_firmware_revision[1],
2891			id.aux_firmware_revision[0]);
2892}
2893static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2894
2895static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2896			 char *buf)
2897{
2898	struct bmc_device *bmc = to_bmc_device(dev);
2899	bool guid_set;
2900	guid_t guid;
2901	int rv;
2902
2903	rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
2904	if (rv)
2905		return rv;
2906	if (!guid_set)
2907		return -ENOENT;
2908
2909	return sysfs_emit(buf, "%pUl\n", &guid);
2910}
2911static DEVICE_ATTR_RO(guid);
2912
2913static struct attribute *bmc_dev_attrs[] = {
2914	&dev_attr_device_id.attr,
2915	&dev_attr_provides_device_sdrs.attr,
2916	&dev_attr_revision.attr,
2917	&dev_attr_firmware_revision.attr,
2918	&dev_attr_ipmi_version.attr,
2919	&dev_attr_additional_device_support.attr,
2920	&dev_attr_manufacturer_id.attr,
2921	&dev_attr_product_id.attr,
2922	&dev_attr_aux_firmware_revision.attr,
2923	&dev_attr_guid.attr,
2924	NULL
2925};
2926
2927static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2928				       struct attribute *attr, int idx)
2929{
2930	struct device *dev = kobj_to_dev(kobj);
2931	struct bmc_device *bmc = to_bmc_device(dev);
2932	umode_t mode = attr->mode;
2933	int rv;
2934
2935	if (attr == &dev_attr_aux_firmware_revision.attr) {
2936		struct ipmi_device_id id;
2937
2938		rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2939		return (!rv && id.aux_firmware_revision_set) ? mode : 0;
2940	}
2941	if (attr == &dev_attr_guid.attr) {
2942		bool guid_set;
2943
2944		rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
2945		return (!rv && guid_set) ? mode : 0;
2946	}
2947	return mode;
2948}
2949
2950static const struct attribute_group bmc_dev_attr_group = {
2951	.attrs		= bmc_dev_attrs,
2952	.is_visible	= bmc_dev_attr_is_visible,
2953};
2954
2955static const struct attribute_group *bmc_dev_attr_groups[] = {
2956	&bmc_dev_attr_group,
2957	NULL
2958};
2959
2960static const struct device_type bmc_device_type = {
2961	.groups		= bmc_dev_attr_groups,
2962};
2963
2964static int __find_bmc_guid(struct device *dev, const void *data)
2965{
2966	const guid_t *guid = data;
2967	struct bmc_device *bmc;
2968	int rv;
2969
2970	if (dev->type != &bmc_device_type)
2971		return 0;
2972
2973	bmc = to_bmc_device(dev);
2974	rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
2975	if (rv)
2976		rv = kref_get_unless_zero(&bmc->usecount);
2977	return rv;
2978}
2979
2980/*
2981 * Returns with the bmc's usecount incremented, if it is non-NULL.
2982 */
2983static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2984					     guid_t *guid)
2985{
2986	struct device *dev;
2987	struct bmc_device *bmc = NULL;
2988
2989	dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2990	if (dev) {
2991		bmc = to_bmc_device(dev);
2992		put_device(dev);
2993	}
2994	return bmc;
2995}
2996
2997struct prod_dev_id {
2998	unsigned int  product_id;
2999	unsigned char device_id;
3000};
3001
3002static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
3003{
3004	const struct prod_dev_id *cid = data;
3005	struct bmc_device *bmc;
3006	int rv;
3007
3008	if (dev->type != &bmc_device_type)
3009		return 0;
3010
3011	bmc = to_bmc_device(dev);
3012	rv = (bmc->id.product_id == cid->product_id
3013	      && bmc->id.device_id == cid->device_id);
3014	if (rv)
3015		rv = kref_get_unless_zero(&bmc->usecount);
3016	return rv;
3017}
3018
3019/*
3020 * Returns with the bmc's usecount incremented, if it is non-NULL.
3021 */
3022static struct bmc_device *ipmi_find_bmc_prod_dev_id(
3023	struct device_driver *drv,
3024	unsigned int product_id, unsigned char device_id)
3025{
3026	struct prod_dev_id id = {
3027		.product_id = product_id,
3028		.device_id = device_id,
3029	};
3030	struct device *dev;
3031	struct bmc_device *bmc = NULL;
3032
3033	dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
3034	if (dev) {
3035		bmc = to_bmc_device(dev);
3036		put_device(dev);
3037	}
3038	return bmc;
3039}
3040
3041static DEFINE_IDA(ipmi_bmc_ida);
3042
3043static void
3044release_bmc_device(struct device *dev)
3045{
3046	kfree(to_bmc_device(dev));
3047}
3048
3049static void cleanup_bmc_work(struct work_struct *work)
3050{
3051	struct bmc_device *bmc = container_of(work, struct bmc_device,
3052					      remove_work);
3053	int id = bmc->pdev.id; /* Unregister overwrites id */
3054
3055	platform_device_unregister(&bmc->pdev);
3056	ida_free(&ipmi_bmc_ida, id);
3057}
3058
3059static void
3060cleanup_bmc_device(struct kref *ref)
3061{
3062	struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
3063
3064	/*
3065	 * Remove the platform device in a work queue to avoid issues
3066	 * with removing the device attributes while reading a device
3067	 * attribute.
3068	 */
3069	queue_work(remove_work_wq, &bmc->remove_work);
3070}
3071
3072/*
3073 * Must be called with intf->bmc_reg_mutex held.
3074 */
3075static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
3076{
3077	struct bmc_device *bmc = intf->bmc;
3078
3079	if (!intf->bmc_registered)
3080		return;
3081
3082	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3083	sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
3084	kfree(intf->my_dev_name);
3085	intf->my_dev_name = NULL;
3086
3087	mutex_lock(&bmc->dyn_mutex);
3088	list_del(&intf->bmc_link);
3089	mutex_unlock(&bmc->dyn_mutex);
3090	intf->bmc = &intf->tmp_bmc;
3091	kref_put(&bmc->usecount, cleanup_bmc_device);
3092	intf->bmc_registered = false;
3093}
3094
3095static void ipmi_bmc_unregister(struct ipmi_smi *intf)
3096{
3097	mutex_lock(&intf->bmc_reg_mutex);
3098	__ipmi_bmc_unregister(intf);
3099	mutex_unlock(&intf->bmc_reg_mutex);
3100}
3101
3102/*
3103 * Must be called with intf->bmc_reg_mutex held.
3104 */
3105static int __ipmi_bmc_register(struct ipmi_smi *intf,
3106			       struct ipmi_device_id *id,
3107			       bool guid_set, guid_t *guid, int intf_num)
3108{
3109	int               rv;
3110	struct bmc_device *bmc;
3111	struct bmc_device *old_bmc;
3112
3113	/*
3114	 * platform_device_register() can cause bmc_reg_mutex to
3115	 * be claimed because of the is_visible functions of
3116	 * the attributes.  Eliminate possible recursion and
3117	 * release the lock.
3118	 */
3119	intf->in_bmc_register = true;
3120	mutex_unlock(&intf->bmc_reg_mutex);
3121
3122	/*
3123	 * Try to find if there is an bmc_device struct
3124	 * representing the interfaced BMC already
3125	 */
3126	mutex_lock(&ipmidriver_mutex);
3127	if (guid_set)
3128		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
3129	else
3130		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
3131						    id->product_id,
3132						    id->device_id);
3133
3134	/*
3135	 * If there is already an bmc_device, free the new one,
3136	 * otherwise register the new BMC device
3137	 */
3138	if (old_bmc) {
3139		bmc = old_bmc;
3140		/*
3141		 * Note: old_bmc already has usecount incremented by
3142		 * the BMC find functions.
3143		 */
3144		intf->bmc = old_bmc;
3145		mutex_lock(&bmc->dyn_mutex);
3146		list_add_tail(&intf->bmc_link, &bmc->intfs);
3147		mutex_unlock(&bmc->dyn_mutex);
3148
3149		dev_info(intf->si_dev,
3150			 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3151			 bmc->id.manufacturer_id,
3152			 bmc->id.product_id,
3153			 bmc->id.device_id);
3154	} else {
3155		bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
3156		if (!bmc) {
3157			rv = -ENOMEM;
3158			goto out;
3159		}
3160		INIT_LIST_HEAD(&bmc->intfs);
3161		mutex_init(&bmc->dyn_mutex);
3162		INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
3163
3164		bmc->id = *id;
3165		bmc->dyn_id_set = 1;
3166		bmc->dyn_guid_set = guid_set;
3167		bmc->guid = *guid;
3168		bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
3169
3170		bmc->pdev.name = "ipmi_bmc";
3171
3172		rv = ida_alloc(&ipmi_bmc_ida, GFP_KERNEL);
3173		if (rv < 0) {
3174			kfree(bmc);
3175			goto out;
3176		}
3177
3178		bmc->pdev.dev.driver = &ipmidriver.driver;
3179		bmc->pdev.id = rv;
3180		bmc->pdev.dev.release = release_bmc_device;
3181		bmc->pdev.dev.type = &bmc_device_type;
3182		kref_init(&bmc->usecount);
3183
3184		intf->bmc = bmc;
3185		mutex_lock(&bmc->dyn_mutex);
3186		list_add_tail(&intf->bmc_link, &bmc->intfs);
3187		mutex_unlock(&bmc->dyn_mutex);
3188
3189		rv = platform_device_register(&bmc->pdev);
3190		if (rv) {
3191			dev_err(intf->si_dev,
3192				"Unable to register bmc device: %d\n",
3193				rv);
3194			goto out_list_del;
3195		}
3196
3197		dev_info(intf->si_dev,
3198			 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3199			 bmc->id.manufacturer_id,
3200			 bmc->id.product_id,
3201			 bmc->id.device_id);
3202	}
3203
3204	/*
3205	 * create symlink from system interface device to bmc device
3206	 * and back.
3207	 */
3208	rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
3209	if (rv) {
3210		dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
3211		goto out_put_bmc;
3212	}
3213
3214	if (intf_num == -1)
3215		intf_num = intf->intf_num;
3216	intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
3217	if (!intf->my_dev_name) {
3218		rv = -ENOMEM;
3219		dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
3220			rv);
3221		goto out_unlink1;
3222	}
3223
3224	rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
3225			       intf->my_dev_name);
3226	if (rv) {
3227		dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
3228			rv);
3229		goto out_free_my_dev_name;
3230	}
3231
3232	intf->bmc_registered = true;
3233
3234out:
3235	mutex_unlock(&ipmidriver_mutex);
3236	mutex_lock(&intf->bmc_reg_mutex);
3237	intf->in_bmc_register = false;
3238	return rv;
3239
3240
3241out_free_my_dev_name:
3242	kfree(intf->my_dev_name);
3243	intf->my_dev_name = NULL;
3244
3245out_unlink1:
3246	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3247
3248out_put_bmc:
3249	mutex_lock(&bmc->dyn_mutex);
3250	list_del(&intf->bmc_link);
3251	mutex_unlock(&bmc->dyn_mutex);
3252	intf->bmc = &intf->tmp_bmc;
3253	kref_put(&bmc->usecount, cleanup_bmc_device);
3254	goto out;
3255
3256out_list_del:
3257	mutex_lock(&bmc->dyn_mutex);
3258	list_del(&intf->bmc_link);
3259	mutex_unlock(&bmc->dyn_mutex);
3260	intf->bmc = &intf->tmp_bmc;
3261	put_device(&bmc->pdev.dev);
3262	goto out;
3263}
3264
3265static int
3266send_guid_cmd(struct ipmi_smi *intf, int chan)
3267{
3268	struct kernel_ipmi_msg            msg;
3269	struct ipmi_system_interface_addr si;
3270
3271	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3272	si.channel = IPMI_BMC_CHANNEL;
3273	si.lun = 0;
3274
3275	msg.netfn = IPMI_NETFN_APP_REQUEST;
3276	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
3277	msg.data = NULL;
3278	msg.data_len = 0;
3279	return i_ipmi_request(NULL,
3280			      intf,
3281			      (struct ipmi_addr *) &si,
3282			      0,
3283			      &msg,
3284			      intf,
3285			      NULL,
3286			      NULL,
3287			      0,
3288			      intf->addrinfo[0].address,
3289			      intf->addrinfo[0].lun,
3290			      -1, 0);
3291}
3292
3293static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3294{
3295	struct bmc_device *bmc = intf->bmc;
3296
3297	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3298	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
3299	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
3300		/* Not for me */
3301		return;
3302
3303	if (msg->msg.data[0] != 0) {
3304		/* Error from getting the GUID, the BMC doesn't have one. */
3305		bmc->dyn_guid_set = 0;
3306		goto out;
3307	}
3308
3309	if (msg->msg.data_len < UUID_SIZE + 1) {
3310		bmc->dyn_guid_set = 0;
3311		dev_warn(intf->si_dev,
3312			 "The GUID response from the BMC was too short, it was %d but should have been %d.  Assuming GUID is not available.\n",
3313			 msg->msg.data_len, UUID_SIZE + 1);
3314		goto out;
3315	}
3316
3317	import_guid(&bmc->fetch_guid, msg->msg.data + 1);
3318	/*
3319	 * Make sure the guid data is available before setting
3320	 * dyn_guid_set.
3321	 */
3322	smp_wmb();
3323	bmc->dyn_guid_set = 1;
3324 out:
3325	wake_up(&intf->waitq);
3326}
3327
3328static void __get_guid(struct ipmi_smi *intf)
3329{
3330	int rv;
3331	struct bmc_device *bmc = intf->bmc;
3332
3333	bmc->dyn_guid_set = 2;
3334	intf->null_user_handler = guid_handler;
3335	rv = send_guid_cmd(intf, 0);
3336	if (rv)
3337		/* Send failed, no GUID available. */
3338		bmc->dyn_guid_set = 0;
3339	else
3340		wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3341
3342	/* dyn_guid_set makes the guid data available. */
3343	smp_rmb();
3344
3345	intf->null_user_handler = NULL;
3346}
3347
3348static int
3349send_channel_info_cmd(struct ipmi_smi *intf, int chan)
3350{
3351	struct kernel_ipmi_msg            msg;
3352	unsigned char                     data[1];
3353	struct ipmi_system_interface_addr si;
3354
3355	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3356	si.channel = IPMI_BMC_CHANNEL;
3357	si.lun = 0;
3358
3359	msg.netfn = IPMI_NETFN_APP_REQUEST;
3360	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
3361	msg.data = data;
3362	msg.data_len = 1;
3363	data[0] = chan;
3364	return i_ipmi_request(NULL,
3365			      intf,
3366			      (struct ipmi_addr *) &si,
3367			      0,
3368			      &msg,
3369			      intf,
3370			      NULL,
3371			      NULL,
3372			      0,
3373			      intf->addrinfo[0].address,
3374			      intf->addrinfo[0].lun,
3375			      -1, 0);
3376}
3377
3378static void
3379channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3380{
3381	int rv = 0;
3382	int ch;
3383	unsigned int set = intf->curr_working_cset;
3384	struct ipmi_channel *chans;
3385
3386	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3387	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3388	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
3389		/* It's the one we want */
3390		if (msg->msg.data[0] != 0) {
3391			/* Got an error from the channel, just go on. */
3392			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
3393				/*
3394				 * If the MC does not support this
3395				 * command, that is legal.  We just
3396				 * assume it has one IPMB at channel
3397				 * zero.
3398				 */
3399				intf->wchannels[set].c[0].medium
3400					= IPMI_CHANNEL_MEDIUM_IPMB;
3401				intf->wchannels[set].c[0].protocol
3402					= IPMI_CHANNEL_PROTOCOL_IPMB;
3403
3404				intf->channel_list = intf->wchannels + set;
3405				intf->channels_ready = true;
3406				wake_up(&intf->waitq);
3407				goto out;
3408			}
3409			goto next_channel;
3410		}
3411		if (msg->msg.data_len < 4) {
3412			/* Message not big enough, just go on. */
3413			goto next_channel;
3414		}
3415		ch = intf->curr_channel;
3416		chans = intf->wchannels[set].c;
3417		chans[ch].medium = msg->msg.data[2] & 0x7f;
3418		chans[ch].protocol = msg->msg.data[3] & 0x1f;
3419
3420 next_channel:
3421		intf->curr_channel++;
3422		if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
3423			intf->channel_list = intf->wchannels + set;
3424			intf->channels_ready = true;
3425			wake_up(&intf->waitq);
3426		} else {
3427			intf->channel_list = intf->wchannels + set;
3428			intf->channels_ready = true;
3429			rv = send_channel_info_cmd(intf, intf->curr_channel);
3430		}
3431
3432		if (rv) {
3433			/* Got an error somehow, just give up. */
3434			dev_warn(intf->si_dev,
3435				 "Error sending channel information for channel %d: %d\n",
3436				 intf->curr_channel, rv);
3437
3438			intf->channel_list = intf->wchannels + set;
3439			intf->channels_ready = true;
3440			wake_up(&intf->waitq);
3441		}
3442	}
3443 out:
3444	return;
3445}
3446
3447/*
3448 * Must be holding intf->bmc_reg_mutex to call this.
3449 */
3450static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3451{
3452	int rv;
3453
3454	if (ipmi_version_major(id) > 1
3455			|| (ipmi_version_major(id) == 1
3456			    && ipmi_version_minor(id) >= 5)) {
3457		unsigned int set;
3458
3459		/*
3460		 * Start scanning the channels to see what is
3461		 * available.
3462		 */
3463		set = !intf->curr_working_cset;
3464		intf->curr_working_cset = set;
3465		memset(&intf->wchannels[set], 0,
3466		       sizeof(struct ipmi_channel_set));
3467
3468		intf->null_user_handler = channel_handler;
3469		intf->curr_channel = 0;
3470		rv = send_channel_info_cmd(intf, 0);
3471		if (rv) {
3472			dev_warn(intf->si_dev,
3473				 "Error sending channel information for channel 0, %d\n",
3474				 rv);
3475			intf->null_user_handler = NULL;
3476			return -EIO;
3477		}
3478
3479		/* Wait for the channel info to be read. */
3480		wait_event(intf->waitq, intf->channels_ready);
3481		intf->null_user_handler = NULL;
3482	} else {
3483		unsigned int set = intf->curr_working_cset;
3484
3485		/* Assume a single IPMB channel at zero. */
3486		intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3487		intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3488		intf->channel_list = intf->wchannels + set;
3489		intf->channels_ready = true;
3490	}
3491
3492	return 0;
3493}
3494
3495static void ipmi_poll(struct ipmi_smi *intf)
3496{
3497	if (intf->handlers->poll)
3498		intf->handlers->poll(intf->send_info);
3499	/* In case something came in */
3500	handle_new_recv_msgs(intf);
3501}
3502
3503void ipmi_poll_interface(struct ipmi_user *user)
3504{
3505	ipmi_poll(user->intf);
3506}
3507EXPORT_SYMBOL(ipmi_poll_interface);
3508
3509static ssize_t nr_users_show(struct device *dev,
3510			     struct device_attribute *attr,
3511			     char *buf)
3512{
3513	struct ipmi_smi *intf = container_of(attr,
3514			 struct ipmi_smi, nr_users_devattr);
3515
3516	return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users));
3517}
3518static DEVICE_ATTR_RO(nr_users);
3519
3520static ssize_t nr_msgs_show(struct device *dev,
3521			    struct device_attribute *attr,
3522			    char *buf)
3523{
3524	struct ipmi_smi *intf = container_of(attr,
3525			 struct ipmi_smi, nr_msgs_devattr);
3526	struct ipmi_user *user;
3527	int index;
3528	unsigned int count = 0;
3529
3530	index = srcu_read_lock(&intf->users_srcu);
3531	list_for_each_entry_rcu(user, &intf->users, link)
3532		count += atomic_read(&user->nr_msgs);
3533	srcu_read_unlock(&intf->users_srcu, index);
3534
3535	return sysfs_emit(buf, "%u\n", count);
3536}
3537static DEVICE_ATTR_RO(nr_msgs);
3538
3539static void redo_bmc_reg(struct work_struct *work)
3540{
3541	struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
3542					     bmc_reg_work);
3543
3544	if (!intf->in_shutdown)
3545		bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
3546
3547	kref_put(&intf->refcount, intf_free);
3548}
3549
3550int ipmi_add_smi(struct module         *owner,
3551		 const struct ipmi_smi_handlers *handlers,
3552		 void		       *send_info,
3553		 struct device         *si_dev,
3554		 unsigned char         slave_addr)
3555{
3556	int              i, j;
3557	int              rv;
3558	struct ipmi_smi *intf, *tintf;
3559	struct list_head *link;
3560	struct ipmi_device_id id;
3561
3562	/*
3563	 * Make sure the driver is actually initialized, this handles
3564	 * problems with initialization order.
3565	 */
3566	rv = ipmi_init_msghandler();
3567	if (rv)
3568		return rv;
3569
3570	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3571	if (!intf)
3572		return -ENOMEM;
3573
3574	rv = init_srcu_struct(&intf->users_srcu);
3575	if (rv) {
3576		kfree(intf);
3577		return rv;
3578	}
3579
3580	intf->owner = owner;
3581	intf->bmc = &intf->tmp_bmc;
3582	INIT_LIST_HEAD(&intf->bmc->intfs);
3583	mutex_init(&intf->bmc->dyn_mutex);
3584	INIT_LIST_HEAD(&intf->bmc_link);
3585	mutex_init(&intf->bmc_reg_mutex);
3586	intf->intf_num = -1; /* Mark it invalid for now. */
3587	kref_init(&intf->refcount);
3588	INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
3589	intf->si_dev = si_dev;
3590	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
3591		intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
3592		intf->addrinfo[j].lun = 2;
3593	}
3594	if (slave_addr != 0)
3595		intf->addrinfo[0].address = slave_addr;
3596	INIT_LIST_HEAD(&intf->users);
3597	atomic_set(&intf->nr_users, 0);
3598	intf->handlers = handlers;
3599	intf->send_info = send_info;
3600	spin_lock_init(&intf->seq_lock);
3601	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
3602		intf->seq_table[j].inuse = 0;
3603		intf->seq_table[j].seqid = 0;
3604	}
3605	intf->curr_seq = 0;
3606	spin_lock_init(&intf->waiting_rcv_msgs_lock);
3607	INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
3608	tasklet_setup(&intf->recv_tasklet,
3609		     smi_recv_tasklet);
3610	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
3611	spin_lock_init(&intf->xmit_msgs_lock);
3612	INIT_LIST_HEAD(&intf->xmit_msgs);
3613	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
3614	spin_lock_init(&intf->events_lock);
3615	spin_lock_init(&intf->watch_lock);
3616	atomic_set(&intf->event_waiters, 0);
3617	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3618	INIT_LIST_HEAD(&intf->waiting_events);
3619	intf->waiting_events_count = 0;
3620	mutex_init(&intf->cmd_rcvrs_mutex);
3621	spin_lock_init(&intf->maintenance_mode_lock);
3622	INIT_LIST_HEAD(&intf->cmd_rcvrs);
3623	init_waitqueue_head(&intf->waitq);
3624	for (i = 0; i < IPMI_NUM_STATS; i++)
3625		atomic_set(&intf->stats[i], 0);
3626
3627	mutex_lock(&ipmi_interfaces_mutex);
3628	/* Look for a hole in the numbers. */
3629	i = 0;
3630	link = &ipmi_interfaces;
3631	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
3632				ipmi_interfaces_mutex_held()) {
3633		if (tintf->intf_num != i) {
3634			link = &tintf->link;
3635			break;
3636		}
3637		i++;
3638	}
3639	/* Add the new interface in numeric order. */
3640	if (i == 0)
3641		list_add_rcu(&intf->link, &ipmi_interfaces);
3642	else
3643		list_add_tail_rcu(&intf->link, link);
3644
3645	rv = handlers->start_processing(send_info, intf);
3646	if (rv)
3647		goto out_err;
3648
3649	rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3650	if (rv) {
3651		dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3652		goto out_err_started;
3653	}
3654
3655	mutex_lock(&intf->bmc_reg_mutex);
3656	rv = __scan_channels(intf, &id);
3657	mutex_unlock(&intf->bmc_reg_mutex);
3658	if (rv)
3659		goto out_err_bmc_reg;
3660
3661	intf->nr_users_devattr = dev_attr_nr_users;
3662	sysfs_attr_init(&intf->nr_users_devattr.attr);
3663	rv = device_create_file(intf->si_dev, &intf->nr_users_devattr);
3664	if (rv)
3665		goto out_err_bmc_reg;
3666
3667	intf->nr_msgs_devattr = dev_attr_nr_msgs;
3668	sysfs_attr_init(&intf->nr_msgs_devattr.attr);
3669	rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr);
3670	if (rv) {
3671		device_remove_file(intf->si_dev, &intf->nr_users_devattr);
3672		goto out_err_bmc_reg;
3673	}
3674
3675	/*
3676	 * Keep memory order straight for RCU readers.  Make
3677	 * sure everything else is committed to memory before
3678	 * setting intf_num to mark the interface valid.
3679	 */
3680	smp_wmb();
3681	intf->intf_num = i;
3682	mutex_unlock(&ipmi_interfaces_mutex);
3683
3684	/* After this point the interface is legal to use. */
3685	call_smi_watchers(i, intf->si_dev);
3686
3687	return 0;
3688
3689 out_err_bmc_reg:
3690	ipmi_bmc_unregister(intf);
3691 out_err_started:
3692	if (intf->handlers->shutdown)
3693		intf->handlers->shutdown(intf->send_info);
3694 out_err:
3695	list_del_rcu(&intf->link);
3696	mutex_unlock(&ipmi_interfaces_mutex);
3697	synchronize_srcu(&ipmi_interfaces_srcu);
3698	cleanup_srcu_struct(&intf->users_srcu);
3699	kref_put(&intf->refcount, intf_free);
3700
3701	return rv;
3702}
3703EXPORT_SYMBOL(ipmi_add_smi);
3704
3705static void deliver_smi_err_response(struct ipmi_smi *intf,
3706				     struct ipmi_smi_msg *msg,
3707				     unsigned char err)
3708{
3709	int rv;
3710	msg->rsp[0] = msg->data[0] | 4;
3711	msg->rsp[1] = msg->data[1];
3712	msg->rsp[2] = err;
3713	msg->rsp_size = 3;
3714
3715	/* This will never requeue, but it may ask us to free the message. */
3716	rv = handle_one_recv_msg(intf, msg);
3717	if (rv == 0)
3718		ipmi_free_smi_msg(msg);
3719}
3720
3721static void cleanup_smi_msgs(struct ipmi_smi *intf)
3722{
3723	int              i;
3724	struct seq_table *ent;
3725	struct ipmi_smi_msg *msg;
3726	struct list_head *entry;
3727	struct list_head tmplist;
3728
3729	/* Clear out our transmit queues and hold the messages. */
3730	INIT_LIST_HEAD(&tmplist);
3731	list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3732	list_splice_tail(&intf->xmit_msgs, &tmplist);
3733
3734	/* Current message first, to preserve order */
3735	while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3736		/* Wait for the message to clear out. */
3737		schedule_timeout(1);
3738	}
3739
3740	/* No need for locks, the interface is down. */
3741
3742	/*
3743	 * Return errors for all pending messages in queue and in the
3744	 * tables waiting for remote responses.
3745	 */
3746	while (!list_empty(&tmplist)) {
3747		entry = tmplist.next;
3748		list_del(entry);
3749		msg = list_entry(entry, struct ipmi_smi_msg, link);
3750		deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3751	}
3752
3753	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
3754		ent = &intf->seq_table[i];
3755		if (!ent->inuse)
3756			continue;
3757		deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3758	}
3759}
3760
3761void ipmi_unregister_smi(struct ipmi_smi *intf)
3762{
3763	struct ipmi_smi_watcher *w;
3764	int intf_num, index;
3765
3766	if (!intf)
3767		return;
3768	intf_num = intf->intf_num;
3769	mutex_lock(&ipmi_interfaces_mutex);
3770	intf->intf_num = -1;
3771	intf->in_shutdown = true;
3772	list_del_rcu(&intf->link);
3773	mutex_unlock(&ipmi_interfaces_mutex);
3774	synchronize_srcu(&ipmi_interfaces_srcu);
3775
3776	/* At this point no users can be added to the interface. */
3777
3778	device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
3779	device_remove_file(intf->si_dev, &intf->nr_users_devattr);
3780
3781	/*
3782	 * Call all the watcher interfaces to tell them that
3783	 * an interface is going away.
3784	 */
3785	mutex_lock(&smi_watchers_mutex);
3786	list_for_each_entry(w, &smi_watchers, link)
3787		w->smi_gone(intf_num);
3788	mutex_unlock(&smi_watchers_mutex);
3789
3790	index = srcu_read_lock(&intf->users_srcu);
3791	while (!list_empty(&intf->users)) {
3792		struct ipmi_user *user =
3793			container_of(list_next_rcu(&intf->users),
3794				     struct ipmi_user, link);
3795
3796		_ipmi_destroy_user(user);
3797	}
3798	srcu_read_unlock(&intf->users_srcu, index);
3799
3800	if (intf->handlers->shutdown)
3801		intf->handlers->shutdown(intf->send_info);
3802
3803	cleanup_smi_msgs(intf);
3804
3805	ipmi_bmc_unregister(intf);
3806
3807	cleanup_srcu_struct(&intf->users_srcu);
3808	kref_put(&intf->refcount, intf_free);
3809}
3810EXPORT_SYMBOL(ipmi_unregister_smi);
3811
3812static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
3813				   struct ipmi_smi_msg *msg)
3814{
3815	struct ipmi_ipmb_addr ipmb_addr;
3816	struct ipmi_recv_msg  *recv_msg;
3817
3818	/*
3819	 * This is 11, not 10, because the response must contain a
3820	 * completion code.
3821	 */
3822	if (msg->rsp_size < 11) {
3823		/* Message not big enough, just ignore it. */
3824		ipmi_inc_stat(intf, invalid_ipmb_responses);
3825		return 0;
3826	}
3827
3828	if (msg->rsp[2] != 0) {
3829		/* An error getting the response, just ignore it. */
3830		return 0;
3831	}
3832
3833	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3834	ipmb_addr.slave_addr = msg->rsp[6];
3835	ipmb_addr.channel = msg->rsp[3] & 0x0f;
3836	ipmb_addr.lun = msg->rsp[7] & 3;
3837
3838	/*
3839	 * It's a response from a remote entity.  Look up the sequence
3840	 * number and handle the response.
3841	 */
3842	if (intf_find_seq(intf,
3843			  msg->rsp[7] >> 2,
3844			  msg->rsp[3] & 0x0f,
3845			  msg->rsp[8],
3846			  (msg->rsp[4] >> 2) & (~1),
3847			  (struct ipmi_addr *) &ipmb_addr,
3848			  &recv_msg)) {
3849		/*
3850		 * We were unable to find the sequence number,
3851		 * so just nuke the message.
3852		 */
3853		ipmi_inc_stat(intf, unhandled_ipmb_responses);
3854		return 0;
3855	}
3856
3857	memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
3858	/*
3859	 * The other fields matched, so no need to set them, except
3860	 * for netfn, which needs to be the response that was
3861	 * returned, not the request value.
3862	 */
3863	recv_msg->msg.netfn = msg->rsp[4] >> 2;
3864	recv_msg->msg.data = recv_msg->msg_data;
3865	recv_msg->msg.data_len = msg->rsp_size - 10;
3866	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3867	if (deliver_response(intf, recv_msg))
3868		ipmi_inc_stat(intf, unhandled_ipmb_responses);
3869	else
3870		ipmi_inc_stat(intf, handled_ipmb_responses);
3871
3872	return 0;
3873}
3874
3875static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3876				   struct ipmi_smi_msg *msg)
3877{
3878	struct cmd_rcvr          *rcvr;
3879	int                      rv = 0;
3880	unsigned char            netfn;
3881	unsigned char            cmd;
3882	unsigned char            chan;
3883	struct ipmi_user         *user = NULL;
3884	struct ipmi_ipmb_addr    *ipmb_addr;
3885	struct ipmi_recv_msg     *recv_msg;
3886
3887	if (msg->rsp_size < 10) {
3888		/* Message not big enough, just ignore it. */
3889		ipmi_inc_stat(intf, invalid_commands);
3890		return 0;
3891	}
3892
3893	if (msg->rsp[2] != 0) {
3894		/* An error getting the response, just ignore it. */
3895		return 0;
3896	}
3897
3898	netfn = msg->rsp[4] >> 2;
3899	cmd = msg->rsp[8];
3900	chan = msg->rsp[3] & 0xf;
3901
3902	rcu_read_lock();
3903	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3904	if (rcvr) {
3905		user = rcvr->user;
3906		kref_get(&user->refcount);
3907	} else
3908		user = NULL;
3909	rcu_read_unlock();
3910
3911	if (user == NULL) {
3912		/* We didn't find a user, deliver an error response. */
3913		ipmi_inc_stat(intf, unhandled_commands);
3914
3915		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3916		msg->data[1] = IPMI_SEND_MSG_CMD;
3917		msg->data[2] = msg->rsp[3];
3918		msg->data[3] = msg->rsp[6];
3919		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3920		msg->data[5] = ipmb_checksum(&msg->data[3], 2);
3921		msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3922		/* rqseq/lun */
3923		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3924		msg->data[8] = msg->rsp[8]; /* cmd */
3925		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3926		msg->data[10] = ipmb_checksum(&msg->data[6], 4);
3927		msg->data_size = 11;
3928
3929		dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
3930			msg->data_size, msg->data);
3931
3932		rcu_read_lock();
3933		if (!intf->in_shutdown) {
3934			smi_send(intf, intf->handlers, msg, 0);
3935			/*
3936			 * We used the message, so return the value
3937			 * that causes it to not be freed or
3938			 * queued.
3939			 */
3940			rv = -1;
3941		}
3942		rcu_read_unlock();
3943	} else {
3944		recv_msg = ipmi_alloc_recv_msg();
3945		if (!recv_msg) {
3946			/*
3947			 * We couldn't allocate memory for the
3948			 * message, so requeue it for handling
3949			 * later.
3950			 */
3951			rv = 1;
3952			kref_put(&user->refcount, free_user);
3953		} else {
3954			/* Extract the source address from the data. */
3955			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3956			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3957			ipmb_addr->slave_addr = msg->rsp[6];
3958			ipmb_addr->lun = msg->rsp[7] & 3;
3959			ipmb_addr->channel = msg->rsp[3] & 0xf;
3960
3961			/*
3962			 * Extract the rest of the message information
3963			 * from the IPMB header.
3964			 */
3965			recv_msg->user = user;
3966			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3967			recv_msg->msgid = msg->rsp[7] >> 2;
3968			recv_msg->msg.netfn = msg->rsp[4] >> 2;
3969			recv_msg->msg.cmd = msg->rsp[8];
3970			recv_msg->msg.data = recv_msg->msg_data;
3971
3972			/*
3973			 * We chop off 10, not 9 bytes because the checksum
3974			 * at the end also needs to be removed.
3975			 */
3976			recv_msg->msg.data_len = msg->rsp_size - 10;
3977			memcpy(recv_msg->msg_data, &msg->rsp[9],
3978			       msg->rsp_size - 10);
3979			if (deliver_response(intf, recv_msg))
3980				ipmi_inc_stat(intf, unhandled_commands);
3981			else
3982				ipmi_inc_stat(intf, handled_commands);
3983		}
3984	}
3985
3986	return rv;
3987}
3988
3989static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
3990				      struct ipmi_smi_msg *msg)
3991{
3992	struct cmd_rcvr          *rcvr;
3993	int                      rv = 0;
3994	struct ipmi_user         *user = NULL;
3995	struct ipmi_ipmb_direct_addr *daddr;
3996	struct ipmi_recv_msg     *recv_msg;
3997	unsigned char netfn = msg->rsp[0] >> 2;
3998	unsigned char cmd = msg->rsp[3];
3999
4000	rcu_read_lock();
4001	/* We always use channel 0 for direct messages. */
4002	rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
4003	if (rcvr) {
4004		user = rcvr->user;
4005		kref_get(&user->refcount);
4006	} else
4007		user = NULL;
4008	rcu_read_unlock();
4009
4010	if (user == NULL) {
4011		/* We didn't find a user, deliver an error response. */
4012		ipmi_inc_stat(intf, unhandled_commands);
4013
4014		msg->data[0] = (netfn + 1) << 2;
4015		msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */
4016		msg->data[1] = msg->rsp[1]; /* Addr */
4017		msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */
4018		msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */
4019		msg->data[3] = cmd;
4020		msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
4021		msg->data_size = 5;
4022
4023		rcu_read_lock();
4024		if (!intf->in_shutdown) {
4025			smi_send(intf, intf->handlers, msg, 0);
4026			/*
4027			 * We used the message, so return the value
4028			 * that causes it to not be freed or
4029			 * queued.
4030			 */
4031			rv = -1;
4032		}
4033		rcu_read_unlock();
4034	} else {
4035		recv_msg = ipmi_alloc_recv_msg();
4036		if (!recv_msg) {
4037			/*
4038			 * We couldn't allocate memory for the
4039			 * message, so requeue it for handling
4040			 * later.
4041			 */
4042			rv = 1;
4043			kref_put(&user->refcount, free_user);
4044		} else {
4045			/* Extract the source address from the data. */
4046			daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
4047			daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
4048			daddr->channel = 0;
4049			daddr->slave_addr = msg->rsp[1];
4050			daddr->rs_lun = msg->rsp[0] & 3;
4051			daddr->rq_lun = msg->rsp[2] & 3;
4052
4053			/*
4054			 * Extract the rest of the message information
4055			 * from the IPMB header.
4056			 */
4057			recv_msg->user = user;
4058			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
4059			recv_msg->msgid = (msg->rsp[2] >> 2);
4060			recv_msg->msg.netfn = msg->rsp[0] >> 2;
4061			recv_msg->msg.cmd = msg->rsp[3];
4062			recv_msg->msg.data = recv_msg->msg_data;
4063
4064			recv_msg->msg.data_len = msg->rsp_size - 4;
4065			memcpy(recv_msg->msg_data, msg->rsp + 4,
4066			       msg->rsp_size - 4);
4067			if (deliver_response(intf, recv_msg))
4068				ipmi_inc_stat(intf, unhandled_commands);
4069			else
4070				ipmi_inc_stat(intf, handled_commands);
4071		}
4072	}
4073
4074	return rv;
4075}
4076
4077static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
4078				      struct ipmi_smi_msg *msg)
4079{
4080	struct ipmi_recv_msg *recv_msg;
4081	struct ipmi_ipmb_direct_addr *daddr;
4082
4083	recv_msg = msg->user_data;
4084	if (recv_msg == NULL) {
4085		dev_warn(intf->si_dev,
4086			 "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance.\n");
4087		return 0;
4088	}
4089
4090	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4091	recv_msg->msgid = msg->msgid;
4092	daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr;
4093	daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
4094	daddr->channel = 0;
4095	daddr->slave_addr = msg->rsp[1];
4096	daddr->rq_lun = msg->rsp[0] & 3;
4097	daddr->rs_lun = msg->rsp[2] & 3;
4098	recv_msg->msg.netfn = msg->rsp[0] >> 2;
4099	recv_msg->msg.cmd = msg->rsp[3];
4100	memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4);
4101	recv_msg->msg.data = recv_msg->msg_data;
4102	recv_msg->msg.data_len = msg->rsp_size - 4;
4103	deliver_local_response(intf, recv_msg);
4104
4105	return 0;
4106}
4107
4108static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
4109				  struct ipmi_smi_msg *msg)
4110{
4111	struct ipmi_lan_addr  lan_addr;
4112	struct ipmi_recv_msg  *recv_msg;
4113
4114
4115	/*
4116	 * This is 13, not 12, because the response must contain a
4117	 * completion code.
4118	 */
4119	if (msg->rsp_size < 13) {
4120		/* Message not big enough, just ignore it. */
4121		ipmi_inc_stat(intf, invalid_lan_responses);
4122		return 0;
4123	}
4124
4125	if (msg->rsp[2] != 0) {
4126		/* An error getting the response, just ignore it. */
4127		return 0;
4128	}
4129
4130	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
4131	lan_addr.session_handle = msg->rsp[4];
4132	lan_addr.remote_SWID = msg->rsp[8];
4133	lan_addr.local_SWID = msg->rsp[5];
4134	lan_addr.channel = msg->rsp[3] & 0x0f;
4135	lan_addr.privilege = msg->rsp[3] >> 4;
4136	lan_addr.lun = msg->rsp[9] & 3;
4137
4138	/*
4139	 * It's a response from a remote entity.  Look up the sequence
4140	 * number and handle the response.
4141	 */
4142	if (intf_find_seq(intf,
4143			  msg->rsp[9] >> 2,
4144			  msg->rsp[3] & 0x0f,
4145			  msg->rsp[10],
4146			  (msg->rsp[6] >> 2) & (~1),
4147			  (struct ipmi_addr *) &lan_addr,
4148			  &recv_msg)) {
4149		/*
4150		 * We were unable to find the sequence number,
4151		 * so just nuke the message.
4152		 */
4153		ipmi_inc_stat(intf, unhandled_lan_responses);
4154		return 0;
4155	}
4156
4157	memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
4158	/*
4159	 * The other fields matched, so no need to set them, except
4160	 * for netfn, which needs to be the response that was
4161	 * returned, not the request value.
4162	 */
4163	recv_msg->msg.netfn = msg->rsp[6] >> 2;
4164	recv_msg->msg.data = recv_msg->msg_data;
4165	recv_msg->msg.data_len = msg->rsp_size - 12;
4166	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4167	if (deliver_response(intf, recv_msg))
4168		ipmi_inc_stat(intf, unhandled_lan_responses);
4169	else
4170		ipmi_inc_stat(intf, handled_lan_responses);
4171
4172	return 0;
4173}
4174
4175static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
4176				  struct ipmi_smi_msg *msg)
4177{
4178	struct cmd_rcvr          *rcvr;
4179	int                      rv = 0;
4180	unsigned char            netfn;
4181	unsigned char            cmd;
4182	unsigned char            chan;
4183	struct ipmi_user         *user = NULL;
4184	struct ipmi_lan_addr     *lan_addr;
4185	struct ipmi_recv_msg     *recv_msg;
4186
4187	if (msg->rsp_size < 12) {
4188		/* Message not big enough, just ignore it. */
4189		ipmi_inc_stat(intf, invalid_commands);
4190		return 0;
4191	}
4192
4193	if (msg->rsp[2] != 0) {
4194		/* An error getting the response, just ignore it. */
4195		return 0;
4196	}
4197
4198	netfn = msg->rsp[6] >> 2;
4199	cmd = msg->rsp[10];
4200	chan = msg->rsp[3] & 0xf;
4201
4202	rcu_read_lock();
4203	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
4204	if (rcvr) {
4205		user = rcvr->user;
4206		kref_get(&user->refcount);
4207	} else
4208		user = NULL;
4209	rcu_read_unlock();
4210
4211	if (user == NULL) {
4212		/* We didn't find a user, just give up. */
4213		ipmi_inc_stat(intf, unhandled_commands);
4214
4215		/*
4216		 * Don't do anything with these messages, just allow
4217		 * them to be freed.
4218		 */
4219		rv = 0;
4220	} else {
4221		recv_msg = ipmi_alloc_recv_msg();
4222		if (!recv_msg) {
4223			/*
4224			 * We couldn't allocate memory for the
4225			 * message, so requeue it for handling later.
4226			 */
4227			rv = 1;
4228			kref_put(&user->refcount, free_user);
4229		} else {
4230			/* Extract the source address from the data. */
4231			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
4232			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
4233			lan_addr->session_handle = msg->rsp[4];
4234			lan_addr->remote_SWID = msg->rsp[8];
4235			lan_addr->local_SWID = msg->rsp[5];
4236			lan_addr->lun = msg->rsp[9] & 3;
4237			lan_addr->channel = msg->rsp[3] & 0xf;
4238			lan_addr->privilege = msg->rsp[3] >> 4;
4239
4240			/*
4241			 * Extract the rest of the message information
4242			 * from the IPMB header.
4243			 */
4244			recv_msg->user = user;
4245			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
4246			recv_msg->msgid = msg->rsp[9] >> 2;
4247			recv_msg->msg.netfn = msg->rsp[6] >> 2;
4248			recv_msg->msg.cmd = msg->rsp[10];
4249			recv_msg->msg.data = recv_msg->msg_data;
4250
4251			/*
4252			 * We chop off 12, not 11 bytes because the checksum
4253			 * at the end also needs to be removed.
4254			 */
4255			recv_msg->msg.data_len = msg->rsp_size - 12;
4256			memcpy(recv_msg->msg_data, &msg->rsp[11],
4257			       msg->rsp_size - 12);
4258			if (deliver_response(intf, recv_msg))
4259				ipmi_inc_stat(intf, unhandled_commands);
4260			else
4261				ipmi_inc_stat(intf, handled_commands);
4262		}
4263	}
4264
4265	return rv;
4266}
4267
4268/*
4269 * This routine will handle "Get Message" command responses with
4270 * channels that use an OEM Medium. The message format belongs to
4271 * the OEM.  See IPMI 2.0 specification, Chapter 6 and
4272 * Chapter 22, sections 22.6 and 22.24 for more details.
4273 */
4274static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
4275				  struct ipmi_smi_msg *msg)
4276{
4277	struct cmd_rcvr       *rcvr;
4278	int                   rv = 0;
4279	unsigned char         netfn;
4280	unsigned char         cmd;
4281	unsigned char         chan;
4282	struct ipmi_user *user = NULL;
4283	struct ipmi_system_interface_addr *smi_addr;
4284	struct ipmi_recv_msg  *recv_msg;
4285
4286	/*
4287	 * We expect the OEM SW to perform error checking
4288	 * so we just do some basic sanity checks
4289	 */
4290	if (msg->rsp_size < 4) {
4291		/* Message not big enough, just ignore it. */
4292		ipmi_inc_stat(intf, invalid_commands);
4293		return 0;
4294	}
4295
4296	if (msg->rsp[2] != 0) {
4297		/* An error getting the response, just ignore it. */
4298		return 0;
4299	}
4300
4301	/*
4302	 * This is an OEM Message so the OEM needs to know how
4303	 * handle the message. We do no interpretation.
4304	 */
4305	netfn = msg->rsp[0] >> 2;
4306	cmd = msg->rsp[1];
4307	chan = msg->rsp[3] & 0xf;
4308
4309	rcu_read_lock();
4310	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
4311	if (rcvr) {
4312		user = rcvr->user;
4313		kref_get(&user->refcount);
4314	} else
4315		user = NULL;
4316	rcu_read_unlock();
4317
4318	if (user == NULL) {
4319		/* We didn't find a user, just give up. */
4320		ipmi_inc_stat(intf, unhandled_commands);
4321
4322		/*
4323		 * Don't do anything with these messages, just allow
4324		 * them to be freed.
4325		 */
4326
4327		rv = 0;
4328	} else {
4329		recv_msg = ipmi_alloc_recv_msg();
4330		if (!recv_msg) {
4331			/*
4332			 * We couldn't allocate memory for the
4333			 * message, so requeue it for handling
4334			 * later.
4335			 */
4336			rv = 1;
4337			kref_put(&user->refcount, free_user);
4338		} else {
4339			/*
4340			 * OEM Messages are expected to be delivered via
4341			 * the system interface to SMS software.  We might
4342			 * need to visit this again depending on OEM
4343			 * requirements
4344			 */
4345			smi_addr = ((struct ipmi_system_interface_addr *)
4346				    &recv_msg->addr);
4347			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4348			smi_addr->channel = IPMI_BMC_CHANNEL;
4349			smi_addr->lun = msg->rsp[0] & 3;
4350
4351			recv_msg->user = user;
4352			recv_msg->user_msg_data = NULL;
4353			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
4354			recv_msg->msg.netfn = msg->rsp[0] >> 2;
4355			recv_msg->msg.cmd = msg->rsp[1];
4356			recv_msg->msg.data = recv_msg->msg_data;
4357
4358			/*
4359			 * The message starts at byte 4 which follows the
4360			 * Channel Byte in the "GET MESSAGE" command
4361			 */
4362			recv_msg->msg.data_len = msg->rsp_size - 4;
4363			memcpy(recv_msg->msg_data, &msg->rsp[4],
4364			       msg->rsp_size - 4);
4365			if (deliver_response(intf, recv_msg))
4366				ipmi_inc_stat(intf, unhandled_commands);
4367			else
4368				ipmi_inc_stat(intf, handled_commands);
4369		}
4370	}
4371
4372	return rv;
4373}
4374
4375static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
4376				     struct ipmi_smi_msg  *msg)
4377{
4378	struct ipmi_system_interface_addr *smi_addr;
4379
4380	recv_msg->msgid = 0;
4381	smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
4382	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4383	smi_addr->channel = IPMI_BMC_CHANNEL;
4384	smi_addr->lun = msg->rsp[0] & 3;
4385	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
4386	recv_msg->msg.netfn = msg->rsp[0] >> 2;
4387	recv_msg->msg.cmd = msg->rsp[1];
4388	memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
4389	recv_msg->msg.data = recv_msg->msg_data;
4390	recv_msg->msg.data_len = msg->rsp_size - 3;
4391}
4392
4393static int handle_read_event_rsp(struct ipmi_smi *intf,
4394				 struct ipmi_smi_msg *msg)
4395{
4396	struct ipmi_recv_msg *recv_msg, *recv_msg2;
4397	struct list_head     msgs;
4398	struct ipmi_user     *user;
4399	int rv = 0, deliver_count = 0, index;
4400	unsigned long        flags;
4401
4402	if (msg->rsp_size < 19) {
4403		/* Message is too small to be an IPMB event. */
4404		ipmi_inc_stat(intf, invalid_events);
4405		return 0;
4406	}
4407
4408	if (msg->rsp[2] != 0) {
4409		/* An error getting the event, just ignore it. */
4410		return 0;
4411	}
4412
4413	INIT_LIST_HEAD(&msgs);
4414
4415	spin_lock_irqsave(&intf->events_lock, flags);
4416
4417	ipmi_inc_stat(intf, events);
4418
4419	/*
4420	 * Allocate and fill in one message for every user that is
4421	 * getting events.
4422	 */
4423	index = srcu_read_lock(&intf->users_srcu);
4424	list_for_each_entry_rcu(user, &intf->users, link) {
4425		if (!user->gets_events)
4426			continue;
4427
4428		recv_msg = ipmi_alloc_recv_msg();
4429		if (!recv_msg) {
4430			rcu_read_unlock();
4431			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
4432						 link) {
4433				list_del(&recv_msg->link);
4434				ipmi_free_recv_msg(recv_msg);
4435			}
4436			/*
4437			 * We couldn't allocate memory for the
4438			 * message, so requeue it for handling
4439			 * later.
4440			 */
4441			rv = 1;
4442			goto out;
4443		}
4444
4445		deliver_count++;
4446
4447		copy_event_into_recv_msg(recv_msg, msg);
4448		recv_msg->user = user;
4449		kref_get(&user->refcount);
4450		list_add_tail(&recv_msg->link, &msgs);
4451	}
4452	srcu_read_unlock(&intf->users_srcu, index);
4453
4454	if (deliver_count) {
4455		/* Now deliver all the messages. */
4456		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
4457			list_del(&recv_msg->link);
4458			deliver_local_response(intf, recv_msg);
4459		}
4460	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4461		/*
4462		 * No one to receive the message, put it in queue if there's
4463		 * not already too many things in the queue.
4464		 */
4465		recv_msg = ipmi_alloc_recv_msg();
4466		if (!recv_msg) {
4467			/*
4468			 * We couldn't allocate memory for the
4469			 * message, so requeue it for handling
4470			 * later.
4471			 */
4472			rv = 1;
4473			goto out;
4474		}
4475
4476		copy_event_into_recv_msg(recv_msg, msg);
4477		list_add_tail(&recv_msg->link, &intf->waiting_events);
4478		intf->waiting_events_count++;
4479	} else if (!intf->event_msg_printed) {
4480		/*
4481		 * There's too many things in the queue, discard this
4482		 * message.
4483		 */
4484		dev_warn(intf->si_dev,
4485			 "Event queue full, discarding incoming events\n");
4486		intf->event_msg_printed = 1;
4487	}
4488
4489 out:
4490	spin_unlock_irqrestore(&intf->events_lock, flags);
4491
4492	return rv;
4493}
4494
4495static int handle_bmc_rsp(struct ipmi_smi *intf,
4496			  struct ipmi_smi_msg *msg)
4497{
4498	struct ipmi_recv_msg *recv_msg;
4499	struct ipmi_system_interface_addr *smi_addr;
4500
4501	recv_msg = msg->user_data;
4502	if (recv_msg == NULL) {
4503		dev_warn(intf->si_dev,
4504			 "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance.\n");
4505		return 0;
4506	}
4507
4508	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4509	recv_msg->msgid = msg->msgid;
4510	smi_addr = ((struct ipmi_system_interface_addr *)
4511		    &recv_msg->addr);
4512	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4513	smi_addr->channel = IPMI_BMC_CHANNEL;
4514	smi_addr->lun = msg->rsp[0] & 3;
4515	recv_msg->msg.netfn = msg->rsp[0] >> 2;
4516	recv_msg->msg.cmd = msg->rsp[1];
4517	memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
4518	recv_msg->msg.data = recv_msg->msg_data;
4519	recv_msg->msg.data_len = msg->rsp_size - 2;
4520	deliver_local_response(intf, recv_msg);
4521
4522	return 0;
4523}
4524
4525/*
4526 * Handle a received message.  Return 1 if the message should be requeued,
4527 * 0 if the message should be freed, or -1 if the message should not
4528 * be freed or requeued.
4529 */
4530static int handle_one_recv_msg(struct ipmi_smi *intf,
4531			       struct ipmi_smi_msg *msg)
4532{
4533	int requeue = 0;
4534	int chan;
4535	unsigned char cc;
4536	bool is_cmd = !((msg->rsp[0] >> 2) & 1);
4537
4538	dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp);
4539
4540	if (msg->rsp_size < 2) {
4541		/* Message is too small to be correct. */
4542		dev_warn(intf->si_dev,
4543			 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
4544			 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4545
4546return_unspecified:
4547		/* Generate an error response for the message. */
4548		msg->rsp[0] = msg->data[0] | (1 << 2);
4549		msg->rsp[1] = msg->data[1];
4550		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4551		msg->rsp_size = 3;
4552	} else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
4553		/* commands must have at least 4 bytes, responses 5. */
4554		if (is_cmd && (msg->rsp_size < 4)) {
4555			ipmi_inc_stat(intf, invalid_commands);
4556			goto out;
4557		}
4558		if (!is_cmd && (msg->rsp_size < 5)) {
4559			ipmi_inc_stat(intf, invalid_ipmb_responses);
4560			/* Construct a valid error response. */
4561			msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
4562			msg->rsp[0] |= (1 << 2); /* Make it a response */
4563			msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
4564			msg->rsp[1] = msg->data[1]; /* Addr */
4565			msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
4566			msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
4567			msg->rsp[3] = msg->data[3]; /* Cmd */
4568			msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
4569			msg->rsp_size = 5;
4570		}
4571	} else if ((msg->data_size >= 2)
4572	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4573	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
4574	    && (msg->user_data == NULL)) {
4575
4576		if (intf->in_shutdown)
4577			goto out;
4578
4579		/*
4580		 * This is the local response to a command send, start
4581		 * the timer for these.  The user_data will not be
4582		 * NULL if this is a response send, and we will let
4583		 * response sends just go through.
4584		 */
4585
4586		/*
4587		 * Check for errors, if we get certain errors (ones
4588		 * that mean basically we can try again later), we
4589		 * ignore them and start the timer.  Otherwise we
4590		 * report the error immediately.
4591		 */
4592		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4593		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
4594		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4595		    && (msg->rsp[2] != IPMI_BUS_ERR)
4596		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
4597			int ch = msg->rsp[3] & 0xf;
4598			struct ipmi_channel *chans;
4599
4600			/* Got an error sending the message, handle it. */
4601
4602			chans = READ_ONCE(intf->channel_list)->c;
4603			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
4604			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
4605				ipmi_inc_stat(intf, sent_lan_command_errs);
4606			else
4607				ipmi_inc_stat(intf, sent_ipmb_command_errs);
4608			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4609		} else
4610			/* The message was sent, start the timer. */
4611			intf_start_seq_timer(intf, msg->msgid);
4612		requeue = 0;
4613		goto out;
4614	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4615		   || (msg->rsp[1] != msg->data[1])) {
4616		/*
4617		 * The NetFN and Command in the response is not even
4618		 * marginally correct.
4619		 */
4620		dev_warn(intf->si_dev,
4621			 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4622			 (msg->data[0] >> 2) | 1, msg->data[1],
4623			 msg->rsp[0] >> 2, msg->rsp[1]);
4624
4625		goto return_unspecified;
4626	}
4627
4628	if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
4629		if ((msg->data[0] >> 2) & 1) {
4630			/* It's a response to a sent response. */
4631			chan = 0;
4632			cc = msg->rsp[4];
4633			goto process_response_response;
4634		}
4635		if (is_cmd)
4636			requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
4637		else
4638			requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
4639	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4640		   && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4641		   && (msg->user_data != NULL)) {
4642		/*
4643		 * It's a response to a response we sent.  For this we
4644		 * deliver a send message response to the user.
4645		 */
4646		struct ipmi_recv_msg *recv_msg;
4647
4648		chan = msg->data[2] & 0x0f;
4649		if (chan >= IPMI_MAX_CHANNELS)
4650			/* Invalid channel number */
4651			goto out;
4652		cc = msg->rsp[2];
4653
4654process_response_response:
4655		recv_msg = msg->user_data;
4656
4657		requeue = 0;
4658		if (!recv_msg)
4659			goto out;
4660
4661		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
4662		recv_msg->msg.data = recv_msg->msg_data;
4663		recv_msg->msg_data[0] = cc;
4664		recv_msg->msg.data_len = 1;
4665		deliver_local_response(intf, recv_msg);
4666	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4667		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
4668		struct ipmi_channel   *chans;
4669
4670		/* It's from the receive queue. */
4671		chan = msg->rsp[3] & 0xf;
4672		if (chan >= IPMI_MAX_CHANNELS) {
4673			/* Invalid channel number */
4674			requeue = 0;
4675			goto out;
4676		}
4677
4678		/*
4679		 * We need to make sure the channels have been initialized.
4680		 * The channel_handler routine will set the "curr_channel"
4681		 * equal to or greater than IPMI_MAX_CHANNELS when all the
4682		 * channels for this interface have been initialized.
4683		 */
4684		if (!intf->channels_ready) {
4685			requeue = 0; /* Throw the message away */
4686			goto out;
4687		}
4688
4689		chans = READ_ONCE(intf->channel_list)->c;
4690
4691		switch (chans[chan].medium) {
4692		case IPMI_CHANNEL_MEDIUM_IPMB:
4693			if (msg->rsp[4] & 0x04) {
4694				/*
4695				 * It's a response, so find the
4696				 * requesting message and send it up.
4697				 */
4698				requeue = handle_ipmb_get_msg_rsp(intf, msg);
4699			} else {
4700				/*
4701				 * It's a command to the SMS from some other
4702				 * entity.  Handle that.
4703				 */
4704				requeue = handle_ipmb_get_msg_cmd(intf, msg);
4705			}
4706			break;
4707
4708		case IPMI_CHANNEL_MEDIUM_8023LAN:
4709		case IPMI_CHANNEL_MEDIUM_ASYNC:
4710			if (msg->rsp[6] & 0x04) {
4711				/*
4712				 * It's a response, so find the
4713				 * requesting message and send it up.
4714				 */
4715				requeue = handle_lan_get_msg_rsp(intf, msg);
4716			} else {
4717				/*
4718				 * It's a command to the SMS from some other
4719				 * entity.  Handle that.
4720				 */
4721				requeue = handle_lan_get_msg_cmd(intf, msg);
4722			}
4723			break;
4724
4725		default:
4726			/* Check for OEM Channels.  Clients had better
4727			   register for these commands. */
4728			if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
4729			    && (chans[chan].medium
4730				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
4731				requeue = handle_oem_get_msg_cmd(intf, msg);
4732			} else {
4733				/*
4734				 * We don't handle the channel type, so just
4735				 * free the message.
4736				 */
4737				requeue = 0;
4738			}
4739		}
4740
4741	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4742		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
4743		/* It's an asynchronous event. */
4744		requeue = handle_read_event_rsp(intf, msg);
4745	} else {
4746		/* It's a response from the local BMC. */
4747		requeue = handle_bmc_rsp(intf, msg);
4748	}
4749
4750 out:
4751	return requeue;
4752}
4753
4754/*
4755 * If there are messages in the queue or pretimeouts, handle them.
4756 */
4757static void handle_new_recv_msgs(struct ipmi_smi *intf)
4758{
4759	struct ipmi_smi_msg  *smi_msg;
4760	unsigned long        flags = 0;
4761	int                  rv;
4762	int                  run_to_completion = intf->run_to_completion;
4763
4764	/* See if any waiting messages need to be processed. */
4765	if (!run_to_completion)
4766		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4767	while (!list_empty(&intf->waiting_rcv_msgs)) {
4768		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4769				     struct ipmi_smi_msg, link);
4770		list_del(&smi_msg->link);
4771		if (!run_to_completion)
4772			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4773					       flags);
4774		rv = handle_one_recv_msg(intf, smi_msg);
4775		if (!run_to_completion)
4776			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4777		if (rv > 0) {
4778			/*
4779			 * To preserve message order, quit if we
4780			 * can't handle a message.  Add the message
4781			 * back at the head, this is safe because this
4782			 * tasklet is the only thing that pulls the
4783			 * messages.
4784			 */
4785			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4786			break;
4787		} else {
4788			if (rv == 0)
4789				/* Message handled */
4790				ipmi_free_smi_msg(smi_msg);
4791			/* If rv < 0, fatal error, del but don't free. */
4792		}
4793	}
4794	if (!run_to_completion)
4795		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4796
4797	/*
4798	 * If the pretimout count is non-zero, decrement one from it and
4799	 * deliver pretimeouts to all the users.
4800	 */
4801	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4802		struct ipmi_user *user;
4803		int index;
4804
4805		index = srcu_read_lock(&intf->users_srcu);
4806		list_for_each_entry_rcu(user, &intf->users, link) {
4807			if (user->handler->ipmi_watchdog_pretimeout)
4808				user->handler->ipmi_watchdog_pretimeout(
4809					user->handler_data);
4810		}
4811		srcu_read_unlock(&intf->users_srcu, index);
4812	}
4813}
4814
4815static void smi_recv_tasklet(struct tasklet_struct *t)
4816{
4817	unsigned long flags = 0; /* keep us warning-free. */
4818	struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
4819	int run_to_completion = intf->run_to_completion;
4820	struct ipmi_smi_msg *newmsg = NULL;
4821
4822	/*
4823	 * Start the next message if available.
4824	 *
4825	 * Do this here, not in the actual receiver, because we may deadlock
4826	 * because the lower layer is allowed to hold locks while calling
4827	 * message delivery.
4828	 */
4829
4830	rcu_read_lock();
4831
4832	if (!run_to_completion)
4833		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4834	if (intf->curr_msg == NULL && !intf->in_shutdown) {
4835		struct list_head *entry = NULL;
4836
4837		/* Pick the high priority queue first. */
4838		if (!list_empty(&intf->hp_xmit_msgs))
4839			entry = intf->hp_xmit_msgs.next;
4840		else if (!list_empty(&intf->xmit_msgs))
4841			entry = intf->xmit_msgs.next;
4842
4843		if (entry) {
4844			list_del(entry);
4845			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
4846			intf->curr_msg = newmsg;
4847		}
4848	}
4849
4850	if (!run_to_completion)
4851		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4852	if (newmsg)
4853		intf->handlers->sender(intf->send_info, newmsg);
4854
4855	rcu_read_unlock();
4856
4857	handle_new_recv_msgs(intf);
4858}
4859
4860/* Handle a new message from the lower layer. */
4861void ipmi_smi_msg_received(struct ipmi_smi *intf,
4862			   struct ipmi_smi_msg *msg)
4863{
4864	unsigned long flags = 0; /* keep us warning-free. */
4865	int run_to_completion = intf->run_to_completion;
4866
4867	/*
4868	 * To preserve message order, we keep a queue and deliver from
4869	 * a tasklet.
4870	 */
4871	if (!run_to_completion)
4872		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4873	list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
4874	if (!run_to_completion)
4875		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4876				       flags);
4877
4878	if (!run_to_completion)
4879		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4880	/*
4881	 * We can get an asynchronous event or receive message in addition
4882	 * to commands we send.
4883	 */
4884	if (msg == intf->curr_msg)
4885		intf->curr_msg = NULL;
4886	if (!run_to_completion)
4887		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4888
4889	if (run_to_completion)
4890		smi_recv_tasklet(&intf->recv_tasklet);
4891	else
4892		tasklet_schedule(&intf->recv_tasklet);
4893}
4894EXPORT_SYMBOL(ipmi_smi_msg_received);
4895
4896void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
4897{
4898	if (intf->in_shutdown)
4899		return;
4900
4901	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
4902	tasklet_schedule(&intf->recv_tasklet);
4903}
4904EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4905
4906static struct ipmi_smi_msg *
4907smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
4908		  unsigned char seq, long seqid)
4909{
4910	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
4911	if (!smi_msg)
4912		/*
4913		 * If we can't allocate the message, then just return, we
4914		 * get 4 retries, so this should be ok.
4915		 */
4916		return NULL;
4917
4918	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
4919	smi_msg->data_size = recv_msg->msg.data_len;
4920	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
4921
4922	dev_dbg(intf->si_dev, "Resend: %*ph\n",
4923		smi_msg->data_size, smi_msg->data);
4924
4925	return smi_msg;
4926}
4927
4928static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
4929			      struct list_head *timeouts,
4930			      unsigned long timeout_period,
4931			      int slot, unsigned long *flags,
4932			      bool *need_timer)
4933{
4934	struct ipmi_recv_msg *msg;
4935
4936	if (intf->in_shutdown)
4937		return;
4938
4939	if (!ent->inuse)
4940		return;
4941
4942	if (timeout_period < ent->timeout) {
4943		ent->timeout -= timeout_period;
4944		*need_timer = true;
4945		return;
4946	}
4947
4948	if (ent->retries_left == 0) {
4949		/* The message has used all its retries. */
4950		ent->inuse = 0;
4951		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
4952		msg = ent->recv_msg;
4953		list_add_tail(&msg->link, timeouts);
4954		if (ent->broadcast)
4955			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
4956		else if (is_lan_addr(&ent->recv_msg->addr))
4957			ipmi_inc_stat(intf, timed_out_lan_commands);
4958		else
4959			ipmi_inc_stat(intf, timed_out_ipmb_commands);
4960	} else {
4961		struct ipmi_smi_msg *smi_msg;
4962		/* More retries, send again. */
4963
4964		*need_timer = true;
4965
4966		/*
4967		 * Start with the max timer, set to normal timer after
4968		 * the message is sent.
4969		 */
4970		ent->timeout = MAX_MSG_TIMEOUT;
4971		ent->retries_left--;
4972		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
4973					    ent->seqid);
4974		if (!smi_msg) {
4975			if (is_lan_addr(&ent->recv_msg->addr))
4976				ipmi_inc_stat(intf,
4977					      dropped_rexmit_lan_commands);
4978			else
4979				ipmi_inc_stat(intf,
4980					      dropped_rexmit_ipmb_commands);
4981			return;
4982		}
4983
4984		spin_unlock_irqrestore(&intf->seq_lock, *flags);
4985
4986		/*
4987		 * Send the new message.  We send with a zero
4988		 * priority.  It timed out, I doubt time is that
4989		 * critical now, and high priority messages are really
4990		 * only for messages to the local MC, which don't get
4991		 * resent.
4992		 */
4993		if (intf->handlers) {
4994			if (is_lan_addr(&ent->recv_msg->addr))
4995				ipmi_inc_stat(intf,
4996					      retransmitted_lan_commands);
4997			else
4998				ipmi_inc_stat(intf,
4999					      retransmitted_ipmb_commands);
5000
5001			smi_send(intf, intf->handlers, smi_msg, 0);
5002		} else
5003			ipmi_free_smi_msg(smi_msg);
5004
5005		spin_lock_irqsave(&intf->seq_lock, *flags);
5006	}
5007}
5008
5009static bool ipmi_timeout_handler(struct ipmi_smi *intf,
5010				 unsigned long timeout_period)
5011{
5012	struct list_head     timeouts;
5013	struct ipmi_recv_msg *msg, *msg2;
5014	unsigned long        flags;
5015	int                  i;
5016	bool                 need_timer = false;
5017
5018	if (!intf->bmc_registered) {
5019		kref_get(&intf->refcount);
5020		if (!schedule_work(&intf->bmc_reg_work)) {
5021			kref_put(&intf->refcount, intf_free);
5022			need_timer = true;
5023		}
5024	}
5025
5026	/*
5027	 * Go through the seq table and find any messages that
5028	 * have timed out, putting them in the timeouts
5029	 * list.
5030	 */
5031	INIT_LIST_HEAD(&timeouts);
5032	spin_lock_irqsave(&intf->seq_lock, flags);
5033	if (intf->ipmb_maintenance_mode_timeout) {
5034		if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
5035			intf->ipmb_maintenance_mode_timeout = 0;
5036		else
5037			intf->ipmb_maintenance_mode_timeout -= timeout_period;
5038	}
5039	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
5040		check_msg_timeout(intf, &intf->seq_table[i],
5041				  &timeouts, timeout_period, i,
5042				  &flags, &need_timer);
5043	spin_unlock_irqrestore(&intf->seq_lock, flags);
5044
5045	list_for_each_entry_safe(msg, msg2, &timeouts, link)
5046		deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
5047
5048	/*
5049	 * Maintenance mode handling.  Check the timeout
5050	 * optimistically before we claim the lock.  It may
5051	 * mean a timeout gets missed occasionally, but that
5052	 * only means the timeout gets extended by one period
5053	 * in that case.  No big deal, and it avoids the lock
5054	 * most of the time.
5055	 */
5056	if (intf->auto_maintenance_timeout > 0) {
5057		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
5058		if (intf->auto_maintenance_timeout > 0) {
5059			intf->auto_maintenance_timeout
5060				-= timeout_period;
5061			if (!intf->maintenance_mode
5062			    && (intf->auto_maintenance_timeout <= 0)) {
5063				intf->maintenance_mode_enable = false;
5064				maintenance_mode_update(intf);
5065			}
5066		}
5067		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
5068				       flags);
5069	}
5070
5071	tasklet_schedule(&intf->recv_tasklet);
5072
5073	return need_timer;
5074}
5075
5076static void ipmi_request_event(struct ipmi_smi *intf)
5077{
5078	/* No event requests when in maintenance mode. */
5079	if (intf->maintenance_mode_enable)
5080		return;
5081
5082	if (!intf->in_shutdown)
5083		intf->handlers->request_events(intf->send_info);
5084}
5085
5086static struct timer_list ipmi_timer;
5087
5088static atomic_t stop_operation;
5089
5090static void ipmi_timeout(struct timer_list *unused)
5091{
5092	struct ipmi_smi *intf;
5093	bool need_timer = false;
5094	int index;
5095
5096	if (atomic_read(&stop_operation))
5097		return;
5098
5099	index = srcu_read_lock(&ipmi_interfaces_srcu);
5100	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
5101		if (atomic_read(&intf->event_waiters)) {
5102			intf->ticks_to_req_ev--;
5103			if (intf->ticks_to_req_ev == 0) {
5104				ipmi_request_event(intf);
5105				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
5106			}
5107			need_timer = true;
5108		}
5109
5110		need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
5111	}
5112	srcu_read_unlock(&ipmi_interfaces_srcu, index);
5113
5114	if (need_timer)
5115		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5116}
5117
5118static void need_waiter(struct ipmi_smi *intf)
5119{
5120	/* Racy, but worst case we start the timer twice. */
5121	if (!timer_pending(&ipmi_timer))
5122		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5123}
5124
5125static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
5126static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
5127
5128static void free_smi_msg(struct ipmi_smi_msg *msg)
5129{
5130	atomic_dec(&smi_msg_inuse_count);
5131	/* Try to keep as much stuff out of the panic path as possible. */
5132	if (!oops_in_progress)
5133		kfree(msg);
5134}
5135
5136struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
5137{
5138	struct ipmi_smi_msg *rv;
5139	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
5140	if (rv) {
5141		rv->done = free_smi_msg;
5142		rv->user_data = NULL;
5143		rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
5144		atomic_inc(&smi_msg_inuse_count);
5145	}
5146	return rv;
5147}
5148EXPORT_SYMBOL(ipmi_alloc_smi_msg);
5149
5150static void free_recv_msg(struct ipmi_recv_msg *msg)
5151{
5152	atomic_dec(&recv_msg_inuse_count);
5153	/* Try to keep as much stuff out of the panic path as possible. */
5154	if (!oops_in_progress)
5155		kfree(msg);
5156}
5157
5158static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
5159{
5160	struct ipmi_recv_msg *rv;
5161
5162	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
5163	if (rv) {
5164		rv->user = NULL;
5165		rv->done = free_recv_msg;
5166		atomic_inc(&recv_msg_inuse_count);
5167	}
5168	return rv;
5169}
5170
5171void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
5172{
5173	if (msg->user && !oops_in_progress)
5174		kref_put(&msg->user->refcount, free_user);
5175	msg->done(msg);
5176}
5177EXPORT_SYMBOL(ipmi_free_recv_msg);
5178
5179static atomic_t panic_done_count = ATOMIC_INIT(0);
5180
5181static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
5182{
5183	atomic_dec(&panic_done_count);
5184}
5185
5186static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
5187{
5188	atomic_dec(&panic_done_count);
5189}
5190
5191/*
5192 * Inside a panic, send a message and wait for a response.
5193 */
5194static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
5195					struct ipmi_addr *addr,
5196					struct kernel_ipmi_msg *msg)
5197{
5198	struct ipmi_smi_msg  smi_msg;
5199	struct ipmi_recv_msg recv_msg;
5200	int rv;
5201
5202	smi_msg.done = dummy_smi_done_handler;
5203	recv_msg.done = dummy_recv_done_handler;
5204	atomic_add(2, &panic_done_count);
5205	rv = i_ipmi_request(NULL,
5206			    intf,
5207			    addr,
5208			    0,
5209			    msg,
5210			    intf,
5211			    &smi_msg,
5212			    &recv_msg,
5213			    0,
5214			    intf->addrinfo[0].address,
5215			    intf->addrinfo[0].lun,
5216			    0, 1); /* Don't retry, and don't wait. */
5217	if (rv)
5218		atomic_sub(2, &panic_done_count);
5219	else if (intf->handlers->flush_messages)
5220		intf->handlers->flush_messages(intf->send_info);
5221
5222	while (atomic_read(&panic_done_count) != 0)
5223		ipmi_poll(intf);
5224}
5225
5226static void event_receiver_fetcher(struct ipmi_smi *intf,
5227				   struct ipmi_recv_msg *msg)
5228{
5229	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
5230	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
5231	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
5232	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
5233		/* A get event receiver command, save it. */
5234		intf->event_receiver = msg->msg.data[1];
5235		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
5236	}
5237}
5238
5239static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
5240{
5241	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
5242	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
5243	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
5244	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
5245		/*
5246		 * A get device id command, save if we are an event
5247		 * receiver or generator.
5248		 */
5249		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
5250		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
5251	}
5252}
5253
5254static void send_panic_events(struct ipmi_smi *intf, char *str)
5255{
5256	struct kernel_ipmi_msg msg;
5257	unsigned char data[16];
5258	struct ipmi_system_interface_addr *si;
5259	struct ipmi_addr addr;
5260	char *p = str;
5261	struct ipmi_ipmb_addr *ipmb;
5262	int j;
5263
5264	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
5265		return;
5266
5267	si = (struct ipmi_system_interface_addr *) &addr;
5268	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
5269	si->channel = IPMI_BMC_CHANNEL;
5270	si->lun = 0;
5271
5272	/* Fill in an event telling that we have failed. */
5273	msg.netfn = 0x04; /* Sensor or Event. */
5274	msg.cmd = 2; /* Platform event command. */
5275	msg.data = data;
5276	msg.data_len = 8;
5277	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
5278	data[1] = 0x03; /* This is for IPMI 1.0. */
5279	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
5280	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
5281	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
5282
5283	/*
5284	 * Put a few breadcrumbs in.  Hopefully later we can add more things
5285	 * to make the panic events more useful.
5286	 */
5287	if (str) {
5288		data[3] = str[0];
5289		data[6] = str[1];
5290		data[7] = str[2];
5291	}
5292
5293	/* Send the event announcing the panic. */
5294	ipmi_panic_request_and_wait(intf, &addr, &msg);
5295
5296	/*
5297	 * On every interface, dump a bunch of OEM event holding the
5298	 * string.
5299	 */
5300	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
5301		return;
5302
5303	/*
5304	 * intf_num is used as an marker to tell if the
5305	 * interface is valid.  Thus we need a read barrier to
5306	 * make sure data fetched before checking intf_num
5307	 * won't be used.
5308	 */
5309	smp_rmb();
5310
5311	/*
5312	 * First job here is to figure out where to send the
5313	 * OEM events.  There's no way in IPMI to send OEM
5314	 * events using an event send command, so we have to
5315	 * find the SEL to put them in and stick them in
5316	 * there.
5317	 */
5318
5319	/* Get capabilities from the get device id. */
5320	intf->local_sel_device = 0;
5321	intf->local_event_generator = 0;
5322	intf->event_receiver = 0;
5323
5324	/* Request the device info from the local MC. */
5325	msg.netfn = IPMI_NETFN_APP_REQUEST;
5326	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
5327	msg.data = NULL;
5328	msg.data_len = 0;
5329	intf->null_user_handler = device_id_fetcher;
5330	ipmi_panic_request_and_wait(intf, &addr, &msg);
5331
5332	if (intf->local_event_generator) {
5333		/* Request the event receiver from the local MC. */
5334		msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
5335		msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
5336		msg.data = NULL;
5337		msg.data_len = 0;
5338		intf->null_user_handler = event_receiver_fetcher;
5339		ipmi_panic_request_and_wait(intf, &addr, &msg);
5340	}
5341	intf->null_user_handler = NULL;
5342
5343	/*
5344	 * Validate the event receiver.  The low bit must not
5345	 * be 1 (it must be a valid IPMB address), it cannot
5346	 * be zero, and it must not be my address.
5347	 */
5348	if (((intf->event_receiver & 1) == 0)
5349	    && (intf->event_receiver != 0)
5350	    && (intf->event_receiver != intf->addrinfo[0].address)) {
5351		/*
5352		 * The event receiver is valid, send an IPMB
5353		 * message.
5354		 */
5355		ipmb = (struct ipmi_ipmb_addr *) &addr;
5356		ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
5357		ipmb->channel = 0; /* FIXME - is this right? */
5358		ipmb->lun = intf->event_receiver_lun;
5359		ipmb->slave_addr = intf->event_receiver;
5360	} else if (intf->local_sel_device) {
5361		/*
5362		 * The event receiver was not valid (or was
5363		 * me), but I am an SEL device, just dump it
5364		 * in my SEL.
5365		 */
5366		si = (struct ipmi_system_interface_addr *) &addr;
5367		si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
5368		si->channel = IPMI_BMC_CHANNEL;
5369		si->lun = 0;
5370	} else
5371		return; /* No where to send the event. */
5372
5373	msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
5374	msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
5375	msg.data = data;
5376	msg.data_len = 16;
5377
5378	j = 0;
5379	while (*p) {
5380		int size = strnlen(p, 11);
5381
5382		data[0] = 0;
5383		data[1] = 0;
5384		data[2] = 0xf0; /* OEM event without timestamp. */
5385		data[3] = intf->addrinfo[0].address;
5386		data[4] = j++; /* sequence # */
5387
5388		memcpy_and_pad(data+5, 11, p, size, '\0');
5389		p += size;
5390
5391		ipmi_panic_request_and_wait(intf, &addr, &msg);
5392	}
5393}
5394
5395static int has_panicked;
5396
5397static int panic_event(struct notifier_block *this,
5398		       unsigned long         event,
5399		       void                  *ptr)
5400{
5401	struct ipmi_smi *intf;
5402	struct ipmi_user *user;
5403
5404	if (has_panicked)
5405		return NOTIFY_DONE;
5406	has_panicked = 1;
5407
5408	/* For every registered interface, set it to run to completion. */
5409	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
5410		if (!intf->handlers || intf->intf_num == -1)
5411			/* Interface is not ready. */
5412			continue;
5413
5414		if (!intf->handlers->poll)
5415			continue;
5416
5417		/*
5418		 * If we were interrupted while locking xmit_msgs_lock or
5419		 * waiting_rcv_msgs_lock, the corresponding list may be
5420		 * corrupted.  In this case, drop items on the list for
5421		 * the safety.
5422		 */
5423		if (!spin_trylock(&intf->xmit_msgs_lock)) {
5424			INIT_LIST_HEAD(&intf->xmit_msgs);
5425			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
5426		} else
5427			spin_unlock(&intf->xmit_msgs_lock);
5428
5429		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
5430			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
5431		else
5432			spin_unlock(&intf->waiting_rcv_msgs_lock);
5433
5434		intf->run_to_completion = 1;
5435		if (intf->handlers->set_run_to_completion)
5436			intf->handlers->set_run_to_completion(intf->send_info,
5437							      1);
5438
5439		list_for_each_entry_rcu(user, &intf->users, link) {
5440			if (user->handler->ipmi_panic_handler)
5441				user->handler->ipmi_panic_handler(
5442					user->handler_data);
5443		}
5444
5445		send_panic_events(intf, ptr);
5446	}
5447
5448	return NOTIFY_DONE;
5449}
5450
5451/* Must be called with ipmi_interfaces_mutex held. */
5452static int ipmi_register_driver(void)
5453{
5454	int rv;
5455
5456	if (drvregistered)
5457		return 0;
5458
5459	rv = driver_register(&ipmidriver.driver);
5460	if (rv)
5461		pr_err("Could not register IPMI driver\n");
5462	else
5463		drvregistered = true;
5464	return rv;
5465}
5466
5467static struct notifier_block panic_block = {
5468	.notifier_call	= panic_event,
5469	.next		= NULL,
5470	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
5471};
5472
5473static int ipmi_init_msghandler(void)
5474{
5475	int rv;
5476
5477	mutex_lock(&ipmi_interfaces_mutex);
5478	rv = ipmi_register_driver();
5479	if (rv)
5480		goto out;
5481	if (initialized)
5482		goto out;
5483
5484	rv = init_srcu_struct(&ipmi_interfaces_srcu);
5485	if (rv)
5486		goto out;
5487
5488	remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
5489	if (!remove_work_wq) {
5490		pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
5491		rv = -ENOMEM;
5492		goto out_wq;
5493	}
5494
5495	timer_setup(&ipmi_timer, ipmi_timeout, 0);
5496	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5497
5498	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
5499
5500	initialized = true;
5501
5502out_wq:
5503	if (rv)
5504		cleanup_srcu_struct(&ipmi_interfaces_srcu);
5505out:
5506	mutex_unlock(&ipmi_interfaces_mutex);
5507	return rv;
5508}
5509
5510static int __init ipmi_init_msghandler_mod(void)
5511{
5512	int rv;
5513
5514	pr_info("version " IPMI_DRIVER_VERSION "\n");
5515
5516	mutex_lock(&ipmi_interfaces_mutex);
5517	rv = ipmi_register_driver();
5518	mutex_unlock(&ipmi_interfaces_mutex);
5519
5520	return rv;
5521}
5522
5523static void __exit cleanup_ipmi(void)
5524{
5525	int count;
5526
5527	if (initialized) {
5528		destroy_workqueue(remove_work_wq);
5529
5530		atomic_notifier_chain_unregister(&panic_notifier_list,
5531						 &panic_block);
5532
5533		/*
5534		 * This can't be called if any interfaces exist, so no worry
5535		 * about shutting down the interfaces.
5536		 */
5537
5538		/*
5539		 * Tell the timer to stop, then wait for it to stop.  This
5540		 * avoids problems with race conditions removing the timer
5541		 * here.
5542		 */
5543		atomic_set(&stop_operation, 1);
5544		del_timer_sync(&ipmi_timer);
5545
5546		initialized = false;
5547
5548		/* Check for buffer leaks. */
5549		count = atomic_read(&smi_msg_inuse_count);
5550		if (count != 0)
5551			pr_warn("SMI message count %d at exit\n", count);
5552		count = atomic_read(&recv_msg_inuse_count);
5553		if (count != 0)
5554			pr_warn("recv message count %d at exit\n", count);
5555
5556		cleanup_srcu_struct(&ipmi_interfaces_srcu);
5557	}
5558	if (drvregistered)
5559		driver_unregister(&ipmidriver.driver);
5560}
5561module_exit(cleanup_ipmi);
5562
5563module_init(ipmi_init_msghandler_mod);
5564MODULE_LICENSE("GPL");
5565MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
5566MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
5567MODULE_VERSION(IPMI_DRIVER_VERSION);
5568MODULE_SOFTDEP("post: ipmi_devintf");
5569