/*-
 * Copyright (c) 2006 IronPort Systems Inc. <ambrisko@ironport.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.3/sys/dev/ipmi/ipmi.c 287435 2015-09-03 16:43:35Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/poll.h>
#include <sys/rman.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/watchdog.h>

#ifdef LOCAL_MODULE
#include <ipmi.h>
#include <ipmivars.h>
#else
#include <sys/ipmi.h>
#include <dev/ipmi/ipmivars.h>
#endif

/*
 * Driver request structures are allocated on the stack via alloca() to
 * avoid calling malloc(), especially for the watchdog handler.
 * To avoid too much stack growth, a previously allocated structure can
 * be reused via IPMI_INIT_DRIVER_REQUEST(), but the caller should ensure
 * that there is adequate reply/request space in the original allocation.
 */
#define	IPMI_INIT_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)	\
	bzero((req), sizeof(struct ipmi_request));			\
	ipmi_init_request((req), NULL, 0, (addr), (cmd), (reqlen), (replylen))

#define	IPMI_ALLOC_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)	\
	(req) = __builtin_alloca(sizeof(struct ipmi_request) +		\
	    (reqlen) + (replylen));					\
	IPMI_INIT_DRIVER_REQUEST((req), (addr), (cmd), (reqlen),	\
	    (replylen))
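
/*
 * A typical driver-internal request (illustrative sketch, mirroring the
 * channel probe in ipmi_startup() below): allocate on the stack, fill in
 * the request bytes, and submit it synchronously.
 *
 *	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
 *	    IPMI_GET_CHANNEL_INFO, 1, 0);
 *	req->ir_request[0] = chan;
 *	ipmi_submit_driver_request(sc, req, 0);
 */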

#ifdef IPMB
static int ipmi_ipmb_checksum(u_char *, int);
static int ipmi_ipmb_send_message(device_t, u_char, u_char, u_char,
    u_char, u_char *, int);
#endif

static d_ioctl_t ipmi_ioctl;
static d_poll_t ipmi_poll;
static d_open_t ipmi_open;
static void ipmi_dtor(void *arg);

int ipmi_attached = 0;

static int on = 1;
static SYSCTL_NODE(_hw, OID_AUTO, ipmi, CTLFLAG_RD, 0,
    "IPMI driver parameters");
SYSCTL_INT(_hw_ipmi, OID_AUTO, on, CTLFLAG_RW,
	&on, 0, "");

static struct cdevsw ipmi_cdevsw = {
	.d_version =    D_VERSION,
	.d_open =	ipmi_open,
	.d_ioctl =	ipmi_ioctl,
	.d_poll =	ipmi_poll,
	.d_name =	"ipmi",
};

static MALLOC_DEFINE(M_IPMI, "ipmi", "ipmi");

static int
ipmi_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
{
	struct ipmi_device *dev;
	struct ipmi_softc *sc;
	int error;

	if (!on)
		return (ENOENT);

	/* Initialize the per file descriptor data. */
	dev = malloc(sizeof(struct ipmi_device), M_IPMI, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(dev, ipmi_dtor);
	if (error) {
		free(dev, M_IPMI);
		return (error);
	}

	sc = cdev->si_drv1;
	TAILQ_INIT(&dev->ipmi_completed_requests);
	dev->ipmi_address = IPMI_BMC_SLAVE_ADDR;
	dev->ipmi_lun = IPMI_BMC_SMS_LUN;
	dev->ipmi_softc = sc;
	IPMI_LOCK(sc);
	sc->ipmi_opened++;
	IPMI_UNLOCK(sc);

	return (0);
}

static int
ipmi_poll(struct cdev *cdev, int poll_events, struct thread *td)
{
	struct ipmi_device *dev;
	struct ipmi_softc *sc;
	int revents = 0;

	if (devfs_get_cdevpriv((void **)&dev))
		return (0);

	sc = cdev->si_drv1;
	IPMI_LOCK(sc);
	if (poll_events & (POLLIN | POLLRDNORM)) {
		if (!TAILQ_EMPTY(&dev->ipmi_completed_requests))
		    revents |= poll_events & (POLLIN | POLLRDNORM);
		if (dev->ipmi_requests == 0)
		    revents |= POLLERR;
	}

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM))
			selrecord(td, &dev->ipmi_select);
	}
	IPMI_UNLOCK(sc);

	return (revents);
}

static void
ipmi_purge_completed_requests(struct ipmi_device *dev)
{
	struct ipmi_request *req;

	while (!TAILQ_EMPTY(&dev->ipmi_completed_requests)) {
		req = TAILQ_FIRST(&dev->ipmi_completed_requests);
		TAILQ_REMOVE(&dev->ipmi_completed_requests, req, ir_link);
		dev->ipmi_requests--;
		ipmi_free_request(req);
	}
}

static void
ipmi_dtor(void *arg)
{
	struct ipmi_request *req, *nreq;
	struct ipmi_device *dev;
	struct ipmi_softc *sc;

	dev = arg;
	sc = dev->ipmi_softc;

	IPMI_LOCK(sc);
	if (dev->ipmi_requests) {
		/* Throw away any pending requests for this device. */
		TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests, ir_link,
		    nreq) {
			if (req->ir_owner == dev) {
				TAILQ_REMOVE(&sc->ipmi_pending_requests, req,
				    ir_link);
				dev->ipmi_requests--;
				ipmi_free_request(req);
			}
		}

		/* Throw away any pending completed requests for this device. */
		ipmi_purge_completed_requests(dev);

		/*
		 * If we still have outstanding requests, they must be stuck
		 * in an interface driver, so wait for those to drain.
		 */
		dev->ipmi_closing = 1;
		while (dev->ipmi_requests > 0) {
			msleep(&dev->ipmi_requests, &sc->ipmi_requests_lock,
			    PWAIT, "ipmidrain", 0);
			ipmi_purge_completed_requests(dev);
		}
	}
	sc->ipmi_opened--;
	IPMI_UNLOCK(sc);

	/* Cleanup. */
	free(dev, M_IPMI);
}

#ifdef IPMB
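/*
 * IPMB checksums are two's-complement: the sum of the covered bytes
 * plus the checksum byte must equal zero modulo 256.
 */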
static int
ipmi_ipmb_checksum(u_char *data, int len)
{
	u_char sum = 0;

	for (; len; len--) {
		sum += *data++;
	}
	return (-sum);
}

/* XXX: Needs work */
static int
ipmi_ipmb_send_message(device_t dev, u_char channel, u_char netfn,
    u_char command, u_char seq, u_char *data, int data_len)
{
	struct ipmi_softc *sc = device_get_softc(dev);
	struct ipmi_request *req;
	u_char slave_addr = 0x52;
	int error;

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_SEND_MSG, data_len + 8, 0);
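	/*
	 * Send Message payload: channel number, then the embedded IPMB
	 * frame: rsSA, netFn/rsLUN, checksum 1, rqSA, rqSeq/rqLUN, cmd,
	 * the data bytes, and a trailing checksum 2.
	 */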
	req->ir_request[0] = channel;
	req->ir_request[1] = slave_addr;
	req->ir_request[2] = IPMI_ADDR(netfn, 0);
	req->ir_request[3] = ipmi_ipmb_checksum(&req->ir_request[1], 2);
	req->ir_request[4] = sc->ipmi_address;
	req->ir_request[5] = IPMI_ADDR(seq, sc->ipmi_lun);
	req->ir_request[6] = command;

	bcopy(data, &req->ir_request[7], data_len);
	req->ir_request[data_len + 7] = ipmi_ipmb_checksum(&req->ir_request[4],
	    data_len + 3);

	ipmi_submit_driver_request(sc, req, 0);
	error = req->ir_error;

	return (error);
}

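/*
 * Handle an attention condition from the BMC: read the message flags
 * and, if a message is pending, fetch and discard it.
 */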
static int
ipmi_handle_attn(struct ipmi_softc *sc)
{
	struct ipmi_request *req;
	int error;

	device_printf(sc->ipmi_dev, "BMC has a message\n");
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_GET_MSG_FLAGS, 0, 1);

	ipmi_submit_driver_request(sc, req, 0);

	if (req->ir_error == 0 && req->ir_compcode == 0) {
		if (req->ir_reply[0] & IPMI_MSG_BUFFER_FULL) {
			device_printf(sc->ipmi_dev, "message buffer full");
		}
		if (req->ir_reply[0] & IPMI_WDT_PRE_TIMEOUT) {
			device_printf(sc->ipmi_dev,
			    "watchdog about to go off");
		}
		if (req->ir_reply[0] & IPMI_MSG_AVAILABLE) {
			IPMI_ALLOC_DRIVER_REQUEST(req,
			    IPMI_ADDR(IPMI_APP_REQUEST, 0), IPMI_GET_MSG, 0,
			    16);

			ipmi_submit_driver_request(sc, req, 0);

			device_printf(sc->ipmi_dev, "throw out message ");
			dump_buf(req->ir_reply, 16);
		}
	}
	error = req->ir_error;

	return (error);
}
#endif

#ifdef IPMICTL_SEND_COMMAND_32
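/*
 * Convert user pointers embedded in the 32-bit compat ioctl structures
 * to and from native pointers.
 */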
#define	PTRIN(p)	((void *)(uintptr_t)(p))
#define	PTROUT(p)	((uintptr_t)(p))
#endif

static int
ipmi_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
    int flags, struct thread *td)
{
	struct ipmi_softc *sc;
	struct ipmi_device *dev;
	struct ipmi_request *kreq;
	struct ipmi_req *req = (struct ipmi_req *)data;
	struct ipmi_recv *recv = (struct ipmi_recv *)data;
	struct ipmi_addr addr;
#ifdef IPMICTL_SEND_COMMAND_32
	struct ipmi_req32 *req32 = (struct ipmi_req32 *)data;
	struct ipmi_recv32 *recv32 = (struct ipmi_recv32 *)data;
	union {
		struct ipmi_req req;
		struct ipmi_recv recv;
	} thunk32;
#endif
	int error, len;

	error = devfs_get_cdevpriv((void **)&dev);
	if (error)
		return (error);

	sc = cdev->si_drv1;

#ifdef IPMICTL_SEND_COMMAND_32
	/* Convert 32-bit structures to native. */
	switch (cmd) {
	case IPMICTL_SEND_COMMAND_32:
		req = &thunk32.req;
		req->addr = PTRIN(req32->addr);
		req->addr_len = req32->addr_len;
		req->msgid = req32->msgid;
		req->msg.netfn = req32->msg.netfn;
		req->msg.cmd = req32->msg.cmd;
		req->msg.data_len = req32->msg.data_len;
		req->msg.data = PTRIN(req32->msg.data);
		break;
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
		recv = &thunk32.recv;
		recv->addr = PTRIN(recv32->addr);
		recv->addr_len = recv32->addr_len;
		recv->msg.data_len = recv32->msg.data_len;
		recv->msg.data = PTRIN(recv32->msg.data);
		break;
	}
#endif

	switch (cmd) {
#ifdef IPMICTL_SEND_COMMAND_32
	case IPMICTL_SEND_COMMAND_32:
#endif
	case IPMICTL_SEND_COMMAND:
		/*
		 * XXX: Need to add proper handling of this.
		 */
		error = copyin(req->addr, &addr, sizeof(addr));
		if (error)
			return (error);

		IPMI_LOCK(sc);
		/* Clear out any previously completed requests for this device. */
		/* XXX: This seems odd. */
		while ((kreq = TAILQ_FIRST(&dev->ipmi_completed_requests))) {
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			ipmi_free_request(kreq);
		}
		IPMI_UNLOCK(sc);

		kreq = ipmi_alloc_request(dev, req->msgid,
		    IPMI_ADDR(req->msg.netfn, 0), req->msg.cmd,
		    req->msg.data_len, IPMI_MAX_RX);
		error = copyin(req->msg.data, kreq->ir_request,
		    req->msg.data_len);
		if (error) {
			ipmi_free_request(kreq);
			return (error);
		}
		IPMI_LOCK(sc);
		dev->ipmi_requests++;
		error = sc->ipmi_enqueue_request(sc, kreq);
		IPMI_UNLOCK(sc);
		if (error)
			return (error);
		break;
#ifdef IPMICTL_SEND_COMMAND_32
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
#endif
	case IPMICTL_RECEIVE_MSG_TRUNC:
	case IPMICTL_RECEIVE_MSG:
		error = copyin(recv->addr, &addr, sizeof(addr));
		if (error)
			return (error);

		IPMI_LOCK(sc);
		kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
		if (kreq == NULL) {
			IPMI_UNLOCK(sc);
			return (EAGAIN);
		}
		addr.channel = IPMI_BMC_CHANNEL;
		/* XXX */
		recv->recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv->msgid = kreq->ir_msgid;
		recv->msg.netfn = IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
		recv->msg.cmd = kreq->ir_command;
		error = kreq->ir_error;
		if (error) {
			TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
			    ir_link);
			dev->ipmi_requests--;
			IPMI_UNLOCK(sc);
			ipmi_free_request(kreq);
			return (error);
		}
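		/*
		 * The message returned to userland is the completion
		 * code followed by the reply payload, hence the extra
		 * byte of length.
		 */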
		len = kreq->ir_replylen + 1;
		if (recv->msg.data_len < len &&
		    (cmd == IPMICTL_RECEIVE_MSG
#ifdef IPMICTL_RECEIVE_MSG_32
		     || cmd == IPMICTL_RECEIVE_MSG_32
#endif
		    )) {
			IPMI_UNLOCK(sc);
			return (EMSGSIZE);
		}
		TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
		dev->ipmi_requests--;
		IPMI_UNLOCK(sc);
		len = min(recv->msg.data_len, len);
		recv->msg.data_len = len;
		error = copyout(&addr, recv->addr, sizeof(addr));
		if (error == 0)
			error = copyout(&kreq->ir_compcode, recv->msg.data, 1);
		if (error == 0)
			error = copyout(kreq->ir_reply, recv->msg.data + 1,
			    len - 1);
		ipmi_free_request(kreq);
		if (error)
			return (error);
		break;
	case IPMICTL_SET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		dev->ipmi_address = *(int*)data;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_ADDRESS_CMD:
		IPMI_LOCK(sc);
		*(int*)data = dev->ipmi_address;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		dev->ipmi_lun = *(int*)data & 0x3;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_GET_MY_LUN_CMD:
		IPMI_LOCK(sc);
		*(int*)data = dev->ipmi_lun;
		IPMI_UNLOCK(sc);
		break;
	case IPMICTL_SET_GETS_EVENTS_CMD:
		/*
		device_printf(sc->ipmi_dev,
		    "IPMICTL_SET_GETS_EVENTS_CMD NA\n");
		*/
		break;
	case IPMICTL_REGISTER_FOR_CMD:
	case IPMICTL_UNREGISTER_FOR_CMD:
		return (EOPNOTSUPP);
	default:
		device_printf(sc->ipmi_dev, "Unknown IOCTL %lX\n", cmd);
		return (ENOIOCTL);
	}

#ifdef IPMICTL_SEND_COMMAND_32
	/* Update changed fields in 32-bit structures. */
	switch (cmd) {
	case IPMICTL_RECEIVE_MSG_TRUNC_32:
	case IPMICTL_RECEIVE_MSG_32:
		recv32->recv_type = recv->recv_type;
		recv32->msgid = recv->msgid;
		recv32->msg.netfn = recv->msg.netfn;
		recv32->msg.cmd = recv->msg.cmd;
		recv32->msg.data_len = recv->msg.data_len;
		break;
	}
#endif
	return (0);
}

/*
 * Request management.
 */

static __inline void
ipmi_init_request(struct ipmi_request *req, struct ipmi_device *dev, long msgid,
    uint8_t addr, uint8_t command, size_t requestlen, size_t replylen)
{

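	/*
	 * The request and reply buffers, when present, are carved out of
	 * the same allocation, immediately following the structure.
	 */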
	req->ir_owner = dev;
	req->ir_msgid = msgid;
	req->ir_addr = addr;
	req->ir_command = command;
	if (requestlen) {
		req->ir_request = (char *)&req[1];
		req->ir_requestlen = requestlen;
	}
	if (replylen) {
		req->ir_reply = (char *)&req[1] + requestlen;
		req->ir_replybuflen = replylen;
	}
}

/* Allocate a new request with request and reply buffers. */
struct ipmi_request *
ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
    uint8_t command, size_t requestlen, size_t replylen)
{
	struct ipmi_request *req;

	req = malloc(sizeof(struct ipmi_request) + requestlen + replylen,
	    M_IPMI, M_WAITOK | M_ZERO);
	ipmi_init_request(req, dev, msgid, addr, command, requestlen, replylen);
	return (req);
}

/* Free a request no longer in use. */
void
ipmi_free_request(struct ipmi_request *req)
{

	free(req, M_IPMI);
}

/* Store a processed request on the appropriate completion queue. */
void
ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
{
	struct ipmi_device *dev;

	IPMI_LOCK_ASSERT(sc);

	/*
	 * Anonymous requests (from inside the driver) always have a
	 * waiter that we awaken.
	 */
	if (req->ir_owner == NULL)
		wakeup(req);
	else {
		dev = req->ir_owner;
		TAILQ_INSERT_TAIL(&dev->ipmi_completed_requests, req, ir_link);
		selwakeup(&dev->ipmi_select);
		if (dev->ipmi_closing)
			wakeup(&dev->ipmi_requests);
	}
}

/* Perform an internal driver request. */
int
ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req,
    int timo)
{

	return (sc->ipmi_driver_request(sc, req, timo));
}

566/*
567 * Helper routine for polled system interfaces that use
568 * ipmi_polled_enqueue_request() to queue requests.  This request
569 * waits until there is a pending request and then returns the first
570 * request.  If the driver is shutting down, it returns NULL.
571 */
struct ipmi_request *
ipmi_dequeue_request(struct ipmi_softc *sc)
{
	struct ipmi_request *req;

	IPMI_LOCK_ASSERT(sc);

	while (!sc->ipmi_detaching && TAILQ_EMPTY(&sc->ipmi_pending_requests))
		cv_wait(&sc->ipmi_request_added, &sc->ipmi_requests_lock);
	if (sc->ipmi_detaching)
		return (NULL);

	req = TAILQ_FIRST(&sc->ipmi_pending_requests);
	TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
	return (req);
}

/* Default implementation of ipmi_enqueue_request() for polled interfaces. */
int
ipmi_polled_enqueue_request(struct ipmi_softc *sc, struct ipmi_request *req)
{

	IPMI_LOCK_ASSERT(sc);

	TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests, req, ir_link);
	cv_signal(&sc->ipmi_request_added);
	return (0);
}

/*
 * Watchdog event handler.
 */

static int
ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
{
	struct ipmi_request *req;
	int error;

	if (sec > 0xffff / 10)
		return (EINVAL);

	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_SET_WDOG, 6, 0);

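	/*
	 * The watchdog countdown is a 16-bit little-endian count of
	 * 100 ms ticks, hence the sec * 10 scaling here and the
	 * 0xffff / 10 cap above.
	 */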
	if (sec) {
		req->ir_request[0] = IPMI_SET_WD_TIMER_DONT_STOP
		    | IPMI_SET_WD_TIMER_SMS_OS;
		req->ir_request[1] = IPMI_SET_WD_ACTION_RESET;
		req->ir_request[2] = 0;
		req->ir_request[3] = 0;	/* Timer use */
		req->ir_request[4] = (sec * 10) & 0xff;
		req->ir_request[5] = (sec * 10) >> 8;
	} else {
		req->ir_request[0] = IPMI_SET_WD_TIMER_SMS_OS;
		req->ir_request[1] = 0;
		req->ir_request[2] = 0;
		req->ir_request[3] = 0;	/* Timer use */
		req->ir_request[4] = 0;
		req->ir_request[5] = 0;
	}

	error = ipmi_submit_driver_request(sc, req, 0);
	if (error)
		device_printf(sc->ipmi_dev, "Failed to set watchdog\n");
	else if (sec) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_RESET_WDOG, 0, 0);

		error = ipmi_submit_driver_request(sc, req, 0);
		if (error)
			device_printf(sc->ipmi_dev,
			    "Failed to reset watchdog\n");
	}

	return (error);
	/*
	dump_watchdog(sc);
	*/
}

static void
ipmi_wd_event(void *arg, unsigned int cmd, int *error)
{
	struct ipmi_softc *sc = arg;
	unsigned int timeout;
	int e;

	if (dumping)
		return;

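	/*
	 * watchdog(9) encodes the timeout in the low bits of cmd as a
	 * power of two in nanoseconds; convert it to whole seconds for
	 * the BMC, rounding very short timeouts up to one second.
	 */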
	cmd &= WD_INTERVAL;
	if (cmd > 0 && cmd <= 63) {
		timeout = ((uint64_t)1 << cmd) / 1000000000;
		if (timeout == 0)
			timeout = 1;
		e = ipmi_set_watchdog(sc, timeout);
		if (e == 0) {
			*error = 0;
			sc->ipmi_watchdog_active = 1;
		} else
			(void)ipmi_set_watchdog(sc, 0);
	} else if (atomic_readandclear_int(&sc->ipmi_watchdog_active) != 0) {
		e = ipmi_set_watchdog(sc, 0);
		if (e != 0 && cmd == 0)
			*error = EOPNOTSUPP;
	}
}

static void
ipmi_startup(void *arg)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;
	device_t dev;
	int error, i;

	config_intrhook_disestablish(&sc->ipmi_ich);
	dev = sc->ipmi_dev;

	/* Initialize interface-independent state. */
	mtx_init(&sc->ipmi_requests_lock, "ipmi requests", NULL, MTX_DEF);
	mtx_init(&sc->ipmi_io_lock, "ipmi io", NULL, MTX_DEF);
	cv_init(&sc->ipmi_request_added, "ipmireq");
	TAILQ_INIT(&sc->ipmi_pending_requests);

	/* Initialize interface-dependent state. */
	error = sc->ipmi_startup(sc);
	if (error) {
		device_printf(dev, "Failed to initialize interface: %d\n",
		    error);
		return;
	}

	/* Send a GET_DEVICE_ID request. */
	IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_GET_DEVICE_ID, 0, 15);

	error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
	if (error == EWOULDBLOCK) {
		device_printf(dev, "Timed out waiting for GET_DEVICE_ID\n");
		return;
	} else if (error) {
		device_printf(dev, "Failed GET_DEVICE_ID: %d\n", error);
		return;
	} else if (req->ir_compcode != 0) {
		device_printf(dev,
		    "Bad completion code for GET_DEVICE_ID: %d\n",
		    req->ir_compcode);
		return;
	} else if (req->ir_replylen < 5) {
		device_printf(dev, "Short reply for GET_DEVICE_ID: %d\n",
		    req->ir_replylen);
		return;
	}

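	/*
	 * Reply layout: byte 1 holds the device revision, byte 2 the
	 * firmware major revision, byte 3 the BCD-encoded firmware minor
	 * revision, and byte 4 the BCD-encoded IPMI version.
	 */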
	device_printf(dev, "IPMI device rev. %d, firmware rev. %d.%d%d, "
	    "version %d.%d\n",
	     req->ir_reply[1] & 0x0f,
	     req->ir_reply[2] & 0x7f, req->ir_reply[3] >> 4, req->ir_reply[3] & 0x0f,
	     req->ir_reply[4] & 0x0f, req->ir_reply[4] >> 4);

	IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_CLEAR_FLAGS, 1, 0);

	ipmi_submit_driver_request(sc, req, 0);

	/* XXX: Magic numbers */
	if (req->ir_compcode == 0xc0) {
		device_printf(dev, "Clear flags is busy\n");
	}
	if (req->ir_compcode == 0xc1) {
		device_printf(dev, "Clear flags illegal\n");
	}

	for (i = 0; i < 8; i++) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_CHANNEL_INFO, 1, 0);
		req->ir_request[0] = i;

		ipmi_submit_driver_request(sc, req, 0);

		if (req->ir_compcode != 0)
			break;
	}
	device_printf(dev, "Number of channels %d\n", i);

	/*
	 * Probe for watchdog, but only for backends which support
	 * polled driver requests.
	 */
	if (sc->ipmi_driver_requests_polled) {
		IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_WDOG, 0, 0);

		ipmi_submit_driver_request(sc, req, 0);

		if (req->ir_compcode == 0x00) {
			device_printf(dev, "Attached watchdog\n");
			/* register the watchdog event handler */
			sc->ipmi_watchdog_tag = EVENTHANDLER_REGISTER(
			    watchdog_list, ipmi_wd_event, sc, 0);
		}
	}

	sc->ipmi_cdev = make_dev(&ipmi_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_OPERATOR, 0660, "ipmi%d", device_get_unit(dev));
	if (sc->ipmi_cdev == NULL) {
		device_printf(dev, "Failed to create cdev\n");
		return;
	}
	sc->ipmi_cdev->si_drv1 = sc;
}

int
ipmi_attach(device_t dev)
{
	struct ipmi_softc *sc = device_get_softc(dev);
	int error;

	if (sc->ipmi_irq_res != NULL && sc->ipmi_intr != NULL) {
		error = bus_setup_intr(dev, sc->ipmi_irq_res, INTR_TYPE_MISC,
		    NULL, sc->ipmi_intr, sc, &sc->ipmi_irq);
		if (error) {
			device_printf(dev, "can't set up interrupt\n");
			return (error);
		}
	}

	bzero(&sc->ipmi_ich, sizeof(struct intr_config_hook));
	sc->ipmi_ich.ich_func = ipmi_startup;
	sc->ipmi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->ipmi_ich) != 0) {
		device_printf(dev, "can't establish configuration hook\n");
		return (ENOMEM);
	}

	ipmi_attached = 1;
	return (0);
}

int
ipmi_detach(device_t dev)
{
	struct ipmi_softc *sc;

	sc = device_get_softc(dev);

	/* Fail if there are any open handles. */
	IPMI_LOCK(sc);
	if (sc->ipmi_opened) {
		IPMI_UNLOCK(sc);
		return (EBUSY);
	}
	IPMI_UNLOCK(sc);
	if (sc->ipmi_cdev)
		destroy_dev(sc->ipmi_cdev);

	/* Detach from watchdog handling and turn off watchdog. */
	if (sc->ipmi_watchdog_tag) {
		EVENTHANDLER_DEREGISTER(watchdog_list, sc->ipmi_watchdog_tag);
		ipmi_set_watchdog(sc, 0);
	}

	/* XXX: should use shutdown callout I think. */
	/* If the backend uses a kthread, shut it down. */
	IPMI_LOCK(sc);
	sc->ipmi_detaching = 1;
	if (sc->ipmi_kthread) {
		cv_broadcast(&sc->ipmi_request_added);
		msleep(sc->ipmi_kthread, &sc->ipmi_requests_lock, 0,
		    "ipmi_wait", 0);
	}
	IPMI_UNLOCK(sc);
	if (sc->ipmi_irq)
		bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);

	ipmi_release_resources(dev);
	mtx_destroy(&sc->ipmi_io_lock);
	mtx_destroy(&sc->ipmi_requests_lock);
	return (0);
}

void
ipmi_release_resources(device_t dev)
{
	struct ipmi_softc *sc;
	int i;

	sc = device_get_softc(dev);
	if (sc->ipmi_irq)
		bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
	if (sc->ipmi_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, sc->ipmi_irq_rid,
		    sc->ipmi_irq_res);
	for (i = 0; i < MAX_RES; i++)
		if (sc->ipmi_io_res[i])
			bus_release_resource(dev, sc->ipmi_io_type,
			    sc->ipmi_io_rid + i, sc->ipmi_io_res[i]);
}

devclass_t ipmi_devclass;

/* XXX: Why? */
static void
ipmi_unload(void *arg)
{
	device_t *	devs;
	int		count;
	int		i;

	if (devclass_get_devices(ipmi_devclass, &devs, &count) != 0)
		return;
	for (i = 0; i < count; i++)
		device_delete_child(device_get_parent(devs[i]), devs[i]);
	free(devs, M_TEMP);
}
SYSUNINIT(ipmi_unload, SI_SUB_DRIVERS, SI_ORDER_FIRST, ipmi_unload, NULL);

#ifdef IPMI_DEBUG
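/* Hex/ASCII dump of a buffer, 16 bytes per line, capped at 256 bytes. */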
static void
dump_buf(u_char *data, int len)
{
	char buf[20];
	char line[1024];
	char temp[30];
	int count = 0;
	int i=0;

	printf("Address %p len %d\n", data, len);
	if (len > 256)
		len = 256;
	line[0] = '\000';
	for (; len > 0; len--, data++) {
		sprintf(temp, "%02x ", *data);
		strcat(line, temp);
		if (*data >= ' ' && *data <= '~')
			buf[count] = *data;
		else if (*data >= 'A' && *data <= 'Z')
			buf[count] = *data;
		else
			buf[count] = '.';
		if (++count == 16) {
			buf[count] = '\000';
			count = 0;
			printf("  %3x  %s %s\n", i, line, buf);
			i+=16;
			line[0] = '\000';
		}
	}
	buf[count] = '\000';

	for (; count != 16; count++) {
		strcat(line, "   ");
	}
	printf("  %3x  %s %s\n", i, line, buf);
}
#endif
