ipmi.c (revision 182322) vs. ipmi.c (revision 184949)
1/*-
2 * Copyright (c) 2006 IronPort Systems Inc. <ambrisko@ironport.com>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/dev/ipmi/ipmi.c 182322 2008-08-28 02:13:53Z jhb $");
28__FBSDID("$FreeBSD: head/sys/dev/ipmi/ipmi.c 184949 2008-11-14 01:53:10Z obrien $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/bus.h>
33#include <sys/condvar.h>
34#include <sys/conf.h>
35#include <sys/kernel.h>
36#include <sys/malloc.h>
37#include <sys/module.h>
38#include <sys/poll.h>
39#include <sys/rman.h>
40#include <sys/selinfo.h>
41#include <sys/sysctl.h>
42#include <sys/watchdog.h>
43
44#ifdef LOCAL_MODULE
45#include <ipmi.h>
46#include <ipmivars.h>
47#else
48#include <sys/ipmi.h>
49#include <dev/ipmi/ipmivars.h>
50#endif
51
52#ifdef IPMB
 53static int ipmi_ipmb_checksum(u_char *, int);
 54static int ipmi_ipmb_send_message(device_t, u_char, u_char, u_char,
 55		    u_char, u_char *, int);
56#endif
57
58static d_ioctl_t ipmi_ioctl;
59static d_poll_t ipmi_poll;
60static d_open_t ipmi_open;
61static void ipmi_dtor(void *arg);
62
63int ipmi_attached = 0;
64
65static int on = 1;
66SYSCTL_NODE(_hw, OID_AUTO, ipmi, CTLFLAG_RD, 0, "IPMI driver parameters");
67SYSCTL_INT(_hw_ipmi, OID_AUTO, on, CTLFLAG_RW,
68 &on, 0, "");
69
70static struct cdevsw ipmi_cdevsw = {
71 .d_version = D_VERSION,
72 .d_open = ipmi_open,
73 .d_ioctl = ipmi_ioctl,
74 .d_poll = ipmi_poll,
75 .d_name = "ipmi",
76};
77
78MALLOC_DEFINE(M_IPMI, "ipmi", "ipmi");
79
80static int
81ipmi_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
82{
83 struct ipmi_device *dev;
84 struct ipmi_softc *sc;
85 int error;
86
87 if (!on)
88 return (ENOENT);
89
90 /* Initialize the per file descriptor data. */
91 dev = malloc(sizeof(struct ipmi_device), M_IPMI, M_WAITOK | M_ZERO);
92 error = devfs_set_cdevpriv(dev, ipmi_dtor);
93 if (error) {
94 free(dev, M_IPMI);
95 return (error);
96 }
97
98 sc = cdev->si_drv1;
99 TAILQ_INIT(&dev->ipmi_completed_requests);
100 dev->ipmi_address = IPMI_BMC_SLAVE_ADDR;
101 dev->ipmi_lun = IPMI_BMC_SMS_LUN;
102 dev->ipmi_softc = sc;
103 IPMI_LOCK(sc);
104 sc->ipmi_opened++;
105 IPMI_UNLOCK(sc);
106
107 return (0);
108}
109
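/*
 * Report the device readable once a completed request is queued; flag
 * POLLERR if the caller polls with no requests outstanding.
 */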
110static int
111ipmi_poll(struct cdev *cdev, int poll_events, struct thread *td)
112{
113 struct ipmi_device *dev;
114 struct ipmi_softc *sc;
115 int revents = 0;
116
117 if (devfs_get_cdevpriv((void **)&dev))
118 return (0);
119
120 sc = cdev->si_drv1;
121 IPMI_LOCK(sc);
122 if (poll_events & (POLLIN | POLLRDNORM)) {
123 if (!TAILQ_EMPTY(&dev->ipmi_completed_requests))
124 revents |= poll_events & (POLLIN | POLLRDNORM);
125 if (dev->ipmi_requests == 0)
126 revents |= POLLERR;
127 }
128
129 if (revents == 0) {
130 if (poll_events & (POLLIN | POLLRDNORM))
131 selrecord(td, &dev->ipmi_select);
132 }
133 IPMI_UNLOCK(sc);
134
135 return (revents);
136}
137
138static void
139ipmi_purge_completed_requests(struct ipmi_device *dev)
140{
141 struct ipmi_request *req;
142
143 while (!TAILQ_EMPTY(&dev->ipmi_completed_requests)) {
144 req = TAILQ_FIRST(&dev->ipmi_completed_requests);
145 TAILQ_REMOVE(&dev->ipmi_completed_requests, req, ir_link);
146 dev->ipmi_requests--;
147 ipmi_free_request(req);
148 }
149}
150
151static void
152ipmi_dtor(void *arg)
153{
154 struct ipmi_request *req, *nreq;
155 struct ipmi_device *dev;
156 struct ipmi_softc *sc;
157
158 dev = arg;
159 sc = dev->ipmi_softc;
160
161 IPMI_LOCK(sc);
162 if (dev->ipmi_requests) {
163 /* Throw away any pending requests for this device. */
164 TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests, ir_link,
165 nreq) {
166 if (req->ir_owner == dev) {
167 TAILQ_REMOVE(&sc->ipmi_pending_requests, req,
168 ir_link);
169 dev->ipmi_requests--;
170 ipmi_free_request(req);
171 }
172 }
173
174 /* Throw away any pending completed requests for this device. */
175 ipmi_purge_completed_requests(dev);
176
177 /*
178 * If we still have outstanding requests, they must be stuck
179 * in an interface driver, so wait for those to drain.
180 */
181 dev->ipmi_closing = 1;
182 while (dev->ipmi_requests > 0) {
183 msleep(&dev->ipmi_requests, &sc->ipmi_lock, PWAIT,
184 "ipmidrain", 0);
185 ipmi_purge_completed_requests(dev);
186 }
187 }
188 sc->ipmi_opened--;
189 IPMI_UNLOCK(sc);
190
191 /* Cleanup. */
192 free(dev, M_IPMI);
193}
194
195#ifdef IPMB
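/*
 * IPMB checksum: two's complement of the byte sum, so that summing the
 * covered bytes together with the checksum yields zero modulo 256.
 */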
196static int
197ipmi_ipmb_checksum(u_char *data, int len)
198{
199 u_char sum = 0;
200
201 for (; len; len--) {
202 sum += *data++;
203 }
204 return (-sum);
205}
206
207/* XXX: Needs work */
208static int
209ipmi_ipmb_send_message(device_t dev, u_char channel, u_char netfn,
210 u_char command, u_char seq, u_char *data, int data_len)
211{
212 struct ipmi_softc *sc = device_get_softc(dev);
213 struct ipmi_request *req;
214 u_char slave_addr = 0x52;
215 int error;
216
217 req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
218 IPMI_SEND_MSG, data_len + 8, 0);
219 req->ir_request[0] = channel;
220 req->ir_request[1] = slave_addr;
221 req->ir_request[2] = IPMI_ADDR(netfn, 0);
222 req->ir_request[3] = ipmi_ipmb_checksum(&req->ir_request[1], 2);
223 req->ir_request[4] = sc->ipmi_address;
224 req->ir_request[5] = IPMI_ADDR(seq, sc->ipmi_lun);
225 req->ir_request[6] = command;
226
227 bcopy(data, &req->ir_request[7], data_len);
 228	req->ir_request[data_len + 7] = ipmi_ipmb_checksum(&req->ir_request[4],
 229	    data_len + 3);
230
 231	ipmi_submit_driver_request(sc, req, 0);
232 error = req->ir_error;
233 ipmi_free_request(req);
234
235 return (error);
236}
237
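/*
 * Called when the interface reports a BMC attention: read the message
 * flags and report or drain whatever the BMC has queued.
 */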
238static int
239ipmi_handle_attn(struct ipmi_softc *sc)
240{
241 struct ipmi_request *req;
242 int error;
243
244 device_printf(sc->ipmi_dev, "BMC has a message\n");
245 req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
246 IPMI_GET_MSG_FLAGS, 0, 1);
247
 248	ipmi_submit_driver_request(sc, req, 0);
249
250 if (req->ir_error == 0 && req->ir_compcode == 0) {
251 if (req->ir_reply[0] & IPMI_MSG_BUFFER_FULL) {
252 device_printf(sc->ipmi_dev, "message buffer full");
253 }
254 if (req->ir_reply[0] & IPMI_WDT_PRE_TIMEOUT) {
255 device_printf(sc->ipmi_dev,
256 "watchdog about to go off");
257 }
258 if (req->ir_reply[0] & IPMI_MSG_AVAILABLE) {
259 ipmi_free_request(req);
260
261 req = ipmi_alloc_driver_request(
262 IPMI_ADDR(IPMI_APP_REQUEST, 0), IPMI_GET_MSG, 0,
263 16);
264
265 device_printf(sc->ipmi_dev, "throw out message ");
 266			dump_buf(req->ir_reply, 16);
267 }
268 }
269 error = req->ir_error;
270 ipmi_free_request(req);
271
272 return (error);
273}
274#endif
275
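/*
 * Helpers for the 32-bit compat ioctls: convert user pointers that were
 * passed as 32-bit integers to and from kernel pointers.
 */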
276#ifdef IPMICTL_SEND_COMMAND_32
277#define PTRIN(p) ((void *)(uintptr_t)(p))
278#define PTROUT(p) ((uintptr_t)(p))
279#endif
280
281static int
282ipmi_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
283 int flags, struct thread *td)
284{
285 struct ipmi_softc *sc;
286 struct ipmi_device *dev;
287 struct ipmi_request *kreq;
288 struct ipmi_req *req = (struct ipmi_req *)data;
289 struct ipmi_recv *recv = (struct ipmi_recv *)data;
290 struct ipmi_addr addr;
291#ifdef IPMICTL_SEND_COMMAND_32
292 struct ipmi_req32 *req32 = (struct ipmi_req32 *)data;
293 struct ipmi_recv32 *recv32 = (struct ipmi_recv32 *)data;
294 union {
295 struct ipmi_req req;
296 struct ipmi_recv recv;
297 } thunk32;
298#endif
299 int error, len;
300
301 error = devfs_get_cdevpriv((void **)&dev);
302 if (error)
303 return (error);
304
305 sc = cdev->si_drv1;
306
307#ifdef IPMICTL_SEND_COMMAND_32
308 /* Convert 32-bit structures to native. */
309 switch (cmd) {
310 case IPMICTL_SEND_COMMAND_32:
311 req = &thunk32.req;
312 req->addr = PTRIN(req32->addr);
313 req->addr_len = req32->addr_len;
314 req->msgid = req32->msgid;
315 req->msg.netfn = req32->msg.netfn;
316 req->msg.cmd = req32->msg.cmd;
317 req->msg.data_len = req32->msg.data_len;
318 req->msg.data = PTRIN(req32->msg.data);
319 break;
320 case IPMICTL_RECEIVE_MSG_TRUNC_32:
321 case IPMICTL_RECEIVE_MSG_32:
322 recv = &thunk32.recv;
323 recv->addr = PTRIN(recv32->addr);
324 recv->addr_len = recv32->addr_len;
325 recv->msg.data_len = recv32->msg.data_len;
326 recv->msg.data = PTRIN(recv32->msg.data);
327 break;
328 }
329#endif
330
331 switch (cmd) {
332#ifdef IPMICTL_SEND_COMMAND_32
333 case IPMICTL_SEND_COMMAND_32:
334#endif
335 case IPMICTL_SEND_COMMAND:
336 /*
337 * XXX: Need to add proper handling of this.
338 */
339 error = copyin(req->addr, &addr, sizeof(addr));
340 if (error)
341 return (error);
342
343 IPMI_LOCK(sc);
344 /* clear out old stuff in queue of stuff done */
345 /* XXX: This seems odd. */
346 while ((kreq = TAILQ_FIRST(&dev->ipmi_completed_requests))) {
347 TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
348 ir_link);
349 dev->ipmi_requests--;
350 ipmi_free_request(kreq);
351 }
352 IPMI_UNLOCK(sc);
353
354 kreq = ipmi_alloc_request(dev, req->msgid,
355 IPMI_ADDR(req->msg.netfn, 0), req->msg.cmd,
356 req->msg.data_len, IPMI_MAX_RX);
357 error = copyin(req->msg.data, kreq->ir_request,
358 req->msg.data_len);
359 if (error) {
360 ipmi_free_request(kreq);
361 return (error);
362 }
363 IPMI_LOCK(sc);
364 dev->ipmi_requests++;
365 error = sc->ipmi_enqueue_request(sc, kreq);
366 IPMI_UNLOCK(sc);
367 if (error)
368 return (error);
369 break;
370#ifdef IPMICTL_SEND_COMMAND_32
371 case IPMICTL_RECEIVE_MSG_TRUNC_32:
372 case IPMICTL_RECEIVE_MSG_32:
373#endif
374 case IPMICTL_RECEIVE_MSG_TRUNC:
375 case IPMICTL_RECEIVE_MSG:
376 error = copyin(recv->addr, &addr, sizeof(addr));
377 if (error)
378 return (error);
379
380 IPMI_LOCK(sc);
381 kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
382 if (kreq == NULL) {
383 IPMI_UNLOCK(sc);
384 return (EAGAIN);
385 }
386 addr.channel = IPMI_BMC_CHANNEL;
387 /* XXX */
388 recv->recv_type = IPMI_RESPONSE_RECV_TYPE;
389 recv->msgid = kreq->ir_msgid;
390 recv->msg.netfn = IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
391 recv->msg.cmd = kreq->ir_command;
392 error = kreq->ir_error;
393 if (error) {
394 TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
395 ir_link);
396 dev->ipmi_requests--;
397 IPMI_UNLOCK(sc);
398 ipmi_free_request(kreq);
399 return (error);
400 }
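		/*
		 * Reserve one extra byte so the completion code can be
		 * returned to userland as the first byte of the message data.
		 */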
401 len = kreq->ir_replylen + 1;
402 if (recv->msg.data_len < len &&
403 (cmd == IPMICTL_RECEIVE_MSG
404#ifdef IPMICTL_RECEIVE_MSG_32
405 || cmd == IPMICTL_RECEIVE_MSG
405 || cmd == IPMICTL_RECEIVE_MSG_32
406#endif
407 )) {
408 IPMI_UNLOCK(sc);
409 return (EMSGSIZE);
410 }
411 TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
412 dev->ipmi_requests--;
413 IPMI_UNLOCK(sc);
414 len = min(recv->msg.data_len, len);
415 recv->msg.data_len = len;
416 error = copyout(&addr, recv->addr,sizeof(addr));
417 if (error == 0)
418 error = copyout(&kreq->ir_compcode, recv->msg.data, 1);
419 if (error == 0)
420 error = copyout(kreq->ir_reply, recv->msg.data + 1,
421 len - 1);
422 ipmi_free_request(kreq);
423 if (error)
424 return (error);
425 break;
426 case IPMICTL_SET_MY_ADDRESS_CMD:
427 IPMI_LOCK(sc);
428 dev->ipmi_address = *(int*)data;
429 IPMI_UNLOCK(sc);
430 break;
431 case IPMICTL_GET_MY_ADDRESS_CMD:
432 IPMI_LOCK(sc);
433 *(int*)data = dev->ipmi_address;
434 IPMI_UNLOCK(sc);
435 break;
436 case IPMICTL_SET_MY_LUN_CMD:
437 IPMI_LOCK(sc);
438 dev->ipmi_lun = *(int*)data & 0x3;
439 IPMI_UNLOCK(sc);
440 break;
441 case IPMICTL_GET_MY_LUN_CMD:
442 IPMI_LOCK(sc);
443 *(int*)data = dev->ipmi_lun;
444 IPMI_UNLOCK(sc);
445 break;
446 case IPMICTL_SET_GETS_EVENTS_CMD:
447 /*
448 device_printf(sc->ipmi_dev,
449 "IPMICTL_SET_GETS_EVENTS_CMD NA\n");
450 */
451 break;
452 case IPMICTL_REGISTER_FOR_CMD:
453 case IPMICTL_UNREGISTER_FOR_CMD:
454 return (EOPNOTSUPP);
455 default:
456 device_printf(sc->ipmi_dev, "Unknown IOCTL %lX\n", cmd);
457 return (ENOIOCTL);
458 }
459
460#ifdef IPMICTL_SEND_COMMAND_32
461 /* Update changed fields in 32-bit structures. */
462 switch (cmd) {
463 case IPMICTL_RECEIVE_MSG_TRUNC_32:
464 case IPMICTL_RECEIVE_MSG_32:
465 recv32->recv_type = recv->recv_type;
466 recv32->msgid = recv->msgid;
467 recv32->msg.netfn = recv->msg.netfn;
468 recv32->msg.cmd = recv->msg.cmd;
469 recv32->msg.data_len = recv->msg.data_len;
470 break;
471 }
472#endif
473 return (0);
474}
475
476/*
477 * Request management.
478 */
479
480/* Allocate a new request with request and reply buffers. */
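/*
 * The header and both buffers come from a single allocation: the request
 * buffer starts immediately after the header and the reply buffer follows
 * the request buffer.
 */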
481struct ipmi_request *
482ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
483 uint8_t command, size_t requestlen, size_t replylen)
484{
485 struct ipmi_request *req;
486
487 req = malloc(sizeof(struct ipmi_request) + requestlen + replylen,
488 M_IPMI, M_WAITOK | M_ZERO);
489 req->ir_owner = dev;
490 req->ir_msgid = msgid;
491 req->ir_addr = addr;
492 req->ir_command = command;
493 if (requestlen) {
494 req->ir_request = (char *)&req[1];
495 req->ir_requestlen = requestlen;
496 }
497 if (replylen) {
498 req->ir_reply = (char *)&req[1] + requestlen;
499 req->ir_replybuflen = replylen;
500 }
501 return (req);
502}
503
504/* Free a request no longer in use. */
505void
506ipmi_free_request(struct ipmi_request *req)
507{
508
509 free(req, M_IPMI);
510}
511
512/* Store a processed request on the appropriate completion queue. */
513void
514ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
515{
516 struct ipmi_device *dev;
517
518 IPMI_LOCK_ASSERT(sc);
519
520 /*
521 * Anonymous requests (from inside the driver) always have a
522 * waiter that we awaken.
523 */
524 if (req->ir_owner == NULL)
525 wakeup(req);
526 else {
527 dev = req->ir_owner;
528 TAILQ_INSERT_TAIL(&dev->ipmi_completed_requests, req, ir_link);
529 selwakeup(&dev->ipmi_select);
530 if (dev->ipmi_closing)
531 wakeup(&dev->ipmi_requests);
532 }
533}
534
535/* Enqueue an internal driver request and wait until it is completed. */
536int
537ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req,
538 int timo)
539{
540 int error;
541
542 IPMI_LOCK(sc);
543 error = sc->ipmi_enqueue_request(sc, req);
544 if (error == 0)
545 error = msleep(req, &sc->ipmi_lock, 0, "ipmireq", timo);
546 if (error == 0)
547 error = req->ir_error;
548 IPMI_UNLOCK(sc);
549 return (error);
550}
551
552/*
553 * Helper routine for polled system interfaces that use
554 * ipmi_polled_enqueue_request() to queue requests. This request
555 * waits until there is a pending request and then returns the first
556 * request. If the driver is shutting down, it returns NULL.
557 */
558struct ipmi_request *
559ipmi_dequeue_request(struct ipmi_softc *sc)
560{
561 struct ipmi_request *req;
562
563 IPMI_LOCK_ASSERT(sc);
564
565 while (!sc->ipmi_detaching && TAILQ_EMPTY(&sc->ipmi_pending_requests))
566 cv_wait(&sc->ipmi_request_added, &sc->ipmi_lock);
567 if (sc->ipmi_detaching)
568 return (NULL);
569
570 req = TAILQ_FIRST(&sc->ipmi_pending_requests);
571 TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
572 return (req);
573}
574
575/* Default implementation of ipmi_enqueue_request() for polled interfaces. */
576int
577ipmi_polled_enqueue_request(struct ipmi_softc *sc, struct ipmi_request *req)
578{
579
580 IPMI_LOCK_ASSERT(sc);
581
582 TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests, req, ir_link);
583 cv_signal(&sc->ipmi_request_added);
584 return (0);
585}
586
587/*
588 * Watchdog event handler.
589 */
590
591static void
592ipmi_set_watchdog(struct ipmi_softc *sc, int sec)
593{
594 struct ipmi_request *req;
595 int error;
596
597 req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
598 IPMI_SET_WDOG, 6, 0);
599
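	/*
	 * IPMI Set Watchdog Timer request: byte 0 carries the timer-use and
	 * don't-stop flags, byte 1 the timeout action, and bytes 4-5 the
	 * initial countdown in 100 ms units, low byte first.
	 */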
600 if (sec) {
601 req->ir_request[0] = IPMI_SET_WD_TIMER_DONT_STOP
602 | IPMI_SET_WD_TIMER_SMS_OS;
603 req->ir_request[1] = IPMI_SET_WD_ACTION_RESET;
604 req->ir_request[2] = 0;
605 req->ir_request[3] = 0; /* Timer use */
606 req->ir_request[4] = (sec * 10) & 0xff;
607 req->ir_request[5] = (sec * 10) / 2550;
608 } else {
609 req->ir_request[0] = IPMI_SET_WD_TIMER_SMS_OS;
610 req->ir_request[1] = 0;
611 req->ir_request[2] = 0;
612 req->ir_request[3] = 0; /* Timer use */
613 req->ir_request[4] = 0;
614 req->ir_request[5] = 0;
615 }
616
617 error = ipmi_submit_driver_request(sc, req, 0);
618 if (error)
619 device_printf(sc->ipmi_dev, "Failed to set watchdog\n");
620
621 if (error == 0 && sec) {
622 ipmi_free_request(req);
623
624 req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
625 IPMI_RESET_WDOG, 0, 0);
626
627 error = ipmi_submit_driver_request(sc, req, 0);
628 if (error)
629 device_printf(sc->ipmi_dev,
630 "Failed to reset watchdog\n");
631 }
632
633 ipmi_free_request(req);
634 /*
635 dump_watchdog(sc);
636 */
637}
638
639static void
640ipmi_wd_event(void *arg, unsigned int cmd, int *error)
641{
642 struct ipmi_softc *sc = arg;
643 unsigned int timeout;
644
645 cmd &= WD_INTERVAL;
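	/*
	 * The watchdog interval is encoded as the base-2 logarithm of the
	 * timeout in nanoseconds; convert it to whole seconds for the BMC
	 * (the conversion below divides by 1800000000).
	 */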
646 if (cmd > 0 && cmd <= 63) {
647 timeout = ((uint64_t)1 << cmd) / 1800000000;
648 ipmi_set_watchdog(sc, timeout);
649 *error = 0;
650 } else {
651 ipmi_set_watchdog(sc, 0);
652 }
653}
654
655static void
656ipmi_startup(void *arg)
657{
658 struct ipmi_softc *sc = arg;
659 struct ipmi_request *req;
660 device_t dev;
661 int error, i;
662
663 config_intrhook_disestablish(&sc->ipmi_ich);
664 dev = sc->ipmi_dev;
665
666 /* Initialize interface-independent state. */
667 mtx_init(&sc->ipmi_lock, device_get_nameunit(dev), "ipmi", MTX_DEF);
668 cv_init(&sc->ipmi_request_added, "ipmireq");
669 TAILQ_INIT(&sc->ipmi_pending_requests);
670
671 /* Initialize interface-dependent state. */
672 error = sc->ipmi_startup(sc);
673 if (error) {
674 device_printf(dev, "Failed to initialize interface: %d\n",
675 error);
676 return;
677 }
678
679 /* Send a GET_DEVICE_ID request. */
680 req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
681 IPMI_GET_DEVICE_ID, 0, 15);
682
683 error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
684 if (error == EWOULDBLOCK) {
685 device_printf(dev, "Timed out waiting for GET_DEVICE_ID\n");
686 ipmi_free_request(req);
687 return;
688 } else if (error) {
689 device_printf(dev, "Failed GET_DEVICE_ID: %d\n", error);
690 ipmi_free_request(req);
691 return;
692 } else if (req->ir_compcode != 0) {
693 device_printf(dev,
694 "Bad completion code for GET_DEVICE_ID: %d\n",
695 req->ir_compcode);
696 ipmi_free_request(req);
697 return;
698 } else if (req->ir_replylen < 5) {
699 device_printf(dev, "Short reply for GET_DEVICE_ID: %d\n",
700 req->ir_replylen);
701 ipmi_free_request(req);
702 return;
703 }
704
705 device_printf(dev, "IPMI device rev. %d, firmware rev. %d.%d, "
706 "version %d.%d\n",
707 req->ir_reply[1] & 0x0f,
708 req->ir_reply[2] & 0x0f, req->ir_reply[4],
709 req->ir_reply[4] & 0x0f, req->ir_reply[4] >> 4);
710
711 ipmi_free_request(req);
712
713 req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
714 IPMI_CLEAR_FLAGS, 1, 0);
715
716 ipmi_submit_driver_request(sc, req, 0);
717
718 /* XXX: Magic numbers */
719 if (req->ir_compcode == 0xc0) {
720 device_printf(dev, "Clear flags is busy\n");
721 }
722 if (req->ir_compcode == 0xc1) {
723 device_printf(dev, "Clear flags illegal\n");
724 }
725 ipmi_free_request(req);
726
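	/*
	 * Probe channels 0 through 7 with Get Channel Info; the loop stops at
	 * the first channel that returns a non-zero completion code, leaving
	 * the count of responding channels in i.
	 */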
727 for (i = 0; i < 8; i++) {
728 req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
729 IPMI_GET_CHANNEL_INFO, 1, 0);
730 req->ir_request[0] = i;
731
732 ipmi_submit_driver_request(sc, req, 0);
733
734 if (req->ir_compcode != 0) {
735 ipmi_free_request(req);
736 break;
737 }
738 ipmi_free_request(req);
739 }
740 device_printf(dev, "Number of channels %d\n", i);
741
742 /* probe for watchdog */
743 req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
744 IPMI_GET_WDOG, 0, 0);
745
746 ipmi_submit_driver_request(sc, req, 0);
747
748 if (req->ir_compcode == 0x00) {
749 device_printf(dev, "Attached watchdog\n");
750 /* register the watchdog event handler */
751 sc->ipmi_watchdog_tag = EVENTHANDLER_REGISTER(watchdog_list,
752 ipmi_wd_event, sc, 0);
753 }
754 ipmi_free_request(req);
755
756 sc->ipmi_cdev = make_dev(&ipmi_cdevsw, device_get_unit(dev),
757 UID_ROOT, GID_OPERATOR, 0660, "ipmi%d", device_get_unit(dev));
758 if (sc->ipmi_cdev == NULL) {
759 device_printf(dev, "Failed to create cdev\n");
760 return;
761 }
762 sc->ipmi_cdev->si_drv1 = sc;
763}
764
765int
766ipmi_attach(device_t dev)
767{
768 struct ipmi_softc *sc = device_get_softc(dev);
769 int error;
770
771 if (sc->ipmi_irq_res != NULL && sc->ipmi_intr != NULL) {
772 error = bus_setup_intr(dev, sc->ipmi_irq_res, INTR_TYPE_MISC,
773 NULL, sc->ipmi_intr, sc, &sc->ipmi_irq);
774 if (error) {
775 device_printf(dev, "can't set up interrupt\n");
776 return (error);
777 }
778 }
779
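	/*
	 * Defer the rest of the initialization to ipmi_startup(), which runs
	 * from a config hook once interrupts are enabled.
	 */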
780 bzero(&sc->ipmi_ich, sizeof(struct intr_config_hook));
781 sc->ipmi_ich.ich_func = ipmi_startup;
782 sc->ipmi_ich.ich_arg = sc;
783 if (config_intrhook_establish(&sc->ipmi_ich) != 0) {
784 device_printf(dev, "can't establish configuration hook\n");
785 return (ENOMEM);
786 }
787
788 ipmi_attached = 1;
789 return (0);
790}
791
792int
793ipmi_detach(device_t dev)
794{
795 struct ipmi_softc *sc;
796
797 sc = device_get_softc(dev);
798
799 /* Fail if there are any open handles. */
800 IPMI_LOCK(sc);
801 if (sc->ipmi_opened) {
802 IPMI_UNLOCK(sc);
803 return (EBUSY);
804 }
805 IPMI_UNLOCK(sc);
806 if (sc->ipmi_cdev)
807 destroy_dev(sc->ipmi_cdev);
808
809 /* Detach from watchdog handling and turn off watchdog. */
810 if (sc->ipmi_watchdog_tag) {
811 EVENTHANDLER_DEREGISTER(watchdog_list, sc->ipmi_watchdog_tag);
812 ipmi_set_watchdog(sc, 0);
813 }
814
815 /* XXX: should use shutdown callout I think. */
816 /* If the backend uses a kthread, shut it down. */
817 IPMI_LOCK(sc);
818 sc->ipmi_detaching = 1;
819 if (sc->ipmi_kthread) {
820 cv_broadcast(&sc->ipmi_request_added);
821 msleep(sc->ipmi_kthread, &sc->ipmi_lock, 0, "ipmi_wait", 0);
822 }
823 IPMI_UNLOCK(sc);
824 if (sc->ipmi_irq)
825 bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
826
827 ipmi_release_resources(dev);
828 mtx_destroy(&sc->ipmi_lock);
829 return (0);
830}
831
832void
833ipmi_release_resources(device_t dev)
834{
835 struct ipmi_softc *sc;
836 int i;
837
838 sc = device_get_softc(dev);
839 if (sc->ipmi_irq)
840 bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
841 if (sc->ipmi_irq_res)
842 bus_release_resource(dev, SYS_RES_IRQ, sc->ipmi_irq_rid,
843 sc->ipmi_irq_res);
844 for (i = 0; i < MAX_RES; i++)
845 if (sc->ipmi_io_res[i])
846 bus_release_resource(dev, sc->ipmi_io_type,
847 sc->ipmi_io_rid + i, sc->ipmi_io_res[i]);
848}
849
850devclass_t ipmi_devclass;
851
852/* XXX: Why? */
853static void
854ipmi_unload(void *arg)
855{
856 device_t * devs;
857 int count;
858 int i;
859
860 if (devclass_get_devices(ipmi_devclass, &devs, &count) != 0)
861 return;
862 for (i = 0; i < count; i++)
863 device_delete_child(device_get_parent(devs[i]), devs[i]);
864 free(devs, M_TEMP);
865}
866SYSUNINIT(ipmi_unload, SI_SUB_DRIVERS, SI_ORDER_FIRST, ipmi_unload, NULL);
867
868#ifdef IPMI_DEBUG
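/*
 * Debug helper: hex-dump up to 256 bytes, 16 per line, with a printable
 * ASCII column alongside.
 */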
869static void
870dump_buf(u_char *data, int len)
871{
872 char buf[20];
873 char line[1024];
874 char temp[30];
875 int count = 0;
876 int i=0;
877
878 printf("Address %p len %d\n", data, len);
879 if (len > 256)
880 len = 256;
881 line[0] = '\000';
882 for (; len > 0; len--, data++) {
883 sprintf(temp, "%02x ", *data);
884 strcat(line, temp);
885 if (*data >= ' ' && *data <= '~')
886 buf[count] = *data;
887 else if (*data >= 'A' && *data <= 'Z')
888 buf[count] = *data;
889 else
890 buf[count] = '.';
891 if (++count == 16) {
892 buf[count] = '\000';
893 count = 0;
894 printf(" %3x %s %s\n", i, line, buf);
895 i+=16;
896 line[0] = '\000';
897 }
898 }
899 buf[count] = '\000';
900
901 for (; count != 16; count++) {
902 strcat(line, " ");
903 }
904 printf(" %3x %s %s\n", i, line, buf);
905}
906#endif