/*	$NetBSD: apmdev.c,v 1.8 2006/10/12 21:19:13 uwe Exp $ */

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John Kohl and Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apmdev.c,v 1.8 2006/10/12 21:19:13 uwe Exp $");

#ifdef _KERNEL_OPT
#include "opt_apmdev.h"
#endif

#ifdef APM_NOIDLE
#error APM_NOIDLE option deprecated; use APM_NO_IDLE instead
#endif

#if defined(DEBUG) && !defined(APMDEBUG)
#define	APMDEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/user.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <dev/hpc/apm/apmvar.h>

#include <machine/stdarg.h>

#if defined(APMDEBUG)
#define	DPRINTF(f, x)		do { if (apmdebug & (f)) printf x; } while (0)

#define	APMDEBUG_INFO		0x01
#define	APMDEBUG_APMCALLS	0x02
#define	APMDEBUG_EVENTS		0x04
#define	APMDEBUG_PROBE		0x10
#define	APMDEBUG_ATTACH		0x40
#define	APMDEBUG_DEVICE		0x20
#define	APMDEBUG_ANOM		0x40

#ifdef APMDEBUG_VALUE
int	apmdebug = APMDEBUG_VALUE;
#else
int	apmdebug = 0;
#endif
#else
#define	DPRINTF(f, x)		/**/
#endif

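/*
 * Events reported by the BIOS are queued for a userland reader in a
 * small ring buffer in the softc (see apm_record_event()).  If nobody
 * has the device open, or the ring is full, the event is handled
 * entirely in the kernel; any backlog is discarded on last close.
 */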
#define APM_NEVENTS 16

struct apm_softc {
	struct device sc_dev;
	struct selinfo sc_rsel;
	struct selinfo sc_xsel;
	int	sc_flags;
	int	event_count;
	int	event_ptr;
	int	sc_power_state;
	struct proc *sc_thread;
	struct lock sc_lock;
	struct apm_event_info event_list[APM_NEVENTS];
	struct apm_accessops *ops;
	void *cookie;
};
#define	SCFLAG_OREAD	0x0000001
#define	SCFLAG_OWRITE	0x0000002
#define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)

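/*
 * The minor number encodes both the unit (high nibble) and which node
 * was opened: APMDEV_NORMAL is the read-only status/event node, while
 * APMDEV_CTL is the control node, which must be opened for writing and
 * is used to request suspend or standby (see apmdevopen()).
 */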
#define	APMUNIT(dev)	(minor(dev)&0xf0)
#define	APMDEV(dev)	(minor(dev)&0x0f)
#define APMDEV_NORMAL	0
#define APMDEV_CTL	8

/*
 * A brief note on the locking protocol: it's very simple; we
 * assert an exclusive lock any time thread context enters the
 * APM module.  This is both the APM thread itself, as well as
 * user context.
 */
#define	APM_LOCK(apmsc)							\
	(void) lockmgr(&(apmsc)->sc_lock, LK_EXCLUSIVE, NULL)
#define	APM_UNLOCK(apmsc)						\
	(void) lockmgr(&(apmsc)->sc_lock, LK_RELEASE, NULL)

static void	apmattach(struct device *, struct device *, void *);
static int	apmmatch(struct device *, struct cfdata *, void *);

static void	apm_event_handle(struct apm_softc *, u_int, u_int);
static void	apm_periodic_check(struct apm_softc *);
static void	apm_create_thread(void *);
static void	apm_thread(void *);
static void	apm_perror(const char *, int, ...)
		    __attribute__((__format__(__printf__,1,3)));
#ifdef APM_POWER_PRINT
static void	apm_power_print(struct apm_softc *, struct apm_power_info *);
#endif
static int	apm_record_event(struct apm_softc *, u_int);
static void	apm_set_ver(struct apm_softc *, u_long);
static void	apm_standby(struct apm_softc *);
static const char *apm_strerror(int);
static void	apm_suspend(struct apm_softc *);
static void	apm_resume(struct apm_softc *, u_int, u_int);

CFATTACH_DECL(apmdev, sizeof(struct apm_softc),
    apmmatch, apmattach, NULL, NULL);

extern struct cfdriver apmdev_cd;

dev_type_open(apmdevopen);
dev_type_close(apmdevclose);
dev_type_ioctl(apmdevioctl);
dev_type_poll(apmdevpoll);
dev_type_kqfilter(apmdevkqfilter);

const struct cdevsw apmdev_cdevsw = {
	apmdevopen, apmdevclose, noread, nowrite, apmdevioctl,
	nostop, notty, apmdevpoll, nommap, apmdevkqfilter, D_OTHER
};

/* configurable variables */
int	apm_bogus_bios = 0;
#ifdef APM_DISABLE
int	apm_enabled = 0;
#else
int	apm_enabled = 1;
#endif
#ifdef APM_NO_IDLE
int	apm_do_idle = 0;
#else
int	apm_do_idle = 1;
#endif
#ifdef APM_NO_STANDBY
int	apm_do_standby = 0;
#else
int	apm_do_standby = 1;
#endif
#ifdef APM_V10_ONLY
int	apm_v11_enabled = 0;
#else
int	apm_v11_enabled = 1;
#endif
#ifdef APM_NO_V12
int	apm_v12_enabled = 0;
#else
int	apm_v12_enabled = 1;
#endif

/* variables used during operation (XXX cgd) */
u_char	apm_majver, apm_minver;
int	apm_inited;
int	apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
int	apm_damn_fool_bios, apm_op_inprog;
int	apm_evindex;

static int apm_spl;		/* saved spl while suspended */

static const char *
apm_strerror(int code)
{
	switch (code) {
	case APM_ERR_PM_DISABLED:
		return ("power management disabled");
	case APM_ERR_REALALREADY:
		return ("real mode interface already connected");
	case APM_ERR_NOTCONN:
		return ("interface not connected");
	case APM_ERR_16ALREADY:
		return ("16-bit interface already connected");
	case APM_ERR_16NOTSUPP:
		return ("16-bit interface not supported");
	case APM_ERR_32ALREADY:
		return ("32-bit interface already connected");
	case APM_ERR_32NOTSUPP:
		return ("32-bit interface not supported");
	case APM_ERR_UNRECOG_DEV:
		return ("unrecognized device ID");
	case APM_ERR_ERANGE:
		return ("parameter out of range");
	case APM_ERR_NOTENGAGED:
		return ("interface not engaged");
	case APM_ERR_UNABLE:
		return ("unable to enter requested state");
	case APM_ERR_NOEVENTS:
		return ("no pending events");
	case APM_ERR_NOT_PRESENT:
		return ("no APM present");
	default:
		return ("unknown error code");
	}
}

static void
apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
{
	va_list ap;

	printf("APM ");

	va_start(ap, errinfo);
	vprintf(str, ap);			/* XXX cgd */
	va_end(ap);

	printf(": %s\n", apm_strerror(errinfo));
}

#ifdef APM_POWER_PRINT
static void
apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
{

	if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
		printf("%s: battery life expectancy: %d%%\n",
		    sc->sc_dev.dv_xname, pi->battery_life);
	}
	printf("%s: A/C state: ", sc->sc_dev.dv_xname);
	switch (pi->ac_state) {
	case APM_AC_OFF:
		printf("off\n");
		break;
	case APM_AC_ON:
		printf("on\n");
		break;
	case APM_AC_BACKUP:
		printf("backup power\n");
		break;
	default:
	case APM_AC_UNKNOWN:
		printf("unknown\n");
		break;
	}
	if (apm_majver == 1 && apm_minver == 0) {
		printf("%s: battery charge state:", sc->sc_dev.dv_xname);
		switch (pi->battery_state) {
		case APM_BATT_HIGH:
			printf("high\n");
			break;
		case APM_BATT_LOW:
			printf("low\n");
			break;
		case APM_BATT_CRITICAL:
			printf("critical\n");
			break;
		case APM_BATT_CHARGING:
			printf("charging\n");
			break;
		case APM_BATT_UNKNOWN:
			printf("unknown\n");
			break;
		default:
			printf("undecoded state %x\n", pi->battery_state);
			break;
		}
	} else {
		if (pi->battery_state&APM_BATT_FLAG_CHARGING)
			printf("charging ");
		if (pi->battery_state&APM_BATT_FLAG_UNKNOWN)
			printf("unknown\n");
		else if (pi->battery_state&APM_BATT_FLAG_CRITICAL)
			printf("critical\n");
		else if (pi->battery_state&APM_BATT_FLAG_LOW)
			printf("low\n");
		else if (pi->battery_state&APM_BATT_FLAG_HIGH)
			printf("high\n");
	}
	if (pi->minutes_left != 0) {
		printf("%s: estimated ", sc->sc_dev.dv_xname);
		printf("%dh ", pi->minutes_left / 60);
	}
	return;
}
#endif

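/*
 * Enter the suspend state: run the PWR_SOFTSUSPEND hooks while
 * interrupts are still enabled, give pending activity half a second to
 * settle, then raise the spl, run the PWR_SUSPEND hooks and ask the
 * BIOS to suspend the system.  apm_resume() reverses these steps;
 * apm_standby() follows the same pattern for the lighter standby state.
 */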
static void
apm_suspend(struct apm_softc *sc)
{

	if (sc->sc_power_state == PWR_SUSPEND) {
#ifdef APMDEBUG
		printf("%s: apm_suspend: already suspended?\n",
		    sc->sc_dev.dv_xname);
#endif
		return;
	}
	sc->sc_power_state = PWR_SUSPEND;

	dopowerhooks(PWR_SOFTSUSPEND);
	(void) tsleep(sc, PWAIT, "apmsuspend",  hz/2);

	apm_spl = splhigh();

	dopowerhooks(PWR_SUSPEND);

	/* XXX cgd */
	(void)sc->ops->set_powstate(sc->cookie, APM_DEV_ALLDEVS, APM_SYS_SUSPEND);
}

static void
apm_standby(struct apm_softc *sc)
{

	if (sc->sc_power_state == PWR_STANDBY) {
#ifdef APMDEBUG
		printf("%s: apm_standby: already standing by?\n",
		    sc->sc_dev.dv_xname);
#endif
		return;
	}
	sc->sc_power_state = PWR_STANDBY;

	dopowerhooks(PWR_SOFTSTANDBY);
	(void) tsleep(sc, PWAIT, "apmstandby",  hz/2);

	apm_spl = splhigh();

	dopowerhooks(PWR_STANDBY);
	/* XXX cgd */
	(void)sc->ops->set_powstate(sc->cookie, APM_DEV_ALLDEVS, APM_SYS_STANDBY);
}

static void
apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info __unused)
{

	if (sc->sc_power_state == PWR_RESUME) {
#ifdef APMDEBUG
		printf("%s: apm_resume: already running?\n",
		    sc->sc_dev.dv_xname);
#endif
		return;
	}
	sc->sc_power_state = PWR_RESUME;

	/*
	 * Some systems require the clock to be reinitialized after
	 * resuming from hibernation.
	 */
/* XXX
	initrtclock();
*/

	inittodr(time_second);
	dopowerhooks(PWR_RESUME);

	splx(apm_spl);

	dopowerhooks(PWR_SOFTRESUME);

	apm_record_event(sc, event_type);
}

/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return 1;		/* no user waiting */
	if (sc->event_count == APM_NEVENTS)
		return 1;			/* overflow */
	evp = &sc->event_list[sc->event_ptr];
	sc->event_count++;
	sc->event_ptr++;
	sc->event_ptr %= APM_NEVENTS;
	evp->type = event_type;
	evp->index = ++apm_evindex;
	selnotify(&sc->sc_rsel, 0);
	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
}

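/*
 * Handle a single event reported by the BIOS.  Suspend and standby
 * requests are only tallied here (apm_suspends, apm_standbys,
 * apm_userstandbys); the actual state change happens later in
 * apm_periodic_check(), once all pending events have been drained.
 */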
static void
apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
{
	int error;
	const char *code;
	struct apm_power_info pi;

	switch (event_code) {
	case APM_USER_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
		if (apm_do_standby) {
			if (apm_record_event(sc, event_code))
				apm_userstandbys++;
			apm_op_inprog++;
			(void)sc->ops->set_powstate(sc->cookie,
						    APM_DEV_ALLDEVS,
						    APM_LASTREQ_INPROG);
		} else {
			(void)sc->ops->set_powstate(sc->cookie,
						    APM_DEV_ALLDEVS,
						    APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			sc->ops->enable(sc->cookie, 1);
		}
		break;

	case APM_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_do_standby) {
			if (apm_record_event(sc, event_code))
				apm_standbys++;
			apm_op_inprog++;
			(void)sc->ops->set_powstate(sc->cookie,
						    APM_DEV_ALLDEVS,
						    APM_LASTREQ_INPROG);
		} else {
			(void)sc->ops->set_powstate(sc->cookie,
						    APM_DEV_ALLDEVS,
						    APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			sc->ops->enable(sc->cookie, 1);
		}
		break;

	case APM_USER_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
		if (apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)sc->ops->set_powstate(sc->cookie,
					    APM_DEV_ALLDEVS,
					    APM_LASTREQ_INPROG);
		break;

	case APM_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)sc->ops->set_powstate(sc->cookie,
					    APM_DEV_ALLDEVS,
					    APM_LASTREQ_INPROG);
		break;

	case APM_POWER_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
		error = sc->ops->get_powstat(sc->cookie, &pi);
#ifdef APM_POWER_PRINT
		/* only print if nobody is catching events. */
		if (error == 0 &&
		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
			apm_power_print(sc, &pi);
#endif
		apm_record_event(sc, event_code);
		break;

	case APM_NORMAL_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_SYS_STANDBY_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_UPDATE_TIME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
		apm_record_event(sc, event_code);
		apm_suspend(sc);
		break;

	case APM_BATTERY_LOW:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
		apm_battlow++;
		apm_record_event(sc, event_code);
		break;

	case APM_CAP_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
		if (apm_minver < 2) {
			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
		} else {
			u_int numbatts, capflags;
			sc->ops->get_capabilities(sc->cookie,
						  &numbatts, &capflags);
			sc->ops->get_powstat(sc->cookie, &pi); /* XXX */
		}
		break;

	default:
		switch (event_code >> 8) {
			case 0:
				code = "reserved system";
				break;
			case 1:
				code = "reserved device";
				break;
			case 2:
				code = "OEM defined";
				break;
			default:
				code = "reserved";
				break;
		}
		printf("APM: %s event code %x\n", code, event_code);
	}
}

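/*
 * Poll the BIOS: drain all pending events through apm_event_handle(),
 * then act on any suspend or standby request that was queued up.
 * Called from the APM kernel thread roughly once a second, and once
 * directly from apmattach().
 */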
static void
apm_periodic_check(struct apm_softc *sc)
{
	int error;
	u_int event_code, event_info;


	/*
	 * tell the BIOS we're working on it, if asked to do a
	 * suspend/standby
	 */
	if (apm_op_inprog)
		sc->ops->set_powstate(sc->cookie, APM_DEV_ALLDEVS,
				      APM_LASTREQ_INPROG);

	while ((error = sc->ops->get_event(sc->cookie, &event_code,
					   &event_info)) == 0
	       && !apm_damn_fool_bios)
		apm_event_handle(sc, event_code, event_info);

	if (error != APM_ERR_NOEVENTS)
		apm_perror("get event", error);
	if (apm_suspends) {
		apm_op_inprog = 0;
		apm_suspend(sc);
	} else if (apm_standbys || apm_userstandbys) {
		apm_op_inprog = 0;
		apm_standby(sc);
	}
	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
	apm_damn_fool_bios = 0;
}

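/*
 * Pick the APM version to operate at: 1.2 or 1.1 if the BIOS offers it
 * and the corresponding apm_v1*_enabled knob allows it, otherwise fall
 * back to 1.0.  Also decides whether "idle" calls remain worthwhile
 * (APM_IDLE_SLOWS) and prints the negotiated version.
 */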
static void
apm_set_ver(struct apm_softc *self __unused, u_long detail)
{

	if (apm_v12_enabled &&
	    APM_MAJOR_VERS(detail) == 1 &&
	    APM_MINOR_VERS(detail) == 2) {
		apm_majver = 1;
		apm_minver = 2;
		goto ok;
	}

	if (apm_v11_enabled &&
	    APM_MAJOR_VERS(detail) == 1 &&
	    APM_MINOR_VERS(detail) == 1) {
		apm_majver = 1;
		apm_minver = 1;
	} else {
		apm_majver = 1;
		apm_minver = 0;
	}
ok:
	printf("Power Management spec V%d.%d", apm_majver, apm_minver);
	apm_inited = 1;
	if (detail & APM_IDLE_SLOWS) {
#ifdef DIAGNOSTIC
		/* not relevant often */
		printf(" (slowidle)");
#endif
		/* leave apm_do_idle at its user-configured setting */
	} else
		apm_do_idle = 0;
#ifdef DIAGNOSTIC
	if (detail & APM_BIOS_PM_DISABLED)
		printf(" (BIOS mgmt disabled)");
	if (detail & APM_BIOS_PM_DISENGAGED)
		printf(" (BIOS managing devices)");
#endif
}

static int
apmmatch(struct device *parent __unused,
	 struct cfdata *match __unused, void *aux __unused)
{

	/* There can be only one! */
	if (apm_inited)
		return 0;

	return (1);
}

static void
apmattach(struct device *parent __unused, struct device *self, void *aux)
{
	struct apm_softc *sc = (void *)self;
	struct apmdev_attach_args *aaa = aux;
	struct apm_power_info pinfo;
	u_int numbatts, capflags;
	int error;

	printf(": ");

	sc->ops = aaa->accessops;
	sc->cookie = aaa->accesscookie;

	switch ((APM_MAJOR_VERS(aaa->apm_detail) << 8) +
		APM_MINOR_VERS(aaa->apm_detail)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc, aaa->apm_detail);	/* prints version info */
	printf("\n");
	if (apm_minver >= 2)
		sc->ops->get_capabilities(sc->cookie, &numbatts, &capflags);

	/*
	 * enable power management
	 */
	sc->ops->enable(sc->cookie, 1);

	error = sc->ops->get_powstat(sc->cookie, &pinfo);
	if (error == 0) {
#ifdef APM_POWER_PRINT
		apm_power_print(sc, &pinfo);
#endif
	} else
		apm_perror("get power status", error);
	sc->ops->cpu_busy(sc->cookie);

	lockinit(&sc->sc_lock, PWAIT, "apmlk", 0, 0);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	kthread_create(apm_create_thread, sc);

	return;
}

/*
 * Print function (for parent devices).
 */
int
apmprint(void *aux __unused, const char *pnp)
{
	if (pnp)
		aprint_normal("apm at %s", pnp);

	return (UNCONF);
}

void
apm_create_thread(void *arg)
{
	struct apm_softc *sc = arg;

	if (kthread_create1(apm_thread, sc, &sc->sc_thread,
			    "%s", sc->sc_dev.dv_xname) == 0)
		return;

	/*
	 * We were unable to create the APM thread; bail out.
	 */
	sc->ops->disconnect(sc->cookie);
	printf("%s: unable to create thread, kernel APM support disabled\n",
	       sc->sc_dev.dv_xname);
}

void
apm_thread(void *arg)
{
	struct apm_softc *apmsc = arg;

	/*
	 * Loop forever, doing a periodic check for APM events.
	 */
	for (;;) {
		APM_LOCK(apmsc);
		apm_periodic_check(apmsc);
		APM_UNLOCK(apmsc);
		(void) tsleep(apmsc, PWAIT, "apmev",  (8 * hz) / 7);
	}
}

int
apmdevopen(dev_t dev, int flag, int mode __unused, struct lwp *l __unused)
{
	int unit = APMUNIT(dev);
	int ctl = APMDEV(dev);
	int error = 0;
	struct apm_softc *sc;

	if (unit >= apmdev_cd.cd_ndevs)
		return ENXIO;
	sc = apmdev_cd.cd_devs[unit];
	if (!sc)
		return ENXIO;

	if (!apm_inited)
		return ENXIO;

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APMDEV_CTL:
		if (!(flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		if (sc->sc_flags & SCFLAG_OWRITE) {
			error = EBUSY;
			break;
		}
		sc->sc_flags |= SCFLAG_OWRITE;
		break;
	case APMDEV_NORMAL:
		if (!(flag & FREAD) || (flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		sc->sc_flags |= SCFLAG_OREAD;
		break;
	default:
		error = ENXIO;
		break;
	}
	APM_UNLOCK(sc);

	return (error);
}

int
apmdevclose(dev_t dev, int flag __unused, int mode __unused,
	    struct lwp *l __unused)
{
	struct apm_softc *sc = apmdev_cd.cd_devs[APMUNIT(dev)];
	int ctl = APMDEV(dev);

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APMDEV_CTL:
		sc->sc_flags &= ~SCFLAG_OWRITE;
		break;
	case APMDEV_NORMAL:
		sc->sc_flags &= ~SCFLAG_OREAD;
		break;
	}
	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
		sc->event_count = 0;
		sc->event_ptr = 0;
	}
	APM_UNLOCK(sc);
	return 0;
}

int
apmdevioctl(dev_t dev, u_long cmd, caddr_t data, int flag,
	    struct lwp *l __unused)
{
	struct apm_softc *sc = apmdev_cd.cd_devs[APMUNIT(dev)];
	struct apm_power_info *powerp;
	struct apm_event_info *evp;
#if 0
	struct apm_ctl *actl;
#endif
	int i, error = 0;
	int batt_flags;

	APM_LOCK(sc);
	switch (cmd) {
	case APM_IOC_STANDBY:
		if (!apm_do_standby) {
			error = EOPNOTSUPP;
			break;
		}

		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_userstandbys++;
		break;

	case APM_IOC_SUSPEND:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_suspends++;
		break;

	case APM_IOC_NEXTEVENT:
		if (!sc->event_count)
			error = EAGAIN;
		else {
			evp = (struct apm_event_info *)data;
			i = sc->event_ptr + APM_NEVENTS - sc->event_count;
			i %= APM_NEVENTS;
			*evp = sc->event_list[i];
			sc->event_count--;
		}
		break;

	case OAPM_IOC_GETPOWER:
	case APM_IOC_GETPOWER:
		powerp = (struct apm_power_info *)data;
		if ((error = sc->ops->get_powstat(sc->cookie, powerp)) != 0) {
			apm_perror("ioctl get power status", error);
			error = EIO;
			break;
		}
		switch (apm_minver) {
		case 0:
			break;
		case 1:
		default:
			batt_flags = powerp->battery_state;
			powerp->battery_state = APM_BATT_UNKNOWN;
			if (batt_flags & APM_BATT_FLAG_HIGH)
				powerp->battery_state = APM_BATT_HIGH;
			else if (batt_flags & APM_BATT_FLAG_LOW)
				powerp->battery_state = APM_BATT_LOW;
			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
				powerp->battery_state = APM_BATT_CRITICAL;
			else if (batt_flags & APM_BATT_FLAG_CHARGING)
				powerp->battery_state = APM_BATT_CHARGING;
			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
				powerp->battery_state = APM_BATT_ABSENT;
			break;
		}
		break;

	default:
		error = ENOTTY;
	}
	APM_UNLOCK(sc);

	return (error);
}

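/*
 * Illustrative userland usage (a sketch, not part of this driver): an
 * apmd(8)-style monitor typically opens the normal device (commonly
 * /dev/apm) read-only and either polls for events or queries the power
 * status directly, e.g.
 *
 *	struct apm_power_info pi;
 *	int fd = open("/dev/apm", O_RDONLY);
 *	if (fd != -1 && ioctl(fd, APM_IOC_GETPOWER, &pi) == 0)
 *		printf("battery: %d%%\n", pi.battery_life);
 *
 * APM_IOC_SUSPEND and APM_IOC_STANDBY are only honoured on a descriptor
 * opened for writing, i.e. the control device.
 */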
int
apmdevpoll(dev_t dev, int events, struct lwp *l)
{
	struct apm_softc *sc = apmdev_cd.cd_devs[APMUNIT(dev)];
	int revents = 0;

	APM_LOCK(sc);
	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->event_count)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(l, &sc->sc_rsel);
	}
	APM_UNLOCK(sc);

	return (revents);
}

static void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = kn->kn_hook;

	APM_LOCK(sc);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	APM_UNLOCK(sc);
}

static int
filt_apmread(struct knote *kn, long hint __unused)
{
	struct apm_softc *sc = kn->kn_hook;

	kn->kn_data = sc->event_count;
	return (kn->kn_data > 0);
}

static const struct filterops apmread_filtops =
	{ 1, NULL, filt_apmrdetach, filt_apmread };

int
apmdevkqfilter(dev_t dev, struct knote *kn)
{
	struct apm_softc *sc = apmdev_cd.cd_devs[APMUNIT(dev)];
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_rsel.sel_klist;
		kn->kn_fop = &apmread_filtops;
		break;

	default:
		return (1);
	}

	kn->kn_hook = sc;

	APM_LOCK(sc);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	APM_UNLOCK(sc);

	return (0);
}