/*	$NetBSD: apmdev.c,v 1.24 2009/11/23 02:13:45 rmind Exp $ */

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John Kohl and Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apmdev.c,v 1.24 2009/11/23 02:13:45 rmind Exp $");

#ifdef _KERNEL_OPT
#include "opt_apm.h"
#endif

#ifdef APM_NOIDLE
#error APM_NOIDLE option deprecated; use APM_NO_IDLE instead
#endif

#if defined(DEBUG) && !defined(APMDEBUG)
#define	APMDEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <dev/hpc/apm/apmvar.h>

#ifdef APMDEBUG
#define DPRINTF(f, x)		do { if (apmdebug & (f)) printf x; } while (0)

#ifdef APMDEBUG_VALUE
int	apmdebug = APMDEBUG_VALUE;
#else
int	apmdebug = 0;
#endif /* APMDEBUG_VALUE */

#else
#define	DPRINTF(f, x)		/**/
#endif /* APMDEBUG */

#define	SCFLAG_OREAD	0x0000001
#define	SCFLAG_OWRITE	0x0000002
#define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)

#define	APMUNIT(dev)	(minor(dev)&0xf0)
#define	APM(dev)	(minor(dev)&0x0f)
#define APM_NORMAL	0
#define APM_CTL	8
/*
 * A brief note on the locking protocol: it is very simple; we
 * take an exclusive lock any time thread context enters the
 * APM module.  This applies both to the APM thread itself and
 * to user context.
 */
#define	APM_LOCK(apmsc)						\
	(void) mutex_enter(&(apmsc)->sc_lock)
#define	APM_UNLOCK(apmsc)						\
	(void) mutex_exit(&(apmsc)->sc_lock)

static void	apmdevattach(device_t, device_t, void *);
static int	apmdevmatch(device_t, cfdata_t, void *);

static void	apm_event_handle(struct apm_softc *, u_int, u_int);
static void	apm_periodic_check(struct apm_softc *);
static void	apm_thread(void *);
static void	apm_perror(const char *, int, ...)
		    __attribute__((__format__(__printf__,1,3)));
#ifdef APM_POWER_PRINT
static void	apm_power_print(struct apm_softc *, struct apm_power_info *);
#endif
static int	apm_record_event(struct apm_softc *, u_int);
static void	apm_set_ver(struct apm_softc *);
static void	apm_standby(struct apm_softc *);
static void	apm_suspend(struct apm_softc *);
static void	apm_resume(struct apm_softc *, u_int, u_int);

CFATTACH_DECL_NEW(apmdev, sizeof(struct apm_softc),
    apmdevmatch, apmdevattach, NULL, NULL);

extern struct cfdriver apmdev_cd;

dev_type_open(apmdevopen);
dev_type_close(apmdevclose);
dev_type_ioctl(apmdevioctl);
dev_type_poll(apmdevpoll);
dev_type_kqfilter(apmdevkqfilter);

const struct cdevsw apmdev_cdevsw = {
	apmdevopen, apmdevclose, noread, nowrite, apmdevioctl,
	nostop, notty, apmdevpoll, nommap, apmdevkqfilter, D_OTHER
};

/* configurable variables */
int	apm_bogus_bios = 0;
#ifdef APM_DISABLE
int	apm_enabled = 0;
#else
int	apm_enabled = 1;
#endif
#ifdef APM_NO_IDLE
int	apm_do_idle = 0;
#else
int	apm_do_idle = 1;
#endif
#ifdef APM_NO_STANDBY
int	apm_do_standby = 0;
#else
int	apm_do_standby = 1;
#endif
#ifdef APM_V10_ONLY
int	apm_v11_enabled = 0;
#else
int	apm_v11_enabled = 1;
#endif
#ifdef APM_NO_V12
int	apm_v12_enabled = 0;
#else
int	apm_v12_enabled = 1;
#endif

/* variables used during operation (XXX cgd) */
u_char	apm_majver, apm_minver;
int	apm_inited;
int	apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
int	apm_damn_fool_bios, apm_op_inprog;
int	apm_evindex;

static int apm_spl;		/* saved spl while suspended */

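/*
 * Translate an APM error code into a human-readable string.
 */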
const char *
apm_strerror(int code)
{
	switch (code) {
	case APM_ERR_PM_DISABLED:
		return ("power management disabled");
	case APM_ERR_REALALREADY:
		return ("real mode interface already connected");
	case APM_ERR_NOTCONN:
		return ("interface not connected");
	case APM_ERR_16ALREADY:
		return ("16-bit interface already connected");
	case APM_ERR_16NOTSUPP:
		return ("16-bit interface not supported");
	case APM_ERR_32ALREADY:
		return ("32-bit interface already connected");
	case APM_ERR_32NOTSUPP:
		return ("32-bit interface not supported");
	case APM_ERR_UNRECOG_DEV:
		return ("unrecognized device ID");
	case APM_ERR_ERANGE:
		return ("parameter out of range");
	case APM_ERR_NOTENGAGED:
		return ("interface not engaged");
	case APM_ERR_UNABLE:
		return ("unable to enter requested state");
	case APM_ERR_NOEVENTS:
		return ("no pending events");
	case APM_ERR_NOT_PRESENT:
		return ("no APM present");
	default:
		return ("unknown error code");
	}
}

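/*
 * Print a printf-style message prefixed with "APM " and followed by
 * the error string for errinfo.
 */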
static void
apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
{
	va_list ap;

	printf("APM ");

	va_start(ap, errinfo);
	vprintf(str, ap);			/* XXX cgd */
	va_end(ap);

	printf(": %s\n", apm_strerror(errinfo));
}

#ifdef APM_POWER_PRINT
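/*
 * Report the current power status (battery life, A/C state, battery
 * charge state and estimated time remaining) on the console.
 */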
static void
apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
{

	if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
		aprint_normal_dev(sc->sc_dev,
		    "battery life expectancy: %d%%\n",
		    pi->battery_life);
	}
	aprint_normal_dev(sc->sc_dev, "A/C state: ");
	switch (pi->ac_state) {
	case APM_AC_OFF:
		printf("off\n");
		break;
	case APM_AC_ON:
		printf("on\n");
		break;
	case APM_AC_BACKUP:
		printf("backup power\n");
		break;
	default:
	case APM_AC_UNKNOWN:
		printf("unknown\n");
		break;
	}
	aprint_normal_dev(sc->sc_dev, "battery charge state:");
	if (apm_minver == 0)
		switch (pi->battery_state) {
		case APM_BATT_HIGH:
			printf("high\n");
			break;
		case APM_BATT_LOW:
			printf("low\n");
			break;
		case APM_BATT_CRITICAL:
			printf("critical\n");
			break;
		case APM_BATT_CHARGING:
			printf("charging\n");
			break;
		case APM_BATT_UNKNOWN:
			printf("unknown\n");
			break;
		default:
			printf("undecoded state %x\n", pi->battery_state);
			break;
		}
	else if (apm_minver >= 1) {
		if (pi->battery_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
			printf(" no battery");
		else {
			if (pi->battery_flags & APM_BATT_FLAG_HIGH)
				printf(" high");
			if (pi->battery_flags & APM_BATT_FLAG_LOW)
				printf(" low");
			if (pi->battery_flags & APM_BATT_FLAG_CRITICAL)
				printf(" critical");
			if (pi->battery_flags & APM_BATT_FLAG_CHARGING)
				printf(" charging");
		}
		printf("\n");
		if (pi->minutes_valid) {
			aprint_normal_dev(sc->sc_dev, "estimated ");
			if (pi->minutes_left / 60)
				printf("%dh ", pi->minutes_left / 60);
			printf("%dm\n", pi->minutes_left % 60);
		}
	}
	return;
}
#endif

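/*
 * Suspend the system: run the soft-suspend and suspend power hooks,
 * raise the interrupt priority level, and ask the APM BIOS to enter
 * the suspend state.  On failure, resume immediately.
 */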
static void
apm_suspend(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_SUSPEND) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_suspend: already suspended?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_SUSPEND;

	dopowerhooks(PWR_SOFTSUSPEND);
	(void) tsleep(sc, PWAIT, "apmsuspend",  hz/2);

	apm_spl = splhigh();

	dopowerhooks(PWR_SUSPEND);

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_SUSPEND);

	if (error)
		apm_resume(sc, 0, 0);
}

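/*
 * Enter standby: like apm_suspend(), but requests the lighter-weight
 * APM standby state.
 */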
static void
apm_standby(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_STANDBY) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_standby: already standing by?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_STANDBY;

	dopowerhooks(PWR_SOFTSTANDBY);
	(void) tsleep(sc, PWAIT, "apmstandby",  hz/2);

	apm_spl = splhigh();

	dopowerhooks(PWR_STANDBY);

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_STANDBY);
	if (error)
		apm_resume(sc, 0, 0);
}

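/*
 * Resume from standby or suspend: re-read the time-of-day clock, run
 * the resume power hooks, restore the saved spl and record the event
 * for userland.
 */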
static void
apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
{

	if (sc->sc_power_state == PWR_RESUME) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev, "apm_resume: already running?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_RESUME;

#if 0 /* XXX: def TIME_FREQ */
	/*
	 * Some systems require the clock to be reinitialized after
	 * hibernation.
	 */
	initrtclock(TIMER_FREQ);
#endif

	inittodr(time_second);
	dopowerhooks(PWR_RESUME);

	splx(apm_spl);

	dopowerhooks(PWR_SOFTRESUME);

	apm_record_event(sc, event_type);
}

/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return 1;		/* no user waiting */
	if (sc->sc_event_count == APM_NEVENTS)
		return 1;			/* overflow */
	evp = &sc->sc_event_list[sc->sc_event_ptr];
	sc->sc_event_count++;
	sc->sc_event_ptr++;
	sc->sc_event_ptr %= APM_NEVENTS;
	evp->type = event_type;
	evp->index = ++apm_evindex;
	selnotify(&sc->sc_rsel, 0, 0);
	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
}

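/*
 * Handle a single APM event: queue suspend/standby requests, record
 * power status changes and resume notifications, and complain about
 * event codes we do not understand.
 */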
static void
apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
{
	int error;
	const char *code;
	struct apm_power_info pi;

	switch (event_code) {
	case APM_USER_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
		if (apm_do_standby) {
			if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
				apm_userstandbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_do_standby) {
			if (apm_op_inprog == 0 &&
			    apm_record_event(sc, event_code))
				apm_standbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_USER_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_POWER_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
		error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
#ifdef APM_POWER_PRINT
		/* only print if nobody is catching events. */
		if (error == 0 &&
		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
			apm_power_print(sc, &pi);
#endif
		apm_record_event(sc, event_code);
		break;

	case APM_NORMAL_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_SYS_STANDBY_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_UPDATE_TIME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
		apm_record_event(sc, event_code);
		apm_suspend(sc);
		break;

	case APM_BATTERY_LOW:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
		apm_battlow++;
		apm_record_event(sc, event_code);
		break;

	case APM_CAP_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
		if (apm_minver < 2) {
			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
		} else {
			u_int numbatts, capflags;
			(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie,
			    &numbatts, &capflags);
			(*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
		}
		break;

	default:
		switch (event_code >> 8) {
		case 0:
			code = "reserved system";
			break;
		case 1:
			code = "reserved device";
			break;
		case 2:
			code = "OEM defined";
			break;
		default:
			code = "reserved";
			break;
		}
		printf("APM: %s event code %x\n", code, event_code);
	}
}

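/*
 * Poll the BIOS for pending events, dispatch them, and then carry out
 * any suspend or standby request that was queued while doing so.
 */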
static void
apm_periodic_check(struct apm_softc *sc)
{
	int error;
	u_int event_code, event_info;

	/*
	 * tell the BIOS we're working on it, if asked to do a
	 * suspend/standby
	 */
	if (apm_op_inprog)
		(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
		    APM_LASTREQ_INPROG);

	while ((error = (*sc->sc_ops->aa_get_event)(sc->sc_cookie, &event_code,
	    &event_info)) == 0 && !apm_damn_fool_bios)
		apm_event_handle(sc, event_code, event_info);

	if (error != APM_ERR_NOEVENTS)
		apm_perror("get event", error);
	if (apm_suspends) {
		apm_op_inprog = 0;
		apm_suspend(sc);
	} else if (apm_standbys || apm_userstandbys) {
		apm_op_inprog = 0;
		apm_standby(sc);
	}
	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
	apm_damn_fool_bios = 0;
}

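/*
 * Decide which APM specification version to run the BIOS at, honouring
 * the apm_v11_enabled/apm_v12_enabled knobs, and report it.
 */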
static void
apm_set_ver(struct apm_softc *sc)
{

	if (apm_v12_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 2) {
		apm_majver = 1;
		apm_minver = 2;
		goto ok;
	}

	if (apm_v11_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 1) {
		apm_majver = 1;
		apm_minver = 1;
	} else {
		apm_majver = 1;
		apm_minver = 0;
	}
ok:
	aprint_normal("Power Management spec V%d.%d", apm_majver, apm_minver);
	apm_inited = 1;
	if (sc->sc_detail & APM_IDLE_SLOWS) {
#ifdef DIAGNOSTIC
		/* not relevant often */
		aprint_normal(" (slowidle)");
#endif
		/* leave apm_do_idle at its user-configured setting */
	} else
		apm_do_idle = 0;
#ifdef DIAGNOSTIC
	if (sc->sc_detail & APM_BIOS_PM_DISABLED)
		aprint_normal(" (BIOS mgmt disabled)");
	if (sc->sc_detail & APM_BIOS_PM_DISENGAGED)
		aprint_normal(" (BIOS managing devices)");
#endif
}

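/* Autoconfiguration match: defer to apm_match(). */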
static int
apmdevmatch(device_t parent, cfdata_t match, void *aux)
{

	return apm_match();
}

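/*
 * Attach the apmdev device: record the attach arguments (APM detail
 * word, access ops and cookie) and hand off to apm_attach().
 */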
static void
apmdevattach(device_t parent, device_t self, void *aux)
{
	struct apm_softc *sc;
	struct apmdev_attach_args *aaa = aux;

	sc = device_private(self);
	sc->sc_dev = self;

	sc->sc_detail = aaa->apm_detail;
	sc->sc_vers = aaa->apm_detail & 0xffff; /* XXX: magic */

	sc->sc_ops = aaa->accessops;
	sc->sc_cookie = aaa->accesscookie;

	apm_attach(sc);
}

/*
 * Print function (for parent devices).
 */
int
apmprint(void *aux, const char *pnp)
{
	if (pnp)
		aprint_normal("apm at %s", pnp);

	return (UNCONF);
}

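/*
 * Allow only the first caller to match; at most one APM interface
 * attaches system-wide.
 */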
int
apm_match(void)
{
	static int got;
	return !got++;
}

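/*
 * Common attach code: select the spec version, enable power
 * management, print the initial power status, and start the
 * event-polling thread.
 */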
void
apm_attach(struct apm_softc *sc)
{
	struct apm_power_info pinfo;
	u_int numbatts, capflags;
	int error;

	aprint_naive("\n");
	aprint_normal(": ");

	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) + APM_MINOR_VERS(sc->sc_vers)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc);	/* prints version info */
	aprint_normal("\n");
	if (apm_minver >= 2)
		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
		    &capflags);

	/*
	 * enable power management
	 */
	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

	error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pinfo);
	if (error == 0) {
#ifdef APM_POWER_PRINT
		apm_power_print(sc, &pinfo);
#endif
	} else
		apm_perror("get power status", error);

	if (sc->sc_ops->aa_cpu_busy)
		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;
	selinit(&sc->sc_rsel);
	selinit(&sc->sc_xsel);

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/*
		 * We were unable to create the APM thread; bail out.
		 */
		if (sc->sc_ops->aa_disconnect)
			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
		aprint_error_dev(sc->sc_dev, "unable to create thread, "
		    "kernel APM support disabled\n");
	}
}


static void
apm_thread(void *arg)
{
	struct apm_softc *apmsc = arg;

	/*
	 * Loop forever, doing a periodic check for APM events.
	 */
	for (;;) {
		APM_LOCK(apmsc);
		apm_periodic_check(apmsc);
		APM_UNLOCK(apmsc);
		(void) tsleep(apmsc, PWAIT, "apmev",  (8 * hz) / 7);
	}
}

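/*
 * Open the APM device.  The normal minor must be opened read-only and
 * is used for status/event reporting; the control minor requires write
 * access and admits only one writer at a time.
 */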
int
apmdevopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	int ctl = APM(dev);
	int error = 0;
	struct apm_softc *sc;

	sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	if (!sc)
		return ENXIO;

	if (!apm_inited)
		return ENXIO;

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		if (!(flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		if (sc->sc_flags & SCFLAG_OWRITE) {
			error = EBUSY;
			break;
		}
		sc->sc_flags |= SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		if (!(flag & FREAD) || (flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		sc->sc_flags |= SCFLAG_OREAD;
		break;
	default:
		error = ENXIO;
		break;
	}
	APM_UNLOCK(sc);

	return (error);
}

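/*
 * Close the APM device; once no opens remain, discard any queued
 * events.
 */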
int
apmdevclose(dev_t dev, int flag, int mode,
	    struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	int ctl = APM(dev);

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		sc->sc_flags &= ~SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		sc->sc_flags &= ~SCFLAG_OREAD;
		break;
	}
	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
		sc->sc_event_count = 0;
		sc->sc_event_ptr = 0;
	}
	APM_UNLOCK(sc);
	return 0;
}

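/*
 * ioctl interface: queue standby/suspend requests, dequeue recorded
 * events, and report the current power status (translating battery
 * flags into the pre-1.1 battery_state values for compatibility).
 */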
int
apmdevioctl(dev_t dev, u_long cmd, void *data, int flag,
	    struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	struct apm_power_info *powerp;
	struct apm_event_info *evp;
#if 0
	struct apm_ctl *actl;
#endif
	int i, error = 0;
	int batt_flags;

	APM_LOCK(sc);
	switch (cmd) {
	case APM_IOC_STANDBY:
		if (!apm_do_standby) {
			error = EOPNOTSUPP;
			break;
		}

		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_userstandbys++;
		break;

	case APM_IOC_SUSPEND:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_suspends++;
		break;

	case APM_IOC_NEXTEVENT:
		if (!sc->sc_event_count)
			error = EAGAIN;
		else {
			evp = (struct apm_event_info *)data;
			i = sc->sc_event_ptr + APM_NEVENTS - sc->sc_event_count;
			i %= APM_NEVENTS;
			*evp = sc->sc_event_list[i];
			sc->sc_event_count--;
		}
		break;

	case OAPM_IOC_GETPOWER:
	case APM_IOC_GETPOWER:
		powerp = (struct apm_power_info *)data;
		if ((error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0,
		    powerp)) != 0) {
			apm_perror("ioctl get power status", error);
			error = EIO;
			break;
		}
		switch (apm_minver) {
		case 0:
			break;
		case 1:
		default:
			batt_flags = powerp->battery_flags;
			powerp->battery_state = APM_BATT_UNKNOWN;
			if (batt_flags & APM_BATT_FLAG_HIGH)
				powerp->battery_state = APM_BATT_HIGH;
			else if (batt_flags & APM_BATT_FLAG_LOW)
				powerp->battery_state = APM_BATT_LOW;
			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
				powerp->battery_state = APM_BATT_CRITICAL;
			else if (batt_flags & APM_BATT_FLAG_CHARGING)
				powerp->battery_state = APM_BATT_CHARGING;
			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
				powerp->battery_state = APM_BATT_ABSENT;
			break;
		}
		break;

	default:
		error = ENOTTY;
	}
	APM_UNLOCK(sc);

	return (error);
}

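/*
 * poll(2) support: readable whenever at least one event is queued.
 */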
int
apmdevpoll(dev_t dev, int events, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	int revents = 0;

	APM_LOCK(sc);
	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->sc_event_count)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(l, &sc->sc_rsel);
	}
	APM_UNLOCK(sc);

	return (revents);
}

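/* Detach a read knote from the softc's selinfo klist. */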
static void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = kn->kn_hook;

	APM_LOCK(sc);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	APM_UNLOCK(sc);
}

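/* kqueue read filter: fires when there are queued APM events. */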
static int
filt_apmread(struct knote *kn, long hint)
{
	struct apm_softc *sc = kn->kn_hook;

	kn->kn_data = sc->sc_event_count;
	return (kn->kn_data > 0);
}

static const struct filterops apmread_filtops =
	{ 1, NULL, filt_apmrdetach, filt_apmread };

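/*
 * kqueue(2) support: only EVFILT_READ is implemented, backed by the
 * event queue.
 */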
int
apmdevkqfilter(dev_t dev, struct knote *kn)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_rsel.sel_klist;
		kn->kn_fop = &apmread_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	APM_LOCK(sc);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	APM_UNLOCK(sc);

	return (0);
}