/*	$NetBSD: apm.c,v 1.35 2021/09/26 01:16:08 thorpej Exp $ */

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John Kohl and Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apm.c,v 1.35 2021/09/26 01:16:08 thorpej Exp $");

#include "opt_apm.h"

#if defined(DEBUG) && !defined(APMDEBUG)
#define	APMDEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <dev/apm/apmvar.h>

#include "ioconf.h"

#ifdef APMDEBUG
#define DPRINTF(f, x)		do { if (apmdebug & (f)) printf x; } while (0)


#ifdef APMDEBUG_VALUE
int	apmdebug = APMDEBUG_VALUE;
#else
int	apmdebug = 0;
#endif /* APMDEBUG_VALUE */

#else
#define	DPRINTF(f, x)		/**/
#endif /* APMDEBUG */

#define	SCFLAG_OREAD	0x0000001
#define	SCFLAG_OWRITE	0x0000002
#define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)

#define	APMUNIT(dev)	(minor(dev)&0xf0)
#define	APM(dev)	(minor(dev)&0x0f)
#define APM_NORMAL	0
#define APM_CTL	8
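/*
 * The minor number encodes both the unit and the flavour of device
 * node: the upper nibble selects the softc (APMUNIT) and the lower
 * nibble the access type (APM).  Conventionally /dev/apm is created
 * with minor 0 (APM_NORMAL) and /dev/apmctl with minor 8 (APM_CTL).
 */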

/*
 * A brief note on the locking protocol: it's very simple; we
 * assert an exclusive lock any time thread context enters the
 * APM module.  This applies both to the APM thread itself and
 * to user context.
 */
#define	APM_LOCK(apmsc)						\
	(void) mutex_enter(&(apmsc)->sc_lock)
#define	APM_UNLOCK(apmsc)					\
	(void) mutex_exit(&(apmsc)->sc_lock)

static void	apm_event_handle(struct apm_softc *, u_int, u_int);
static void	apm_periodic_check(struct apm_softc *);
static void	apm_thread(void *);
static void	apm_perror(const char *, int, ...)
		    __attribute__((__format__(__printf__,1,3)));
#ifdef APM_POWER_PRINT
static void	apm_power_print(struct apm_softc *, struct apm_power_info *);
#endif
static int	apm_record_event(struct apm_softc *, u_int);
static void	apm_set_ver(struct apm_softc *);
static void	apm_standby(struct apm_softc *);
static void	apm_suspend(struct apm_softc *);
static void	apm_resume(struct apm_softc *, u_int, u_int);

dev_type_open(apmopen);
dev_type_close(apmclose);
dev_type_ioctl(apmioctl);
dev_type_poll(apmpoll);
dev_type_kqfilter(apmkqfilter);

const struct cdevsw apm_cdevsw = {
	.d_open = apmopen,
	.d_close = apmclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = apmioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = apmpoll,
	.d_mmap = nommap,
	.d_kqfilter = apmkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};
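
/*
 * The APM device is not a read/write stream: d_read and d_write are
 * noread/nowrite.  Userland instead waits with poll(2) or kqueue(2)
 * and pulls events with APM_IOC_NEXTEVENT, and issues requests such
 * as APM_IOC_SUSPEND through the control node.  A minimal consumer
 * could look like this (illustrative sketch only; headers and error
 * handling omitted):
 *
 *	int fd = open("/dev/apm", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct apm_event_info ev;
 *
 *	while (poll(&pfd, 1, INFTIM) > 0)
 *		while (ioctl(fd, APM_IOC_NEXTEVENT, &ev) == 0)
 *			printf("APM event %u\n", ev.type);
 */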

/* configurable variables */
#ifdef APM_NO_STANDBY
int	apm_do_standby = 0;
#else
int	apm_do_standby = 1;
#endif
#ifdef APM_V10_ONLY
int	apm_v11_enabled = 0;
#else
int	apm_v11_enabled = 1;
#endif
#ifdef APM_NO_V12
int	apm_v12_enabled = 0;
#else
int	apm_v12_enabled = 1;
#endif

/* variables used during operation (XXX cgd) */
u_char	apm_majver, apm_minver;
int	apm_inited;
int	apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
int	apm_damn_fool_bios, apm_op_inprog;
int	apm_evindex;

static int apm_spl;		/* saved spl while suspended */

const char *
apm_strerror(int code)
{
	switch (code) {
	case APM_ERR_PM_DISABLED:
		return ("power management disabled");
	case APM_ERR_REALALREADY:
		return ("real mode interface already connected");
	case APM_ERR_NOTCONN:
		return ("interface not connected");
	case APM_ERR_16ALREADY:
		return ("16-bit interface already connected");
	case APM_ERR_16NOTSUPP:
		return ("16-bit interface not supported");
	case APM_ERR_32ALREADY:
		return ("32-bit interface already connected");
	case APM_ERR_32NOTSUPP:
		return ("32-bit interface not supported");
	case APM_ERR_UNRECOG_DEV:
		return ("unrecognized device ID");
	case APM_ERR_ERANGE:
		return ("parameter out of range");
	case APM_ERR_NOTENGAGED:
		return ("interface not engaged");
	case APM_ERR_UNABLE:
		return ("unable to enter requested state");
	case APM_ERR_NOEVENTS:
		return ("no pending events");
	case APM_ERR_NOT_PRESENT:
		return ("no APM present");
	default:
		return ("unknown error code");
	}
}

static void
apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
{
	va_list ap;

	printf("APM ");

	va_start(ap, errinfo);
	vprintf(str, ap);			/* XXX cgd */
	va_end(ap);

	printf(": %s\n", apm_strerror(errinfo));
}

#ifdef APM_POWER_PRINT
static void
apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
{

	if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
		aprint_normal_dev(sc->sc_dev,
		    "battery life expectancy: %d%%\n",
		    pi->battery_life);
	}
	aprint_normal_dev(sc->sc_dev, "A/C state: ");
	switch (pi->ac_state) {
	case APM_AC_OFF:
		printf("off\n");
		break;
	case APM_AC_ON:
		printf("on\n");
		break;
	case APM_AC_BACKUP:
		printf("backup power\n");
		break;
	default:
	case APM_AC_UNKNOWN:
		printf("unknown\n");
		break;
	}
	aprint_normal_dev(sc->sc_dev, "battery charge state:");
	if (apm_minver == 0)
		switch (pi->battery_state) {
		case APM_BATT_HIGH:
			printf("high\n");
			break;
		case APM_BATT_LOW:
			printf("low\n");
			break;
		case APM_BATT_CRITICAL:
			printf("critical\n");
			break;
		case APM_BATT_CHARGING:
			printf("charging\n");
			break;
		case APM_BATT_UNKNOWN:
			printf("unknown\n");
			break;
		default:
			printf("undecoded state %x\n", pi->battery_state);
			break;
		}
	else if (apm_minver >= 1) {
		if (pi->battery_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
			printf(" no battery");
		else {
			if (pi->battery_flags & APM_BATT_FLAG_HIGH)
				printf(" high");
			if (pi->battery_flags & APM_BATT_FLAG_LOW)
				printf(" low");
			if (pi->battery_flags & APM_BATT_FLAG_CRITICAL)
				printf(" critical");
			if (pi->battery_flags & APM_BATT_FLAG_CHARGING)
				printf(" charging");
		}
		printf("\n");
		if (pi->minutes_valid) {
			aprint_normal_dev(sc->sc_dev, "estimated ");
			if (pi->minutes_left / 60)
				printf("%dh ", pi->minutes_left / 60);
			printf("%dm\n", pi->minutes_left % 60);
		}
	}
	return;
}
#endif

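/*
 * State-change helpers.  apm_suspend() and apm_standby() run the PMF
 * suspend hooks (unless the backend set APM_F_DONT_RUN_HOOKS), raise
 * the spl, and then ask the backend to put all devices into the
 * requested power state.  Control returns here once the system wakes
 * up again (or the call fails), so both finish by calling apm_resume(),
 * which resynchronises the clock, restores the spl, runs the PMF resume
 * hooks and queues an event for any userland listener.
 */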
static void
apm_suspend(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_SUSPEND) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_suspend: already suspended?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_SUSPEND;

	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		pmf_system_suspend(PMF_Q_NONE);
		apm_spl = splhigh();
	}

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_SUSPEND);

	if (error)
		apm_resume(sc, 0, 0);
	else
		apm_resume(sc, APM_NORMAL_RESUME, 0);
}

static void
apm_standby(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_STANDBY) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_standby: already standing by?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_STANDBY;

	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		pmf_system_suspend(PMF_Q_NONE);
		apm_spl = splhigh();
	}
	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_STANDBY);
	if (error)
		apm_resume(sc, 0, 0);
	else
		apm_resume(sc, APM_SYS_STANDBY_RESUME, 0);
}

static void
apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
{
	if (sc->sc_power_state == PWR_RESUME) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev, "apm_resume: already running?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_RESUME;

#ifdef TIMER_FREQ
	/*
	 * Some systems require their clock to be reinitialized after
	 * hibernation.
	 */
	initrtclock(TIMER_FREQ);
#endif

	inittodr(time_second);
	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		splx(apm_spl);
		pmf_system_resume(PMF_Q_NONE);
	}

	apm_record_event(sc, event_type);
}

/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return 1;		/* no user waiting */
	if (sc->sc_event_count == APM_NEVENTS)
		return 1;			/* overflow */
	evp = &sc->sc_event_list[sc->sc_event_ptr];
	sc->sc_event_count++;
	sc->sc_event_ptr++;
	sc->sc_event_ptr %= APM_NEVENTS;
	evp->type = event_type;
	evp->index = ++apm_evindex;
	selnotify(&sc->sc_rsel, 0, 0);
	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
}

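/*
 * Handle a single BIOS event.  Suspend and standby requests are not
 * acted on immediately: they bump the corresponding request counters
 * and answer the BIOS with APM_LASTREQ_INPROG, and apm_periodic_check()
 * performs the actual transition once the event queue has been drained.
 * If a userland client holds the control device open for writing,
 * apm_record_event() defers the decision to it instead.  Critical
 * suspend is the exception and is carried out on the spot.
 */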
static void
apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
{
	int error;
	const char *code;
	struct apm_power_info pi;

	switch (event_code) {
	case APM_USER_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
		if (apm_do_standby) {
			if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
				apm_userstandbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
		if (apm_op_inprog) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_do_standby) {
			if (apm_op_inprog == 0 &&
			    apm_record_event(sc, event_code))
				apm_standbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_USER_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
		if (apm_op_inprog) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_POWER_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
		error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
#ifdef APM_POWER_PRINT
		/* only print if nobody is catching events. */
		if (error == 0 &&
		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
			apm_power_print(sc, &pi);
#else
		__USE(error);
#endif
		apm_record_event(sc, event_code);
		break;

	case APM_NORMAL_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_SYS_STANDBY_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_UPDATE_TIME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
		apm_record_event(sc, event_code);
		apm_suspend(sc);
		break;

	case APM_BATTERY_LOW:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
		apm_battlow++;
		apm_record_event(sc, event_code);
		break;

	case APM_CAP_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
		if (apm_minver < 2) {
			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
		} else {
			u_int numbatts, capflags;
			(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie,
			    &numbatts, &capflags);
			(*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
		}
		break;

	default:
		switch (event_code >> 8) {
			case 0:
				code = "reserved system";
				break;
			case 1:
				code = "reserved device";
				break;
			case 2:
				code = "OEM defined";
				break;
			default:
				code = "reserved";
				break;
		}
		printf("APM: %s event code %x\n", code, event_code);
	}
}

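/*
 * Poll the backend: drain all pending BIOS events, then carry out any
 * suspend or standby that the event handlers (or an ioctl) requested by
 * bumping apm_suspends / apm_standbys / apm_userstandbys.  The request
 * counters are cleared on every pass, so a request only survives until
 * the next call.  Called from the APM kernel thread and once at attach
 * time.
 */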
static void
apm_periodic_check(struct apm_softc *sc)
{
	int error;
	u_int event_code, event_info;


	/*
	 * tell the BIOS we're working on it, if asked to do a
	 * suspend/standby
	 */
	if (apm_op_inprog)
		(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
		    APM_LASTREQ_INPROG);

	while ((error = (*sc->sc_ops->aa_get_event)(sc->sc_cookie, &event_code,
	    &event_info)) == 0 && !apm_damn_fool_bios)
		apm_event_handle(sc, event_code, event_info);

	if (error != APM_ERR_NOEVENTS)
		apm_perror("get event", error);
	if (apm_suspends) {
		apm_op_inprog = 0;
		apm_suspend(sc);
	} else if (apm_standbys || apm_userstandbys) {
		apm_op_inprog = 0;
		apm_standby(sc);
	}
	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
	apm_damn_fool_bios = 0;
}

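/*
 * Pick the APM version to run at.  The BIOS-advertised version in
 * sc->sc_vers is honoured only if the matching apm_v11_enabled /
 * apm_v12_enabled knob allows it; anything else is driven as APM 1.0.
 */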
static void
apm_set_ver(struct apm_softc *sc)
{

	if (apm_v12_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 2) {
		apm_majver = 1;
		apm_minver = 2;
		goto ok;
	}

	if (apm_v11_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 1) {
		apm_majver = 1;
		apm_minver = 1;
	} else {
		apm_majver = 1;
		apm_minver = 0;
	}
ok:
	aprint_normal("Power Management spec V%d.%d", apm_majver, apm_minver);
	apm_inited = 1;
}

int
apm_match(void)
{
	static int got;
	return !got++;
}

void
apm_attach(struct apm_softc *sc)
{
	u_int numbatts, capflags;

	aprint_normal(": ");

	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) + APM_MINOR_VERS(sc->sc_vers)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc);	/* prints version info */
	aprint_normal("\n");
	if (apm_minver >= 2)
		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
		    &capflags);

	/*
	 * enable power management
	 */
	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

	if (sc->sc_ops->aa_cpu_busy)
		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;
	selinit(&sc->sc_rsel);
	selinit(&sc->sc_xsel);

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/*
		 * We were unable to create the APM thread; bail out.
		 */
		if (sc->sc_ops->aa_disconnect)
			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
		aprint_error_dev(sc->sc_dev, "unable to create thread, "
		    "kernel APM support disabled\n");
	}

	if (!pmf_device_register(sc->sc_dev, NULL, NULL))
		aprint_error_dev(sc->sc_dev, "couldn't establish power handler\n");
}

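/*
 * The event thread sleeps for (8 * hz) / 7 ticks between passes, i.e.
 * it polls the BIOS event queue roughly every 1.1 seconds (presumably
 * to stay close to the once-a-second cadence APM BIOSes expect).
 */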
void
apm_thread(void *arg)
{
	struct apm_softc *apmsc = arg;

	/*
	 * Loop forever, doing a periodic check for APM events.
	 */
	for (;;) {
		APM_LOCK(apmsc);
		apm_periodic_check(apmsc);
		APM_UNLOCK(apmsc);
		(void) tsleep(apmsc, PWAIT, "apmev",  (8 * hz) / 7);
	}
}

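/*
 * Open semantics: the control minor (APM_CTL) must be opened for
 * writing and admits only one writer at a time; the normal minor
 * (APM_NORMAL) must be opened read-only and may have several readers.
 * Events are queued for any opener, but the in-kernel suspend/standby
 * handling is only deferred to userland while a writer (typically
 * apmd) holds the control device.
 */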
int
apmopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	int ctl = APM(dev);
	int error = 0;
	struct apm_softc *sc;

	sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	if (!sc)
		return ENXIO;

	if (!apm_inited)
		return ENXIO;

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		if (!(flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		if (sc->sc_flags & SCFLAG_OWRITE) {
			error = EBUSY;
			break;
		}
		sc->sc_flags |= SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		if (!(flag & FREAD) || (flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		sc->sc_flags |= SCFLAG_OREAD;
		break;
	default:
		error = ENXIO;
		break;
	}
	APM_UNLOCK(sc);

	return (error);
}

int
apmclose(dev_t dev, int flag, int mode,
	struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	int ctl = APM(dev);

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		sc->sc_flags &= ~SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		sc->sc_flags &= ~SCFLAG_OREAD;
		break;
	}
	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
		sc->sc_event_count = 0;
		sc->sc_event_ptr = 0;
	}
	APM_UNLOCK(sc);
	return 0;
}

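/*
 * ioctl interface.  The state-changing requests require a descriptor
 * opened for writing: APM_IOC_SUSPEND and APM_IOC_STANDBY only bump
 * the request counters and leave the real work to the next periodic
 * check, while APM_IOC_DEV_CTL hands a per-device power state straight
 * to the backend.  APM_IOC_GETPOWER reports battery status; for
 * APM >= 1.1 the battery_flags bits are also folded into the single
 * battery_state field so that older consumers keep working.
 */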
int
apmioctl(dev_t dev, u_long cmd, void *data, int flag,
	struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	struct apm_power_info *powerp;
	struct apm_event_info *evp;
#if 0
	struct apm_ctl *actl;
#endif
	int i, error = 0;
	int batt_flags;
	struct apm_ctl *actl;

	APM_LOCK(sc);
	switch (cmd) {
	case APM_IOC_STANDBY:
		if (!apm_do_standby) {
			error = EOPNOTSUPP;
			break;
		}

		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_userstandbys++;
		break;

	case APM_IOC_DEV_CTL:
		actl = (struct apm_ctl *)data;
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
#if 0
		apm_get_powstate(actl->dev); /* XXX */
#endif
		error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, actl->dev,
		    actl->mode);
		apm_suspends++;
		break;

	case APM_IOC_SUSPEND:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_suspends++;
		break;

	case APM_IOC_NEXTEVENT:
		if (!sc->sc_event_count)
			error = EAGAIN;
		else {
			evp = (struct apm_event_info *)data;
			i = sc->sc_event_ptr + APM_NEVENTS - sc->sc_event_count;
			i %= APM_NEVENTS;
			*evp = sc->sc_event_list[i];
			sc->sc_event_count--;
		}
		break;

	case OAPM_IOC_GETPOWER:
	case APM_IOC_GETPOWER:
		powerp = (struct apm_power_info *)data;
		if ((error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0,
		    powerp)) != 0) {
			apm_perror("ioctl get power status", error);
			error = EIO;
			break;
		}
		switch (apm_minver) {
		case 0:
			break;
		case 1:
		default:
			batt_flags = powerp->battery_flags;
			powerp->battery_state = APM_BATT_UNKNOWN;
			if (batt_flags & APM_BATT_FLAG_HIGH)
				powerp->battery_state = APM_BATT_HIGH;
			else if (batt_flags & APM_BATT_FLAG_LOW)
				powerp->battery_state = APM_BATT_LOW;
			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
				powerp->battery_state = APM_BATT_CRITICAL;
			else if (batt_flags & APM_BATT_FLAG_CHARGING)
				powerp->battery_state = APM_BATT_CHARGING;
			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
				powerp->battery_state = APM_BATT_ABSENT;
			break;
		}
		break;

	default:
		error = ENOTTY;
	}
	APM_UNLOCK(sc);

	return (error);
}

int
apmpoll(dev_t dev, int events, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	int revents = 0;

	APM_LOCK(sc);
	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->sc_event_count)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(l, &sc->sc_rsel);
	}
	APM_UNLOCK(sc);

	return (revents);
}

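/*
 * kqueue support: EVFILT_READ fires while events are queued, with
 * kn_data reporting the queue depth, mirroring what apmpoll() reports
 * for poll(2).
 */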
static void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = kn->kn_hook;

	APM_LOCK(sc);
	selremove_knote(&sc->sc_rsel, kn);
	APM_UNLOCK(sc);
}

static int
filt_apmread(struct knote *kn, long hint)
{
	struct apm_softc *sc = kn->kn_hook;

	kn->kn_data = sc->sc_event_count;
	return (kn->kn_data > 0);
}

static const struct filterops apmread_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_apmrdetach,
	.f_event = filt_apmread,
};

int
apmkqfilter(dev_t dev, struct knote *kn)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &apmread_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	APM_LOCK(sc);
	selrecord_knote(&sc->sc_rsel, kn);
	APM_UNLOCK(sc);

	return (0);
}