subr_log.c revision 1.72
/*	$OpenBSD: subr_log.c,v 1.72 2021/02/08 08:18:45 mpi Exp $	*/
/*	$NetBSD: subr_log.c,v 1.11 1996/03/30 22:24:44 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)subr_log.c	8.1 (Berkeley) 6/10/93
 */

/*
 * Error log buffer for kernel printf's.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/msgbuf.h>
#include <sys/file.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/poll.h>
#include <sys/malloc.h>
#include <sys/filedesc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/fcntl.h>
#include <sys/mutex.h>
#include <sys/timeout.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <dev/cons.h>

#define LOG_RDPRI	(PZERO + 1)
#define LOG_TICK	50		/* log tick interval in msec */

#define LOG_ASYNC	0x04
#define LOG_RDWAIT	0x08

/*
 * Locking:
 *	L	log_mtx
 */
struct logsoftc {
	int	sc_state;		/* [L] see above for possibilities */
	struct	selinfo sc_selp;	/* process waiting on select call */
	struct	sigio_ref sc_sigio;	/* async I/O registration */
	int	sc_need_wakeup;		/* if set, wake up waiters */
	struct timeout sc_tick;		/* wakeup poll timeout */
} logsoftc;

int	log_open;			/* also used in log() */
int	msgbufmapped;			/* is the message buffer mapped */
struct	msgbuf *msgbufp;		/* the mapped buffer, itself. */
struct	msgbuf *consbufp;		/* console message buffer. */
struct	file *syslogf;

/*
 * Lock that serializes access to log message buffers.
 * This should be kept as a leaf lock in order not to constrain where
 * printf(9) can be used.
 */
struct	mutex log_mtx =
    MUTEX_INITIALIZER_FLAGS(IPL_HIGH, "logmtx", MTX_NOWITNESS);

void filt_logrdetach(struct knote *kn);
int filt_logread(struct knote *kn, long hint);

const struct filterops logread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_logrdetach,
	.f_event	= filt_logread,
};

int dosendsyslog(struct proc *, const char *, size_t, int, enum uio_seg);
void logtick(void *);
size_t msgbuf_getlen(struct msgbuf *);
void msgbuf_putchar_locked(struct msgbuf *, const char);

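/*
 * Initialize the kernel message buffer.  If the supplied buffer already
 * looks valid (correct magic, expected size, consistent indices), its
 * contents are kept; otherwise it is zeroed and set up from scratch.
 */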
void
initmsgbuf(caddr_t buf, size_t bufsize)
{
	struct msgbuf *mbp;
	long new_bufs;

	/* Sanity-check the given size. */
	if (bufsize < sizeof(struct msgbuf))
		return;

	mbp = msgbufp = (struct msgbuf *)buf;

	new_bufs = bufsize - offsetof(struct msgbuf, msg_bufc);
	if ((mbp->msg_magic != MSG_MAGIC) || (mbp->msg_bufs != new_bufs) ||
	    (mbp->msg_bufr < 0) || (mbp->msg_bufr >= mbp->msg_bufs) ||
	    (mbp->msg_bufx < 0) || (mbp->msg_bufx >= mbp->msg_bufs)) {
		/*
		 * If the buffer magic number is wrong, has changed
		 * size (which shouldn't happen often), or is
		 * internally inconsistent, initialize it.
		 */

		memset(buf, 0, bufsize);
		mbp->msg_magic = MSG_MAGIC;
		mbp->msg_bufs = new_bufs;
	}

	/*
	 * Always start new buffer data on a new line.
	 * Avoid using log_mtx because mutexes do not work during early boot
	 * on some architectures.
	 */
	if (mbp->msg_bufx > 0 && mbp->msg_bufc[mbp->msg_bufx - 1] != '\n')
		msgbuf_putchar_locked(mbp, '\n');

	/* mark it as ready for use. */
	msgbufmapped = 1;
}

void
initconsbuf(void)
{
	/* Set up a buffer to collect /dev/console output */
	consbufp = malloc(CONSBUFSIZE, M_TTYS, M_WAITOK | M_ZERO);
	consbufp->msg_magic = MSG_MAGIC;
	consbufp->msg_bufs = CONSBUFSIZE - offsetof(struct msgbuf, msg_bufc);
}

void
msgbuf_putchar(struct msgbuf *mbp, const char c)
{
	if (mbp->msg_magic != MSG_MAGIC)
		/* Nothing we can do */
		return;

	mtx_enter(&log_mtx);
	msgbuf_putchar_locked(mbp, c);
	mtx_leave(&log_mtx);
}

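/*
 * Append one character to the message buffer.  The caller either holds
 * log_mtx or runs during early boot before mutexes are usable (see
 * initmsgbuf()).  On overflow the read index is pushed forward so the
 * most recent data is kept, and the dropped-byte counter is bumped.
 */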
void
msgbuf_putchar_locked(struct msgbuf *mbp, const char c)
{
	mbp->msg_bufc[mbp->msg_bufx++] = c;
	if (mbp->msg_bufx < 0 || mbp->msg_bufx >= mbp->msg_bufs)
		mbp->msg_bufx = 0;
	/* If the buffer is full, keep the most recent data. */
	if (mbp->msg_bufr == mbp->msg_bufx) {
		if (++mbp->msg_bufr >= mbp->msg_bufs)
			mbp->msg_bufr = 0;
		mbp->msg_bufd++;
	}
}

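/* Return the number of unread bytes in the message buffer. */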
size_t
msgbuf_getlen(struct msgbuf *mbp)
{
	long len;

	mtx_enter(&log_mtx);
	len = mbp->msg_bufx - mbp->msg_bufr;
	if (len < 0)
		len += mbp->msg_bufs;
	mtx_leave(&log_mtx);
	return (len);
}

int
logopen(dev_t dev, int flags, int mode, struct proc *p)
{
	if (log_open)
		return (EBUSY);
	log_open = 1;
	sigio_init(&logsoftc.sc_sigio);
	timeout_set(&logsoftc.sc_tick, logtick, NULL);
	timeout_add_msec(&logsoftc.sc_tick, LOG_TICK);
	return (0);
}

int
logclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct file *fp;

	fp = syslogf;
	syslogf = NULL;
	if (fp)
		FRELE(fp, p);
	log_open = 0;
	timeout_del(&logsoftc.sc_tick);
	logsoftc.sc_state = 0;
	sigio_free(&logsoftc.sc_sigio);
	return (0);
}

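/*
 * Read from /dev/klog: copy unread bytes out of the kernel message buffer,
 * sleeping until data arrives unless IO_NDELAY is set.  If bytes were
 * dropped while the buffer was full, a warning line is emitted first.
 */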
int
logread(dev_t dev, struct uio *uio, int flag)
{
	struct sleep_state sls;
	struct msgbuf *mbp = msgbufp;
	size_t l, rpos;
	int error = 0;

	mtx_enter(&log_mtx);
	while (mbp->msg_bufr == mbp->msg_bufx) {
		if (flag & IO_NDELAY) {
			error = EWOULDBLOCK;
			goto out;
		}
		logsoftc.sc_state |= LOG_RDWAIT;
		mtx_leave(&log_mtx);
		/*
		 * Set up and enter sleep manually instead of using msleep()
		 * to keep log_mtx as a leaf lock.
		 */
		sleep_setup(&sls, mbp, LOG_RDPRI | PCATCH, "klog", 0);
		error = sleep_finish(&sls, logsoftc.sc_state & LOG_RDWAIT);
		mtx_enter(&log_mtx);
		if (error)
			goto out;
	}

	if (mbp->msg_bufd > 0) {
		char buf[64];
		long ndropped;

		ndropped = mbp->msg_bufd;
		mtx_leave(&log_mtx);
		l = snprintf(buf, sizeof(buf),
		    "<%d>klog: dropped %ld byte%s, message buffer full\n",
		    LOG_KERN|LOG_WARNING, ndropped,
		    ndropped == 1 ? "" : "s");
		error = uiomove(buf, ulmin(l, sizeof(buf) - 1), uio);
		mtx_enter(&log_mtx);
		if (error)
			goto out;
		mbp->msg_bufd -= ndropped;
	}

	while (uio->uio_resid > 0) {
		if (mbp->msg_bufx >= mbp->msg_bufr)
			l = mbp->msg_bufx - mbp->msg_bufr;
		else
			l = mbp->msg_bufs - mbp->msg_bufr;
		l = ulmin(l, uio->uio_resid);
		if (l == 0)
			break;
		rpos = mbp->msg_bufr;
		mtx_leave(&log_mtx);
		/* Ignore that concurrent readers may consume the same data. */
		error = uiomove(&mbp->msg_bufc[rpos], l, uio);
		mtx_enter(&log_mtx);
		if (error)
			break;
		mbp->msg_bufr += l;
		if (mbp->msg_bufr < 0 || mbp->msg_bufr >= mbp->msg_bufs)
			mbp->msg_bufr = 0;
	}
 out:
	mtx_leave(&log_mtx);
	return (error);
}

int
logpoll(dev_t dev, int events, struct proc *p)
{
	int revents = 0;

	mtx_enter(&log_mtx);
	if (events & (POLLIN | POLLRDNORM)) {
		if (msgbufp->msg_bufr != msgbufp->msg_bufx)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &logsoftc.sc_selp);
	}
	mtx_leave(&log_mtx);
	return (revents);
}

int
logkqfilter(dev_t dev, struct knote *kn)
{
	struct klist *klist;
	int s;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &logsoftc.sc_selp.si_note;
		kn->kn_fop = &logread_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (void *)msgbufp;

	s = splhigh();
	klist_insert_locked(klist, kn);
	splx(s);

	return (0);
}

void
filt_logrdetach(struct knote *kn)
{
	int s;

	s = splhigh();
	klist_remove_locked(&logsoftc.sc_selp.si_note, kn);
	splx(s);
}

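/* kqueue read filter: report the unread byte count; active when nonzero. */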
int
filt_logread(struct knote *kn, long hint)
{
	struct msgbuf *mbp = kn->kn_hook;

	kn->kn_data = msgbuf_getlen(mbp);
	return (kn->kn_data != 0);
}

void
logwakeup(void)
{
	/*
	 * The actual wakeup has to be deferred because logwakeup() can be
	 * called in very varied contexts.
	 * Keep the print routines usable in as many situations as possible
	 * by not using locking here.
	 */

	/*
	 * Ensure that preceding stores become visible to other CPUs
	 * before the flag.
	 */
	membar_producer();

	logsoftc.sc_need_wakeup = 1;
}

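/*
 * Timeout handler: perform the wakeups deferred by logwakeup() and
 * reschedule itself for as long as /dev/klog is open.
 */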
void
logtick(void *arg)
{
	int state;

	if (!log_open)
		return;

	if (!logsoftc.sc_need_wakeup)
		goto out;
	logsoftc.sc_need_wakeup = 0;

	/*
	 * sc_need_wakeup has to be cleared before handling the wakeup.
	 * Visiting log_mtx ensures the proper order.
	 */

	mtx_enter(&log_mtx);
	state = logsoftc.sc_state;
	if (logsoftc.sc_state & LOG_RDWAIT)
		logsoftc.sc_state &= ~LOG_RDWAIT;
	mtx_leave(&log_mtx);

	selwakeup(&logsoftc.sc_selp);
	if (state & LOG_ASYNC)
		pgsigio(&logsoftc.sc_sigio, SIGIO, 0);
	if (state & LOG_RDWAIT)
		wakeup(msgbufp);
out:
	timeout_add_msec(&logsoftc.sc_tick, LOG_TICK);
}

int
logioctl(dev_t dev, u_long com, caddr_t data, int flag, struct proc *p)
{
	struct file *fp;
	int error;

	switch (com) {

	/* return number of characters immediately available */
	case FIONREAD:
		*(int *)data = (int)msgbuf_getlen(msgbufp);
		break;

	case FIONBIO:
		break;

	case FIOASYNC:
		mtx_enter(&log_mtx);
		if (*(int *)data)
			logsoftc.sc_state |= LOG_ASYNC;
		else
			logsoftc.sc_state &= ~LOG_ASYNC;
		mtx_leave(&log_mtx);
		break;

	case FIOSETOWN:
	case TIOCSPGRP:
		return (sigio_setown(&logsoftc.sc_sigio, com, data));

	case FIOGETOWN:
	case TIOCGPGRP:
		sigio_getown(&logsoftc.sc_sigio, com, data);
		break;

	case LIOCSFD:
		if ((error = suser(p)) != 0)
			return (error);
		fp = syslogf;
		if ((error = getsock(p, *(int *)data, &syslogf)) != 0)
			return (error);
		if (fp)
			FRELE(fp, p);
		break;

	default:
		return (ENOTTY);
	}
	return (0);
}

int
sys_sendsyslog(struct proc *p, void *v, register_t *retval)
{
	struct sys_sendsyslog_args /* {
		syscallarg(const char *) buf;
		syscallarg(size_t) nbyte;
		syscallarg(int) flags;
	} */ *uap = v;
	int error;
	static int dropped_count, orig_error, orig_pid;

	if (dropped_count) {
		size_t l;
		char buf[80];

		l = snprintf(buf, sizeof(buf),
		    "<%d>sendsyslog: dropped %d message%s, error %d, pid %d",
		    LOG_KERN|LOG_WARNING, dropped_count,
		    dropped_count == 1 ? "" : "s", orig_error, orig_pid);
		error = dosendsyslog(p, buf, ulmin(l, sizeof(buf) - 1),
		    0, UIO_SYSSPACE);
		if (error == 0) {
			dropped_count = 0;
			orig_error = 0;
			orig_pid = 0;
		}
	}
	error = dosendsyslog(p, SCARG(uap, buf), SCARG(uap, nbyte),
	    SCARG(uap, flags), UIO_USERSPACE);
	if (error) {
		dropped_count++;
		orig_error = error;
		orig_pid = p->p_p->ps_pid;
	}
	return (error);
}

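/*
 * Common backend for sendsyslog(2): send the message to the socket that
 * syslogd registered via LIOCSFD, or fall back to the console when
 * LOG_CONS is set and no socket is available.
 */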
int
dosendsyslog(struct proc *p, const char *buf, size_t nbyte, int flags,
    enum uio_seg sflg)
{
#ifdef KTRACE
	struct iovec ktriov;
#endif
	struct file *fp;
	char pri[6], *kbuf;
	struct iovec aiov;
	struct uio auio;
	size_t i, len;
	int error;

	if (nbyte > LOG_MAXLINE)
		nbyte = LOG_MAXLINE;

	/* Global variable syslogf may change during sleep, use local copy. */
	fp = syslogf;
	if (fp)
		FREF(fp);
	else if (!ISSET(flags, LOG_CONS))
		return (ENOTCONN);
	else {
		/*
		 * Strip off syslog priority when logging to console.
		 * LOG_PRIMASK | LOG_FACMASK is 0x03ff, so at most 4
		 * decimal digits may appear in priority as <1023>.
		 */
		len = MIN(nbyte, sizeof(pri));
		if (sflg == UIO_USERSPACE) {
			if ((error = copyin(buf, pri, len)))
				return (error);
		} else
			memcpy(pri, buf, len);
		if (0 < len && pri[0] == '<') {
			for (i = 1; i < len; i++) {
				if (pri[i] < '0' || pri[i] > '9')
					break;
			}
			if (i < len && pri[i] == '>') {
				i++;
				/* There must be at least one digit <0>. */
				if (i >= 3) {
					buf += i;
					nbyte -= i;
				}
			}
		}
	}

	aiov.iov_base = (char *)buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_segflg = sflg;
	auio.uio_rw = UIO_WRITE;
	auio.uio_procp = p;
	auio.uio_offset = 0;
	auio.uio_resid = aiov.iov_len;
#ifdef KTRACE
	if (sflg == UIO_USERSPACE && KTRPOINT(p, KTR_GENIO))
		ktriov = aiov;
	else
		ktriov.iov_len = 0;
#endif

	len = auio.uio_resid;
	if (fp) {
		int flags = (fp->f_flag & FNONBLOCK) ? MSG_DONTWAIT : 0;
		error = sosend(fp->f_data, NULL, &auio, NULL, NULL, flags);
		if (error == 0)
			len -= auio.uio_resid;
	} else if (constty || cn_devvp) {
		error = cnwrite(0, &auio, 0);
		if (error == 0)
			len -= auio.uio_resid;
		aiov.iov_base = "\r\n";
		aiov.iov_len = 2;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_procp = p;
		auio.uio_offset = 0;
		auio.uio_resid = aiov.iov_len;
		cnwrite(0, &auio, 0);
	} else {
		/* XXX console redirection breaks down... */
		if (sflg == UIO_USERSPACE) {
			kbuf = malloc(len, M_TEMP, M_WAITOK);
			error = copyin(aiov.iov_base, kbuf, len);
		} else {
			kbuf = aiov.iov_base;
			error = 0;
		}
		if (error == 0)
			for (i = 0; i < len; i++) {
				if (kbuf[i] == '\0')
					break;
				cnputc(kbuf[i]);
				auio.uio_resid--;
			}
		if (sflg == UIO_USERSPACE)
			free(kbuf, M_TEMP, len);
		if (error == 0)
			len -= auio.uio_resid;
		cnputc('\n');
	}

#ifdef KTRACE
	if (error == 0 && ktriov.iov_len != 0)
		ktrgenio(p, -1, UIO_WRITE, &ktriov, len);
#endif
	if (fp)
		FRELE(fp, p);
	else
		error = ENOTCONN;
	return (error);
}