1/*-
2 * Copyright (c) 2005-2007 Joseph Koshy
3 * Copyright (c) 2007 The FreeBSD Foundation
4 * All rights reserved.
5 *
6 * Portions of this software were developed by A. Joseph Koshy under
7 * sponsorship from the FreeBSD Foundation and Google, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 */
31
32/*
33 * Logging code for hwpmc(4)
34 */
35
36#include <sys/cdefs.h>
37__FBSDID("$FreeBSD: stable/11/sys/dev/hwpmc/hwpmc_logging.c 331722 2018-03-29 02:50:57Z eadler $");
38
39#include <sys/param.h>
40#if (__FreeBSD_version >= 1100000)
41#include <sys/capsicum.h>
42#else
43#include <sys/capability.h>
44#endif
45#include <sys/file.h>
46#include <sys/kernel.h>
47#include <sys/kthread.h>
48#include <sys/lock.h>
49#include <sys/module.h>
50#include <sys/mutex.h>
51#include <sys/pmc.h>
52#include <sys/pmckern.h>
53#include <sys/pmclog.h>
54#include <sys/proc.h>
55#include <sys/signalvar.h>
56#include <sys/syscallsubr.h>
57#include <sys/sysctl.h>
58#include <sys/systm.h>
59#include <sys/uio.h>
60#include <sys/unistd.h>
61#include <sys/vnode.h>
62
63/*
64 * Sysctl tunables
65 */
66
67SYSCTL_DECL(_kern_hwpmc);
68
69/*
70 * kern.hwpmc.logbuffersize -- size of the per-cpu owner buffers.
71 */
72
73static int pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
74#if (__FreeBSD_version < 1100000)
75TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "logbuffersize", &pmclog_buffer_size);
76#endif
77SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuffersize, CTLFLAG_RDTUN,
78    &pmclog_buffer_size, 0, "size of log buffers in kilobytes");
79
80/*
81 * kern.hwpmc.nbuffer -- number of global log buffers
82 */
83
84static int pmc_nlogbuffers = PMC_NLOGBUFFERS;
85#if (__FreeBSD_version < 1100000)
86TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nbuffers", &pmc_nlogbuffers);
87#endif
88SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffers, CTLFLAG_RDTUN,
89    &pmc_nlogbuffers, 0, "number of global log buffers");
90
91/*
92 * Global log buffer list and associated spin lock.
93 */
94
95TAILQ_HEAD(, pmclog_buffer) pmc_bufferlist =
96	TAILQ_HEAD_INITIALIZER(pmc_bufferlist);
97static struct mtx pmc_bufferlist_mtx;	/* spin lock */
98static struct mtx pmc_kthread_mtx;	/* sleep lock */
99
/*
 * (Re)initialize the pointers inside log buffer descriptor 'D'.
 *
 * Each buffer is a single allocation of 1024*pmclog_buffer_size bytes
 * with the 'struct pmclog_buffer' descriptor at its start: 'plb_fence'
 * marks one past the end of the allocation, while 'plb_base'/'plb_ptr'
 * point just past the descriptor, rounded up to uint32_t alignment.
 *
 * NOTE(review): '__roundup' is technically a reserved identifier
 * (leading double underscore, C11 7.1.3); harmless in practice here.
 */
#define	PMCLOG_INIT_BUFFER_DESCRIPTOR(D) do {				\
		const int __roundup = roundup(sizeof(*D),		\
			sizeof(uint32_t));				\
		(D)->plb_fence = ((char *) (D)) +			\
			 1024*pmclog_buffer_size;			\
		(D)->plb_base  = (D)->plb_ptr = ((char *) (D)) +	\
			__roundup;					\
	} while (0)
108
109
110/*
111 * Log file record constructors.
112 */
/*
 * Build the 32-bit record header word: magic cookie in the top byte,
 * record type in the next byte, record length (bytes) in the low 16.
 */
#define	_PMCLOG_TO_HEADER(T,L)						\
	((PMCLOG_HEADER_MAGIC << 24) |					\
	 (PMCLOG_TYPE_ ## T << 16)   |					\
	 ((L) & 0xFFFF))

/* reserve LEN bytes of space and initialize the entry header */
/*
 * CAUTION: this macro deliberately opens a 'do {' that is closed only
 * by the matching PMCLOG_DESPATCH() below; the pair must always appear
 * together in the same scope.  On success it leaves '_le' pointing
 * past the header word and two-word timestamp (hence '_le += 3') for
 * the PMCLOG_EMIT* macros to append record payload, and leaves the
 * owner's spin mutex held until PMCLOG_DESPATCH() releases it.
 */
#define	_PMCLOG_RESERVE(PO,TYPE,LEN,ACTION) do {			\
		uint32_t *_le;						\
		int _len = roundup((LEN), sizeof(uint32_t));		\
		if ((_le = pmclog_reserve((PO), _len)) == NULL) {	\
			ACTION;						\
		}							\
		*_le = _PMCLOG_TO_HEADER(TYPE,_len);			\
		_le += 3	/* skip over timestamp */

/* Variants differing only in what happens when reservation fails. */
#define	PMCLOG_RESERVE(P,T,L)		_PMCLOG_RESERVE(P,T,L,return)
#define	PMCLOG_RESERVE_WITH_ERROR(P,T,L) _PMCLOG_RESERVE(P,T,L,		\
	error=ENOMEM;goto error)

/* Append one 32-bit word, or a 64-bit value as two little-endian words. */
#define	PMCLOG_EMIT32(V)	do { *_le++ = (V); } while (0)
#define	PMCLOG_EMIT64(V)	do { 					\
		*_le++ = (uint32_t) ((V) & 0xFFFFFFFF);			\
		*_le++ = (uint32_t) (((V) >> 32) & 0xFFFFFFFF);		\
	} while (0)


/* Emit a string.  Caution: does NOT update _le, so needs to be last */
#define	PMCLOG_EMITSTRING(S,L)	do { bcopy((S), _le, (L)); } while (0)
#define	PMCLOG_EMITNULLSTRING(L) do { bzero(_le, (L)); } while (0)

/* Close the 'do {' opened by _PMCLOG_RESERVE and release the owner lock. */
#define	PMCLOG_DESPATCH(PO)						\
		pmclog_release((PO));					\
	} while (0)
146
147
148/*
149 * Assertions about the log file format.
150 */
151
/*
 * The recurring "3*4" / "+ 3 words" term below is the fixed record
 * prefix: one 32-bit header word plus a two-word timestamp.  Any
 * change to the structures in <sys/pmclog.h> must keep these in sync.
 */
CTASSERT(sizeof(struct pmclog_callchain) == 6*4 +
    PMC_CALLCHAIN_DEPTH_MAX*sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_closelog) == 3*4);
CTASSERT(sizeof(struct pmclog_dropnotify) == 3*4);
CTASSERT(sizeof(struct pmclog_map_in) == PATH_MAX +
    4*4 + sizeof(uintfptr_t));
CTASSERT(offsetof(struct pmclog_map_in,pl_pathname) ==
    4*4 + sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_map_out) == 4*4 + 2*sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_pcsample) == 6*4 + sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_pmcallocate) == 6*4);
CTASSERT(sizeof(struct pmclog_pmcattach) == 5*4 + PATH_MAX);
CTASSERT(offsetof(struct pmclog_pmcattach,pl_pathname) == 5*4);
CTASSERT(sizeof(struct pmclog_pmcdetach) == 5*4);
CTASSERT(sizeof(struct pmclog_proccsw) == 5*4 + 8);
CTASSERT(sizeof(struct pmclog_procexec) == 5*4 + PATH_MAX +
    sizeof(uintfptr_t));
CTASSERT(offsetof(struct pmclog_procexec,pl_pathname) == 5*4 +
    sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_procexit) == 5*4 + 8);
CTASSERT(sizeof(struct pmclog_procfork) == 5*4);
CTASSERT(sizeof(struct pmclog_sysexit) == 4*4);
CTASSERT(sizeof(struct pmclog_userdata) == 4*4);
175
176/*
177 * Log buffer structure
178 */
179
struct pmclog_buffer {
	TAILQ_ENTRY(pmclog_buffer) plb_next;	/* free-pool or per-owner queue linkage */
	char 		*plb_base;	/* start of usable record space */
	char		*plb_ptr;	/* current fill position (base <= ptr <= fence) */
	char 		*plb_fence;	/* one past the end of the allocation */
};
186
187/*
188 * Prototypes
189 */
190
191static int pmclog_get_buffer(struct pmc_owner *po);
192static void pmclog_loop(void *arg);
193static void pmclog_release(struct pmc_owner *po);
194static uint32_t *pmclog_reserve(struct pmc_owner *po, int length);
195static void pmclog_schedule_io(struct pmc_owner *po);
196static void pmclog_stop_kthread(struct pmc_owner *po);
197
198/*
199 * Helper functions
200 */
201
202/*
203 * Get a log buffer
204 */
205
206static int
207pmclog_get_buffer(struct pmc_owner *po)
208{
209	struct pmclog_buffer *plb;
210
211	mtx_assert(&po->po_mtx, MA_OWNED);
212
213	KASSERT(po->po_curbuf == NULL,
214	    ("[pmclog,%d] po=%p current buffer still valid", __LINE__, po));
215
216	mtx_lock_spin(&pmc_bufferlist_mtx);
217	if ((plb = TAILQ_FIRST(&pmc_bufferlist)) != NULL)
218		TAILQ_REMOVE(&pmc_bufferlist, plb, plb_next);
219	mtx_unlock_spin(&pmc_bufferlist_mtx);
220
221	PMCDBG2(LOG,GTB,1, "po=%p plb=%p", po, plb);
222
223#ifdef	HWPMC_DEBUG
224	if (plb)
225		KASSERT(plb->plb_ptr == plb->plb_base &&
226		    plb->plb_base < plb->plb_fence,
227		    ("[pmclog,%d] po=%p buffer invariants: ptr=%p "
228		    "base=%p fence=%p", __LINE__, po, plb->plb_ptr,
229		    plb->plb_base, plb->plb_fence));
230#endif
231
232	po->po_curbuf = plb;
233
234	/* update stats */
235	atomic_add_int(&pmc_stats.pm_buffer_requests, 1);
236	if (plb == NULL)
237		atomic_add_int(&pmc_stats.pm_buffer_requests_failed, 1);
238
239	return (plb ? 0 : ENOMEM);
240}
241
/*
 * Handshake state shared between pmclog_proc_create(), the spawned
 * helper kthread (pmclog_loop()) and pmclog_proc_ignite().  Access is
 * serialized by pmc_kthread_mtx; sleep/wakeup uses the struct address
 * as the wait channel.
 */
struct pmclog_proc_init_args {
	struct proc *kthr;	/* the helper kernel process */
	struct pmc_owner *po;	/* owner to service; NULL until ignited */
	bool exit;		/* ignite decided the thread should just exit */
	bool acted;		/* set once the kthread has consumed the decision */
};
248
249int
250pmclog_proc_create(struct thread *td, void **handlep)
251{
252	struct pmclog_proc_init_args *ia;
253	int error;
254
255	ia = malloc(sizeof(*ia), M_TEMP, M_WAITOK | M_ZERO);
256	error = kproc_create(pmclog_loop, ia, &ia->kthr,
257	    RFHIGHPID, 0, "hwpmc: proc(%d)", td->td_proc->p_pid);
258	if (error == 0)
259		*handlep = ia;
260	return (error);
261}
262
/*
 * Complete the handshake started by pmclog_proc_create(): either bind
 * the parked helper kthread to owner 'po', or (po == NULL) tell it to
 * exit.  Blocks until the kthread acknowledges via 'acted', then frees
 * the handshake state.
 */
void
pmclog_proc_ignite(void *handle, struct pmc_owner *po)
{
	struct pmclog_proc_init_args *ia;

	ia = handle;
	mtx_lock(&pmc_kthread_mtx);
	/* The handshake must still be pristine: kthread parked, undecided. */
	MPASS(!ia->acted);
	MPASS(ia->po == NULL);
	MPASS(!ia->exit);
	MPASS(ia->kthr != NULL);
	if (po == NULL) {
		/* Owner setup failed elsewhere; tell the kthread to go away. */
		ia->exit = true;
	} else {
		ia->po = po;
		KASSERT(po->po_kthread == NULL,
		    ("[pmclog,%d] po=%p kthread (%p) already present",
		    __LINE__, po, po->po_kthread));
		po->po_kthread = ia->kthr;
	}
	/* Wake the parked kthread, then wait for it to consume the decision. */
	wakeup(ia);
	while (!ia->acted)
		msleep(ia, &pmc_kthread_mtx, PWAIT, "pmclogw", 0);
	mtx_unlock(&pmc_kthread_mtx);
	/* Both sides are done with the handshake state. */
	free(ia, M_TEMP);
}
289
290/*
291 * Log handler loop.
292 *
293 * This function is executed by each pmc owner's helper thread.
294 */
295
static void
pmclog_loop(void *arg)
{
	struct pmclog_proc_init_args *ia;
	struct pmc_owner *po;
	struct pmclog_buffer *lb;	/* buffer currently being written out */
	struct proc *p;
	struct ucred *ownercred;	/* owner process' credentials */
	struct ucred *mycred;		/* this kthread's own credentials */
	struct thread *td;
	sigset_t unb;
	struct uio auio;
	struct iovec aiov;
	size_t nbytes;
	int error;

	td = curthread;

	/*
	 * Unblock SIGHUP: pmclog_stop_kthread() uses it to knock this
	 * thread out of a blocking fo_write().
	 */
	SIGEMPTYSET(unb);
	SIGADDSET(unb, SIGHUP);
	(void)kern_sigprocmask(td, SIG_UNBLOCK, &unb, NULL, 0);

	/*
	 * Park until pmclog_proc_ignite() either binds us to an owner
	 * or asks us to exit; acknowledge via 'acted' in both cases.
	 */
	ia = arg;
	MPASS(ia->kthr == curproc);
	MPASS(!ia->acted);
	mtx_lock(&pmc_kthread_mtx);
	while (ia->po == NULL && !ia->exit)
		msleep(ia, &pmc_kthread_mtx, PWAIT, "pmclogi", 0);
	if (ia->exit) {
		ia->acted = true;
		wakeup(ia);
		mtx_unlock(&pmc_kthread_mtx);
		kproc_exit(0);
	}
	MPASS(ia->po != NULL);
	po = ia->po;
	ia->acted = true;
	wakeup(ia);
	mtx_unlock(&pmc_kthread_mtx);
	ia = NULL;	/* ignite frees the handshake state; don't touch it again */

	p = po->po_owner;
	mycred = td->td_ucred;

	/* Hold a reference on the owner's credentials for the writes below. */
	PROC_LOCK(p);
	ownercred = crhold(p->p_ucred);
	PROC_UNLOCK(p);

	PMCDBG2(LOG,INI,1, "po=%p kt=%p", po, po->po_kthread);
	KASSERT(po->po_kthread == curthread->td_proc,
	    ("[pmclog,%d] proc mismatch po=%p po/kt=%p curproc=%p", __LINE__,
		po, po->po_kthread, curthread->td_proc));

	lb = NULL;


	/*
	 * Loop waiting for I/O requests to be added to the owner
	 * struct's queue.  The loop is exited when the log file
	 * is deconfigured.
	 */

	mtx_lock(&pmc_kthread_mtx);

	for (;;) {

		/* check if we've been asked to exit */
		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
			break;

		if (lb == NULL) { /* look for a fresh buffer to write */
			mtx_lock_spin(&po->po_mtx);
			if ((lb = TAILQ_FIRST(&po->po_logbuffers)) == NULL) {
				mtx_unlock_spin(&po->po_mtx);

				/* No more buffers and shutdown required. */
				if (po->po_flags & PMC_PO_SHUTDOWN)
					break;

				/* Sleep until a producer queues a buffer. */
				(void) msleep(po, &pmc_kthread_mtx, PWAIT,
				    "pmcloop", 0);
				continue;
			}

			TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
			mtx_unlock_spin(&po->po_mtx);
		}

		/* Drop the sleep mutex across the (possibly blocking) write. */
		mtx_unlock(&pmc_kthread_mtx);

		/* process the request */
		PMCDBG3(LOG,WRI,2, "po=%p base=%p ptr=%p", po,
		    lb->plb_base, lb->plb_ptr);
		/* change our thread's credentials before issuing the I/O */

		aiov.iov_base = lb->plb_base;
		aiov.iov_len  = nbytes = lb->plb_ptr - lb->plb_base;

		auio.uio_iov    = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = -1;
		auio.uio_resid  = nbytes;
		auio.uio_rw     = UIO_WRITE;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_td     = td;

		/* switch thread credentials -- see kern_ktrace.c */
		td->td_ucred = ownercred;
		error = fo_write(po->po_file, &auio, ownercred, 0, td);
		td->td_ucred = mycred;

		if (error) {
			/* XXX some errors are recoverable */
			/* send a SIGIO to the owner and exit */
			PROC_LOCK(p);
			kern_psignal(p, SIGIO);
			PROC_UNLOCK(p);

			mtx_lock(&pmc_kthread_mtx);

			po->po_error = error; /* save for flush log */

			PMCDBG2(LOG,WRI,2, "po=%p error=%d", po, error);

			break;
		}

		mtx_lock(&pmc_kthread_mtx);

		/* put the used buffer back into the global pool */
		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);

		mtx_lock_spin(&pmc_bufferlist_mtx);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
		mtx_unlock_spin(&pmc_bufferlist_mtx);

		lb = NULL;
	}

	/* Signal pmclog_stop_kthread(), which waits on po_kthread. */
	wakeup_one(po->po_kthread);
	po->po_kthread = NULL;

	mtx_unlock(&pmc_kthread_mtx);

	/* return the current I/O buffer to the global pool */
	if (lb) {
		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);

		mtx_lock_spin(&pmc_bufferlist_mtx);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
		mtx_unlock_spin(&pmc_bufferlist_mtx);
	}

	/*
	 * Exit this thread, signalling the waiter
	 */

	crfree(ownercred);

	kproc_exit(0);
}
457
458/*
459 * Release and log entry and schedule an I/O if needed.
460 */
461
static void
pmclog_release(struct pmc_owner *po)
{
	/*
	 * Finish a reservation made by pmclog_reserve(): called with the
	 * owner's spin mutex held (taken in pmclog_reserve()), drops it
	 * after possibly handing a now-full buffer to the helper kthread.
	 */
	KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base,
	    ("[pmclog,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__,
		po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base));
	KASSERT(po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
	    ("[pmclog,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__,
		po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_fence));

	/* schedule an I/O if we've filled a buffer */
	if (po->po_curbuf->plb_ptr >= po->po_curbuf->plb_fence)
		pmclog_schedule_io(po);

	mtx_unlock_spin(&po->po_mtx);

	PMCDBG1(LOG,REL,1, "po=%p", po);
}
480
481
482/*
483 * Attempt to reserve 'length' bytes of space in an owner's log
484 * buffer.  The function returns a pointer to 'length' bytes of space
485 * if there was enough space or returns NULL if no space was
486 * available.  Non-null returns do so with the po mutex locked.  The
487 * caller must invoke pmclog_release() on the pmc owner structure
488 * when done.
489 */
490
491static uint32_t *
492pmclog_reserve(struct pmc_owner *po, int length)
493{
494	uintptr_t newptr, oldptr;
495	uint32_t *lh;
496	struct timespec ts;
497
498	PMCDBG2(LOG,ALL,1, "po=%p len=%d", po, length);
499
500	KASSERT(length % sizeof(uint32_t) == 0,
501	    ("[pmclog,%d] length not a multiple of word size", __LINE__));
502
503	mtx_lock_spin(&po->po_mtx);
504
505	/* No more data when shutdown in progress. */
506	if (po->po_flags & PMC_PO_SHUTDOWN) {
507		mtx_unlock_spin(&po->po_mtx);
508		return (NULL);
509	}
510
511	if (po->po_curbuf == NULL)
512		if (pmclog_get_buffer(po) != 0) {
513			mtx_unlock_spin(&po->po_mtx);
514			return (NULL);
515		}
516
517	KASSERT(po->po_curbuf != NULL,
518	    ("[pmclog,%d] po=%p no current buffer", __LINE__, po));
519
520	KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base &&
521	    po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
522	    ("[pmclog,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p",
523		__LINE__, po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base,
524		po->po_curbuf->plb_fence));
525
526	oldptr = (uintptr_t) po->po_curbuf->plb_ptr;
527	newptr = oldptr + length;
528
529	KASSERT(oldptr != (uintptr_t) NULL,
530	    ("[pmclog,%d] po=%p Null log buffer pointer", __LINE__, po));
531
532	/*
533	 * If we have space in the current buffer, return a pointer to
534	 * available space with the PO structure locked.
535	 */
536	if (newptr <= (uintptr_t) po->po_curbuf->plb_fence) {
537		po->po_curbuf->plb_ptr = (char *) newptr;
538		goto done;
539	}
540
541	/*
542	 * Otherwise, schedule the current buffer for output and get a
543	 * fresh buffer.
544	 */
545	pmclog_schedule_io(po);
546
547	if (pmclog_get_buffer(po) != 0) {
548		mtx_unlock_spin(&po->po_mtx);
549		return (NULL);
550	}
551
552	KASSERT(po->po_curbuf != NULL,
553	    ("[pmclog,%d] po=%p no current buffer", __LINE__, po));
554
555	KASSERT(po->po_curbuf->plb_ptr != NULL,
556	    ("[pmclog,%d] null return from pmc_get_log_buffer", __LINE__));
557
558	KASSERT(po->po_curbuf->plb_ptr == po->po_curbuf->plb_base &&
559	    po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
560	    ("[pmclog,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p",
561		__LINE__, po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base,
562		po->po_curbuf->plb_fence));
563
564	oldptr = (uintptr_t) po->po_curbuf->plb_ptr;
565
566 done:
567	lh = (uint32_t *) oldptr;
568	lh++;				/* skip header */
569	getnanotime(&ts);		/* fill in the timestamp */
570	*lh++ = ts.tv_sec & 0xFFFFFFFF;
571	*lh++ = ts.tv_nsec & 0xFFFFFFF;
572	return ((uint32_t *) oldptr);
573}
574
575/*
576 * Schedule an I/O.
577 *
578 * Transfer the current buffer to the helper kthread.
579 */
580
static void
pmclog_schedule_io(struct pmc_owner *po)
{
	/*
	 * Move the owner's current buffer onto its I/O queue and wake the
	 * helper kthread.  Called with the owner's spin mutex held; the
	 * owner is left with no current buffer.
	 */
	KASSERT(po->po_curbuf != NULL,
	    ("[pmclog,%d] schedule_io with null buffer po=%p", __LINE__, po));

	KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base,
	    ("[pmclog,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__,
		po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base));
	KASSERT(po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
	    ("[pmclog,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__,
		po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_fence));

	PMCDBG1(LOG,SIO, 1, "po=%p", po);

	mtx_assert(&po->po_mtx, MA_OWNED);

	/*
	 * Add the current buffer to the tail of the buffer list and
	 * wakeup the helper.
	 */
	TAILQ_INSERT_TAIL(&po->po_logbuffers, po->po_curbuf, plb_next);
	po->po_curbuf = NULL;
	wakeup_one(po);
}
606
607/*
608 * Stop the helper kthread.
609 */
610
static void
pmclog_stop_kthread(struct pmc_owner *po)
{

	/*
	 * Clear OWNS_LOGFILE (the kthread's loop condition), kick the
	 * kthread out of a blocking fo_write() with SIGHUP, wake it in
	 * case it is sleeping for more buffers, and wait until it clears
	 * po_kthread on its way out (see pmclog_loop()).
	 */
	mtx_lock(&pmc_kthread_mtx);
	po->po_flags &= ~PMC_PO_OWNS_LOGFILE;
	if (po->po_kthread != NULL) {
		PROC_LOCK(po->po_kthread);
		kern_psignal(po->po_kthread, SIGHUP);
		PROC_UNLOCK(po->po_kthread);
	}
	wakeup_one(po);
	while (po->po_kthread)
		msleep(po->po_kthread, &pmc_kthread_mtx, PPAUSE, "pmckstp", 0);
	mtx_unlock(&pmc_kthread_mtx);
}
627
628/*
629 * Public functions
630 */
631
632/*
633 * Configure a log file for pmc owner 'po'.
634 *
635 * Parameter 'logfd' is a file handle referencing an open file in the
636 * owner process.  This file needs to have been opened for writing.
637 */
638
int
pmclog_configure_log(struct pmc_mdep *md, struct pmc_owner *po, int logfd)
{
	struct proc *p;
	cap_rights_t rights;
	int error;

	sx_assert(&pmc_sx, SA_XLOCKED);
	PMCDBG2(LOG,CFG,1, "config po=%p logfd=%d", po, logfd);

	p = po->po_owner;

	/* return EBUSY if a log file was already present */
	if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		return (EBUSY);

	KASSERT(po->po_file == NULL,
	    ("[pmclog,%d] po=%p file (%p) already present", __LINE__, po,
		po->po_file));

	/* get a reference to the file state */
	error = fget_write(curthread, logfd,
	    cap_rights_init(&rights, CAP_WRITE), &po->po_file);
	if (error)
		goto error;

	/* mark process as owning a log file */
	po->po_flags |= PMC_PO_OWNS_LOGFILE;

	/* mark process as using HWPMCs */
	PROC_LOCK(p);
	p->p_flag |= P_HWPMC;
	PROC_UNLOCK(p);

	/*
	 * create a log initialization entry; on reservation failure the
	 * RESERVE_WITH_ERROR macro sets error=ENOMEM and jumps to 'error'.
	 */
	PMCLOG_RESERVE_WITH_ERROR(po, INITIALIZE,
	    sizeof(struct pmclog_initialize));
	PMCLOG_EMIT32(PMC_VERSION);
	PMCLOG_EMIT32(md->pmd_cputype);
	PMCLOG_DESPATCH(po);

	return (0);

 error:
	/* undo everything done above; P_HWPMC is left set intentionally? */
	/* NOTE(review): p->p_flag P_HWPMC is not cleared here -- confirm
	 * callers handle that, or that the flag is harmless when set. */
	KASSERT(po->po_kthread == NULL, ("[pmclog,%d] po=%p kthread not "
	    "stopped", __LINE__, po));

	if (po->po_file)
		(void) fdrop(po->po_file, curthread);
	po->po_file  = NULL;	/* clear file and error state */
	po->po_error = 0;
	po->po_flags &= ~PMC_PO_OWNS_LOGFILE;

	return (error);
}
694
695
696/*
697 * De-configure a log file.  This will throw away any buffers queued
698 * for this owner process.
699 */
700
701int
702pmclog_deconfigure_log(struct pmc_owner *po)
703{
704	int error;
705	struct pmclog_buffer *lb;
706
707	PMCDBG1(LOG,CFG,1, "de-config po=%p", po);
708
709	if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
710		return (EINVAL);
711
712	KASSERT(po->po_sscount == 0,
713	    ("[pmclog,%d] po=%p still owning SS PMCs", __LINE__, po));
714	KASSERT(po->po_file != NULL,
715	    ("[pmclog,%d] po=%p no log file", __LINE__, po));
716
717	/* stop the kthread, this will reset the 'OWNS_LOGFILE' flag */
718	pmclog_stop_kthread(po);
719
720	KASSERT(po->po_kthread == NULL,
721	    ("[pmclog,%d] po=%p kthread not stopped", __LINE__, po));
722
723	/* return all queued log buffers to the global pool */
724	while ((lb = TAILQ_FIRST(&po->po_logbuffers)) != NULL) {
725		TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
726		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
727		mtx_lock_spin(&pmc_bufferlist_mtx);
728		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
729		mtx_unlock_spin(&pmc_bufferlist_mtx);
730	}
731
732	/* return the 'current' buffer to the global pool */
733	if ((lb = po->po_curbuf) != NULL) {
734		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
735		mtx_lock_spin(&pmc_bufferlist_mtx);
736		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
737		mtx_unlock_spin(&pmc_bufferlist_mtx);
738	}
739
740	/* drop a reference to the fd */
741	if (po->po_file != NULL) {
742		error = fdrop(po->po_file, curthread);
743		po->po_file = NULL;
744	} else
745		error = 0;
746	po->po_error = 0;
747
748	return (error);
749}
750
751/*
752 * Flush a process' log buffer.
753 */
754
int
pmclog_flush(struct pmc_owner *po)
{
	int error;
	struct pmclog_buffer *lb;

	PMCDBG1(LOG,FLS,1, "po=%p", po);

	/*
	 * If there is a pending error recorded by the logger thread,
	 * return that.
	 */
	if (po->po_error)
		return (po->po_error);

	error = 0;

	/*
	 * Check that we do have an active log file.
	 */
	mtx_lock(&pmc_kthread_mtx);
	if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
		error = EINVAL;
		goto error;
	}

	/*
	 * Schedule the current buffer if any and not empty.
	 * Note that an empty or absent current buffer is reported to the
	 * caller as ENOBUFS ("nothing to flush"), not as success.
	 */
	mtx_lock_spin(&po->po_mtx);
	lb = po->po_curbuf;
	if (lb && lb->plb_ptr != lb->plb_base) {
		pmclog_schedule_io(po);
	} else
		error = ENOBUFS;
	mtx_unlock_spin(&po->po_mtx);

 error:
	mtx_unlock(&pmc_kthread_mtx);

	return (error);
}
797
int
pmclog_close(struct pmc_owner *po)
{

	PMCDBG1(LOG,CLO,1, "po=%p", po);

	/* Append a CLOSELOG record before tearing things down. */
	pmclog_process_closelog(po);

	mtx_lock(&pmc_kthread_mtx);

	/*
	 * Schedule the current buffer.
	 */
	mtx_lock_spin(&po->po_mtx);
	if (po->po_curbuf)
		pmclog_schedule_io(po);
	else
		/* no pending data: wake the kthread so it notices SHUTDOWN */
		wakeup_one(po);
	mtx_unlock_spin(&po->po_mtx);

	/*
	 * Initiate shutdown: no new data queued,
	 * thread will close file on last block.
	 */
	po->po_flags |= PMC_PO_SHUTDOWN;

	mtx_unlock(&pmc_kthread_mtx);

	return (0);
}
828
/*
 * Emit a CALLCHAIN record: pid, pmc id, cpu/flags word, then
 * 'ps_nsamples' program-counter values.  The record is variable
 * length; PMCLOG_RESERVE/PMCLOG_DESPATCH bracket a hidden do/while
 * and hold the owner's spin lock in between.
 */
void
pmclog_process_callchain(struct pmc *pm, struct pmc_sample *ps)
{
	int n, recordlen;
	uint32_t flags;
	struct pmc_owner *po;

	PMCDBG3(LOG,SAM,1,"pm=%p pid=%d n=%d", pm, ps->ps_pid,
	    ps->ps_nsamples);

	recordlen = offsetof(struct pmclog_callchain, pl_pc) +
	    ps->ps_nsamples * sizeof(uintfptr_t);
	po = pm->pm_owner;
	flags = PMC_CALLCHAIN_TO_CPUFLAGS(ps->ps_cpu,ps->ps_flags);
	PMCLOG_RESERVE(po, CALLCHAIN, recordlen);
	PMCLOG_EMIT32(ps->ps_pid);
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(flags);
	for (n = 0; n < ps->ps_nsamples; n++)
		PMCLOG_EMITADDR(ps->ps_pc[n]);
	PMCLOG_DESPATCH(po);
}
851
/* Emit a fixed-size CLOSELOG record (header + timestamp only). */
void
pmclog_process_closelog(struct pmc_owner *po)
{
	PMCLOG_RESERVE(po,CLOSELOG,sizeof(struct pmclog_closelog));
	PMCLOG_DESPATCH(po);
}
858
/* Emit a DROPNOTIFY record marking that samples were dropped. */
void
pmclog_process_dropnotify(struct pmc_owner *po)
{
	PMCLOG_RESERVE(po,DROPNOTIFY,sizeof(struct pmclog_dropnotify));
	PMCLOG_DESPATCH(po);
}
865
/*
 * Emit a MAP_IN record: pid, mapping start address and NUL-terminated
 * pathname of the mapped object.  Record length varies with the path.
 */
void
pmclog_process_map_in(struct pmc_owner *po, pid_t pid, uintfptr_t start,
    const char *path)
{
	int pathlen, recordlen;

	KASSERT(path != NULL, ("[pmclog,%d] map-in, null path", __LINE__));

	pathlen = strlen(path) + 1;	/* #bytes for path name */
	recordlen = offsetof(struct pmclog_map_in, pl_pathname) +
	    pathlen;

	PMCLOG_RESERVE(po, MAP_IN, recordlen);
	PMCLOG_EMIT32(pid);
	PMCLOG_EMITADDR(start);
	/* string emit must come last: it does not advance _le */
	PMCLOG_EMITSTRING(path,pathlen);
	PMCLOG_DESPATCH(po);
}
884
/* Emit a MAP_OUT record: pid plus the [start,end] unmapped range. */
void
pmclog_process_map_out(struct pmc_owner *po, pid_t pid, uintfptr_t start,
    uintfptr_t end)
{
	KASSERT(start <= end, ("[pmclog,%d] start > end", __LINE__));

	PMCLOG_RESERVE(po, MAP_OUT, sizeof(struct pmclog_map_out));
	PMCLOG_EMIT32(pid);
	PMCLOG_EMITADDR(start);
	PMCLOG_EMITADDR(end);
	PMCLOG_DESPATCH(po);
}
897
/*
 * Emit a PMCALLOCATE record describing a newly allocated PMC.  Software
 * (PMC_CLASS_SOFT) events get the extended PMCALLOCATEDYN record which
 * additionally carries the event's name (or zeros if the event has
 * disappeared by the time we log it).
 */
void
pmclog_process_pmcallocate(struct pmc *pm)
{
	struct pmc_owner *po;
	struct pmc_soft *ps;

	po = pm->pm_owner;

	PMCDBG1(LOG,ALL,1, "pm=%p", pm);

	if (PMC_TO_CLASS(pm) == PMC_CLASS_SOFT) {
		PMCLOG_RESERVE(po, PMCALLOCATEDYN,
		    sizeof(struct pmclog_pmcallocatedyn));
		PMCLOG_EMIT32(pm->pm_id);
		PMCLOG_EMIT32(pm->pm_event);
		PMCLOG_EMIT32(pm->pm_flags);
		/* look up the soft event to record its name */
		ps = pmc_soft_ev_acquire(pm->pm_event);
		if (ps != NULL)
			PMCLOG_EMITSTRING(ps->ps_ev.pm_ev_name,PMC_NAME_MAX);
		else
			PMCLOG_EMITNULLSTRING(PMC_NAME_MAX);
		pmc_soft_ev_release(ps);
		PMCLOG_DESPATCH(po);
	} else {
		PMCLOG_RESERVE(po, PMCALLOCATE,
		    sizeof(struct pmclog_pmcallocate));
		PMCLOG_EMIT32(pm->pm_id);
		PMCLOG_EMIT32(pm->pm_event);
		PMCLOG_EMIT32(pm->pm_flags);
		PMCLOG_DESPATCH(po);
	}
}
930
/*
 * Emit a PMCATTACH record: pmc id, target pid and the target's
 * executable pathname.  Record length varies with the path.
 */
void
pmclog_process_pmcattach(struct pmc *pm, pid_t pid, char *path)
{
	int pathlen, recordlen;
	struct pmc_owner *po;

	PMCDBG2(LOG,ATT,1,"pm=%p pid=%d", pm, pid);

	po = pm->pm_owner;

	pathlen = strlen(path) + 1;	/* #bytes for the string */
	recordlen = offsetof(struct pmclog_pmcattach, pl_pathname) + pathlen;

	PMCLOG_RESERVE(po, PMCATTACH, recordlen);
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(pid);
	/* string emit must come last: it does not advance _le */
	PMCLOG_EMITSTRING(path, pathlen);
	PMCLOG_DESPATCH(po);
}
950
/* Emit a PMCDETACH record: pmc id and the pid it detached from. */
void
pmclog_process_pmcdetach(struct pmc *pm, pid_t pid)
{
	struct pmc_owner *po;

	PMCDBG2(LOG,ATT,1,"!pm=%p pid=%d", pm, pid);

	po = pm->pm_owner;

	PMCLOG_RESERVE(po, PMCDETACH, sizeof(struct pmclog_pmcdetach));
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(pid);
	PMCLOG_DESPATCH(po);
}
965
966/*
967 * Log a context switch event to the log file.
968 */
969
void
pmclog_process_proccsw(struct pmc *pm, struct pmc_process *pp, pmc_value_t v)
{
	struct pmc_owner *po;

	/* only PMCs allocated with PMC_F_LOG_PROCCSW should get here */
	KASSERT(pm->pm_flags & PMC_F_LOG_PROCCSW,
	    ("[pmclog,%d] log-process-csw called gratuitously", __LINE__));

	PMCDBG3(LOG,SWO,1,"pm=%p pid=%d v=%jx", pm, pp->pp_proc->p_pid,
	    v);

	po = pm->pm_owner;

	/* record: pmc id, 64-bit accumulated value, pid */
	PMCLOG_RESERVE(po, PROCCSW, sizeof(struct pmclog_proccsw));
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT64(v);
	PMCLOG_EMIT32(pp->pp_proc->p_pid);
	PMCLOG_DESPATCH(po);
}
989
/*
 * Emit a PROCEXEC record: pid, image start address, pmc id and the
 * executable's pathname.  Record length varies with the path.
 */
void
pmclog_process_procexec(struct pmc_owner *po, pmc_id_t pmid, pid_t pid,
    uintfptr_t startaddr, char *path)
{
	int pathlen, recordlen;

	PMCDBG3(LOG,EXC,1,"po=%p pid=%d path=\"%s\"", po, pid, path);

	pathlen   = strlen(path) + 1;	/* #bytes for the path */
	recordlen = offsetof(struct pmclog_procexec, pl_pathname) + pathlen;

	PMCLOG_RESERVE(po, PROCEXEC, recordlen);
	PMCLOG_EMIT32(pid);
	PMCLOG_EMITADDR(startaddr);
	PMCLOG_EMIT32(pmid);
	/* string emit must come last: it does not advance _le */
	PMCLOG_EMITSTRING(path,pathlen);
	PMCLOG_DESPATCH(po);
}
1008
1009/*
1010 * Log a process exit event (and accumulated pmc value) to the log file.
1011 */
1012
void
pmclog_process_procexit(struct pmc *pm, struct pmc_process *pp)
{
	int ri;
	struct pmc_owner *po;

	/* row index selects this PMC's accumulated value for the process */
	ri = PMC_TO_ROWINDEX(pm);
	PMCDBG3(LOG,EXT,1,"pm=%p pid=%d v=%jx", pm, pp->pp_proc->p_pid,
	    pp->pp_pmcs[ri].pp_pmcval);

	po = pm->pm_owner;

	/* record: pmc id, 64-bit final value, pid */
	PMCLOG_RESERVE(po, PROCEXIT, sizeof(struct pmclog_procexit));
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT64(pp->pp_pmcs[ri].pp_pmcval);
	PMCLOG_EMIT32(pp->pp_proc->p_pid);
	PMCLOG_DESPATCH(po);
}
1031
1032/*
1033 * Log a fork event.
1034 */
1035
/* Emit a PROCFORK record: parent pid followed by child pid. */
void
pmclog_process_procfork(struct pmc_owner *po, pid_t oldpid, pid_t newpid)
{
	PMCLOG_RESERVE(po, PROCFORK, sizeof(struct pmclog_procfork));
	PMCLOG_EMIT32(oldpid);
	PMCLOG_EMIT32(newpid);
	PMCLOG_DESPATCH(po);
}
1044
1045/*
1046 * Log a process exit event of the form suitable for system-wide PMCs.
1047 */
1048
/* Emit a SYSEXIT record (system-wide-PMC flavor of process exit). */
void
pmclog_process_sysexit(struct pmc_owner *po, pid_t pid)
{
	PMCLOG_RESERVE(po, SYSEXIT, sizeof(struct pmclog_sysexit));
	PMCLOG_EMIT32(pid);
	PMCLOG_DESPATCH(po);
}
1056
1057/*
1058 * Write a user log entry.
1059 */
1060
/*
 * Emit a USERDATA record carrying a user-supplied 32-bit value.
 * Returns 0 on success or ENOMEM if no log space could be reserved
 * (set by the RESERVE_WITH_ERROR macro, which jumps to 'error').
 */
int
pmclog_process_userlog(struct pmc_owner *po, struct pmc_op_writelog *wl)
{
	int error;

	PMCDBG2(LOG,WRI,1, "writelog po=%p ud=0x%x", po, wl->pm_userdata);

	error = 0;

	PMCLOG_RESERVE_WITH_ERROR(po, USERDATA,
	    sizeof(struct pmclog_userdata));
	PMCLOG_EMIT32(wl->pm_userdata);
	PMCLOG_DESPATCH(po);

 error:
	return (error);
}
1078
1079/*
1080 * Initialization.
1081 *
1082 * Create a pool of log buffers and initialize mutexes.
1083 */
1084
1085void
1086pmclog_initialize()
1087{
1088	int n;
1089	struct pmclog_buffer *plb;
1090
1091	if (pmclog_buffer_size <= 0) {
1092		(void) printf("hwpmc: tunable logbuffersize=%d must be "
1093		    "greater than zero.\n", pmclog_buffer_size);
1094		pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
1095	}
1096
1097	if (pmc_nlogbuffers <= 0) {
1098		(void) printf("hwpmc: tunable nlogbuffers=%d must be greater "
1099		    "than zero.\n", pmc_nlogbuffers);
1100		pmc_nlogbuffers = PMC_NLOGBUFFERS;
1101	}
1102
1103	/* create global pool of log buffers */
1104	for (n = 0; n < pmc_nlogbuffers; n++) {
1105		plb = malloc(1024 * pmclog_buffer_size, M_PMC,
1106		    M_WAITOK|M_ZERO);
1107		PMCLOG_INIT_BUFFER_DESCRIPTOR(plb);
1108		TAILQ_INSERT_HEAD(&pmc_bufferlist, plb, plb_next);
1109	}
1110	mtx_init(&pmc_bufferlist_mtx, "pmc-buffer-list", "pmc-leaf",
1111	    MTX_SPIN);
1112	mtx_init(&pmc_kthread_mtx, "pmc-kthread", "pmc-sleep", MTX_DEF);
1113}
1114
1115/*
1116 * Shutdown logging.
1117 *
1118 * Destroy mutexes and release memory back the to free pool.
1119 */
1120
1121void
1122pmclog_shutdown()
1123{
1124	struct pmclog_buffer *plb;
1125
1126	mtx_destroy(&pmc_kthread_mtx);
1127	mtx_destroy(&pmc_bufferlist_mtx);
1128
1129	while ((plb = TAILQ_FIRST(&pmc_bufferlist)) != NULL) {
1130		TAILQ_REMOVE(&pmc_bufferlist, plb, plb_next);
1131		free(plb, M_PMC);
1132	}
1133}
1134