kern_ktrace.c revision 170587
1139804Simp/*- 21541Srgrimes * Copyright (c) 1989, 1993 3152376Srwatson * The Regents of the University of California. 4152376Srwatson * Copyright (c) 2005 Robert N. M. Watson 5152376Srwatson * All rights reserved. 61541Srgrimes * 71541Srgrimes * Redistribution and use in source and binary forms, with or without 81541Srgrimes * modification, are permitted provided that the following conditions 91541Srgrimes * are met: 101541Srgrimes * 1. Redistributions of source code must retain the above copyright 111541Srgrimes * notice, this list of conditions and the following disclaimer. 121541Srgrimes * 2. Redistributions in binary form must reproduce the above copyright 131541Srgrimes * notice, this list of conditions and the following disclaimer in the 141541Srgrimes * documentation and/or other materials provided with the distribution. 151541Srgrimes * 4. Neither the name of the University nor the names of its contributors 161541Srgrimes * may be used to endorse or promote products derived from this software 171541Srgrimes * without specific prior written permission. 181541Srgrimes * 191541Srgrimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 201541Srgrimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 211541Srgrimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 221541Srgrimes * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 231541Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 241541Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 251541Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 261541Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 271541Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 281541Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 291541Srgrimes * SUCH DAMAGE. 301541Srgrimes * 311541Srgrimes * @(#)kern_ktrace.c 8.2 (Berkeley) 9/23/93 321541Srgrimes */ 331541Srgrimes 34116182Sobrien#include <sys/cdefs.h> 35116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_ktrace.c 170587 2007-06-12 00:12:01Z rwatson $"); 36116182Sobrien 3713203Swollman#include "opt_ktrace.h" 38101123Srwatson#include "opt_mac.h" 391541Srgrimes 401541Srgrimes#include <sys/param.h> 412112Swollman#include <sys/systm.h> 4297993Sjhb#include <sys/fcntl.h> 4397993Sjhb#include <sys/kernel.h> 4497993Sjhb#include <sys/kthread.h> 4576166Smarkm#include <sys/lock.h> 4676166Smarkm#include <sys/mutex.h> 4797993Sjhb#include <sys/malloc.h> 48155031Sjeff#include <sys/mount.h> 4997993Sjhb#include <sys/namei.h> 50164033Srwatson#include <sys/priv.h> 511541Srgrimes#include <sys/proc.h> 5297993Sjhb#include <sys/unistd.h> 531541Srgrimes#include <sys/vnode.h> 541541Srgrimes#include <sys/ktrace.h> 5574927Sjhb#include <sys/sx.h> 5697993Sjhb#include <sys/sysctl.h> 571541Srgrimes#include <sys/syslog.h> 5897993Sjhb#include <sys/sysproto.h> 591541Srgrimes 60163606Srwatson#include <security/mac/mac_framework.h> 61163606Srwatson 62152376Srwatson/* 63152376Srwatson * The ktrace facility allows the tracing of certain key events in user space 64152376Srwatson * processes, such as system calls, signal delivery, context switches, and 65152376Srwatson * user generated events 
using utrace(2). It works by streaming event 66152376Srwatson * records and data to a vnode associated with the process using the 67152376Srwatson * ktrace(2) system call. In general, records can be written directly from 68152376Srwatson * the context that generates the event. One important exception to this is 69152376Srwatson * during a context switch, where sleeping is not permitted. To handle this 70152376Srwatson * case, trace events are generated using in-kernel ktr_request records, and 71152376Srwatson * then delivered to disk at a convenient moment -- either immediately, the 72152376Srwatson * next traceable event, at system call return, or at process exit. 73152376Srwatson * 74152376Srwatson * When dealing with multiple threads or processes writing to the same event 75152376Srwatson * log, ordering guarantees are weak: specifically, if an event has multiple 76152376Srwatson * records (i.e., system call enter and return), they may be interlaced with 77152376Srwatson * records from another event. Process and thread ID information is provided 78152376Srwatson * in the record, and user applications can de-interlace events if required. 
79152376Srwatson */ 80152376Srwatson 8130354Sphkstatic MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE"); 8230309Sphk 8313203Swollman#ifdef KTRACE 8412577Sbde 8597993Sjhb#ifndef KTRACE_REQUEST_POOL 8697993Sjhb#define KTRACE_REQUEST_POOL 100 8797993Sjhb#endif 8812819Sphk 8997993Sjhbstruct ktr_request { 9097993Sjhb struct ktr_header ktr_header; 91151927Srwatson void *ktr_buffer; 9297993Sjhb union { 9397993Sjhb struct ktr_syscall ktr_syscall; 9497993Sjhb struct ktr_sysret ktr_sysret; 9597993Sjhb struct ktr_genio ktr_genio; 9697993Sjhb struct ktr_psig ktr_psig; 9797993Sjhb struct ktr_csw ktr_csw; 9897993Sjhb } ktr_data; 9997993Sjhb STAILQ_ENTRY(ktr_request) ktr_list; 10097993Sjhb}; 10197993Sjhb 10297993Sjhbstatic int data_lengths[] = { 10397993Sjhb 0, /* none */ 10497993Sjhb offsetof(struct ktr_syscall, ktr_args), /* KTR_SYSCALL */ 10597993Sjhb sizeof(struct ktr_sysret), /* KTR_SYSRET */ 10697993Sjhb 0, /* KTR_NAMEI */ 10797993Sjhb sizeof(struct ktr_genio), /* KTR_GENIO */ 10897993Sjhb sizeof(struct ktr_psig), /* KTR_PSIG */ 10997993Sjhb sizeof(struct ktr_csw), /* KTR_CSW */ 11097993Sjhb 0 /* KTR_USER */ 11197993Sjhb}; 11297993Sjhb 11397993Sjhbstatic STAILQ_HEAD(, ktr_request) ktr_free; 11497993Sjhb 115141633Sphkstatic SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options"); 116103234Sjhb 117118607Sjhbstatic u_int ktr_requestpool = KTRACE_REQUEST_POOL; 118103234SjhbTUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool); 11997993Sjhb 120118607Sjhbstatic u_int ktr_geniosize = PAGE_SIZE; 121103234SjhbTUNABLE_INT("kern.ktrace.genio_size", &ktr_geniosize); 122103234SjhbSYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RW, &ktr_geniosize, 123103234Sjhb 0, "Maximum size of genio event payload"); 124103234Sjhb 12597993Sjhbstatic int print_message = 1; 12697993Sjhbstruct mtx ktrace_mtx; 127152376Srwatsonstatic struct sx ktrace_sx; 12897993Sjhb 12997993Sjhbstatic void ktrace_init(void *dummy); 13097993Sjhbstatic int 
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS); 131118607Sjhbstatic u_int ktrace_resize_pool(u_int newsize); 13297993Sjhbstatic struct ktr_request *ktr_getrequest(int type); 133152376Srwatsonstatic void ktr_submitrequest(struct thread *td, struct ktr_request *req); 13497993Sjhbstatic void ktr_freerequest(struct ktr_request *req); 135152376Srwatsonstatic void ktr_writerequest(struct thread *td, struct ktr_request *req); 13697993Sjhbstatic int ktrcanset(struct thread *,struct proc *); 13797993Sjhbstatic int ktrsetchildren(struct thread *,struct proc *,int,int,struct vnode *); 13897993Sjhbstatic int ktrops(struct thread *,struct proc *,int,int,struct vnode *); 13997993Sjhb 140152376Srwatson/* 141152376Srwatson * ktrace itself generates events, such as context switches, which we do not 142152376Srwatson * wish to trace. Maintain a flag, TDP_INKTRACE, on each thread to determine 143152376Srwatson * whether or not it is in a region where tracing of events should be 144152376Srwatson * suppressed. 
145152376Srwatson */ 14697993Sjhbstatic void 147152376Srwatsonktrace_enter(struct thread *td) 148152376Srwatson{ 149152376Srwatson 150152376Srwatson KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set")); 151152376Srwatson td->td_pflags |= TDP_INKTRACE; 152152376Srwatson} 153152376Srwatson 154152376Srwatsonstatic void 155152376Srwatsonktrace_exit(struct thread *td) 156152376Srwatson{ 157152376Srwatson 158152376Srwatson KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set")); 159152376Srwatson td->td_pflags &= ~TDP_INKTRACE; 160152376Srwatson} 161152376Srwatson 162152376Srwatsonstatic void 163152376Srwatsonktrace_assert(struct thread *td) 164152376Srwatson{ 165152376Srwatson 166152376Srwatson KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set")); 167152376Srwatson} 168152376Srwatson 169152376Srwatsonstatic void 17097993Sjhbktrace_init(void *dummy) 1711541Srgrimes{ 17297993Sjhb struct ktr_request *req; 17397993Sjhb int i; 1741541Srgrimes 17597993Sjhb mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET); 176152376Srwatson sx_init(&ktrace_sx, "ktrace_sx"); 17797993Sjhb STAILQ_INIT(&ktr_free); 17897993Sjhb for (i = 0; i < ktr_requestpool; i++) { 179111119Simp req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK); 18097993Sjhb STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list); 18197993Sjhb } 1821541Srgrimes} 18397993SjhbSYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL); 1841541Srgrimes 18597993Sjhbstatic int 18697993Sjhbsysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS) 18797993Sjhb{ 18897993Sjhb struct thread *td; 189118607Sjhb u_int newsize, oldsize, wantsize; 19097993Sjhb int error; 19197993Sjhb 19297993Sjhb /* Handle easy read-only case first to avoid warnings from GCC. 
*/ 19397993Sjhb if (!req->newptr) { 19497993Sjhb mtx_lock(&ktrace_mtx); 19597993Sjhb oldsize = ktr_requestpool; 19697993Sjhb mtx_unlock(&ktrace_mtx); 197118607Sjhb return (SYSCTL_OUT(req, &oldsize, sizeof(u_int))); 19897993Sjhb } 19997993Sjhb 200118607Sjhb error = SYSCTL_IN(req, &wantsize, sizeof(u_int)); 20197993Sjhb if (error) 20297993Sjhb return (error); 20397993Sjhb td = curthread; 204152376Srwatson ktrace_enter(td); 20597993Sjhb mtx_lock(&ktrace_mtx); 20697993Sjhb oldsize = ktr_requestpool; 20797993Sjhb newsize = ktrace_resize_pool(wantsize); 20897993Sjhb mtx_unlock(&ktrace_mtx); 209152376Srwatson ktrace_exit(td); 210118607Sjhb error = SYSCTL_OUT(req, &oldsize, sizeof(u_int)); 21197993Sjhb if (error) 21297993Sjhb return (error); 213122478Sjkoshy if (wantsize > oldsize && newsize < wantsize) 21497993Sjhb return (ENOSPC); 21597993Sjhb return (0); 21697993Sjhb} 217103234SjhbSYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW, 21897993Sjhb &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU", ""); 21997993Sjhb 220118607Sjhbstatic u_int 221118607Sjhbktrace_resize_pool(u_int newsize) 22297993Sjhb{ 22397993Sjhb struct ktr_request *req; 224122478Sjkoshy int bound; 22597993Sjhb 22697993Sjhb mtx_assert(&ktrace_mtx, MA_OWNED); 22797993Sjhb print_message = 1; 228122478Sjkoshy bound = newsize - ktr_requestpool; 229122478Sjkoshy if (bound == 0) 230122478Sjkoshy return (ktr_requestpool); 231122478Sjkoshy if (bound < 0) 23297993Sjhb /* Shrink pool down to newsize if possible. */ 233122478Sjkoshy while (bound++ < 0) { 23497993Sjhb req = STAILQ_FIRST(&ktr_free); 23597993Sjhb if (req == NULL) 23697993Sjhb return (ktr_requestpool); 23797993Sjhb STAILQ_REMOVE_HEAD(&ktr_free, ktr_list); 23897993Sjhb ktr_requestpool--; 23997993Sjhb mtx_unlock(&ktrace_mtx); 24097993Sjhb free(req, M_KTRACE); 24197993Sjhb mtx_lock(&ktrace_mtx); 24297993Sjhb } 24397993Sjhb else 24497993Sjhb /* Grow pool up to newsize. 
*/ 245122478Sjkoshy while (bound-- > 0) { 24697993Sjhb mtx_unlock(&ktrace_mtx); 24797993Sjhb req = malloc(sizeof(struct ktr_request), M_KTRACE, 248111119Simp M_WAITOK); 24997993Sjhb mtx_lock(&ktrace_mtx); 25097993Sjhb STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list); 25197993Sjhb ktr_requestpool++; 25297993Sjhb } 25397993Sjhb return (ktr_requestpool); 25497993Sjhb} 25597993Sjhb 25697993Sjhbstatic struct ktr_request * 25797993Sjhbktr_getrequest(int type) 25897993Sjhb{ 25997993Sjhb struct ktr_request *req; 26097993Sjhb struct thread *td = curthread; 26197993Sjhb struct proc *p = td->td_proc; 26297993Sjhb int pm; 26397993Sjhb 264152376Srwatson ktrace_enter(td); /* XXX: In caller instead? */ 265152430Srwatson mtx_lock(&ktrace_mtx); 26697993Sjhb if (!KTRCHECK(td, type)) { 267152430Srwatson mtx_unlock(&ktrace_mtx); 268152376Srwatson ktrace_exit(td); 26997993Sjhb return (NULL); 27097993Sjhb } 27197993Sjhb req = STAILQ_FIRST(&ktr_free); 27297993Sjhb if (req != NULL) { 27397993Sjhb STAILQ_REMOVE_HEAD(&ktr_free, ktr_list); 27497993Sjhb req->ktr_header.ktr_type = type; 275112199Sjhb if (p->p_traceflag & KTRFAC_DROP) { 276112199Sjhb req->ktr_header.ktr_type |= KTR_DROP; 277112199Sjhb p->p_traceflag &= ~KTRFAC_DROP; 278112199Sjhb } 279152430Srwatson mtx_unlock(&ktrace_mtx); 28097993Sjhb microtime(&req->ktr_header.ktr_time); 28197993Sjhb req->ktr_header.ktr_pid = p->p_pid; 282151929Srwatson req->ktr_header.ktr_tid = td->td_tid; 28397993Sjhb bcopy(p->p_comm, req->ktr_header.ktr_comm, MAXCOMLEN + 1); 284151927Srwatson req->ktr_buffer = NULL; 28597993Sjhb req->ktr_header.ktr_len = 0; 28697993Sjhb } else { 287112199Sjhb p->p_traceflag |= KTRFAC_DROP; 28897993Sjhb pm = print_message; 28997993Sjhb print_message = 0; 29097993Sjhb mtx_unlock(&ktrace_mtx); 29197993Sjhb if (pm) 29297993Sjhb printf("Out of ktrace request objects.\n"); 293152376Srwatson ktrace_exit(td); 29497993Sjhb } 29597993Sjhb return (req); 29697993Sjhb} 29797993Sjhb 298152376Srwatson/* 299152376Srwatson * Some trace 
generation environments don't permit direct access to VFS, 300152376Srwatson * such as during a context switch where sleeping is not allowed. Under these 301152376Srwatson * circumstances, queue a request to the thread to be written asynchronously 302152376Srwatson * later. 303152376Srwatson */ 30497993Sjhbstatic void 305152376Srwatsonktr_enqueuerequest(struct thread *td, struct ktr_request *req) 30697993Sjhb{ 30797993Sjhb 30897993Sjhb mtx_lock(&ktrace_mtx); 309152376Srwatson STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list); 310118599Sjhb mtx_unlock(&ktrace_mtx); 311152376Srwatson ktrace_exit(td); 31297993Sjhb} 31397993Sjhb 314152376Srwatson/* 315152376Srwatson * Drain any pending ktrace records from the per-thread queue to disk. This 316152376Srwatson * is used both internally before committing other records, and also on 317152376Srwatson * system call return. We drain all the ones we can find at the time when 318152376Srwatson * drain is requested, but don't keep draining after that as those events 319152376Srwatson * may me approximately "after" the current event. 320152376Srwatson */ 32197993Sjhbstatic void 322152376Srwatsonktr_drain(struct thread *td) 323152376Srwatson{ 324152376Srwatson struct ktr_request *queued_req; 325152376Srwatson STAILQ_HEAD(, ktr_request) local_queue; 326152376Srwatson 327152376Srwatson ktrace_assert(td); 328152376Srwatson sx_assert(&ktrace_sx, SX_XLOCKED); 329152376Srwatson 330152376Srwatson STAILQ_INIT(&local_queue); /* XXXRW: needed? 
*/ 331152376Srwatson 332152376Srwatson if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) { 333152376Srwatson mtx_lock(&ktrace_mtx); 334152376Srwatson STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr); 335152376Srwatson mtx_unlock(&ktrace_mtx); 336152376Srwatson 337152376Srwatson while ((queued_req = STAILQ_FIRST(&local_queue))) { 338152376Srwatson STAILQ_REMOVE_HEAD(&local_queue, ktr_list); 339152376Srwatson ktr_writerequest(td, queued_req); 340152376Srwatson ktr_freerequest(queued_req); 341152376Srwatson } 342152376Srwatson } 343152376Srwatson} 344152376Srwatson 345152376Srwatson/* 346152376Srwatson * Submit a trace record for immediate commit to disk -- to be used only 347152376Srwatson * where entering VFS is OK. First drain any pending records that may have 348152376Srwatson * been cached in the thread. 349152376Srwatson */ 350152376Srwatsonstatic void 351152376Srwatsonktr_submitrequest(struct thread *td, struct ktr_request *req) 352152376Srwatson{ 353152376Srwatson 354152376Srwatson ktrace_assert(td); 355152376Srwatson 356152376Srwatson sx_xlock(&ktrace_sx); 357152376Srwatson ktr_drain(td); 358152376Srwatson ktr_writerequest(td, req); 359152376Srwatson ktr_freerequest(req); 360152376Srwatson sx_xunlock(&ktrace_sx); 361152376Srwatson 362152376Srwatson ktrace_exit(td); 363152376Srwatson} 364152376Srwatson 365152376Srwatsonstatic void 36697993Sjhbktr_freerequest(struct ktr_request *req) 36797993Sjhb{ 36897993Sjhb 369151927Srwatson if (req->ktr_buffer != NULL) 370151927Srwatson free(req->ktr_buffer, M_KTRACE); 37197993Sjhb mtx_lock(&ktrace_mtx); 37297993Sjhb STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list); 37397993Sjhb mtx_unlock(&ktrace_mtx); 37497993Sjhb} 37597993Sjhb 3761549Srgrimesvoid 37797993Sjhbktrsyscall(code, narg, args) 37847955Sdt int code, narg; 37947955Sdt register_t args[]; 3801541Srgrimes{ 38197993Sjhb struct ktr_request *req; 38297993Sjhb struct ktr_syscall *ktp; 38397993Sjhb size_t buflen; 384103233Sjhb char *buf = NULL; 3851541Srgrimes 386103233Sjhb buflen = 
sizeof(register_t) * narg; 387103233Sjhb if (buflen > 0) { 388111119Simp buf = malloc(buflen, M_KTRACE, M_WAITOK); 389103233Sjhb bcopy(args, buf, buflen); 390103233Sjhb } 39197993Sjhb req = ktr_getrequest(KTR_SYSCALL); 392104230Sphk if (req == NULL) { 393104230Sphk if (buf != NULL) 394104230Sphk free(buf, M_KTRACE); 39597993Sjhb return; 396104230Sphk } 39797993Sjhb ktp = &req->ktr_data.ktr_syscall; 3981541Srgrimes ktp->ktr_code = code; 3991541Srgrimes ktp->ktr_narg = narg; 40097993Sjhb if (buflen > 0) { 40197993Sjhb req->ktr_header.ktr_len = buflen; 402151927Srwatson req->ktr_buffer = buf; 40397993Sjhb } 404152376Srwatson ktr_submitrequest(curthread, req); 4051541Srgrimes} 4061541Srgrimes 4071549Srgrimesvoid 40897993Sjhbktrsysret(code, error, retval) 40947955Sdt int code, error; 41047955Sdt register_t retval; 4111541Srgrimes{ 41297993Sjhb struct ktr_request *req; 41397993Sjhb struct ktr_sysret *ktp; 4141541Srgrimes 41597993Sjhb req = ktr_getrequest(KTR_SYSRET); 41697993Sjhb if (req == NULL) 41797993Sjhb return; 41897993Sjhb ktp = &req->ktr_data.ktr_sysret; 41997993Sjhb ktp->ktr_code = code; 42097993Sjhb ktp->ktr_error = error; 42197993Sjhb ktp->ktr_retval = retval; /* what about val2 ? */ 422152376Srwatson ktr_submitrequest(curthread, req); 4231541Srgrimes} 4241541Srgrimes 425152376Srwatson/* 426152376Srwatson * When a process exits, drain per-process asynchronous trace records. 427152376Srwatson */ 4281549Srgrimesvoid 429152376Srwatsonktrprocexit(struct thread *td) 430152376Srwatson{ 431152376Srwatson 432152376Srwatson ktrace_enter(td); 433152376Srwatson sx_xlock(&ktrace_sx); 434152376Srwatson ktr_drain(td); 435152376Srwatson sx_xunlock(&ktrace_sx); 436152376Srwatson ktrace_exit(td); 437152376Srwatson} 438152376Srwatson 439152376Srwatson/* 440152376Srwatson * When a thread returns, drain any asynchronous records generated by the 441152376Srwatson * system call. 
442152376Srwatson */ 443152376Srwatsonvoid 444152376Srwatsonktruserret(struct thread *td) 445152376Srwatson{ 446152376Srwatson 447152376Srwatson ktrace_enter(td); 448152376Srwatson sx_xlock(&ktrace_sx); 449152376Srwatson ktr_drain(td); 450152376Srwatson sx_xunlock(&ktrace_sx); 451152376Srwatson ktrace_exit(td); 452152376Srwatson} 453152376Srwatson 454152376Srwatsonvoid 45597993Sjhbktrnamei(path) 4561541Srgrimes char *path; 4571541Srgrimes{ 45897993Sjhb struct ktr_request *req; 45997993Sjhb int namelen; 460103233Sjhb char *buf = NULL; 4611541Srgrimes 462103233Sjhb namelen = strlen(path); 463103233Sjhb if (namelen > 0) { 464111119Simp buf = malloc(namelen, M_KTRACE, M_WAITOK); 465103233Sjhb bcopy(path, buf, namelen); 466103233Sjhb } 46797993Sjhb req = ktr_getrequest(KTR_NAMEI); 468104230Sphk if (req == NULL) { 469104230Sphk if (buf != NULL) 470104230Sphk free(buf, M_KTRACE); 47197993Sjhb return; 472104230Sphk } 47397993Sjhb if (namelen > 0) { 47497993Sjhb req->ktr_header.ktr_len = namelen; 475151927Srwatson req->ktr_buffer = buf; 47697993Sjhb } 477152376Srwatson ktr_submitrequest(curthread, req); 4781541Srgrimes} 4791541Srgrimes 4801549Srgrimesvoid 48197993Sjhbktrgenio(fd, rw, uio, error) 4821541Srgrimes int fd; 4831541Srgrimes enum uio_rw rw; 48462378Sgreen struct uio *uio; 48562378Sgreen int error; 4861541Srgrimes{ 48797993Sjhb struct ktr_request *req; 48897993Sjhb struct ktr_genio *ktg; 489103235Sjhb int datalen; 490103235Sjhb char *buf; 4918876Srgrimes 492131897Sphk if (error) { 493131897Sphk free(uio, M_IOV); 4941541Srgrimes return; 495131897Sphk } 496103235Sjhb uio->uio_offset = 0; 497103235Sjhb uio->uio_rw = UIO_WRITE; 498103235Sjhb datalen = imin(uio->uio_resid, ktr_geniosize); 499111119Simp buf = malloc(datalen, M_KTRACE, M_WAITOK); 500131897Sphk error = uiomove(buf, datalen, uio); 501131897Sphk free(uio, M_IOV); 502131897Sphk if (error) { 503103235Sjhb free(buf, M_KTRACE); 504103235Sjhb return; 505103235Sjhb } 50697993Sjhb req = ktr_getrequest(KTR_GENIO); 
507103235Sjhb if (req == NULL) { 508103235Sjhb free(buf, M_KTRACE); 50997993Sjhb return; 510103235Sjhb } 51197993Sjhb ktg = &req->ktr_data.ktr_genio; 51297993Sjhb ktg->ktr_fd = fd; 51397993Sjhb ktg->ktr_rw = rw; 514103235Sjhb req->ktr_header.ktr_len = datalen; 515151927Srwatson req->ktr_buffer = buf; 516152376Srwatson ktr_submitrequest(curthread, req); 5171541Srgrimes} 5181541Srgrimes 5191549Srgrimesvoid 52097993Sjhbktrpsig(sig, action, mask, code) 52151941Smarcel int sig; 5221541Srgrimes sig_t action; 52351791Smarcel sigset_t *mask; 52451941Smarcel int code; 5251541Srgrimes{ 52697993Sjhb struct ktr_request *req; 52797993Sjhb struct ktr_psig *kp; 5281541Srgrimes 52997993Sjhb req = ktr_getrequest(KTR_PSIG); 53097993Sjhb if (req == NULL) 53197993Sjhb return; 53297993Sjhb kp = &req->ktr_data.ktr_psig; 53397993Sjhb kp->signo = (char)sig; 53497993Sjhb kp->action = action; 53597993Sjhb kp->mask = *mask; 53697993Sjhb kp->code = code; 537152376Srwatson ktr_enqueuerequest(curthread, req); 5381541Srgrimes} 5391541Srgrimes 5401549Srgrimesvoid 54197993Sjhbktrcsw(out, user) 5421541Srgrimes int out, user; 5431541Srgrimes{ 54497993Sjhb struct ktr_request *req; 54597993Sjhb struct ktr_csw *kc; 5461541Srgrimes 54797993Sjhb req = ktr_getrequest(KTR_CSW); 54897993Sjhb if (req == NULL) 54997993Sjhb return; 55097993Sjhb kc = &req->ktr_data.ktr_csw; 55197993Sjhb kc->out = out; 55297993Sjhb kc->user = user; 553152376Srwatson ktr_enqueuerequest(curthread, req); 5541541Srgrimes} 555114026Sjhb#endif /* KTRACE */ 5561541Srgrimes 5571541Srgrimes/* Interface and common routines */ 5581541Srgrimes 55912221Sbde#ifndef _SYS_SYSPROTO_H_ 5601541Srgrimesstruct ktrace_args { 5611541Srgrimes char *fname; 5621541Srgrimes int ops; 5631541Srgrimes int facs; 5641541Srgrimes int pid; 5651541Srgrimes}; 56612221Sbde#endif 5671541Srgrimes/* ARGSUSED */ 5681549Srgrimesint 56983366Sjulianktrace(td, uap) 57083366Sjulian struct thread *td; 5711541Srgrimes register struct ktrace_args *uap; 5721541Srgrimes{ 
57313203Swollman#ifdef KTRACE 5741541Srgrimes register struct vnode *vp = NULL; 5751541Srgrimes register struct proc *p; 5761541Srgrimes struct pgrp *pg; 5771541Srgrimes int facs = uap->facs & ~KTRFAC_ROOT; 5781541Srgrimes int ops = KTROP(uap->ops); 5791541Srgrimes int descend = uap->ops & KTRFLAG_DESCEND; 580147576Spjd int nfound, ret = 0; 581157233Sjhb int flags, error = 0, vfslocked; 5821541Srgrimes struct nameidata nd; 583112198Sjhb struct ucred *cred; 5841541Srgrimes 585114026Sjhb /* 586114026Sjhb * Need something to (un)trace. 587114026Sjhb */ 588114026Sjhb if (ops != KTROP_CLEARFILE && facs == 0) 589114026Sjhb return (EINVAL); 590114026Sjhb 591152376Srwatson ktrace_enter(td); 5921541Srgrimes if (ops != KTROP_CLEAR) { 5931541Srgrimes /* 5941541Srgrimes * an operation which requires a file argument. 5951541Srgrimes */ 596157233Sjhb NDINIT(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_USERSPACE, 597157233Sjhb uap->fname, td); 59862550Smckusick flags = FREAD | FWRITE | O_NOFOLLOW; 599170152Skib error = vn_open(&nd, &flags, 0, NULL); 6003308Sphk if (error) { 601152376Srwatson ktrace_exit(td); 6021541Srgrimes return (error); 6031541Srgrimes } 604157233Sjhb vfslocked = NDHASGIANT(&nd); 60554655Seivind NDFREE(&nd, NDF_ONLY_PNBUF); 6061541Srgrimes vp = nd.ni_vp; 60783366Sjulian VOP_UNLOCK(vp, 0, td); 6081541Srgrimes if (vp->v_type != VREG) { 60991406Sjhb (void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td); 610157233Sjhb VFS_UNLOCK_GIANT(vfslocked); 611152376Srwatson ktrace_exit(td); 6121541Srgrimes return (EACCES); 6131541Srgrimes } 614157233Sjhb VFS_UNLOCK_GIANT(vfslocked); 6151541Srgrimes } 6161541Srgrimes /* 61785397Sdillon * Clear all uses of the tracefile. 
6181541Srgrimes */ 6191541Srgrimes if (ops == KTROP_CLEARFILE) { 620166678Smpp int vrele_count; 621166678Smpp 622166678Smpp vrele_count = 0; 62374927Sjhb sx_slock(&allproc_lock); 624166073Sdelphij FOREACH_PROC_IN_SYSTEM(p) { 62594618Sjhb PROC_LOCK(p); 626112198Sjhb if (p->p_tracevp == vp) { 62797993Sjhb if (ktrcanset(td, p)) { 62897993Sjhb mtx_lock(&ktrace_mtx); 629112198Sjhb cred = p->p_tracecred; 630112198Sjhb p->p_tracecred = NULL; 631112198Sjhb p->p_tracevp = NULL; 6321541Srgrimes p->p_traceflag = 0; 63397993Sjhb mtx_unlock(&ktrace_mtx); 634166678Smpp vrele_count++; 635112198Sjhb crfree(cred); 636166678Smpp } else 6371541Srgrimes error = EPERM; 638166678Smpp } 639166678Smpp PROC_UNLOCK(p); 6401541Srgrimes } 64174927Sjhb sx_sunlock(&allproc_lock); 642166678Smpp if (vrele_count > 0) { 643166678Smpp vfslocked = VFS_LOCK_GIANT(vp->v_mount); 644166678Smpp while (vrele_count-- > 0) 645166678Smpp vrele(vp); 646166678Smpp VFS_UNLOCK_GIANT(vfslocked); 647166678Smpp } 6481541Srgrimes goto done; 6491541Srgrimes } 6501541Srgrimes /* 6511541Srgrimes * do it 6521541Srgrimes */ 653114026Sjhb sx_slock(&proctree_lock); 6541541Srgrimes if (uap->pid < 0) { 6551541Srgrimes /* 6561541Srgrimes * by process group 6571541Srgrimes */ 6581541Srgrimes pg = pgfind(-uap->pid); 6591541Srgrimes if (pg == NULL) { 66094861Sjhb sx_sunlock(&proctree_lock); 6611541Srgrimes error = ESRCH; 6621541Srgrimes goto done; 6631541Srgrimes } 66491140Stanimura /* 66591140Stanimura * ktrops() may call vrele(). Lock pg_members 66694861Sjhb * by the proctree_lock rather than pg_mtx. 
66791140Stanimura */ 66891140Stanimura PGRP_UNLOCK(pg); 669147576Spjd nfound = 0; 670147576Spjd LIST_FOREACH(p, &pg->pg_members, p_pglist) { 671147576Spjd PROC_LOCK(p); 672147576Spjd if (p_cansee(td, p) != 0) { 673147576Spjd PROC_UNLOCK(p); 674147576Spjd continue; 675147576Spjd } 676147576Spjd PROC_UNLOCK(p); 677147576Spjd nfound++; 6781541Srgrimes if (descend) 67994618Sjhb ret |= ktrsetchildren(td, p, ops, facs, vp); 6808876Srgrimes else 68194618Sjhb ret |= ktrops(td, p, ops, facs, vp); 682147576Spjd } 683147576Spjd if (nfound == 0) { 684147576Spjd sx_sunlock(&proctree_lock); 685147576Spjd error = ESRCH; 686147576Spjd goto done; 687147576Spjd } 6881541Srgrimes } else { 6891541Srgrimes /* 6901541Srgrimes * by pid 6911541Srgrimes */ 6921541Srgrimes p = pfind(uap->pid); 6931541Srgrimes if (p == NULL) { 694114026Sjhb sx_sunlock(&proctree_lock); 6951541Srgrimes error = ESRCH; 6961541Srgrimes goto done; 6971541Srgrimes } 698147183Spjd error = p_cansee(td, p); 699114026Sjhb /* 700114026Sjhb * The slock of the proctree lock will keep this process 701114026Sjhb * from going away, so unlocking the proc here is ok. 
702114026Sjhb */ 70375893Sjhb PROC_UNLOCK(p); 704147520Spjd if (error) { 705147520Spjd sx_sunlock(&proctree_lock); 706147183Spjd goto done; 707147520Spjd } 7081541Srgrimes if (descend) 70994618Sjhb ret |= ktrsetchildren(td, p, ops, facs, vp); 7101541Srgrimes else 71194618Sjhb ret |= ktrops(td, p, ops, facs, vp); 7121541Srgrimes } 713114026Sjhb sx_sunlock(&proctree_lock); 7141541Srgrimes if (!ret) 7151541Srgrimes error = EPERM; 7161541Srgrimesdone: 717114026Sjhb if (vp != NULL) { 718157233Sjhb vfslocked = VFS_LOCK_GIANT(vp->v_mount); 71991406Sjhb (void) vn_close(vp, FWRITE, td->td_ucred, td); 720157233Sjhb VFS_UNLOCK_GIANT(vfslocked); 721114026Sjhb } 722152376Srwatson ktrace_exit(td); 7231541Srgrimes return (error); 724114026Sjhb#else /* !KTRACE */ 725114026Sjhb return (ENOSYS); 726114026Sjhb#endif /* KTRACE */ 7271541Srgrimes} 7281541Srgrimes 72918398Sphk/* ARGSUSED */ 73018398Sphkint 73183366Sjulianutrace(td, uap) 73283366Sjulian struct thread *td; 73318398Sphk register struct utrace_args *uap; 73418398Sphk{ 73583366Sjulian 73613203Swollman#ifdef KTRACE 73797993Sjhb struct ktr_request *req; 73899009Salfred void *cp; 739103237Sjhb int error; 74018398Sphk 741103237Sjhb if (!KTRPOINT(td, KTR_USER)) 742103237Sjhb return (0); 74370792Salfred if (uap->len > KTR_USER_MAXLEN) 74470707Salfred return (EINVAL); 745111119Simp cp = malloc(uap->len, M_KTRACE, M_WAITOK); 746103237Sjhb error = copyin(uap->addr, cp, uap->len); 747104230Sphk if (error) { 748104230Sphk free(cp, M_KTRACE); 749103237Sjhb return (error); 750104230Sphk } 75197993Sjhb req = ktr_getrequest(KTR_USER); 752104230Sphk if (req == NULL) { 753104230Sphk free(cp, M_KTRACE); 754122457Sjkoshy return (ENOMEM); 755104230Sphk } 756151927Srwatson req->ktr_buffer = cp; 757103237Sjhb req->ktr_header.ktr_len = uap->len; 758152376Srwatson ktr_submitrequest(td, req); 75918398Sphk return (0); 760114026Sjhb#else /* !KTRACE */ 76118398Sphk return (ENOSYS); 762114026Sjhb#endif /* KTRACE */ 76318398Sphk} 76418398Sphk 
#ifdef KTRACE
/*
 * Apply a ktrace operation (KTROP_SET or KTROP_CLEAR) for the facilities
 * in 'facs' to a single process 'p', tracing to vnode 'vp'.  Returns 1 if
 * the caller was permitted to change the tracing state of 'p', 0 if not.
 *
 * The old trace vnode/credential, if replaced or cleared, are released
 * only after both the proc lock and ktrace_mtx have been dropped.
 */
static int
ktrops(td, p, ops, facs, vp)
	struct thread *td;
	struct proc *p;
	int ops, facs;
	struct vnode *vp;
{
	struct vnode *tracevp = NULL;
	struct ucred *tracecred = NULL;

	PROC_LOCK(p);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_tracevp != vp) {
			/*
			 * if trace file already in use, relinquish below
			 */
			tracevp = p->p_tracevp;
			VREF(vp);
			p->p_tracevp = vp;
		}
		if (p->p_tracecred != td->td_ucred) {
			tracecred = p->p_tracecred;
			p->p_tracecred = crhold(td->td_ucred);
		}
		p->p_traceflag |= facs;
		/*
		 * Mark the trace as set by a privileged user so that only
		 * privileged users may later change it (see ktrcanset()).
		 */
		if (priv_check(td, PRIV_KTRACE) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			tracevp = p->p_tracevp;
			p->p_tracevp = NULL;
			tracecred = p->p_tracecred;
			p->p_tracecred = NULL;
		}
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	/* Drop the replaced vnode reference outside the locks. */
	if (tracevp != NULL) {
		int vfslocked;

		vfslocked = VFS_LOCK_GIANT(tracevp->v_mount);
		vrele(tracevp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (tracecred != NULL)
		crfree(tracecred);

	return (1);
}

/*
 * Apply a ktrace operation to 'top' and every descendant of 'top', via
 * an iterative depth-first walk of the process tree.  Returns the OR of
 * the per-process ktrops() results (non-zero if at least one process was
 * affected).  Caller must hold the proctree lock to keep the tree stable.
 */
static int
ktrsetchildren(td, top, ops, facs, vp)
	struct thread *td;
	struct proc *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

/*
 * Write a single ktrace request record out to the tracing vnode of the
 * requesting thread's process.  If the write fails, tracing is disabled
 * for every process in the system that traces to this vnode.
 */
static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	int datalen, buflen, vrele_count;
	int error, vfslocked;

	/*
	 * We hold the vnode and credential for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 *
	 * XXXRW: This is not ideal: we could end up performing a write after
	 * the vnode has been closed.
	 */
	mtx_lock(&ktrace_mtx);
	vp = td->td_proc->p_tracevp;
	if (vp != NULL)
		VREF(vp);
	cred = td->td_proc->p_tracecred;
	if (cred != NULL)
		crhold(cred);
	mtx_unlock(&ktrace_mtx);

	/*
	 * If vp is NULL, the vp has been cleared out from under this
	 * request, so just drop it.  Make sure the credential and vnode are
	 * in sync: we should have both or neither.
	 */
	if (vp == NULL) {
		KASSERT(cred == NULL, ("ktr_writerequest: cred != NULL"));
		return;
	}
	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));

	/*
	 * Assemble the uio: the fixed header, then the type-specific
	 * fixed-size data (if any), then the variable-length buffer (if any).
	 */
	kth = &req->ktr_header;
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	/* Perform the append write, honoring MAC checks when configured. */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	(void)VOP_LEASE(vp, td, cred, LEASE_WRITE);
#ifdef MAC
	error = mac_check_vnode_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	vrele(vp);
	VFS_UNLOCK_GIANT(vfslocked);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.  We defer
	 * all the vrele()'s on the vnode until after we are finished walking
	 * the various lists to avoid needlessly holding locks.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	vrele_count = 0;
	/*
	 * First, clear this vnode from being used by any processes in the
	 * system.
	 * XXX - If one process gets an EPERM writing to the vnode, should
	 *       we really do this?  Other processes might have suitable
	 *       credentials for the operation.
	 */
	cred = NULL;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_tracevp == vp) {
			mtx_lock(&ktrace_mtx);
			p->p_tracevp = NULL;
			p->p_traceflag = 0;
			cred = p->p_tracecred;
			p->p_tracecred = NULL;
			mtx_unlock(&ktrace_mtx);
			vrele_count++;
		}
		PROC_UNLOCK(p);
		if (cred != NULL) {
			crfree(cred);
			cred = NULL;
		}
	}
	sx_sunlock(&allproc_lock);

	/*
	 * We can't clear any pending requests in threads that have cached
	 * them but not yet committed them, as those are per-thread.  The
	 * thread will have to clear it itself on system call return.
	 */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	while (vrele_count-- > 0)
		vrele(vp);
	VFS_UNLOCK_GIANT(vfslocked);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(td, targetp)
	struct thread *td;
	struct proc *targetp;
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    priv_check(td, PRIV_KTRACE))
		return (0);

	/* Require full debug rights over the target (see p_candebug(9)). */
	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */