/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (C) 2010 Konstantin Belousov <kib@freebsd.org>
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 */

#include "opt_capsicum.h"
#include "opt_ktrace.h"

__FBSDID("$FreeBSD$");

#include <sys/capsicum.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <security/audit/audit.h>

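/*
 * syscallenter() is the machine-independent part of system call entry:
 * it fetches the syscall number and arguments, handles debugger and
 * capability-mode checks, dispatches the handler, and records the
 * return value.  It is intended to be called from the machine-dependent
 * syscall trap handler, paired with syscallret() below.
 */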
static inline void
syscallenter(struct thread *td)
{
	struct proc *p;
	struct syscall_args *sa;
	int error, traced;

	VM_CNT_INC(v_syscall);
	p = td->td_proc;
	sa = &td->td_sa;

	td->td_pticks = 0;
	if (td->td_cowgen != p->p_cowgen)
		thread_cow_update(td);
	traced = (p->p_flag & P_TRACED) != 0;
	if (traced || td->td_dbgflags & TDB_USERWR) {
		PROC_LOCK(p);
		td->td_dbgflags &= ~TDB_USERWR;
		if (traced)
			td->td_dbgflags |= TDB_SCE;
		PROC_UNLOCK(p);
	}
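	/*
	 * Fetch the syscall number and arguments into td_sa using the
	 * ABI-specific hook from the process' sysentvec.
	 */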
	error = (p->p_sysent->sv_fetch_syscall_args)(td);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL))
		ktrsyscall(sa->code, sa->narg, sa->args);
#endif
	KTR_START4(KTR_SYSC, "syscall", syscallname(p, sa->code),
	    (uintptr_t)td, "pid:%d", td->td_proc->p_pid, "arg0:%p", sa->args[0],
	    "arg1:%p", sa->args[1], "arg2:%p", sa->args[2]);

	if (error != 0) {
		td->td_errno = error;
		goto retval;
	}

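	/*
	 * Report the syscall entry: raise the procfs stop event, then
	 * stop for the debugger if it asked for syscall-entry reports
	 * (PTRACE_SCE).
	 */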
	STOPEVENT(p, S_SCE, sa->narg);
	if ((p->p_flag & P_TRACED) != 0) {
		PROC_LOCK(p);
		if (p->p_ptevents & PTRACE_SCE)
			ptracestop((td), SIGTRAP, NULL);
		PROC_UNLOCK(p);
	}
	if ((td->td_dbgflags & TDB_USERWR) != 0) {
		/*
		 * Reread syscall number and arguments if debugger
		 * modified registers or memory.
		 */
		error = (p->p_sysent->sv_fetch_syscall_args)(td);
#ifdef KTRACE
		if (KTRPOINT(td, KTR_SYSCALL))
			ktrsyscall(sa->code, sa->narg, sa->args);
#endif
		if (error != 0) {
			td->td_errno = error;
			goto retval;
		}
	}

#ifdef CAPABILITY_MODE
	/*
	 * In capability mode, we only allow access to system calls
	 * flagged with SYF_CAPENABLED.
	 */
	if (IN_CAPABILITY_MODE(td) &&
	    !(sa->callp->sy_flags & SYF_CAPENABLED)) {
		td->td_errno = error = ECAPMODE;
		goto retval;
	}
#endif

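	/*
	 * Account for this thread entering the handler.  For dynamically
	 * registered syscalls this can fail if the handler is being
	 * unregistered, in which case the call is rejected with an error.
	 */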
	error = syscall_thread_enter(td, sa->callp);
	if (error != 0) {
		td->td_errno = error;
		goto retval;
	}

#ifdef KDTRACE_HOOKS
	/* Give the syscall:::entry DTrace probe a chance to fire. */
	if (__predict_false(systrace_enabled && sa->callp->sy_entry != 0))
		(*systrace_probe_func)(sa, SYSTRACE_ENTRY, 0);
#endif

	/* Let system calls set td_errno directly. */
	td->td_pflags &= ~TDP_NERRNO;

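	/*
	 * Open the audit record for this syscall and invoke the handler
	 * itself; the handler's return value is the error number.
	 */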
	AUDIT_SYSCALL_ENTER(sa->code, td);
	error = (sa->callp->sy_call)(td, sa->args);

	/*
	 * Note that some syscall implementations (e.g., sys_execve)
	 * will commit the audit record just before their final return.
	 * These were done under the assumption that nothing of interest
	 * would happen between their return and here, where we would
	 * normally commit the audit record.  These assumptions will
	 * need to be revisited should any substantial logic be added
	 * above.
	 */
	AUDIT_SYSCALL_EXIT(error, td);

	/* Save the latest error return value. */
	if ((td->td_pflags & TDP_NERRNO) == 0)
		td->td_errno = error;

#ifdef KDTRACE_HOOKS
	/* Give the syscall:::return DTrace probe a chance to fire. */
	if (__predict_false(systrace_enabled && sa->callp->sy_return != 0))
		(*systrace_probe_func)(sa, SYSTRACE_RETURN,
		    error ? -1 : td->td_retval[0]);
#endif
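	/* Drop the accounting taken by syscall_thread_enter() above. */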
	syscall_thread_exit(td, sa->callp);

 retval:
	KTR_STOP4(KTR_SYSC, "syscall", syscallname(p, sa->code),
	    (uintptr_t)td, "pid:%d", td->td_proc->p_pid, "error:%d", error,
	    "retval0:%#lx", td->td_retval[0], "retval1:%#lx",
	    td->td_retval[1]);
	if (traced) {
		PROC_LOCK(p);
		td->td_dbgflags &= ~TDB_SCE;
		PROC_UNLOCK(p);
	}
	(p->p_sysent->sv_set_syscall_retval)(td, error);
}

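/*
 * syscallret() finishes the system call path: it converts capability
 * violations into SIGTRAP when requested, performs the usual
 * return-to-userspace processing in userret(), records the result for
 * ktrace, reports the syscall exit to the debugger, and completes any
 * pending vfork() wait.
 */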
static inline void
syscallret(struct thread *td)
{
	struct proc *p, *p2;
	struct syscall_args *sa;
	ksiginfo_t ksi;
	int traced;

	KASSERT((td->td_pflags & TDP_FORKING) == 0,
	    ("fork() did not clear TDP_FORKING upon completion"));

	p = td->td_proc;
	sa = &td->td_sa;
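	/*
	 * If the global trap_enotcap knob is enabled, or the process has
	 * opted in with P2_TRAPCAP (normally set via procctl(2)), deliver
	 * SIGTRAP with TRAP_CAP for capability-mode violations so that a
	 * debugger can pinpoint the offending syscall.
	 */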
	if ((trap_enotcap || (p->p_flag2 & P2_TRAPCAP) != 0) &&
	    IN_CAPABILITY_MODE(td)) {
		if (td->td_errno == ENOTCAPABLE || td->td_errno == ECAPMODE) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGTRAP;
			ksi.ksi_errno = td->td_errno;
			ksi.ksi_code = TRAP_CAP;
			trapsignal(td, &ksi);
		}
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(td, td->td_frame);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		ktrsysret(sa->code, td->td_errno, td->td_retval[0]);
	}
#endif

	if (p->p_flag & P_TRACED) {
		traced = 1;
		PROC_LOCK(p);
		td->td_dbgflags |= TDB_SCX;
		PROC_UNLOCK(p);
	} else
		traced = 0;
	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, sa->code);
	if (traced || (td->td_dbgflags & (TDB_EXEC | TDB_FORK)) != 0) {
		PROC_LOCK(p);
		/*
		 * If tracing the execed process, trap to the debugger
		 * so that breakpoints can be set before the program
		 * executes.  If debugger requested tracing of syscall
		 * returns, do it now too.
		 */
		if (traced &&
		    ((td->td_dbgflags & (TDB_FORK | TDB_EXEC)) != 0 ||
		    (p->p_ptevents & PTRACE_SCX) != 0))
			ptracestop(td, SIGTRAP, NULL);
		td->td_dbgflags &= ~(TDB_SCX | TDB_EXEC | TDB_FORK);
		PROC_UNLOCK(p);
	}

	if (td->td_pflags & TDP_RFPPWAIT) {
		/*
		 * Preserve the synchronization semantics of vfork: if we
		 * are waiting for the child to exec or exit, fork set
		 * P_PPWAIT on the child, and here we sleep on the child's
		 * proc until that flag is cleared (to cover the exit
		 * case).
		 *
		 * Do this only after the ptracestop() above has finished,
		 * so that our debugger is not blocked while we wait for
		 * the child to exec or exit and complete the vfork wait.
		 */
		td->td_pflags &= ~TDP_RFPPWAIT;
		p2 = td->td_rfppwait_p;
again:
		PROC_LOCK(p2);
		while (p2->p_flag & P_PPWAIT) {
			PROC_LOCK(p);
			if (thread_suspend_check_needed()) {
				PROC_UNLOCK(p2);
				thread_suspend_check(0);
				PROC_UNLOCK(p);
				goto again;
			} else {
				PROC_UNLOCK(p);
			}
			cv_timedwait(&p2->p_pwait, &p2->p_mtx, hz);
		}
		PROC_UNLOCK(p2);

		if (td->td_dbgflags & TDB_VFORK) {
			PROC_LOCK(p);
			if (p->p_ptevents & PTRACE_VFORK)
				ptracestop(td, SIGTRAP, NULL);
			td->td_dbgflags &= ~TDB_VFORK;
			PROC_UNLOCK(p);
		}
	}
}