/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved  	*/

#include <sys/param.h>
#include <sys/types.h>
#include <sys/vmparam.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/frame.h>
#include <sys/proc.h>
#include <sys/brand.h>
#include <sys/psw.h>
#include <sys/ucontext.h>
#include <sys/asm_linkage.h>
#include <sys/errno.h>
#include <sys/archsystm.h>
#include <sys/schedctl.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>

/*
 * Save user context.
 */
void
savecontext(ucontext_t *ucp, const k_sigset_t *mask)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct regs *rp = lwptoregs(lwp);

	/*
	 * We unconditionally assign to every field through the end
	 * of the gregs, but we need to bzero() everything -after- that
	 * to avoid having any kernel stack garbage escape to userland.
	 */
	bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext_t) -
	    offsetof(ucontext_t, uc_mcontext.fpregs));

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (struct ucontext *)lwp->lwp_oldcontext;

	/*
	 * Try to copyin() the ustack if one is registered. If the stack
	 * has zero size, this indicates that stack bounds checking has
	 * been disabled for this LWP. If stack bounds checking is disabled
	 * or the copyin() fails, we fall back to the legacy behavior.
	 */
	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			ucp->uc_stack = lwp->lwp_sigaltstack;
		} else {
			ucp->uc_stack.ss_sp = p->p_usrstack - p->p_stksize;
			ucp->uc_stack.ss_size = p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	/*
	 * If either the trace flag or REQUEST_STEP is set,
	 * arrange for single-stepping and turn off the trace flag.
	 */
	if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
		/*
		 * Clear PS_T so that the saved user context won't have
		 * the trace flag set.
		 */
		rp->r_ps &= ~PS_T;

		if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
			lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
			/*
			 * trap() always checks DEBUG_PENDING before
			 * checking for any pending signal, so at times
			 * DEBUG_PENDING may go unhonoured.  For example,
			 * the lwp may be stopped by stop_on_fault() called
			 * from trap(); after being awakened it may see a
			 * pending signal and call savecontext(), but on
			 * the way back to userland there is no place the
			 * flag can be detected.  To guard against such
			 * occasions, set the AST flag for the thread,
			 * which forces it back through trap() where the
			 * flag is handled appropriately.
			 */
			aston(curthread);
		}
	}

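	/*
	 * Capture the general registers.  If the FPU is not enabled for
	 * this lwp there is no floating point state to save, so clear
	 * UC_FPU from the context flags.
	 */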
	getgregs(lwp, ucp->uc_mcontext.gregs);
	if (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)
		getfpregs(lwp, &ucp->uc_mcontext.fpregs);
	else
		ucp->uc_flags &= ~UC_FPU;

	sigktou(mask, &ucp->uc_sigmask);
}

/*
 * Restore user context.
 */
void
restorecontext(ucontext_t *ucp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);

	lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;

	if (ucp->uc_flags & UC_STACK) {
		if (ucp->uc_stack.ss_flags == SS_ONSTACK)
			lwp->lwp_sigaltstack = ucp->uc_stack;
		else
			lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
	}

	if (ucp->uc_flags & UC_CPU) {
		/*
		 * If the trace flag is set, mark the lwp to take a
		 * single-step trap on return to user level (below).
		 * The x86 lcall and sysenter interfaces have already done
		 * this and turned off the flag, but the amd64 syscall
		 * interface has not.
		 */
		if (lwptoregs(lwp)->r_ps & PS_T)
			lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
		setgregs(lwp, ucp->uc_mcontext.gregs);
		lwp->lwp_eosys = JUSTRETURN;
		t->t_post_sys = 1;
		aston(curthread);
	}

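	/*
	 * Floating point state is restored only when the context contains
	 * it; otherwise the lwp's current FPU state is left untouched.
	 */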
	if (ucp->uc_flags & UC_FPU)
		setfpregs(lwp, &ucp->uc_mcontext.fpregs);

	if (ucp->uc_flags & UC_SIGMASK) {
		/*
		 * We don't need to acquire p->p_lock here;
		 * we are manipulating thread-private data.
		 */
		schedctl_finish_sigblock(t);
		sigutok(&ucp->uc_sigmask, &t->t_hold);
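		/*
		 * If the new mask exposes a pending signal, flag the
		 * thread to check for signals on its way back to
		 * userland.
		 */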
		if (sigcheck(ttoproc(t), t))
			t->t_sig_check = 1;
	}
}


int
getsetcontext(int flag, void *arg)
{
	ucontext_t uc;
	ucontext_t *ucp;
	klwp_t *lwp = ttolwp(curthread);
	stack_t dummy_stk;

	/*
	 * In future releases, when the ucontext structure grows,
	 * getcontext should be modified to return only the fields
	 * specified in uc_flags.  That way, the structure can grow
	 * and still be binary compatible with all .o's, which will
	 * have only the old fields defined in uc_flags.
	 */

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		schedctl_finish_sigblock(curthread);
		savecontext(&uc, &curthread->t_hold);
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
		if (copyout(&uc, arg, sizeof (uc)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
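		/*
		 * A null user context pointer means there is nothing to
		 * restore; the calling process simply exits.
		 */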
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin the filler or the floating point state
		 * unless we need it.  The ucontext_t structure and its
		 * fields are specified in the ABI.
		 */
		if (copyin(ucp, &uc, sizeof (ucontext_t) -
		    sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs))) {
			return (set_errno(EFAULT));
		}
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);

		if ((uc.uc_flags & UC_FPU) &&
		    copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
		    sizeof (uc.uc_mcontext.fpregs))) {
			return (set_errno(EFAULT));
		}

		restorecontext(&uc);

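		/*
		 * If the lwp has a registered stack boundary structure,
		 * update it with the stack described by the new context.
		 */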
		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
			(void) copyout(&uc.uc_stack, (stack_t *)lwp->lwp_ustack,
			    sizeof (uc.uc_stack));
		return (0);

	case GETUSTACK:
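		/*
		 * Return the user address of the lwp's registered stack
		 * boundary structure (zero if none has been registered).
		 */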
		if (copyout(&lwp->lwp_ustack, arg, sizeof (caddr_t)))
			return (set_errno(EFAULT));
		return (0);

	case SETUSTACK:
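		/*
		 * Verify that the stack_t is readable before recording its
		 * address as the lwp's stack boundary structure.
		 */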
		if (copyin(arg, &dummy_stk, sizeof (dummy_stk)))
			return (set_errno(EFAULT));
		lwp->lwp_ustack = (uintptr_t)arg;
		return (0);
	}
}

#ifdef _SYSCALL32_IMPL

/*
 * Save user context for 32-bit processes.
 */
void
savecontext32(ucontext32_t *ucp, const k_sigset_t *mask)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct regs *rp = lwptoregs(lwp);

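	/*
	 * As in savecontext(), zero everything after the gregs so that no
	 * kernel stack garbage escapes to userland.
	 */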
	bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext32_t) -
	    offsetof(ucontext32_t, uc_mcontext.fpregs));

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (caddr32_t)lwp->lwp_oldcontext;

	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			ucp->uc_stack.ss_sp =
			    (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
			ucp->uc_stack.ss_size =
			    (size32_t)lwp->lwp_sigaltstack.ss_size;
			ucp->uc_stack.ss_flags = SS_ONSTACK;
		} else {
			ucp->uc_stack.ss_sp = (caddr32_t)(uintptr_t)
			    (p->p_usrstack - p->p_stksize);
			ucp->uc_stack.ss_size = (size32_t)p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	/*
	 * If either the trace flag or REQUEST_STEP is set, arrange
	 * for single-stepping and turn off the trace flag.
	 */
	if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
		/*
		 * Clear PS_T so that the saved user context won't have
		 * the trace flag set.
		 */
		rp->r_ps &= ~PS_T;

		if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
			lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
			/*
			 * See the comments in savecontext().
			 */
			aston(curthread);
		}
	}

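	/*
	 * As in savecontext(), clear UC_FPU if the lwp has no floating
	 * point state to save.
	 */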
	getgregs32(lwp, ucp->uc_mcontext.gregs);
	if (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)
		getfpregs32(lwp, &ucp->uc_mcontext.fpregs);
	else
		ucp->uc_flags &= ~UC_FPU;

	sigktou(mask, &ucp->uc_sigmask);
}

int
getsetcontext32(int flag, void *arg)
{
	ucontext32_t uc;
	ucontext_t ucnat;
	ucontext32_t *ucp;
	klwp_t *lwp = ttolwp(curthread);
	caddr32_t ustack32;
	stack32_t dummy_stk32;

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		schedctl_finish_sigblock(curthread);
		savecontext32(&uc, &curthread->t_hold);
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
		if (copyout(&uc, arg, sizeof (uc)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
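		/*
		 * As in getsetcontext(), don't copyin the filler or the
		 * floating point state unless it is needed.
		 */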
		if (copyin(ucp, &uc, sizeof (uc) -
		    sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs))) {
			return (set_errno(EFAULT));
		}
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
		if ((uc.uc_flags & UC_FPU) &&
		    copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
		    sizeof (uc.uc_mcontext.fpregs))) {
			return (set_errno(EFAULT));
		}

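		/*
		 * Convert the 32-bit context to the native format before
		 * restoring it.
		 */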
		ucontext_32ton(&uc, &ucnat);
		restorecontext(&ucnat);

		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
			(void) copyout(&uc.uc_stack,
			    (stack32_t *)lwp->lwp_ustack, sizeof (uc.uc_stack));
		return (0);

	case GETUSTACK:
		ustack32 = (caddr32_t)lwp->lwp_ustack;
		if (copyout(&ustack32, arg, sizeof (ustack32)))
			return (set_errno(EFAULT));
		return (0);

	case SETUSTACK:
		if (copyin(arg, &dummy_stk32, sizeof (dummy_stk32)))
			return (set_errno(EFAULT));
		lwp->lwp_ustack = (uintptr_t)arg;
		return (0);
	}
}

#endif	/* _SYSCALL32_IMPL */