/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef	MACH_BSD
#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/syscall_sw.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/machdep_call.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/vmparam.h>
#include <i386/trap.h>
#include <mach/i386/syscall_sw.h>
#include <sys/syscall.h>
#include <sys/kdebug.h>
#include <sys/errno.h>
#include <../bsd/sys/sysent.h>

kern_return_t
thread_userstack(
    thread_t,
    int,
    thread_state_t,
    unsigned int,
    user_addr_t *,
    int *
);

kern_return_t
thread_entrypoint(
    thread_t,
    int,
    thread_state_t,
    unsigned int,
    mach_vm_offset_t *
);

void * find_user_regs(thread_t);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);

kern_return_t
thread_compose_cthread_desc(unsigned int addr, pcb_t pcb);

void IOSleep(int);
extern void throttle_lowpri_io(boolean_t);

void thread_set_cthreadself(thread_t thread, uint64_t pself, int isLP64);

/*
 * thread_userstack:
 *
 * Return the user stack pointer from the machine
 * dependent thread state info.
 */
kern_return_t
thread_userstack(
    __unused thread_t   thread,
    int                 flavor,
    thread_state_t      tstate,
    __unused unsigned int        count,
    user_addr_t    *user_stack,
    int            *customstack
)
{
	if (customstack)
		*customstack = 0;

	switch (flavor) {
	case x86_THREAD_STATE32:
		{
			x86_thread_state32_t *state25;

			state25 = (x86_thread_state32_t *) tstate;

			if (state25->esp) {
				*user_stack = state25->esp;
				if (customstack)
					*customstack = 1;
			} else {
				*user_stack = VM_USRSTACK32;
				if (customstack)
					*customstack = 0;
			}
			break;
		}

	case x86_THREAD_STATE64:
		{
			x86_thread_state64_t *state25;

			state25 = (x86_thread_state64_t *) tstate;

			if (state25->rsp) {
				*user_stack = state25->rsp;
				if (customstack)
					*customstack = 1;
			} else {
				*user_stack = VM_USRSTACK64;
				if (customstack)
					*customstack = 0;
			}
			break;
		}

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}


kern_return_t
thread_entrypoint(
    __unused thread_t   thread,
    int                 flavor,
    thread_state_t      tstate,
    __unused unsigned int        count,
    mach_vm_offset_t    *entry_point
)
{
	/*
	 * Set a default.
	 */
	if (*entry_point == 0)
		*entry_point = VM_MIN_ADDRESS;

	switch (flavor) {
	case x86_THREAD_STATE32:
		{
			x86_thread_state32_t *state25;

			state25 = (x86_thread_state32_t *) tstate;
			*entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
			break;
		}

	case x86_THREAD_STATE64:
		{
			x86_thread_state64_t *state25;

			state25 = (x86_thread_state64_t *) tstate;
			*entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
			break;
		}
	}
	return (KERN_SUCCESS);
}


/*
 * Duplicate parent state in child
 * for U**X fork.
 */
kern_return_t
machine_thread_dup(
    thread_t		parent,
    thread_t		child
)
{
	pcb_t		parent_pcb;
	pcb_t		child_pcb;

	if ((child_pcb = child->machine.pcb) == NULL ||
	    (parent_pcb = parent->machine.pcb) == NULL)
		return (KERN_FAILURE);
	/*
	 * Copy over the x86_saved_state registers
	 */
	if (cpu_mode_is64bit()) {
		if (thread_is_64bit(parent))
			bcopy(USER_REGS64(parent), USER_REGS64(child), sizeof(x86_saved_state64_t));
		else
			bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state_compat32_t));
	} else
		bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state32_t));

	/*
	 * Check to see if parent is using floating point
	 * and if so, copy the registers to the child
	 */
	fpu_dup_fxstate(parent, child);

#ifdef	MACH_BSD
	/*
	 * Copy the parent's cthread id and USER_CTHREAD descriptor, if 32-bit.
	 */
	child_pcb->cthread_self = parent_pcb->cthread_self;
	if (!thread_is_64bit(parent))
		child_pcb->cthread_desc = parent_pcb->cthread_desc;

	/*
	 * FIXME - should a user-specified LDT, TSS and V86 info
	 * be duplicated as well?  Probably not.
	 */
	/* Duplicate any user LDT entry that was set up. */
	if (parent_pcb->uldt_selector != 0) {
		child_pcb->uldt_selector = parent_pcb->uldt_selector;
		child_pcb->uldt_desc = parent_pcb->uldt_desc;
	}
#endif

	return (KERN_SUCCESS);
}

/*
 * FIXME - thread_set_child
 *
 * Set the fork() return state seen by the child thread: eax/rax
 * holds the pid, edx/rdx is 1 to mark the child side of the fork,
 * and the carry flag is cleared to indicate success.
 */

void thread_set_child(thread_t child, int pid);
void
thread_set_child(thread_t child, int pid)
{
	if (thread_is_64bit(child)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(child);

		iss64->rax = pid;
		iss64->rdx = 1;
		iss64->isf.rflags &= ~EFL_CF;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(child);

		iss32->eax = pid;
		iss32->edx = 1;
		iss32->efl &= ~EFL_CF;
	}
}
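
/*
 * thread_set_parent:
 *
 * Set the fork() return state seen by the parent thread: eax/rax
 * holds the pid, edx/rdx is 0 to mark the parent side of the fork,
 * and the carry flag is cleared to indicate success.
 */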
void thread_set_parent(thread_t parent, int pid);

void
thread_set_parent(thread_t parent, int pid)
{
	if (thread_is_64bit(parent)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(parent);

		iss64->rax = pid;
		iss64->rdx = 0;
		iss64->isf.rflags &= ~EFL_CF;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(parent);

		iss32->eax = pid;
		iss32->edx = 0;
		iss32->efl &= ~EFL_CF;
	}
}


/*
 * System Call handling code
 */

extern long fuword(vm_offset_t);
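
/*
 * machdep_syscall:
 *
 * Dispatch a machine-dependent system call from a 32-bit user task.
 * The call number arrives in eax; up to four int-sized arguments are
 * copied in from the user stack, skipping the return address in its
 * first slot.
 */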
void
machdep_syscall(x86_saved_state_t *state)
{
	int			args[machdep_call_count];
	int			trapno;
	int			nargs;
	machdep_call_t		*entry;
	x86_saved_state32_t	*regs;

	assert(is_saved_state32(state));
	regs = saved_state32(state);

	trapno = regs->eax;
#if DEBUG_TRACE
	kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
#endif

	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->eax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}
	entry = &machdep_call_table[trapno];
	nargs = entry->nargs;

	if (nargs != 0) {
		if (copyin((user_addr_t) regs->uesp + sizeof (int),
				(char *) args, (nargs * sizeof (int)))) {
			regs->eax = KERN_INVALID_ADDRESS;

			thread_exception_return();
			/* NOTREACHED */
		}
	}
	switch (nargs) {
	case 0:
		regs->eax = (*entry->routine.args_0)();
		break;
	case 1:
		regs->eax = (*entry->routine.args_1)(args[0]);
		break;
	case 2:
		regs->eax = (*entry->routine.args_2)(args[0], args[1]);
		break;
	case 3:
		if (!entry->bsd_style)
			regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]);
		else {
			int		error;
			uint32_t	rval;

			error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
			if (error) {
				regs->eax = error;
				regs->efl |= EFL_CF;	/* carry bit */
			} else {
				regs->eax = rval;
				regs->efl &= ~EFL_CF;
			}
		}
		break;
	case 4:
		regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
		break;

	default:
		panic("machdep_syscall: too many args");
	}
	if (current_thread()->funnel_lock)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	throttle_lowpri_io(TRUE);

	thread_exception_return();
	/* NOTREACHED */
}
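
/*
 * machdep_syscall64:
 *
 * Dispatch a machine-dependent system call from a 64-bit user task.
 * Arguments arrive in registers, so no copyin from the user stack is
 * needed.
 */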
void
machdep_syscall64(x86_saved_state_t *state)
{
	int			trapno;
	machdep_call_t		*entry;
	x86_saved_state64_t	*regs;

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	trapno = regs->rax & SYSCALL_NUMBER_MASK;

	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->rax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}
	entry = &machdep_call_table64[trapno];

	switch (entry->nargs) {
	case 0:
		regs->rax = (*entry->routine.args_0)();
		break;
	case 1:
		regs->rax = (*entry->routine.args64_1)(regs->rdi);
		break;
	default:
		panic("machdep_syscall64: too many args");
	}
	if (current_thread()->funnel_lock)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	throttle_lowpri_io(TRUE);

	thread_exception_return();
	/* NOTREACHED */
}
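
/*
 * thread_compose_cthread_desc:
 *
 * Build a ring-3 read/write data segment descriptor based at 'addr'
 * and install it in the USER_CTHREAD LDT slot, through which user
 * code reaches its cthread area via the %gs segment.
 */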
kern_return_t
thread_compose_cthread_desc(unsigned int addr, pcb_t pcb)
{
	struct real_descriptor desc;

	mp_disable_preemption();

	desc.limit_low = 1;
	desc.limit_high = 0;
	desc.base_low = addr & 0xffff;
	desc.base_med = (addr >> 16) & 0xff;
	desc.base_high = (addr >> 24) & 0xff;
	desc.access = ACC_P|ACC_PL_U|ACC_DATA_W;
	desc.granularity = SZ_32|SZ_G;
	pcb->cthread_desc = desc;
	*ldt_desc_p(USER_CTHREAD) = desc;

	mp_enable_preemption();

	return (KERN_SUCCESS);
}
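
/*
 * thread_set_cthread_self / thread_get_cthread_self:
 *
 * Set and return the user-level thread handle recorded in the pcb;
 * unlike the "fast" variants below, these do not touch the segment
 * state.
 */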
kern_return_t
thread_set_cthread_self(uint32_t self)
{
	current_thread()->machine.pcb->cthread_self = (uint64_t) self;

	return (KERN_SUCCESS);
}

kern_return_t
thread_get_cthread_self(void)
{
	return ((kern_return_t)current_thread()->machine.pcb->cthread_self);
}

kern_return_t
thread_fast_set_cthread_self(uint32_t self)
{
	pcb_t			pcb;
	x86_saved_state32_t	*iss;

	pcb = (pcb_t)current_thread()->machine.pcb;
	thread_compose_cthread_desc(self, pcb);
	pcb->cthread_self = (uint64_t) self; /* preserve old func too */
	iss = saved_state32(pcb->iss);
	iss->gs = USER_CTHREAD;

	return (USER_CTHREAD);
}

void
thread_set_cthreadself(thread_t thread, uint64_t pself, int isLP64)
{
	if (isLP64 == 0) {
		pcb_t			pcb;
		x86_saved_state32_t	*iss;

		pcb = (pcb_t)thread->machine.pcb;
		thread_compose_cthread_desc((uint32_t) pself, pcb);
		pcb->cthread_self = pself; /* preserve old func too */
		iss = saved_state32(pcb->iss);
		iss->gs = USER_CTHREAD;
	} else {
		pcb_t			pcb;
		x86_saved_state64_t	*iss;

		pcb = thread->machine.pcb;

		/* check for canonical address, set 0 otherwise */
		if (!IS_USERADDR64_CANONICAL(pself))
			pself = 0ULL;
		pcb->cthread_self = pself;

		/* XXX for 64-in-32 */
		iss = saved_state64(pcb->iss);
		iss->gs = USER_CTHREAD;
		thread_compose_cthread_desc((uint32_t) pself, pcb);
	}
}
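
/*
 * thread_fast_set_cthread_self64:
 *
 * 64-bit path for setting the cthread self pointer: record it in
 * the pcb and in the per-cpu user %gs base so it takes effect on
 * the next return to user space.
 */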
kern_return_t
thread_fast_set_cthread_self64(uint64_t self)
{
	pcb_t			pcb;
	x86_saved_state64_t	*iss;

	pcb = current_thread()->machine.pcb;

	/* check for canonical address, set 0 otherwise */
	if (!IS_USERADDR64_CANONICAL(self))
		self = 0ULL;
	pcb->cthread_self = self;
	current_cpu_datap()->cpu_uber.cu_user_gs_base = self;

	/* XXX for 64-in-32 */
	iss = saved_state64(pcb->iss);
	iss->gs = USER_CTHREAD;
	thread_compose_cthread_desc((uint32_t) self, pcb);

	return (USER_CTHREAD);
}

/*
 * thread_set_user_ldt is the interface for the user-level settable
 * LDT entry feature.  Allowing a user to create arbitrary LDT
 * entries seems to be too large of a security hole, so instead this
 * mechanism lets user-level processes have a single LDT entry that
 * can be used in conjunction with the FS register.
 *
 * Swapping occurs inside pcb.c, along with initialization when a
 * thread is created.  The basic theory of operation is that
 * pcb->uldt_selector contains either 0, meaning the process has not
 * set up an entry, or the selector to be used in the FS register.
 * pcb->uldt_desc contains the actual descriptor the user has set up,
 * stored in machine-usable LDT format.
 *
 * Currently one entry is shared by all threads (USER_SETTABLE), but
 * this could be changed in the future by changing how this routine
 * allocates the selector.  There seems to be no real reason at this
 * time to have this added feature, but in the future it might be
 * needed.
 *
 * 'address' is the linear address of the start of the data area,
 * and 'size' is the size of the area in bytes.  'flags' should
 * always be 0 for now; in the future it could be used to set R/W
 * permissions or other attributes.  Currently the segment is created
 * as a data segment, up to 1 megabyte in size, with full read/write
 * permissions only.
 *
 * This call returns the segment selector, or -1 if an error occurs.
 */
kern_return_t
thread_set_user_ldt(uint32_t address, uint32_t size, uint32_t flags)
{
	pcb_t pcb;
	struct fake_descriptor temp;
	int mycpu;

	if (flags != 0)
		return -1;		// flags not supported
	if (size > 0xFFFFF)
		return -1;		// size too big, 1 meg is the limit

	mp_disable_preemption();
	mycpu = cpu_number();

	// create a "fake" descriptor so we can use fix_desc()
	// to build a real one...
	//   32 bit default operation size
	//   standard read/write perms for a data segment
	pcb = (pcb_t)current_thread()->machine.pcb;
	temp.offset = address;
	temp.lim_or_seg = size;
	temp.size_or_wdct = SZ_32;
	temp.access = ACC_P|ACC_PL_U|ACC_DATA_W;

	// turn this into a real descriptor
	fix_desc(&temp, 1);

	// set up our data in the pcb
	pcb->uldt_desc = *(struct real_descriptor *)&temp;
	pcb->uldt_selector = USER_SETTABLE;		// set the selector value

	// now set it up in the current table...
	*ldt_desc_p(USER_SETTABLE) = *(struct real_descriptor *)&temp;

	mp_enable_preemption();

	return USER_SETTABLE;
}

#endif	/* MACH_BSD */


typedef kern_return_t (*mach_call_t)(void *);

struct mach_call_args {
	syscall_arg_t arg1;
	syscall_arg_t arg2;
	syscall_arg_t arg3;
	syscall_arg_t arg4;
	syscall_arg_t arg5;
	syscall_arg_t arg6;
	syscall_arg_t arg7;
	syscall_arg_t arg8;
	syscall_arg_t arg9;
};

static kern_return_t
mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args);
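
/*
 * mach_call_arg_munger32:
 *
 * Copy a 32-bit user task's trap arguments off its stack and widen
 * each one into a 64-bit slot of struct mach_call_args.  Two traps
 * take a 64-bit argument that arrives as a pair of 32-bit words and
 * must be reassembled by hand, e.g. for mach_wait_until_trap():
 *
 *	arg1 = (uint64_t)args32[0] | ((uint64_t)args32[1] << 32);
 */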
static kern_return_t
mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args)
{
	unsigned int args32[9];

	if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args32, nargs * sizeof (int)))
		return KERN_INVALID_ARGUMENT;

	switch (nargs) {
	/* The cases fall through deliberately: copy args nargs down to 1. */
	case 9: args->arg9 = args32[8];
	case 8: args->arg8 = args32[7];
	case 7: args->arg7 = args32[6];
	case 6: args->arg6 = args32[5];
	case 5: args->arg5 = args32[4];
	case 4: args->arg4 = args32[3];
	case 3: args->arg3 = args32[2];
	case 2: args->arg2 = args32[1];
	case 1: args->arg1 = args32[0];
	}
	if (call_number == 90) {
		/* munge_l for mach_wait_until_trap() */
		args->arg1 = (((uint64_t)(args32[0])) | ((((uint64_t)(args32[1])) << 32)));
	}
	if (call_number == 93) {
		/* munge_wl for mk_timer_arm_trap() */
		args->arg2 = (((uint64_t)(args32[1])) | ((((uint64_t)(args32[2])) << 32)));
	}

	return KERN_SUCCESS;
}


__private_extern__ void mach_call_munger(x86_saved_state_t *state);
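
/*
 * mach_call_munger:
 *
 * Common entry point for Mach traps from 32-bit user tasks.  Mach
 * trap numbers are negative, so the trap number is recovered by
 * negating eax before indexing mach_trap_table; arguments are copied
 * in from the user stack by mach_call_arg_munger32().
 */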
void
mach_call_munger(x86_saved_state_t *state)
{
	int argc;
	int call_number;
	mach_call_t mach_call;
	kern_return_t retval;
	struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	x86_saved_state32_t	*regs;

	assert(is_saved_state32(state));
	regs = saved_state32(state);

	call_number = -(regs->eax);
#if DEBUG_TRACE
	kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
#endif

	if (call_number < 0 || call_number >= mach_trap_count) {
		i386_exception(EXC_SYSCALL, call_number, 1);
		/* NOTREACHED */
	}
	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

	if (mach_call == (mach_call_t)kern_invalid) {
		i386_exception(EXC_SYSCALL, call_number, 1);
		/* NOTREACHED */
	}

	argc = mach_trap_table[call_number].mach_trap_arg_count;
	if (argc) {
		retval = mach_call_arg_munger32(regs->uesp, argc, call_number, &args);
		if (retval != KERN_SUCCESS) {
			regs->eax = retval;

			thread_exception_return();
			/* NOTREACHED */
		}
	}
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
			(int) args.arg1, (int) args.arg2, (int) args.arg3, (int) args.arg4, 0);

	retval = mach_call(&args);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
			retval, 0, 0, 0, 0);
	regs->eax = retval;

	throttle_lowpri_io(TRUE);

	thread_exception_return();
	/* NOTREACHED */
}


__private_extern__ void mach_call_munger64(x86_saved_state_t *state);
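
/*
 * mach_call_munger64:
 *
 * Common entry point for Mach traps from 64-bit user tasks.  The
 * first six arguments arrive in registers (rdi, rsi, rdx, r10, r8,
 * r9); any additional arguments are copied in from the user stack
 * into the save area starting at v_arg6.
 */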
void
mach_call_munger64(x86_saved_state_t *state)
{
	int call_number;
	int argc;
	mach_call_t mach_call;
	x86_saved_state64_t	*regs;

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	call_number = regs->rax & SYSCALL_NUMBER_MASK;

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,
					   (call_number)) | DBG_FUNC_START,
			      (int) regs->rdi, (int) regs->rsi,
			      (int) regs->rdx, (int) regs->r10, 0);

	if (call_number < 0 || call_number >= mach_trap_count) {
		i386_exception(EXC_SYSCALL, regs->rax, 1);
		/* NOTREACHED */
	}
	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

	if (mach_call == (mach_call_t)kern_invalid) {
		i386_exception(EXC_SYSCALL, regs->rax, 1);
		/* NOTREACHED */
	}
	argc = mach_trap_table[call_number].mach_trap_arg_count;

	if (argc > 6) {
		int copyin_count;

		copyin_count = (argc - 6) * sizeof(uint64_t);

		if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&regs->v_arg6, copyin_count)) {
			regs->rax = KERN_INVALID_ARGUMENT;

			thread_exception_return();
			/* NOTREACHED */
		}
	}
	regs->rax = (uint64_t)mach_call((void *)(&regs->rdi));

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,
					   (call_number)) | DBG_FUNC_END,
			      (int)regs->rax, 0, 0, 0, 0);

	throttle_lowpri_io(TRUE);

	thread_exception_return();
	/* NOTREACHED */
}


/*
 * thread_setuserstack:
 *
 * Sets the user stack pointer into the machine
 * dependent thread state info.
 */
void
thread_setuserstack(
	thread_t	thread,
	mach_vm_address_t	user_stack)
{
	if (thread_is_64bit(thread)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rsp = (uint64_t)user_stack;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thread);

		iss32->uesp = CAST_DOWN(unsigned int, user_stack);
	}
}

/*
 * thread_adjuserstack:
 *
 * Returns the adjusted user stack pointer from the machine
 * dependent thread state info.  Used for small (<2G) deltas.
 */
uint64_t
thread_adjuserstack(
	thread_t	thread,
	int		adjust)
{
	if (thread_is_64bit(thread)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rsp += adjust;

		return iss64->isf.rsp;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thread);

		iss32->uesp += adjust;

		return CAST_USER_ADDR_T(iss32->uesp);
	}
}

/*
 * thread_setentrypoint:
 *
 * Sets the user PC into the machine
 * dependent thread state info.
 */
void
thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
{
	if (thread_is_64bit(thread)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rip = (uint64_t)entry;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thread);

		iss32->eip = CAST_DOWN(unsigned int, entry);
	}
}
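
/*
 * thread_setsinglestep:
 *
 * Enable or disable single-stepping for the thread by setting or
 * clearing the trace flag (EFL_TF) in its saved user flags.
 */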
kern_return_t
thread_setsinglestep(thread_t thread, int on)
{
	if (thread_is_64bit(thread)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thread);

		if (on)
			iss64->isf.rflags |= EFL_TF;
		else
			iss64->isf.rflags &= ~EFL_TF;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thread);

		if (on)
			iss32->efl |= EFL_TF;
		else
			iss32->efl &= ~EFL_TF;
	}

	return (KERN_SUCCESS);
}


/* XXX this should be a struct savearea so that CHUD will work better on x86 */
void *
find_user_regs(thread_t thread)
{
	return USER_STATE(thread);
}
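
/*
 * get_user_regs:
 *
 * Like find_user_regs(), but tolerates a thread without a pcb
 * rather than assuming one exists.
 */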
void *
get_user_regs(thread_t th)
{
	if (th->machine.pcb)
		return (USER_STATE(th));
	else {
		printf("[get_user_regs: thread does not have pcb]");
		return NULL;
	}
}

#if CONFIG_DTRACE
/*
 * DTrace would like to have a peek at the kernel interrupt state, if available.
 * Based on osfmk/chud/i386/chud_thread_i386.c:chudxnu_thread_get_state(), which see.
 */
x86_saved_state32_t *find_kern_regs(thread_t);

x86_saved_state32_t *
find_kern_regs(thread_t thread)
{
	if (thread == current_thread() &&
		NULL != current_cpu_datap()->cpu_int_state &&
		!(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
		  current_cpu_datap()->cpu_interrupt_level == 1)) {

		return saved_state32(current_cpu_datap()->cpu_int_state);
	} else {
		return NULL;
	}
}

vm_offset_t dtrace_get_cpu_int_stack_top(void);

vm_offset_t
dtrace_get_cpu_int_stack_top(void)
{
	return current_cpu_datap()->cpu_int_stack_top;
}
#endif