/*
 * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Distributed under the terms of the MIT License.
 */


#include <asm_defs.h>

#include <thread_types.h>

#include <arch/x86/descriptors.h>
#include <arch/x86/arch_altcodepatch.h>
#include <arch/x86/arch_cpu.h>
#include <arch/x86/arch_kernel.h>

#include "asm_offsets.h"
#include "syscall_numbers.h"
#include "syscall_table.h"


// Push the remainder of the interrupt frame onto the stack.
#define PUSH_IFRAME_BOTTOM(iframeType)	\
	push	%rax;	/* orig_rax */		\
	push	%rax;						\
	push	%rbx;						\
	push	%rcx;						\
	push	%rdx;						\
	push	%rdi;						\
	push	%rsi;						\
	push	%rbp;						\
	push	%r8;						\
	push	%r9;						\
	push	%r10;						\
	push	%r11;						\
	push	%r12;						\
	push	%r13;						\
	push	%r14;						\
	push	%r15;						\
	pushq	$0;		/* fpu */			\
	push	$iframeType;


// Restore the interrupt frame.
#define RESTORE_IFRAME()				\
	add		$16, %rsp;	/* skip the type and fpu fields */	\
	pop		%r15;						\
	pop		%r14;						\
	pop		%r13;						\
	pop		%r12;						\
	pop		%r11;						\
	pop		%r10;						\
	pop		%r9;						\
	pop		%r8;						\
	pop		%rbp;						\
	pop		%rsi;						\
	pop		%rdi;						\
	pop		%rdx;						\
	pop		%rcx;						\
	pop		%rbx;						\
	pop		%rax;						\
	addq	$24, %rsp;	/* skip orig_rax, vector and error_code */


// The macros below require R12 to contain the current thread pointer. R12 is
// callee-save, so it is preserved through all function calls and only needs
// to be loaded once. R13 is used to store the system call start time and,
// being callee-save as well, is also preserved.

#define LOCK_THREAD_TIME()										\
	leaq	THREAD_time_lock(%r12), %rdi;						\
	call	acquire_spinlock;

#define UNLOCK_THREAD_TIME()									\
	leaq	THREAD_time_lock(%r12), %rdi;						\
	call	release_spinlock;

#define UPDATE_THREAD_USER_TIME()								\
	LOCK_THREAD_TIME()											\
																\
	call	system_time;										\
																\
	/* Preserve system_time for post syscall debug */			\
	movq	%rax, %r13;											\
																\
	/* thread->user_time += now - thread->last_time; */		\
	subq	THREAD_last_time(%r12), %rax;						\
	addq	%rax, THREAD_user_time(%r12);						\
																\
	/* thread->last_time = now; */								\
	movq	%r13, THREAD_last_time(%r12);						\
																\
	/* thread->in_kernel = true; */								\
	movb	$1, THREAD_in_kernel(%r12);							\
																\
	UNLOCK_THREAD_TIME()

#define UPDATE_THREAD_KERNEL_TIME()								\
	LOCK_THREAD_TIME()											\
																\
	call	system_time;										\
	movq	%rax, %r13;											\
																\
	/* thread->kernel_time += now - thread->last_time; */		\
	subq	THREAD_last_time(%r12), %rax;						\
	addq	%rax, THREAD_kernel_time(%r12);						\
																\
	/* thread->last_time = now; */								\
	movq	%r13, THREAD_last_time(%r12);						\
																\
	/* thread->in_kernel = false; */							\
	movb	$0, THREAD_in_kernel(%r12);							\
																\
	UNLOCK_THREAD_TIME()

#define STOP_USER_DEBUGGING()									\
	testl	$(THREAD_FLAGS_BREAKPOINTS_INSTALLED				\
			| THREAD_FLAGS_SINGLE_STEP), THREAD_flags(%r12);	\
	jz		1f;													\
	call	x86_exit_user_debug_at_kernel_entry;				\
  1:

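// Before returning to userland on paths that do not restore saved FPU state
// (such as the SYSRET fast path below), the XMM registers are scrubbed so
// that kernel values cannot leak to userland.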
#define CLEAR_FPU_STATE() \
	pxor %xmm0, %xmm0; \
	pxor %xmm1, %xmm1; \
	pxor %xmm2, %xmm2; \
	pxor %xmm3, %xmm3; \
	pxor %xmm4, %xmm4; \
	pxor %xmm5, %xmm5; \
	pxor %xmm6, %xmm6; \
	pxor %xmm7, %xmm7; \
	pxor %xmm8, %xmm8; \
	pxor %xmm9, %xmm9; \
	pxor %xmm10, %xmm10; \
	pxor %xmm11, %xmm11; \
	pxor %xmm12, %xmm12; \
	pxor %xmm13, %xmm13; \
	pxor %xmm14, %xmm14; \
	pxor %xmm15, %xmm15

// The following code defines the interrupt service routines for all 256
// interrupts. It creates a block of handlers, each 16 bytes, that the IDT
// initialization code just loops through.

// Interrupt with no error code, pushes a 0 error code.
#define DEFINE_ISR(nr)					\
	.align 16;							\
	ASM_CLAC							\
	push	$0;							\
	push	$nr;						\
	jmp		int_bottom;

// Interrupt with an error code.
#define DEFINE_ISR_E(nr)				\
	.align 16;							\
	ASM_CLAC							\
	push	$nr;						\
	jmp		int_bottom;

// Array of interrupt service routines.
.align 16
SYMBOL(isr_array):
	// Exceptions (0-19) and reserved interrupts (20-31).
	DEFINE_ISR(0)
	DEFINE_ISR(1)
	DEFINE_ISR(2)
	DEFINE_ISR(3)
	DEFINE_ISR(4)
	DEFINE_ISR(5)
	DEFINE_ISR(6)
	DEFINE_ISR(7)
	DEFINE_ISR_E(8)
	DEFINE_ISR(9)
	DEFINE_ISR_E(10)
	DEFINE_ISR_E(11)
	DEFINE_ISR_E(12)
	DEFINE_ISR_E(13)
	DEFINE_ISR_E(14)
	DEFINE_ISR(15)
	DEFINE_ISR(16)
	DEFINE_ISR_E(17)
	DEFINE_ISR(18)
	DEFINE_ISR(19)
	DEFINE_ISR(20)
	DEFINE_ISR(21)
	DEFINE_ISR(22)
	DEFINE_ISR(23)
	DEFINE_ISR(24)
	DEFINE_ISR(25)
	DEFINE_ISR(26)
	DEFINE_ISR(27)
	DEFINE_ISR(28)
	DEFINE_ISR(29)
	DEFINE_ISR(30)
	DEFINE_ISR(31)

	// User-defined ISRs (32-255) - none take an error code.
	.Lintr = 32
	.rept 224
		DEFINE_ISR(.Lintr)
		.Lintr = .Lintr+1
	.endr


// Common interrupt handling code.
STATIC_FUNCTION(int_bottom):
	// Coming from user-mode requires special handling.
	testl	$3, 24(%rsp)
	jnz		int_bottom_user

	// Push the rest of the interrupt frame to the stack.
	PUSH_IFRAME_BOTTOM(IFRAME_TYPE_OTHER)

	cld

	// Frame pointer is the iframe.
	movq	%rsp, %rbp

	// Set the RF (resume flag) in RFLAGS. This prevents an instruction
	// breakpoint on the instruction we're returning to from triggering a
	// debug exception.
	orq		$X86_EFLAGS_RESUME, IFRAME_flags(%rbp)

	// xsave needs a 64-byte alignment
	andq	$~63, %rsp
	movq	(gFPUSaveLength), %rcx
	subq	%rcx, %rsp
	leaq	(%rsp), %rdi
	shrq	$3, %rcx
	movq	$0, %rax
	rep stosq
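	// EDX:EAX holds gXsaveMask, the mask of state components to save; the
	// fxsaveq below gets patched (ALTCODEPATCH_TAG_XSAVE) to an xsave variant
	// at boot when that is available.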
	movl	(gXsaveMask), %eax
	movl	(gXsaveMask+4), %edx
	movq	%rsp, %rdi
	CODEPATCH_START
	fxsaveq	(%rdi)
	CODEPATCH_END(ALTCODEPATCH_TAG_XSAVE)

	// Call the interrupt handler.
	movq	%rbp, %rdi
	movq	IFRAME_vector(%rbp), %rax
	call	*gInterruptHandlerTable(, %rax, 8)

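	// Restore the FPU state saved on entry; like the save above, the
	// fxrstorq is patched to a matching xrstor variant when xsave is in use.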
	movl	(gXsaveMask), %eax
	movl	(gXsaveMask+4), %edx
	movq	%rsp, %rdi
	CODEPATCH_START
	fxrstorq	(%rdi)
	CODEPATCH_END(ALTCODEPATCH_TAG_XRSTOR)
	movq	%rbp, %rsp

	// Restore the saved registers.
	RESTORE_IFRAME()

	iretq
FUNCTION_END(int_bottom)


// Handler for an interrupt that occurred in user-mode.
STATIC_FUNCTION(int_bottom_user):
	// Load the kernel GS segment base.
	swapgs
	lfence
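	// The lfence keeps speculative execution from continuing with the wrong
	// GS base after the swapgs.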

	// Push the rest of the interrupt frame to the stack.
	PUSH_IFRAME_BOTTOM(IFRAME_TYPE_OTHER)
	cld

	// Frame pointer is the iframe.
	movq	%rsp, %rbp

	// xsave needs a 64-byte alignment
	andq	$~63, %rsp
	movq	(gFPUSaveLength), %rcx
	subq	%rcx, %rsp
	leaq	(%rsp), %rdi
	shrq	$3, %rcx
	movq	$0, %rax
	rep stosq
	movl	(gXsaveMask), %eax
	movl	(gXsaveMask+4), %edx

	movq	%rsp, %rdi
	CODEPATCH_START
	fxsaveq	(%rdi)
	CODEPATCH_END(ALTCODEPATCH_TAG_XSAVE)

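	// Remember where the user FPU state was saved, so that it can be
	// inspected or restored later if needed.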
	movq	%rsp, IFRAME_fpu(%rbp)

	// Set the RF (resume flag) in RFLAGS. This prevents an instruction
	// breakpoint on the instruction we're returning to from triggering a
	// debug exception.
	orq		$X86_EFLAGS_RESUME, IFRAME_flags(%rbp)

	// Get thread pointer.
	movq	%gs:0, %r12

	STOP_USER_DEBUGGING()
	UPDATE_THREAD_USER_TIME()

	// Call the interrupt handler.
	movq	%rbp, %rdi
	movq	IFRAME_vector(%rbp), %rax
	call	*gInterruptHandlerTable(, %rax, 8)

	// If no signals are pending and we aren't being debugged, we can avoid
	// most of the work here; we only need to update the kernel time.
	testl	$(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
			| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \
			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
			, THREAD_flags(%r12)
	jnz		.Lkernel_exit_work

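	// Interrupts stay disabled from here on; they are only re-enabled when
	// iretq restores the user RFLAGS, so the exit sequence cannot be
	// preempted.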
	cli

	UPDATE_THREAD_KERNEL_TIME()

	movl	(gXsaveMask), %eax
	movl	(gXsaveMask+4), %edx
	movq	%rsp, %rdi
	CODEPATCH_START
	fxrstorq	(%rdi)
	CODEPATCH_END(ALTCODEPATCH_TAG_XRSTOR)
	movq	%rbp, %rsp

	// Restore the saved registers.
	RESTORE_IFRAME()

	// Restore the previous GS base and return.
	swapgs
	iretq

.Lkernel_exit_work:
	// Slow path for return to userland.

	// Do we need to handle signals?
	testl	$(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD \
			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
			, THREAD_flags(%r12)
	jnz		.Lkernel_exit_handle_signals
	cli
	call	thread_at_kernel_exit_no_signals

.Lkernel_exit_work_done:
	// Install breakpoints, if defined.
	testl	$THREAD_FLAGS_BREAKPOINTS_DEFINED, THREAD_flags(%r12)
	jz		1f
	movq	%rbp, %rdi
	call	x86_init_user_debug_at_kernel_exit
1:
	movl	(gXsaveMask), %eax
	movl	(gXsaveMask+4), %edx
	movq	%rsp, %rdi
	CODEPATCH_START
	fxrstorq	(%rdi)
	CODEPATCH_END(ALTCODEPATCH_TAG_XRSTOR)
	movq	%rbp, %rsp

	// Restore the saved registers.
	RESTORE_IFRAME()

	// Restore the previous GS base and return.
	swapgs
	iretq

.Lkernel_exit_handle_signals:
	// thread_at_kernel_exit requires interrupts to be enabled; it will
	// disable them again itself.
	sti
	call	thread_at_kernel_exit
	jmp		.Lkernel_exit_work_done
FUNCTION_END(int_bottom_user)


// SYSCALL entry point.
FUNCTION(x86_64_syscall_entry):
	// Upon entry, RSP still points at the user stack. Load the kernel GS
	// segment base address, which points at the current thread's arch_thread
	// structure. This holds our kernel stack pointer and a scratch slot where
	// the user stack pointer is kept until it can be pushed onto the iframe.
	swapgs
	movq	%rsp, %gs:ARCH_THREAD_user_rsp
	movq	%gs:ARCH_THREAD_syscall_rsp, %rsp

	// The following pushes leave the stack misaligned by 8 bytes, so account
	// for that first.
	sub 	$8, %rsp

	// Set up an iframe on the stack (R11 = saved RFLAGS, RCX = saved RIP).
	push	$USER_DATA_SELECTOR			// ss
	push	%gs:ARCH_THREAD_user_rsp	// rsp
	push	%r11						// flags
	push	$USER_CODE_SELECTOR			// cs
	push	%rcx						// ip
	push	$0							// error_code
	push	$99							// vector
	PUSH_IFRAME_BOTTOM(IFRAME_TYPE_SYSCALL)

	cld

	// Frame pointer is the iframe.
	movq	%rsp, %rbp

	// Preserve call number (R14 is callee-save), get thread pointer.
	movq	%rax, %r14
	movq	%gs:0, %r12

	STOP_USER_DEBUGGING()
	UPDATE_THREAD_USER_TIME()

	// No longer need interrupts disabled.
	sti

	// Check whether the syscall number is valid.
	cmpq	$SYSCALL_COUNT, %r14
	jae		.Lsyscall_return

	// Get the system call table entry. Note that the shift is hardcoded
	// because sizeof(syscall_info) is 16 and a scale factor of 16 isn't
	// supported, so we can't just do leaq kSyscallInfos(, %rax, SYSCALL_INFO_sizeof).
	movq	%r14, %rax
	shlq	$4, %rax
	leaq	kSyscallInfos(, %rax, 1), %rax

	// Check the number of call arguments: more than 6 (6 * 8 = 48 bytes)
	// requires a stack copy.
	movq	SYSCALL_INFO_parameter_size(%rax), %rcx
	cmpq	$48, %rcx
	ja		.Lsyscall_stack_args

.Lperform_syscall:
	testl	$THREAD_FLAGS_DEBUGGER_INSTALLED, THREAD_flags(%r12)
	jnz		.Lpre_syscall_debug

.Lpre_syscall_debug_done:
	// Restore the arguments from the iframe. UPDATE_THREAD_USER_TIME() makes
	// 2 function calls, which means they may have been clobbered. Note that
	// argument 4 is in R10 on the frame rather than RCX, as RCX is used by
	// SYSCALL.
	movq	IFRAME_di(%rbp), %rdi
	movq	IFRAME_si(%rbp), %rsi
	movq	IFRAME_dx(%rbp), %rdx
	movq	IFRAME_r10(%rbp), %rcx
	movq	IFRAME_r8(%rbp), %r8
	movq	IFRAME_r9(%rbp), %r9

	// TODO: pre-syscall tracing

	// Call the function and save its return value.
	call	*SYSCALL_INFO_function(%rax)
	movq	%rax, IFRAME_ax(%rbp)

	// TODO: post-syscall tracing

.Lsyscall_return:
	// Restore the original stack pointer and return.
	movq	%rbp, %rsp

	// Clear the restarted flag.
	testl	$THREAD_FLAGS_SYSCALL_RESTARTED, THREAD_flags(%r12)
	jz		2f
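	// Clear only the restarted bit, using a compare-and-swap loop since other
	// flags may be set concurrently.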
1:
	movl	THREAD_flags(%r12), %eax
	movl	%eax, %edx
	andl	$~THREAD_FLAGS_SYSCALL_RESTARTED, %edx
	lock
	cmpxchgl	%edx, THREAD_flags(%r12)
	jnz		1b
2:
	testl	$(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
			| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \
			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP | THREAD_FLAGS_RESTART_SYSCALL) \
			, THREAD_flags(%r12)
	jnz		.Lpost_syscall_work

	cli

	UPDATE_THREAD_KERNEL_TIME()

	// If we've just restored a signal frame, use the IRET path.
	cmpq	$SYSCALL_RESTORE_SIGNAL_FRAME, %r14
	je		.Lrestore_fpu

	CLEAR_FPU_STATE()

	// Restore the iframe and RCX/R11 for SYSRET.
	RESTORE_IFRAME()
	pop		%rcx
	addq	$8, %rsp
	pop		%r11
	pop		%rsp
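	// sysretq takes RIP from RCX and RFLAGS from R11; CS and SS come from the
	// STAR MSR, so the saved cs/ss in the frame are not needed.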

	// Restore previous GS base and return.
	swapgs
	sysretq

.Lpre_syscall_debug:
	// user_debug_pre_syscall expects a pointer to a block of arguments, so we
	// need to push the register arguments onto the stack.
	push	IFRAME_r9(%rbp)
	push	IFRAME_r8(%rbp)
	push	IFRAME_r10(%rbp)
	push	IFRAME_dx(%rbp)
	push	IFRAME_si(%rbp)
	push	IFRAME_di(%rbp)
	movq	%r14, %rdi				// syscall number
	movq	%rsp, %rsi
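	// RAX still holds the syscall table entry and must survive the call; the
	// extra 8 bytes of padding presumably keep the stack 16-byte aligned.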
	subq	$8, %rsp
	push	%rax
	call	user_debug_pre_syscall
	pop		%rax
	addq	$56, %rsp
	jmp		.Lpre_syscall_debug_done

.Lpost_syscall_work:
	testl	$THREAD_FLAGS_DEBUGGER_INSTALLED, THREAD_flags(%r12)
	jz		1f

	// Post-syscall debugging. As above, we need a block of arguments.
	push	IFRAME_r9(%rbp)
	push	IFRAME_r8(%rbp)
	push	IFRAME_r10(%rbp)
	push	IFRAME_dx(%rbp)
	push	IFRAME_si(%rbp)
	push	IFRAME_di(%rbp)
	movq	%r14, %rdi				// syscall number
	movq	%rsp, %rsi
	movq	IFRAME_ax(%rbp), %rdx	// return value
	movq	%r13, %rcx				// start time, preserved earlier
	call	user_debug_post_syscall
	addq	$48, %rsp
1:
	// Do we need to handle signals?
	testl	$(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD \
			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
			, THREAD_flags(%r12)
	jnz		.Lpost_syscall_handle_signals
	cli
	call	thread_at_kernel_exit_no_signals

.Lpost_syscall_work_done:
	// Handle syscall restarting.
	testl	$THREAD_FLAGS_RESTART_SYSCALL, THREAD_flags(%r12)
	jz		1f
	movq	%rsp, %rdi
	call	x86_restart_syscall
1:
	// Install breakpoints, if defined.
	testl	$THREAD_FLAGS_BREAKPOINTS_DEFINED, THREAD_flags(%r12)
	jz		1f
	movq	%rbp, %rdi
	call	x86_init_user_debug_at_kernel_exit
1:
	// On this return path it is possible that the frame has been modified,
	// for example to execute a signal handler. In this case it is safer to
	// return via IRET.
	CLEAR_FPU_STATE()
	jmp .Liret

.Lrestore_fpu:
	movq	IFRAME_fpu(%rbp), %rdi

	movl	(gXsaveMask), %eax
	movl	(gXsaveMask+4), %edx
	CODEPATCH_START
	fxrstorq	(%rdi)
	CODEPATCH_END(ALTCODEPATCH_TAG_XRSTOR)
.Liret:
	// Restore the saved registers.
	RESTORE_IFRAME()

	// Restore the previous GS base and return.
	swapgs
	iretq

.Lpost_syscall_handle_signals:
	call	thread_at_kernel_exit
	jmp		.Lpost_syscall_work_done

.Lsyscall_stack_args:
	// Some arguments are on the stack; work out what we need to copy. The
	// first 6 arguments (48 bytes) are already in registers.
	// RAX = syscall table entry address, RCX = argument size.
	subq	$48, %rcx

	// Get the address to copy from.
	movq	IFRAME_user_sp(%rbp), %rsi
	addq	$8, %rsi
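	// The +8 skips what is presumably the return address at the top of the
	// user stack; the on-stack arguments start just above it.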
	movabs	$(USER_BASE + USER_SIZE), %rdx
	cmp		%rdx, %rsi
	jae		.Lbad_syscall_args

	// Make space on the stack.
	subq	%rcx, %rsp
	andq	$~15, %rsp
	movq	%rsp, %rdi

	// Set a fault handler.
	movq	$.Lbad_syscall_args, THREAD_fault_handler(%r12)
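	// If the copy below faults, execution resumes at .Lbad_syscall_args.
	// ASM_STAC/ASM_CLAC temporarily allow supervisor access to user pages
	// where SMAP is in use, so the user stack can be read.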

	ASM_STAC

	// Copy them by quadwords.
	shrq	$3, %rcx
	rep
	movsq
	ASM_CLAC
	movq	$0, THREAD_fault_handler(%r12)

	// Perform the call.
	jmp		.Lperform_syscall

.Lbad_syscall_args:
	movq	$0, THREAD_fault_handler(%r12)
	movq	%rbp, %rsp
	jmp		.Lsyscall_return
FUNCTION_END(x86_64_syscall_entry)


/*!	\fn void x86_return_to_userland(iframe* frame)
	\brief Returns to the userland environment given by \a frame.

	Before returning to userland, all potentially necessary kernel exit work
	is done.

	\a frame must point to a location somewhere on the caller's stack (e.g. a
	local variable).
	The function must be called with interrupts disabled.

	\param frame The iframe defining the userland environment.
*/
FUNCTION(x86_return_to_userland):
	movq	%rdi, %rbp
	movq	%rbp, %rsp

	// Perform kernel exit work.
	movq	%gs:0, %r12
	testl	$(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
			| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \
			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
			, THREAD_flags(%r12)
	jnz		.Luserland_return_work

	// Update the thread's kernel time and return.
	UPDATE_THREAD_KERNEL_TIME()

	// Restore the frame and return.
	RESTORE_IFRAME()
	swapgs
	iretq
.Luserland_return_work:
	// Slow path for return to userland.

	// Do we need to handle signals?
	testl	$(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD \
			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
			, THREAD_flags(%r12)
	jnz		.Luserland_return_handle_signals
	cli
	call	thread_at_kernel_exit_no_signals

.Luserland_return_work_done:
	// Install breakpoints, if defined.
	testl	$THREAD_FLAGS_BREAKPOINTS_DEFINED, THREAD_flags(%r12)
	jz		1f
	movq	%rbp, %rdi
	call	x86_init_user_debug_at_kernel_exit
1:
	// Restore the saved registers.
	RESTORE_IFRAME()

	// Restore the previous GS base and return.
	swapgs
	iretq
.Luserland_return_handle_signals:
	// thread_at_kernel_exit requires interrupts to be enabled; it will
	// disable them again itself.
	sti
	call	thread_at_kernel_exit
	jmp		.Luserland_return_work_done
FUNCTION_END(x86_return_to_userland)
