/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#define _ASM

#include <machine/asmacros.h>
#include <sys/cpuvar_defs.h>
#include <sys/dtrace.h>

#include "assym.s"

	.globl	calltrap
	.type	calltrap,@function
	ENTRY(dtrace_invop_start)
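	/*
	 * We arrive here from the kernel trap code when a probe-induced
	 * trap is taken in an FBT-instrumented function.  The saved
	 * general-purpose registers sit on the stack below the hardware
	 * trap frame (EIP, CS, EFLAGS), as the popal/iret sequences in
	 * the emulation cases below assume.
	 */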

	pushl	%eax			/* push %eax -- may be return value */
	pushl	%esp			/* push stack pointer */
	addl	$48, (%esp)		/* adjust to incoming args */
	pushl	40(%esp)		/* push calling EIP */

	/*
	 * Call dtrace_invop to let it check whether the exception was
	 * an FBT one.  The return value in %eax will tell us what
	 * dtrace_invop wants us to do.
	 */
	call	dtrace_invop

	/*
	 * We pushed 3 times for the arguments to dtrace_invop,
	 * so we need to increment the stack pointer to get rid of
	 * those values.
	 */
	addl	$12, %esp
	ALTENTRY(dtrace_invop_callsite)
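	/*
	 * Each DTRACE_INVOP_* return value identifies the one-byte
	 * instruction that the probe overwrote.  The cases below emulate
	 * that instruction by hand, advance the saved EIP past the patched
	 * byte, and iret back into the traced function.
	 */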
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	invop_push
	cmpl	$DTRACE_INVOP_POPL_EBP, %eax
	je	invop_pop
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	invop_leave
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	invop_nop

	/* When all else fails handle the trap in the usual way. */
	jmpl	*dtrace_invop_calltrap_addr

invop_push:
	/*
	 * We must emulate a "pushl %ebp".  To do this, we pull the stack
	 * down 4 bytes, and then store the base pointer.
	 */
	popal
	subl	$4, %esp		/* make room for %ebp */
	pushl	%eax			/* push temp */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 4(%esp)		/* store calling EIP */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, 8(%esp)		/* store calling CS */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 12(%esp)		/* store calling EFLAGS */
	movl	%ebp, 16(%esp)		/* push %ebp */
	popl	%eax			/* pop off temp */
	iret				/* Return from interrupt. */
invop_pop:
	/*
	 * We must emulate a "popl %ebp".  To do this, we do the opposite of
	 * the above:  we remove the %ebp from the stack, and squeeze up the
	 * saved state from the trap.
	 */
	popal
	pushl	%eax			/* push temp */
	movl	16(%esp), %ebp		/* pop %ebp */
	movl	12(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 16(%esp)		/* store calling EFLAGS */
	movl	8(%esp), %eax		/* load calling CS */
	movl	%eax, 12(%esp)		/* store calling CS */
	movl	4(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 8(%esp)		/* store calling EIP */
	popl	%eax			/* pop off temp */
	addl	$4, %esp		/* adjust stack pointer */
	iret				/* Return from interrupt. */
invop_leave:
	/*
	 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
	 * followed by a "popl %ebp".  This looks similar to the above, but
	 * requires two temporaries:  one for the new base pointer, and one
	 * for the staging register.
	 */
	popal
	pushl	%eax			/* push temp */
	pushl	%ebx			/* push temp */
	movl	%ebp, %ebx		/* set temp to old %ebp */
	movl	(%ebx), %ebp		/* pop %ebp */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, (%ebx)		/* store calling EFLAGS */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, -4(%ebx)		/* store calling CS */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, -8(%ebx)		/* store calling EIP */
	subl	$8, %ebx		/* adjust for three pushes, one pop */
	movl	%ebx, 8(%esp)		/* temporarily store new %esp */
	popl	%ebx			/* pop off temp */
	popl	%eax			/* pop off temp */
	movl	(%esp), %esp		/* set stack pointer */
	iret				/* return from interrupt */
invop_nop:
	/*
	 * We must emulate a "nop".  This is obviously not hard:  we need only
	 * advance the %eip by one.
	 */
	popal
	incl	(%esp)
	iret				/* return from interrupt */

	END(dtrace_invop_start)

/*
void dtrace_invop_init(void)
*/
	ENTRY(dtrace_invop_init)
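	/*
	 * Hook installation: point dtrace_invop_jump_addr at our handler
	 * so that the kernel's trap code can vector probe-induced traps
	 * to dtrace_invop_start above.
	 */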
	movl	$dtrace_invop_start, dtrace_invop_jump_addr
	ret
	END(dtrace_invop_init)

/*
void dtrace_invop_uninit(void)
*/
	ENTRY(dtrace_invop_uninit)
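	/* Clearing the hook pointer disables invop handling again. */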
	movl	$0, dtrace_invop_jump_addr
	ret
	END(dtrace_invop_uninit)

/*
greg_t dtrace_getfp(void)
*/

	ENTRY(dtrace_getfp)
	movl	%ebp, %eax
	ret
	END(dtrace_getfp)

/*
uint32_t dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
*/

	ENTRY(dtrace_cas32)
	ALTENTRY(dtrace_casptr)
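	/*
	 * Atomic compare-and-swap: if *target equals cmp, store new into
	 * *target.  Either way the previous value of *target is returned
	 * in %eax.  dtrace_casptr shares this body because pointers are
	 * 32 bits wide on i386.
	 */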
	movl	4(%esp), %edx
	movl	8(%esp), %eax
	movl	12(%esp), %ecx
	lock
	cmpxchgl %ecx, (%edx)
	ret
	END(dtrace_casptr)
	END(dtrace_cas32)

/*
uintptr_t dtrace_caller(int aframes)
*/

	ENTRY(dtrace_caller)
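	/*
	 * We make no attempt to resolve the caller here; returning -1
	 * causes the framework to fall back on a stack walk to determine
	 * the caller.
	 */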
	movl	$-1, %eax
	ret
	END(dtrace_caller)

/*
void dtrace_copy(uintptr_t src, uintptr_t dest, size_t size)
*/

	ENTRY(dtrace_copy)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%esi
	pushl	%edi

	movl	8(%ebp), %esi		/* Load source address */
	movl	12(%ebp), %edi		/* Load destination address */
	movl	16(%ebp), %ecx		/* Load count */
	repz				/* Repeat for count... */
	smovb				/*   move from %ds:%esi to %es:%edi */

	popl	%edi
	popl	%esi
	movl	%ebp, %esp
	popl	%ebp
	ret
	END(dtrace_copy)

/*
void dtrace_copystr(uintptr_t uaddr, uintptr_t kaddr, size_t size)
*/

	ENTRY(dtrace_copystr)

	pushl	%ebp			/* Set up stack frame */
	movl	%esp, %ebp
	pushl	%ebx			/* Save registers */

	movl	8(%ebp), %ebx		/* Load source address */
	movl	12(%ebp), %edx		/* Load destination address */
	movl	16(%ebp), %ecx		/* Load count */

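	/*
	 * Copy one byte at a time, stopping once the terminating NUL has
	 * been copied or the count has been exhausted, whichever comes
	 * first.
	 */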
0:
	movb	(%ebx), %al		/* Load from source */
	movb	%al, (%edx)		/* Store to destination */
	incl	%ebx			/* Increment source pointer */
	incl	%edx			/* Increment destination pointer */
	decl	%ecx			/* Decrement remaining count */
	cmpb	$0, %al
	je	1f
	cmpl	$0, %ecx
	jne	0b

1:
	popl	%ebx
	movl	%ebp, %esp
	popl	%ebp
	ret

	END(dtrace_copystr)

/*
uintptr_t dtrace_fulword(void *addr)
*/

	ENTRY(dtrace_fulword)
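	/*
	 * This and the *_nocheck loaders below simply fetch from the given
	 * address without validating it; any fault taken here is expected
	 * to be handled by the kernel's DTrace trap handling rather than
	 * by these routines.
	 */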
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	movl	(%ecx), %eax
	ret
	END(dtrace_fulword)

/*
uint8_t dtrace_fuword8_nocheck(void *addr)
*/

	ENTRY(dtrace_fuword8_nocheck)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	movzbl	(%ecx), %eax
	ret
	END(dtrace_fuword8_nocheck)

/*
uint16_t dtrace_fuword16_nocheck(void *addr)
*/

	ENTRY(dtrace_fuword16_nocheck)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	movzwl	(%ecx), %eax
	ret
	END(dtrace_fuword16_nocheck)

/*
uint32_t dtrace_fuword32_nocheck(void *addr)
*/

	ENTRY(dtrace_fuword32_nocheck)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	movl	(%ecx), %eax
	ret
	END(dtrace_fuword32_nocheck)

/*
uint64_t dtrace_fuword64_nocheck(void *addr)
*/

	ENTRY(dtrace_fuword64_nocheck)
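	/* The 64-bit result is returned in %edx:%eax per the i386 ABI. */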
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	xorl	%edx, %edx
	movl	(%ecx), %eax
	movl	4(%ecx), %edx
	ret
	END(dtrace_fuword64_nocheck)

/*
void dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which, int fault, int fltoffs, uintptr_t illval)
*/

	ENTRY(dtrace_probe_error)
	pushl	%ebp
	movl	%esp, %ebp
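	/*
	 * Re-push our six incoming arguments in reverse order and prepend
	 * the error probe id, then let dtrace_probe() report the fault.
	 * The frame is torn down wholesale below, so the pushed arguments
	 * need not be popped explicitly.
	 */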
	pushl	0x1c(%ebp)
	pushl	0x18(%ebp)
	pushl	0x14(%ebp)
	pushl	0x10(%ebp)
	pushl	0xc(%ebp)
	pushl	0x8(%ebp)
	pushl	dtrace_probeid_error
	call	dtrace_probe
	movl	%ebp, %esp
	popl	%ebp
	ret
	END(dtrace_probe_error)

/*
void dtrace_membar_producer(void)
*/

	ENTRY(dtrace_membar_producer)
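	/*
	 * x86 does not reorder ordinary stores with respect to other
	 * stores, so no explicit fence is needed for a producer barrier.
	 */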
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	END(dtrace_membar_producer)

/*
void dtrace_membar_consumer(void)
*/

	ENTRY(dtrace_membar_consumer)
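	/*
	 * Likewise, x86 does not reorder loads with respect to other
	 * loads, so the consumer barrier needs no fence instruction either.
	 */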
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	END(dtrace_membar_consumer)

/*
dtrace_icookie_t dtrace_interrupt_disable(void)
*/
	ENTRY(dtrace_interrupt_disable)
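	/*
	 * The cookie is simply the caller's EFLAGS (including IF), saved
	 * before interrupts are disabled.
	 */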
	pushfl
	popl	%eax
	cli
	ret
	END(dtrace_interrupt_disable)

/*
void dtrace_interrupt_enable(dtrace_icookie_t cookie)
*/
	ENTRY(dtrace_interrupt_enable)
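	/* Restore the EFLAGS saved by dtrace_interrupt_disable() above. */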
	movl	4(%esp), %eax
	pushl	%eax
	popfl
	ret
	END(dtrace_interrupt_enable)

/*
 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
 * into the panic code implemented in panicsys().  vpanic() is responsible
 * for passing through the format string and arguments, and constructing a
 * regs structure on the stack into which it saves the current register
 * values.  If we are not dying due to a fatal trap, these registers will
 * then be preserved in panicbuf as the current processor state.  Before
 * invoking panicsys(), vpanic() activates the first panic trigger (see
 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
 * DTrace takes a slightly different panic path if it must panic from probe
 * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
 * branches back into vpanic().
 */
/*
void vpanic(const char *format, va_list alist)
*/
	ENTRY(vpanic)				/* Initial stack layout: */

	pushl	%ebp				/* | %eip | 20 */
	movl	%esp, %ebp			/* | %ebp | 16 */
	pushl	%eax				/* | %eax | 12 */
	pushl	%ebx				/* | %ebx |  8 */
	pushl	%ecx				/* | %ecx |  4 */
	pushl	%edx				/* | %edx |  0 */

	movl	%esp, %ebx			/* %ebx = current stack pointer */

	lea	panic_quiesce, %eax		/* %eax = &panic_quiesce */
	pushl	%eax				/* push &panic_quiesce */
	call	panic_trigger			/* %eax = panic_trigger() */
	addl	$4, %esp			/* reset stack pointer */

vpanic_common:
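	/*
	 * This code is shared with dtrace_vpanic(), which sets up the same
	 * initial frame, runs dtrace_panic_trigger(), and then jumps here.
	 */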
	cmpl	$0, %eax			/* if (%eax == 0) */
	je	0f				/*   goto 0f; */

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
	 */
	lea	panic_stack, %esp		/* %esp  = panic_stack */
	addl	$PANICSTKSIZE, %esp		/* %esp += PANICSTKSIZE */

0:	subl	$REGSIZE, %esp			/* allocate struct regs */

	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
	 */
#ifdef notyet
	mov	%gs, %edx
	mov	%edx, REGOFF_GS(%esp)
	mov	%fs, %edx
	mov	%edx, REGOFF_FS(%esp)
	mov	%es, %edx
	mov	%edx, REGOFF_ES(%esp)
	mov	%ds, %edx
	mov	%edx, REGOFF_DS(%esp)
	movl	%edi, REGOFF_EDI(%esp)
	movl	%esi, REGOFF_ESI(%esp)
	movl	16(%ebx), %ecx
	movl	%ecx, REGOFF_EBP(%esp)
	movl	%ebx, %ecx
	addl	$20, %ecx
	movl	%ecx, REGOFF_ESP(%esp)
	movl	8(%ebx), %ecx
	movl	%ecx, REGOFF_EBX(%esp)
	movl	0(%ebx), %ecx
	movl	%ecx, REGOFF_EDX(%esp)
	movl	4(%ebx), %ecx
	movl	%ecx, REGOFF_ECX(%esp)
	movl	12(%ebx), %ecx
	movl	%ecx, REGOFF_EAX(%esp)
	movl	$0, REGOFF_TRAPNO(%esp)
	movl	$0, REGOFF_ERR(%esp)
	lea	vpanic, %ecx
	movl	%ecx, REGOFF_EIP(%esp)
	mov	%cs, %edx
	movl	%edx, REGOFF_CS(%esp)
	pushfl
	popl	%ecx
	movl	%ecx, REGOFF_EFL(%esp)
	movl	$0, REGOFF_UESP(%esp)
	mov	%ss, %edx
	movl	%edx, REGOFF_SS(%esp)

	movl	%esp, %ecx			/* %ecx = &regs */
	pushl	%eax				/* push on_panic_stack */
	pushl	%ecx				/* push &regs */
	movl	12(%ebp), %ecx			/* %ecx = alist */
	pushl	%ecx				/* push alist */
	movl	8(%ebp), %ecx			/* %ecx = format */
	pushl	%ecx				/* push format */
	call	panicsys			/* panicsys(); */
	addl	$16, %esp			/* pop arguments */

	addl	$REGSIZE, %esp
#endif
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax
	leave
	ret
	END(vpanic)

/*
void dtrace_vpanic(const char *format, va_list alist)
*/
	ENTRY(dtrace_vpanic)			/* Initial stack layout: */

	pushl	%ebp				/* | %eip | 20 */
	movl	%esp, %ebp			/* | %ebp | 16 */
	pushl	%eax				/* | %eax | 12 */
	pushl	%ebx				/* | %ebx |  8 */
	pushl	%ecx				/* | %ecx |  4 */
	pushl	%edx				/* | %edx |  0 */

	movl	%esp, %ebx			/* %ebx = current stack pointer */

	lea	panic_quiesce, %eax		/* %eax = &panic_quiesce */
	pushl	%eax				/* push &panic_quiesce */
	call	dtrace_panic_trigger		/* %eax = dtrace_panic_trigger() */
	addl	$4, %esp			/* reset stack pointer */
	jmp	vpanic_common			/* jump back to common code */

	END(dtrace_vpanic)

/*
int
panic_trigger(int *tp)
*/
	ENTRY(panic_trigger)
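	/*
	 * Atomically exchange a marker value into *tp.  If the previous
	 * contents were zero we are the first to fire this trigger and
	 * return 1; otherwise someone beat us to it and we return 0.
	 */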
	movl	4(%esp), %ecx		/* %ecx = tp; i386 passes arguments on the stack */
	xorl	%eax, %eax
	movl	$0xdefacedd, %edx
	lock
	  xchgl	%edx, (%ecx)
	cmpl	$0, %edx
	je	0f
	movl	$0, %eax
	ret
0:	movl	$1, %eax
	ret
	END(panic_trigger)

/*
int
dtrace_panic_trigger(int *tp)
*/
	ENTRY(dtrace_panic_trigger)
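	/*
	 * Identical to panic_trigger() above; this variant is used when
	 * DTrace must panic from probe context.
	 */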
	movl	4(%esp), %ecx		/* %ecx = tp; i386 passes arguments on the stack */
	xorl	%eax, %eax
	movl	$0xdefacedd, %edx
	lock
	  xchgl	%edx, (%ecx)
	cmpl	$0, %edx
	je	0f
	movl	$0, %eax
	ret
0:	movl	$1, %eax
	ret
	END(dtrace_panic_trigger)
