/* dtrace_asm.S revision 1.3 */
/*	$NetBSD: dtrace_asm.S,v 1.3 2010/03/13 22:31:15 christos Exp $	*/

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD: src/sys/cddl/dev/dtrace/i386/dtrace_asm.S,v 1.1.4.1 2009/08/03 08:13:06 kensmith Exp $
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * i386 assembly support routines for DTrace (AT&T syntax).
 *
 * Calling convention throughout this file is i386 cdecl: arguments are
 * passed on the stack (first arg at 4(%esp) on entry), the return value
 * is in %eax (%edx:%eax for 64-bit results), and %eax/%ecx/%edx are
 * caller-saved.
 */

#define _ASM

#include <sys/cpuvar_defs.h>
#include <sys/dtrace.h>
#include <machine/asm.h>
#include <machine/frameasm.h>

/*
 * dtrace_invop_start -- entry from the kernel's breakpoint/invalid-opcode
 * trap handler (jumped to via dtrace_invop_jump_addr, see
 * dtrace_invop_init below).
 *
 * It asks dtrace_invop(eip, stack, eax) whether the faulting instruction
 * was an FBT probe point and, if so, which single instruction must be
 * emulated before resuming with iret.  If dtrace_invop() returns a value
 * this code does not recognize, the trap is forwarded to the kernel's
 * normal trap handler via *dtrace_invop_calltrap_addr.
 *
 * NOTE(review): the constants 48 and 40(%esp) below assume a specific
 * register-save layout pushed by the trap handler before it jumps here
 * (that handler is not visible in this file) -- confirm against the
 * machine-dependent trap code.
 */
	ENTRY(dtrace_invop_start)

	pushl	%eax			/* push %eax -- may be return value */
	pushl	%esp			/* push stack pointer */
	addl	$48, (%esp)		/* adjust to incoming args */
	pushl	40(%esp)		/* push calling EIP */

	/*
	 * Call dtrace_invop to let it check if the exception was
	 * a fbt one. The return value in %eax will tell us what
	 * dtrace_invop wants us to do.
	 */
	call	dtrace_invop

	/*
	 * We pushed 3 times for the arguments to dtrace_invop,
	 * so we need to increment the stack pointer to get rid of
	 * those values.
	 */
	addl	$12, %esp
//	ALTENTRY(dtrace_invop_callsite)
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	invop_push
	cmpl	$DTRACE_INVOP_POPL_EBP, %eax
	je	invop_pop
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	invop_leave
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	invop_nop

	/* When all else fails handle the trap in the usual way. */
	jmpl	*dtrace_invop_calltrap_addr

invop_push:
	/*
	 * We must emulate a "pushl %ebp".  To do this, we pull the stack
	 * down 4 bytes, and then store the base pointer.
	 *
	 * After the popal below, %esp points at the iret frame
	 * (EIP, CS, EFLAGS); each word is slid down one slot to make
	 * room for the emulated push of %ebp above the frame.
	 */
	popal
	subl	$4, %esp		/* make room for %ebp */
	pushl	%eax			/* push temp */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 4(%esp)		/* store calling EIP */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, 8(%esp)		/* store calling CS */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 12(%esp)		/* store calling EFLAGS */
	movl	%ebp, 16(%esp)		/* push %ebp */
	popl	%eax			/* pop off temp */
	iret				/* Return from interrupt. */
invop_pop:
	/*
	 * We must emulate a "popl %ebp".  To do this, we do the opposite of
	 * the above: we remove the %ebp from the stack, and squeeze up the
	 * saved state from the trap.
	 */
	popal
	pushl	%eax			/* push temp */
	movl	16(%esp), %ebp		/* pop %ebp */
	movl	12(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 16(%esp)		/* store calling EFLAGS */
	movl	8(%esp), %eax		/* load calling CS */
	movl	%eax, 12(%esp)		/* store calling CS */
	movl	4(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 8(%esp)		/* store calling EIP */
	popl	%eax			/* pop off temp */
	addl	$4, %esp		/* adjust stack pointer */
	iret				/* Return from interrupt. */
invop_leave:
	/*
	 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
	 * followed by a "popl %ebp".  This looks similar to the above, but
	 * requires two temporaries:  one for the new base pointer, and one
	 * for the staging register.
	 *
	 * A rebuilt iret frame (EIP, CS, EFLAGS) is staged just below the
	 * interrupted frame's saved %ebp, then %esp is switched to it.
	 */
	popa
	pushl	%eax			/* push temp */
	pushl	%ebx			/* push temp */
	movl	%ebp, %ebx		/* set temp to old %ebp */
	movl	(%ebx), %ebp		/* pop %ebp */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, (%ebx)		/* store calling EFLAGS */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, -4(%ebx)		/* store calling CS */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, -8(%ebx)		/* store calling EIP */
	movl	%ebx, -4(%esp)		/* temporarily store new %esp */
	popl	%ebx			/* pop off temp */
	popl	%eax			/* pop off temp */
	movl	-12(%esp), %esp		/* set stack pointer */
	subl	$8, %esp		/* adjust for three pushes, one pop */
	iret				/* return from interrupt */
invop_nop:
	/*
	 * We must emulate a "nop".  This is obviously not hard:  we need only
	 * advance the %eip by one.
	 */
	popa
	incl	(%esp)			/* bump saved EIP past the probe byte */
	iret				/* return from interrupt */

	END(dtrace_invop_start)

/*
void dtrace_invop_init(void)

	Arm FBT: point dtrace_invop_jump_addr (declared elsewhere) at
	dtrace_invop_start so the trap handler diverts into it.
*/
	ENTRY(dtrace_invop_init)
	movl	$dtrace_invop_start, dtrace_invop_jump_addr
	ret
	END(dtrace_invop_init)

/*
void dtrace_invop_uninit(void)

	Disarm FBT: clear dtrace_invop_jump_addr.
*/
	ENTRY(dtrace_invop_uninit)
	movl	$0, dtrace_invop_jump_addr
	ret
	END(dtrace_invop_uninit)

/*
greg_t dtrace_getfp(void)

	Return the caller's frame pointer (%ebp) in %eax.
*/

	ENTRY(dtrace_getfp)
	movl	%ebp, %eax
	ret
	END(dtrace_getfp)

/*
uint32_t dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)

	Atomic compare-and-swap: if (*target == cmp) *target = new.
	Returns the value observed at *target (in %eax, per cmpxchg).
*/

	ENTRY(dtrace_cas32)
	movl	4(%esp), %edx		/* %edx = target */
	movl	8(%esp), %eax		/* %eax = cmp (cmpxchg comparand) */
	movl	12(%esp), %ecx		/* %ecx = new */
	lock
	cmpxchgl %ecx, (%edx)
	ret
	END(dtrace_cas32)

/*
uint32_t dtrace_casptr(uint32_t *target, uint32_t cmp, uint32_t new)

	Pointer-width CAS; identical to dtrace_cas32 on ILP32 i386.
*/

	ENTRY(dtrace_casptr)
	movl	4(%esp), %edx		/* %edx = target */
	movl	8(%esp), %eax		/* %eax = cmp */
	movl	12(%esp), %ecx		/* %ecx = new */
	lock
	cmpxchgl %ecx, (%edx)
	ret
	END(dtrace_casptr)


/*
uintptr_t dtrace_caller(int aframes)

	Stub: always returns -1, telling the caller to walk the stack
	itself to find the probe caller.
*/

	ENTRY(dtrace_caller)
	movl	$-1, %eax
	ret
	END(dtrace_caller)

/*
void dtrace_copy(uintptr_t src, uintptr_t dest, size_t size)

	Byte-wise copy of size bytes from src to dest ("smovb" is the
	traditional gas mnemonic for movsb).  No fault handling here;
	presumably the caller has validated the addresses -- confirm.
*/

	ENTRY(dtrace_copy)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%esi			/* %esi/%edi are callee-saved */
	pushl	%edi

	movl	8(%ebp), %esi		/* Load source address */
	movl	12(%ebp), %edi		/* Load destination address */
	movl	16(%ebp), %ecx		/* Load count */
	repz				/* Repeat for count ... */
	smovb				/*   move from %ds:si to %es:di */

	popl	%edi
	popl	%esi
	movl	%ebp, %esp
	popl	%ebp
	ret
	END(dtrace_copy)

/*
void dtrace_copystr(uintptr_t uaddr, uintptr_t kaddr, size_t size)

	Copy a NUL-terminated string from uaddr to kaddr, stopping after
	the NUL byte or after size bytes, whichever comes first.
*/

	ENTRY(dtrace_copystr)

	pushl	%ebp			/* Setup stack frame */
	movl	%esp, %ebp
	pushl	%ebx			/* Save registers */

	movl	8(%ebp), %ebx		/* Load source address */
	movl	12(%ebp), %edx		/* Load destination address */
	movl	16(%ebp), %ecx		/* Load count */

0:
	movb	(%ebx), %al		/* Load from source */
	movb	%al, (%edx)		/* Store to destination */
	incl	%ebx			/* Increment source pointer */
	incl	%edx			/* Increment destination pointer */
	decl	%ecx			/* Decrement remaining count */
	cmpb	$0, %al			/* NUL terminator copied? */
	je	1f
	cmpl	$0, %ecx		/* byte budget exhausted? */
	jne	0b

1:
	popl	%ebx
	movl	%ebp, %esp
	popl	%ebp
	ret

	END(dtrace_copystr)

/*
uintptr_t dtrace_fulword(void *addr)

	Fetch a word from addr.  No fault protection at this layer.
*/

	ENTRY(dtrace_fulword)
	movl	4(%esp), %ecx
	xorl	%eax, %eax		/* default 0 (kept from fault-safe variants) */
	movl	(%ecx), %eax
	ret
	END(dtrace_fulword)

/*
uint8_t dtrace_fuword8_nocheck(void *addr)

	Fetch one byte, zero-extended into %eax.
*/

	ENTRY(dtrace_fuword8_nocheck)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	movzbl	(%ecx), %eax
	ret
	END(dtrace_fuword8_nocheck)

/*
uint16_t dtrace_fuword16_nocheck(void *addr)

	Fetch a 16-bit word, zero-extended into %eax.
*/

	ENTRY(dtrace_fuword16_nocheck)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	movzwl	(%ecx), %eax
	ret
	END(dtrace_fuword16_nocheck)

/*
uint32_t dtrace_fuword32_nocheck(void *addr)

	Fetch a 32-bit word into %eax.
*/

	ENTRY(dtrace_fuword32_nocheck)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	movl	(%ecx), %eax
	ret
	END(dtrace_fuword32_nocheck)

/*
uint64_t dtrace_fuword64_nocheck(void *addr)

	Fetch a 64-bit value; returned in %edx:%eax per the i386 ABI.
*/

	ENTRY(dtrace_fuword64_nocheck)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	xorl	%edx, %edx
	movl	(%ecx), %eax		/* low 32 bits */
	movl	4(%ecx), %edx		/* high 32 bits */
	ret
	END(dtrace_fuword64_nocheck)

/*
void
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which, int fault, int fltoffs, uintptr_t illval)

	Fire the DTrace ERROR probe: forwards all six arguments, prefixed
	with the global probe id dtrace_probeid_error (pushed by value),
	to dtrace_probe().
*/

	ENTRY(dtrace_probe_error)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	0x1c(%ebp)		/* illval */
	pushl	0x18(%ebp)		/* fltoffs */
	pushl	0x14(%ebp)		/* fault */
	pushl	0x10(%ebp)		/* which */
	pushl	0xc(%ebp)		/* epid */
	pushl	0x8(%ebp)		/* state */
	pushl	dtrace_probeid_error	/* memory operand: push its value */
	call	dtrace_probe
	movl	%ebp, %esp		/* discard the seven pushed args */
	popl	%ebp
	ret
	END(dtrace_probe_error)

/*
void dtrace_membar_producer(void)

	Store ordering barrier.  x86 TSO already orders stores, so this is
	a plain return.
*/

	ENTRY(dtrace_membar_producer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	END(dtrace_membar_producer)

/*
void dtrace_membar_consumer(void)

	Load ordering barrier; a no-op on x86 for the same reason.
*/

	ENTRY(dtrace_membar_consumer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	END(dtrace_membar_consumer)

/*
dtrace_icookie_t dtrace_interrupt_disable(void)

	Disable interrupts; return the prior EFLAGS (the cookie) in %eax.
*/
	ENTRY(dtrace_interrupt_disable)
	pushfl
	popl	%eax			/* %eax = saved EFLAGS */
	cli
	ret
	END(dtrace_interrupt_disable)

/*
void dtrace_interrupt_enable(dtrace_icookie_t cookie)

	Restore EFLAGS from the cookie, re-enabling interrupts iff they
	were enabled when the cookie was taken.
*/
	ENTRY(dtrace_interrupt_enable)
	movl	4(%esp), %eax
	pushl	%eax
	popfl
	ret
	END(dtrace_interrupt_enable)

/*
 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
 * into the panic code implemented in panicsys().  vpanic() is responsible
 * for passing through the format string and arguments, and constructing a
 * regs structure on the stack into which it saves the current register
 * values.  If we are not dying due to a fatal trap, these registers will
 * then be preserved in panicbuf as the current processor state.  Before
 * invoking panicsys(), vpanic() activates the first panic trigger (see
 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
 * DTrace takes a slightly different panic path if it must panic from probe
 * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
 * branches back into vpanic().
 */
/*
void vpanic(const char *format, va_list alist)
*/
	ENTRY(vpanic)			/* Initial stack layout: */

	pushl	%ebp			/*	| %eip	| 20	*/
	movl	%esp, %ebp		/*	| %ebp	| 16	*/
	pushl	%eax			/*	| %eax	| 12	*/
	pushl	%ebx			/*	| %ebx	|  8	*/
	pushl	%ecx			/*	| %ecx	|  4	*/
	pushl	%edx			/*	| %edx	|  0	*/

	movl	%esp, %ebx		/* %ebx = current stack pointer */

	lea	panic_quiesce, %eax	/* %eax = &panic_quiesce */
	pushl	%eax			/* push &panic_quiesce */
	call	panic_trigger		/* %eax = panic_trigger() */
	addl	$4, %esp		/* reset stack pointer */

vpanic_common:
	cmpl	$0, %eax		/* if (%eax == 0) */
	je	0f			/*   goto 0f; */

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
	 */
	lea	panic_stack, %esp	/* %esp = panic_stack */
	addl	$PANICSTKSIZE, %esp	/* %esp += PANICSTKSIZE */

0:	subl	$REGSIZE, %esp		/* allocate struct regs */

	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
	 *
	 * NOTE(review): the register-snapshot + panicsys() call below is
	 * compiled out (#ifdef notyet), so this port currently only
	 * restores the scratch registers and returns.
	 */
#ifdef notyet
	mov	%gs, %edx
	mov	%edx, REGOFF_GS(%esp)
	mov	%fs, %edx
	mov	%edx, REGOFF_FS(%esp)
	mov	%es, %edx
	mov	%edx, REGOFF_ES(%esp)
	mov	%ds, %edx
	mov	%edx, REGOFF_DS(%esp)
	movl	%edi, REGOFF_EDI(%esp)
	movl	%esi, REGOFF_ESI(%esp)
	movl	16(%ebx), %ecx
	movl	%ecx, REGOFF_EBP(%esp)
	movl	%ebx, %ecx
	addl	$20, %ecx
	movl	%ecx, REGOFF_ESP(%esp)
	movl	8(%ebx), %ecx
	movl	%ecx, REGOFF_EBX(%esp)
	movl	0(%ebx), %ecx
	movl	%ecx, REGOFF_EDX(%esp)
	movl	4(%ebx), %ecx
	movl	%ecx, REGOFF_ECX(%esp)
	movl	12(%ebx), %ecx
	movl	%ecx, REGOFF_EAX(%esp)
	movl	$0, REGOFF_TRAPNO(%esp)
	movl	$0, REGOFF_ERR(%esp)
	lea	vpanic, %ecx
	movl	%ecx, REGOFF_EIP(%esp)
	mov	%cs, %edx
	movl	%edx, REGOFF_CS(%esp)
	pushfl
	popl	%ecx
	movl	%ecx, REGOFF_EFL(%esp)
	movl	$0, REGOFF_UESP(%esp)
	mov	%ss, %edx
	movl	%edx, REGOFF_SS(%esp)

	movl	%esp, %ecx		/* %ecx = &regs */
	pushl	%eax			/* push on_panic_stack */
	pushl	%ecx			/* push &regs */
	movl	12(%ebp), %ecx		/* %ecx = alist */
	pushl	%ecx			/* push alist */
	movl	8(%ebp), %ecx		/* %ecx = format */
	pushl	%ecx			/* push format */
	call	panicsys		/* panicsys(); */
	addl	$16, %esp		/* pop arguments */

	addl	$REGSIZE, %esp
#endif
	popl	%edx			/* restore caller's scratch regs */
	popl	%ecx
	popl	%ebx
	popl	%eax
	leave
	ret
	END(vpanic)

/*
void dtrace_vpanic(const char *format, va_list alist)

	Probe-context panic entry: identical prologue to vpanic(), but
	fires dtrace_panic_trigger() instead, then rejoins vpanic_common.
*/
	ENTRY(dtrace_vpanic)		/* Initial stack layout: */

	pushl	%ebp			/*	| %eip	| 20	*/
	movl	%esp, %ebp		/*	| %ebp	| 16	*/
	pushl	%eax			/*	| %eax	| 12	*/
	pushl	%ebx			/*	| %ebx	|  8	*/
	pushl	%ecx			/*	| %ecx	|  4	*/
	pushl	%edx			/*	| %edx	|  0	*/

	movl	%esp, %ebx		/* %ebx = current stack pointer */

	lea	panic_quiesce, %eax	/* %eax = &panic_quiesce */
	pushl	%eax			/* push &panic_quiesce */
	call	dtrace_panic_trigger	/* %eax = dtrace_panic_trigger() */
	addl	$4, %esp		/* reset stack pointer */

	jmp	vpanic_common		/* jump back to common code */

	END(dtrace_vpanic)

/*
int
panic_trigger(int *tp)

	Atomically fire the trigger at *tp: exchange in the magic value
	0xdefacedd and return 1 in %eax if the old value was 0 (we are the
	first to trip it), else 0.  Clobbers %ecx, %edx.

	Fixed: the original code did "xchgl %edx, (%edi)" without ever
	loading %edi -- an amd64 leftover (%rdi = first arg there).  On
	i386 cdecl the argument lives on the stack (vpanic above pushes
	&panic_quiesce before the call), so we load it into %ecx first.
	The explicit "lock" prefix is redundant (xchg with memory always
	locks) but harmless, and is kept for clarity.
*/
	ENTRY(panic_trigger)
	movl	4(%esp), %ecx		/* %ecx = tp */
	xorl	%eax, %eax		/* assume not first: return 0 */
	movl	$0xdefacedd, %edx	/* trigger patch value */
	lock
	xchgl	%edx, (%ecx)		/* %edx = old *tp; *tp = 0xdefacedd */
	cmpl	$0, %edx		/* was the trigger untripped? */
	je	0f
	movl	$0, %eax		/* already tripped: return 0 */
	ret
0:	movl	$1, %eax		/* we tripped it first: return 1 */
	ret
	END(panic_trigger)

/*
int
dtrace_panic_trigger(int *tp)

	Probe-context variant of panic_trigger(); same contract and the
	same stack-argument fix (was dereferencing uninitialized %edi).
*/
	ENTRY(dtrace_panic_trigger)
	movl	4(%esp), %ecx		/* %ecx = tp */
	xorl	%eax, %eax		/* assume not first: return 0 */
	movl	$0xdefacedd, %edx	/* trigger patch value */
	lock
	xchgl	%edx, (%ecx)		/* %edx = old *tp; *tp = 0xdefacedd */
	cmpl	$0, %edx		/* was the trigger untripped? */
	je	0f
	movl	$0, %eax		/* already tripped: return 0 */
	ret
0:	movl	$1, %eax		/* we tripped it first: return 1 */
	ret
	END(dtrace_panic_trigger)