dtrace_asm.S revision 1.4
/*	$NetBSD: dtrace_asm.S,v 1.4 2016/05/14 21:19:05 chs Exp $	*/

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD: src/sys/cddl/dev/dtrace/i386/dtrace_asm.S,v 1.1.4.1 2009/08/03 08:13:06 kensmith Exp $
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#define _ASM

#include <sys/cpuvar_defs.h>
#include <sys/dtrace.h>
#include <machine/asm.h>
#include <machine/frameasm.h>

/*
 * dtrace_invop_start: invalid-opcode trap path for FBT probes.
 *
 * The FBT provider patches probe sites with an illegal instruction; the
 * trap glue vectors here (via dtrace_invop_jump_addr, set below).  We call
 * dtrace_invop() to ask whether the fault belongs to DTrace, and if so
 * emulate the single overwritten instruction (pushl %ebp / popl %ebp /
 * leave / nop) directly on the interrupted stack frame, then iret.
 *
 * NOTE(review): the $48 and 40(%esp) offsets below encode the exact layout
 * of the frame built by the trap glue before jumping here (pusha plus the
 * hardware EIP/CS/EFLAGS) -- confirm against the i386 trap vector if that
 * glue ever changes.
 */
	ENTRY(dtrace_invop_start)

	pushl	%eax			/* push %eax -- may be return value */
	pushl	%esp			/* push stack pointer */
	addl	$48, (%esp)		/* adjust to incoming args */
	pushl	40(%esp)		/* push calling EIP */

	/*
	 * Call dtrace_invop to let it check if the exception was
	 * a fbt one. The return value in %eax will tell us what
	 * dtrace_invop wants us to do.
	 */
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addl	$12, %esp		/* drop the three args pushed above */
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	invop_push
	cmpl	$DTRACE_INVOP_POPL_EBP, %eax
	je	invop_pop
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	invop_leave
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	invop_nop

	/* When all else fails handle the trap in the usual way. */
	jmpl	*dtrace_invop_calltrap_addr

invop_push:
	/*
	 * We must emulate a "pushl %ebp". To do this, we pull the stack
	 * down 4 bytes, and then store the base pointer.
	 *
	 * After popal, %esp points at the hardware trap frame:
	 * 0(%esp)=EIP, 4(%esp)=CS, 8(%esp)=EFLAGS.  We shift all three
	 * down one slot and store %ebp in the freed top slot so that,
	 * after iret, the interrupted stack looks as if "pushl %ebp"
	 * had executed.
	 */
	popal
	subl	$4, %esp		/* make room for %ebp */
	pushl	%eax			/* push temp */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 4(%esp)		/* store calling EIP */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, 8(%esp)		/* store calling CS */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 12(%esp)		/* store calling EFLAGS */
	movl	%ebp, 16(%esp)		/* push %ebp */
	popl	%eax			/* pop off temp */
	iret				/* Return from interrupt. */
invop_pop:
	/*
	 * We must emulate a "popl %ebp". To do this, we do the opposite of
	 * the above: we remove the %ebp from the stack, and squeeze up the
	 * saved state from the trap.
	 */
	popal
	pushl	%eax			/* push temp */
	movl	16(%esp), %ebp		/* pop %ebp */
	movl	12(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 16(%esp)		/* store calling EFLAGS */
	movl	8(%esp), %eax		/* load calling CS */
	movl	%eax, 12(%esp)		/* store calling CS */
	movl	4(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 8(%esp)		/* store calling EIP */
	popl	%eax			/* pop off temp */
	addl	$4, %esp		/* adjust stack pointer */
	iret				/* Return from interrupt. */
invop_leave:
	/*
	 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
	 * followed by a "popl %ebp". This looks similar to the above, but
	 * requires two temporaries: one for the new base pointer, and one
	 * for the staging register.
	 *
	 * The trap frame (EIP/CS/EFLAGS) is rebuilt at the top of the
	 * *new* stack (just below the old %ebp) so that iret resumes with
	 * %esp where "leave" would have left it.
	 *
	 * NOTE(review): the value stashed at -4(%esp) is read back at
	 * -12(%esp) after the two popl's below; this relies on nothing
	 * clobbering the slot below %esp in between (interrupts are off
	 * on the trap path) -- confirm if this code is ever run with
	 * interrupts enabled.
	 */
	popa
	pushl	%eax			/* push temp */
	pushl	%ebx			/* push temp */
	movl	%ebp, %ebx		/* set temp to old %ebp */
	movl	(%ebx), %ebp		/* pop %ebp */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, (%ebx)		/* store calling EFLAGS */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, -4(%ebx)		/* store calling CS */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, -8(%ebx)		/* store calling EIP */
	movl	%ebx, -4(%esp)		/* temporarily store new %esp */
	popl	%ebx			/* pop off temp */
	popl	%eax			/* pop off temp */
	movl	-12(%esp), %esp		/* set stack pointer */
	subl	$8, %esp		/* adjust for three pushes, one pop */
	iret				/* return from interrupt */
invop_nop:
	/*
	 * We must emulate a "nop". This is obviously not hard: we need only
	 * advance the %eip by one.
	 */
	popa
	incl	(%esp)			/* bump saved EIP past the 1-byte probe */
	iret				/* return from interrupt */

	END(dtrace_invop_start)

/*
void dtrace_invop_init(void)
	Hook the invop handler above into the trap path.
*/
	ENTRY(dtrace_invop_init)
	movl	$dtrace_invop_start, dtrace_invop_jump_addr
	ret
	END(dtrace_invop_init)

/*
void dtrace_invop_uninit(void)
	Unhook the invop handler (NULL disables DTrace handling).
*/
	ENTRY(dtrace_invop_uninit)
	movl	$0, dtrace_invop_jump_addr
	ret
	END(dtrace_invop_uninit)

/*
greg_t dtrace_getfp(void)
	Return the current frame pointer.
*/

	ENTRY(dtrace_getfp)
	movl	%ebp, %eax
	ret
	END(dtrace_getfp)

/*
uint32_t dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
	Atomic compare-and-swap; returns the previous value of *target.
*/

	ENTRY(dtrace_cas32)
	movl	4(%esp), %edx		/* %edx = target */
	movl	8(%esp), %eax		/* %eax = cmp (cmpxchg compares %eax) */
	movl	12(%esp), %ecx		/* %ecx = new */
	lock
	cmpxchgl %ecx, (%edx)		/* old value lands in %eax either way */
	ret
	END(dtrace_cas32)

/*
uint32_t dtrace_casptr(uint32_t *target, uint32_t cmp, uint32_t new)
	Pointer-sized CAS; identical to dtrace_cas32 on ILP32 i386.
*/

	ENTRY(dtrace_casptr)
	movl	4(%esp), %edx		/* %edx = target */
	movl	8(%esp), %eax		/* %eax = cmp */
	movl	12(%esp), %ecx		/* %ecx = new */
	lock
	cmpxchgl %ecx, (%edx)
	ret
	END(dtrace_casptr)


/*
uintptr_t dtrace_caller(int aframes)
	Always returns -1; presumably a "not implemented, use the generic
	stack walker" sentinel on this port -- TODO confirm against the
	MI dtrace_caller() consumers.
*/

	ENTRY(dtrace_caller)
	movl	$-1, %eax
	ret
	END(dtrace_caller)

/*
void dtrace_copy(uintptr_t src, uintptr_t dest, size_t size)
	Byte-wise copy of `size' bytes; no overlap handling, no fault
	protection here (callers set up flt handling -- TODO confirm).
*/

	ENTRY(dtrace_copy)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%esi			/* %esi/%edi are callee-saved */
	pushl	%edi

	movl	8(%ebp), %esi		/* Load source address */
	movl	12(%ebp), %edi		/* Load destination address */
	movl	16(%ebp), %ecx		/* Load count */
	repz				/* Repeat for count... */
	smovb				/* move from %ds:si to %es:di */

	popl	%edi
	popl	%esi
	movl	%ebp, %esp
	popl	%ebp
	ret
	END(dtrace_copy)

/*
void dtrace_copystr(uintptr_t uaddr, uintptr_t kaddr, size_t size)
	Copy bytes from uaddr to kaddr until a NUL byte has been copied
	or `size' bytes have been transferred, whichever comes first.
	The terminating NUL (if reached) is copied before stopping.
*/

	ENTRY(dtrace_copystr)

	pushl	%ebp			/* Setup stack frame */
	movl	%esp, %ebp
	pushl	%ebx			/* Save registers */

	movl	8(%ebp), %ebx		/* Load source address */
	movl	12(%ebp), %edx		/* Load destination address */
	movl	16(%ebp), %ecx		/* Load count */

0:
	movb	(%ebx), %al		/* Load from source */
	movb	%al, (%edx)		/* Store to destination */
	incl	%ebx			/* Increment source pointer */
	incl	%edx			/* Increment destination pointer */
	decl	%ecx			/* Decrement remaining count */
	cmpb	$0, %al			/* copied the NUL terminator? */
	je	1f
	cmpl	$0, %ecx		/* budget exhausted? */
	jne	0b

1:
	popl	%ebx
	movl	%ebp, %esp
	popl	%ebp
	ret

	END(dtrace_copystr)

/*
uintptr_t dtrace_fulword(void *addr)
	Fetch a word from `addr' (no access check at this layer).
*/

	ENTRY(dtrace_fulword)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	movl	(%ecx), %eax
	ret
	END(dtrace_fulword)

/*
uint8_t dtrace_fuword8_nocheck(void *addr)
	Fetch a zero-extended byte from `addr'.
*/

	ENTRY(dtrace_fuword8_nocheck)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	movzbl	(%ecx), %eax
	ret
	END(dtrace_fuword8_nocheck)

/*
uint16_t dtrace_fuword16_nocheck(void *addr)
	Fetch a zero-extended 16-bit word from `addr'.
*/

	ENTRY(dtrace_fuword16_nocheck)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	movzwl	(%ecx), %eax
	ret
	END(dtrace_fuword16_nocheck)

/*
uint32_t dtrace_fuword32_nocheck(void *addr)
	Fetch a 32-bit word from `addr'.
*/

	ENTRY(dtrace_fuword32_nocheck)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	movl	(%ecx), %eax
	ret
	END(dtrace_fuword32_nocheck)

/*
uint64_t dtrace_fuword64_nocheck(void *addr)
	Fetch a 64-bit value from `addr'; i386 cdecl returns 64-bit
	results in %edx:%eax.  The two 32-bit loads are NOT atomic as a
	pair.
*/

	ENTRY(dtrace_fuword64_nocheck)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	xorl	%edx, %edx
	movl	(%ecx), %eax		/* low 32 bits */
	movl	4(%ecx), %edx		/* high 32 bits */
	ret
	END(dtrace_fuword64_nocheck)

/*
void
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which, int fault, int fltoffs, uintptr_t illval) 314*/ 315 316 ENTRY(dtrace_probe_error) 317 pushl %ebp 318 movl %esp, %ebp 319 pushl 0x1c(%ebp) 320 pushl 0x18(%ebp) 321 pushl 0x14(%ebp) 322 pushl 0x10(%ebp) 323 pushl 0xc(%ebp) 324 pushl 0x8(%ebp) 325 pushl dtrace_probeid_error 326 call dtrace_probe 327 movl %ebp, %esp 328 popl %ebp 329 ret 330 END(dtrace_probe_error) 331 332/* 333void dtrace_membar_producer(void) 334*/ 335 336 ENTRY(dtrace_membar_producer) 337 rep; ret /* use 2 byte return instruction when branch target */ 338 /* AMD Software Optimization Guide - Section 6.2 */ 339 END(dtrace_membar_producer) 340 341/* 342void dtrace_membar_consumer(void) 343*/ 344 345 ENTRY(dtrace_membar_consumer) 346 rep; ret /* use 2 byte return instruction when branch target */ 347 /* AMD Software Optimization Guide - Section 6.2 */ 348 END(dtrace_membar_consumer) 349 350/* 351dtrace_icookie_t dtrace_interrupt_disable(void) 352*/ 353 ENTRY(dtrace_interrupt_disable) 354 pushfl 355 popl %eax 356 cli 357 ret 358 END(dtrace_interrupt_disable) 359 360/* 361void dtrace_interrupt_enable(dtrace_icookie_t cookie) 362*/ 363 ENTRY(dtrace_interrupt_enable) 364 movl 4(%esp), %eax 365 pushl %eax 366 popfl 367 ret 368 END(dtrace_interrupt_enable) 369 370/* 371 * The panic() and cmn_err() functions invoke vpanic() as a common entry point 372 * into the panic code implemented in panicsys(). vpanic() is responsible 373 * for passing through the format string and arguments, and constructing a 374 * regs structure on the stack into which it saves the current register 375 * values. If we are not dying due to a fatal trap, these registers will 376 * then be preserved in panicbuf as the current processor state. Before 377 * invoking panicsys(), vpanic() activates the first panic trigger (see 378 * common/os/panic.c) and switches to the panic_stack if successful. 
Note that 379 * DTrace takes a slightly different panic path if it must panic from probe 380 * context. Instead of calling panic, it calls into dtrace_vpanic(), which 381 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and 382 * branches back into vpanic(). 383 */ 384/* 385void vpanic(const char *format, va_list alist) 386*/ 387 ENTRY(vpanic) /* Initial stack layout: */ 388 389 pushl %ebp /* | %eip | 20 */ 390 movl %esp, %ebp /* | %ebp | 16 */ 391 pushl %eax /* | %eax | 12 */ 392 pushl %ebx /* | %ebx | 8 */ 393 pushl %ecx /* | %ecx | 4 */ 394 pushl %edx /* | %edx | 0 */ 395 396 movl %esp, %ebx /* %ebx = current stack pointer */ 397 398 lea panic_quiesce, %eax /* %eax = &panic_quiesce */ 399 pushl %eax /* push &panic_quiesce */ 400 call panic_trigger /* %eax = panic_trigger() */ 401 addl $4, %esp /* reset stack pointer */ 402 403vpanic_common: 404 cmpl $0, %eax /* if (%eax == 0) */ 405 je 0f /* goto 0f; */ 406 407 /* 408 * If panic_trigger() was successful, we are the first to initiate a 409 * panic: we now switch to the reserved panic_stack before continuing. 410 */ 411 lea panic_stack, %esp /* %esp = panic_stack */ 412 addl $PANICSTKSIZE, %esp /* %esp += PANICSTKSIZE */ 413 4140: subl $REGSIZE, %esp /* allocate struct regs */ 415 416 /* 417 * Now that we've got everything set up, store the register values as 418 * they were when we entered vpanic() to the designated location in 419 * the regs structure we allocated on the stack. 
420 */ 421#ifdef notyet 422 mov %gs, %edx 423 mov %edx, REGOFF_GS(%esp) 424 mov %fs, %edx 425 mov %edx, REGOFF_FS(%esp) 426 mov %es, %edx 427 mov %edx, REGOFF_ES(%esp) 428 mov %ds, %edx 429 mov %edx, REGOFF_DS(%esp) 430 movl %edi, REGOFF_EDI(%esp) 431 movl %esi, REGOFF_ESI(%esp) 432 movl 16(%ebx), %ecx 433 movl %ecx, REGOFF_EBP(%esp) 434 movl %ebx, %ecx 435 addl $20, %ecx 436 movl %ecx, REGOFF_ESP(%esp) 437 movl 8(%ebx), %ecx 438 movl %ecx, REGOFF_EBX(%esp) 439 movl 0(%ebx), %ecx 440 movl %ecx, REGOFF_EDX(%esp) 441 movl 4(%ebx), %ecx 442 movl %ecx, REGOFF_ECX(%esp) 443 movl 12(%ebx), %ecx 444 movl %ecx, REGOFF_EAX(%esp) 445 movl $0, REGOFF_TRAPNO(%esp) 446 movl $0, REGOFF_ERR(%esp) 447 lea vpanic, %ecx 448 movl %ecx, REGOFF_EIP(%esp) 449 mov %cs, %edx 450 movl %edx, REGOFF_CS(%esp) 451 pushfl 452 popl %ecx 453 movl %ecx, REGOFF_EFL(%esp) 454 movl $0, REGOFF_UESP(%esp) 455 mov %ss, %edx 456 movl %edx, REGOFF_SS(%esp) 457 458 movl %esp, %ecx /* %ecx = ®s */ 459 pushl %eax /* push on_panic_stack */ 460 pushl %ecx /* push ®s */ 461 movl 12(%ebp), %ecx /* %ecx = alist */ 462 pushl %ecx /* push alist */ 463 movl 8(%ebp), %ecx /* %ecx = format */ 464 pushl %ecx /* push format */ 465 call panicsys /* panicsys(); */ 466 addl $16, %esp /* pop arguments */ 467 468 addl $REGSIZE, %esp 469#endif 470 popl %edx 471 popl %ecx 472 popl %ebx 473 popl %eax 474 leave 475 ret 476 END(vpanic) 477 478/* 479void dtrace_vpanic(const char *format, va_list alist) 480*/ 481 ENTRY(dtrace_vpanic) /* Initial stack layout: */ 482 483 pushl %ebp /* | %eip | 20 */ 484 movl %esp, %ebp /* | %ebp | 16 */ 485 pushl %eax /* | %eax | 12 */ 486 pushl %ebx /* | %ebx | 8 */ 487 pushl %ecx /* | %ecx | 4 */ 488 pushl %edx /* | %edx | 0 */ 489 490 movl %esp, %ebx /* %ebx = current stack pointer */ 491 492 lea panic_quiesce, %eax /* %eax = &panic_quiesce */ 493 pushl %eax /* push &panic_quiesce */ 494 call dtrace_panic_trigger /* %eax = dtrace_panic_trigger() */ 495 addl $4, %esp /* reset stack pointer */ 496 
jmp vpanic_common /* jump back to common code */ 497 498 END(dtrace_vpanic) 499 500/* 501int 502panic_trigger(int *tp) 503*/ 504 ENTRY(panic_trigger) 505 xorl %eax, %eax 506 movl $0xdefacedd, %edx 507 lock 508 xchgl %edx, (%edi) 509 cmpl $0, %edx 510 je 0f 511 movl $0, %eax 512 ret 5130: movl $1, %eax 514 ret 515 END(panic_trigger) 516 517/* 518int 519dtrace_panic_trigger(int *tp) 520*/ 521 ENTRY(dtrace_panic_trigger) 522 xorl %eax, %eax 523 movl $0xdefacedd, %edx 524 lock 525 xchgl %edx, (%edi) 526 cmpl $0, %edx 527 je 0f 528 movl $0, %eax 529 ret 5300: movl $1, %eax 531 ret 532 END(dtrace_panic_trigger) 533