/*
 * Kernel Probes (KProbes)
 * arch/i386/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/kdebug.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/uaccess.h>

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/* insert a jmp code */
static __always_inline void set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		long raddr;
	} __attribute__((packed)) *jop;
	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (long)(to) - ((long)(from) + 5);
	jop->op = RELATIVEJUMP_INSTRUCTION;
}
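/*
 * Illustrative sketch (not part of the original file): on i386,
 * RELATIVEJUMP_INSTRUCTION is the 5-byte "jmp rel32" opcode 0xe9, whose
 * displacement is counted from the end of the instruction -- hence the
 * "+ 5" in set_jmp_op() above.  The target address below is a made-up
 * example value.
 */
#if 0	/* example only, never compiled */
static void example_emit_reljmp(void)
{
	unsigned char buf[5];
	void *from = buf;
	void *to = (void *)0xc0100000UL;		/* hypothetical target */
	long disp = (long)to - ((long)from + 5);	/* same formula as set_jmp_op() */

	buf[0] = 0xe9;				/* RELATIVEJUMP_INSTRUCTION */
	memcpy(&buf[1], &disp, sizeof(disp));	/* little-endian rel32 */
}
#endif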
/*
 * returns non-zero if opcodes can be boosted.
 */
static __always_inline int can_boost(kprobe_opcode_t *opcodes)
{
#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf)		      \
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
	/*
	 * Undefined/reserved opcodes, conditional jump, Opcode Extension
	 * Groups, and some special opcodes cannot be boosted.
	 */
	static const unsigned long twobyte_is_boostable[256 / 32] = {
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
		/*      -------------------------------         */
		W(0x00, 0,0,1,1,0,0,1,0,1,1,0,0,0,0,0,0)| /* 00 */
		W(0x10, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 10 */
		W(0x20, 1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0)| /* 20 */
		W(0x30, 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 30 */
		W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 40 */
		W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 50 */
		W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1)| /* 60 */
		W(0x70, 0,0,0,0,1,1,1,1,0,0,0,0,0,0,1,1), /* 70 */
		W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 80 */
		W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1), /* 90 */
		W(0xa0, 1,1,0,1,1,1,0,0,1,1,0,1,1,1,0,1)| /* a0 */
		W(0xb0, 1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1), /* b0 */
		W(0xc0, 1,1,0,0,0,0,0,0,1,1,1,1,1,1,1,1)| /* c0 */
		W(0xd0, 0,1,1,1,0,1,0,0,1,1,0,1,1,1,0,1), /* d0 */
		W(0xe0, 0,1,1,0,0,1,0,0,1,1,0,1,1,1,0,1)| /* e0 */
		W(0xf0, 0,1,1,1,0,1,0,0,1,1,1,0,1,1,1,0)  /* f0 */
		/*      -------------------------------         */
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
	};
#undef W
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;
retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes, twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry;	/* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0;	/* can't boost conditional jump */
	case 0xc0:
		/* can't boost software-interruptions */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry;	/* lock/rep(ne) prefix */
		/* clear and set flags can be boosted */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry;	/* prefixes */
		/* can't boost CS override and call */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
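/*
 * Illustrative sketch (not part of the original file): for a two-byte
 * opcode, can_boost() indexes twobyte_is_boostable directly by the
 * second byte.  E.g. "cmovae %eax,%ebx" encodes as 0x0f 0x43 0xd8, and
 * the 0x40 row above is all ones, so a probe on it may be boosted.
 */
#if 0	/* example only, never compiled */
static int example_can_boost_usage(void)
{
	kprobe_opcode_t insn[] = { 0x0f, 0x43, 0xd8 };	/* cmovae %eax,%ebx */

	return can_boost(insn);		/* non-zero: boostable */
}
#endif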
/*
 * returns non-zero if opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
{
	switch (opcode) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}
	return 0;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on i386. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
	if (can_boost(p->addr)) {
		p->ainsn.boostable = 0;
	} else {
		p->ainsn.boostable = -1;
	}
	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
	mutex_unlock(&kprobe_mutex);
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
	kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
	kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
		= (regs->eflags & (TF_MASK | IF_MASK));
	if (is_IF_modifier(p->opcode))
		kcb->kprobe_saved_eflags &= ~IF_MASK;
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->eflags |= TF_MASK;
	regs->eflags &= ~IF_MASK;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->eip = (unsigned long)p->addr;
	else
		regs->eip = (unsigned long)p->ainsn.insn;
}
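/*
 * Illustrative sketch (not part of the original file): the arch hooks
 * above back the generic register_kprobe() API.  A minimal client
 * module might look like this; the probed symbol and the message are
 * hypothetical.
 */
#if 0	/* example only, never compiled */
#include <linux/module.h>
#include <linux/kprobes.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p, eip=%lx\n", p->addr, regs->eip);
	return 0;	/* 0: go on to single-step the copied insn */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",	/* hypothetical probe target */
	.pre_handler	= example_pre,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif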
243 */ 244static int __kprobes kprobe_handler(struct pt_regs *regs) 245{ 246 struct kprobe *p; 247 int ret = 0; 248 kprobe_opcode_t *addr; 249 struct kprobe_ctlblk *kcb; 250 251 addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t)); 252 253 /* 254 * We don't want to be preempted for the entire 255 * duration of kprobe processing 256 */ 257 preempt_disable(); 258 kcb = get_kprobe_ctlblk(); 259 260 /* Check we're not actually recursing */ 261 if (kprobe_running()) { 262 p = get_kprobe(addr); 263 if (p) { 264 if (kcb->kprobe_status == KPROBE_HIT_SS && 265 *p->ainsn.insn == BREAKPOINT_INSTRUCTION) { 266 regs->eflags &= ~TF_MASK; 267 regs->eflags |= kcb->kprobe_saved_eflags; 268 goto no_kprobe; 269 } 270 /* We have reentered the kprobe_handler(), since 271 * another probe was hit while within the handler. 272 * We here save the original kprobes variables and 273 * just single step on the instruction of the new probe 274 * without calling any user handlers. 275 */ 276 save_previous_kprobe(kcb); 277 set_current_kprobe(p, regs, kcb); 278 kprobes_inc_nmissed_count(p); 279 prepare_singlestep(p, regs); 280 kcb->kprobe_status = KPROBE_REENTER; 281 return 1; 282 } else { 283 if (*addr != BREAKPOINT_INSTRUCTION) { 284 /* The breakpoint instruction was removed by 285 * another cpu right after we hit, no further 286 * handling of this interrupt is appropriate 287 */ 288 regs->eip -= sizeof(kprobe_opcode_t); 289 ret = 1; 290 goto no_kprobe; 291 } 292 p = __get_cpu_var(current_kprobe); 293 if (p->break_handler && p->break_handler(p, regs)) { 294 goto ss_probe; 295 } 296 } 297 goto no_kprobe; 298 } 299 300 p = get_kprobe(addr); 301 if (!p) { 302 if (*addr != BREAKPOINT_INSTRUCTION) { 303 /* 304 * The breakpoint instruction was removed right 305 * after we hit it. Another cpu has removed 306 * either a probepoint or a debugger breakpoint 307 * at this address. In either case, no further 308 * handling of this interrupt is appropriate. 309 * Back up over the (now missing) int3 and run 310 * the original instruction. 311 */ 312 regs->eip -= sizeof(kprobe_opcode_t); 313 ret = 1; 314 } 315 /* Not one of ours: let kernel handle it */ 316 goto no_kprobe; 317 } 318 319 set_current_kprobe(p, regs, kcb); 320 kcb->kprobe_status = KPROBE_HIT_ACTIVE; 321 322 if (p->pre_handler && p->pre_handler(p, regs)) 323 /* handler has already set things up, so skip ss setup */ 324 return 1; 325 326ss_probe: 327#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM) 328 if (p->ainsn.boostable == 1 && !p->post_handler){ 329 /* Boost up -- we can execute copied instructions directly */ 330 reset_current_kprobe(); 331 regs->eip = (unsigned long)p->ainsn.insn; 332 preempt_enable_no_resched(); 333 return 1; 334 } 335#endif 336 prepare_singlestep(p, regs); 337 kcb->kprobe_status = KPROBE_HIT_SS; 338 return 1; 339 340no_kprobe: 341 preempt_enable_no_resched(); 342 return ret; 343} 344 345/* 346 * For function-return probes, init_kprobes() establishes a probepoint 347 * here. When a retprobed function returns, this probe is hit and 348 * trampoline_probe_handler() runs, calling the kretprobe's handler. 
349 */ 350 void __kprobes kretprobe_trampoline_holder(void) 351 { 352 asm volatile ( ".global kretprobe_trampoline\n" 353 "kretprobe_trampoline: \n" 354 " pushf\n" 355 /* skip cs, eip, orig_eax */ 356 " subl $12, %esp\n" 357 " pushl %fs\n" 358 " pushl %ds\n" 359 " pushl %es\n" 360 " pushl %eax\n" 361 " pushl %ebp\n" 362 " pushl %edi\n" 363 " pushl %esi\n" 364 " pushl %edx\n" 365 " pushl %ecx\n" 366 " pushl %ebx\n" 367 " movl %esp, %eax\n" 368 " call trampoline_handler\n" 369 /* move eflags to cs */ 370 " movl 52(%esp), %edx\n" 371 " movl %edx, 48(%esp)\n" 372 /* save true return address on eflags */ 373 " movl %eax, 52(%esp)\n" 374 " popl %ebx\n" 375 " popl %ecx\n" 376 " popl %edx\n" 377 " popl %esi\n" 378 " popl %edi\n" 379 " popl %ebp\n" 380 " popl %eax\n" 381 /* skip eip, orig_eax, es, ds, fs */ 382 " addl $20, %esp\n" 383 " popf\n" 384 " ret\n"); 385} 386 387/* 388 * Called from kretprobe_trampoline 389 */ 390fastcall void *__kprobes trampoline_handler(struct pt_regs *regs) 391{ 392 struct kretprobe_instance *ri = NULL; 393 struct hlist_head *head, empty_rp; 394 struct hlist_node *node, *tmp; 395 unsigned long flags, orig_ret_address = 0; 396 unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; 397 398 INIT_HLIST_HEAD(&empty_rp); 399 spin_lock_irqsave(&kretprobe_lock, flags); 400 head = kretprobe_inst_table_head(current); 401 /* fixup registers */ 402 regs->xcs = __KERNEL_CS | get_kernel_rpl(); 403 regs->eip = trampoline_address; 404 regs->orig_eax = 0xffffffff; 405 406 /* 407 * It is possible to have multiple instances associated with a given 408 * task either because an multiple functions in the call path 409 * have a return probe installed on them, and/or more then one return 410 * return probe was registered for a target function. 411 * 412 * We can handle this because: 413 * - instances are always inserted at the head of the list 414 * - when multiple return probes are registered for the same 415 * function, the first instance's ret_addr will point to the 416 * real return address, and all the rest will point to 417 * kretprobe_trampoline 418 */ 419 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 420 if (ri->task != current) 421 /* another task is sharing our hash bucket */ 422 continue; 423 424 if (ri->rp && ri->rp->handler){ 425 __get_cpu_var(current_kprobe) = &ri->rp->kp; 426 get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; 427 ri->rp->handler(ri, regs); 428 __get_cpu_var(current_kprobe) = NULL; 429 } 430 431 orig_ret_address = (unsigned long)ri->ret_addr; 432 recycle_rp_inst(ri, &empty_rp); 433 434 if (orig_ret_address != trampoline_address) 435 /* 436 * This is the real return address. Any other 437 * instances associated with this task are for 438 * other calls deeper on the call stack 439 */ 440 break; 441 } 442 443 kretprobe_assert(ri, orig_ret_address, trampoline_address); 444 spin_unlock_irqrestore(&kretprobe_lock, flags); 445 446 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 447 hlist_del(&ri->hlist); 448 kfree(ri); 449 } 450 return (void*)orig_ret_address; 451} 452 453/* 454 * Called after single-stepping. p->addr is the address of the 455 * instruction whose first byte has been replaced by the "int 3" 456 * instruction. To avoid the SMP problems that can occur when we 457 * temporarily put back the original opcode to single-step, we 458 * single-stepped a copy of the instruction. The address of this 459 * copy is p->ainsn.insn. 460 * 461 * This function prepares to return from the post-single-step 462 * interrupt. 
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new eip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed eflags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * This function also checks instruction size for preparing direct execution.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = (unsigned long *)&regs->esp;
	unsigned long copy_eip = (unsigned long)p->ainsn.insn;
	unsigned long orig_eip = (unsigned long)p->addr;

	regs->eflags &= ~TF_MASK;
	switch (p->ainsn.insn[0]) {
	case 0x9c:		/* pushfl */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= kcb->kprobe_old_eflags;
		break;
	case 0xc2:		/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:		/* jmp absolute -- eip is correct */
		/* eip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:		/* call relative - Fix return addr */
		*tos = orig_eip + (*tos - copy_eip);
		break;
	case 0x9a:		/* call absolute -- same as call absolute, indirect */
		*tos = orig_eip + (*tos - copy_eip);
		goto no_change;
	case 0xff:
		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; eip is correct.
			 * But this is not boostable
			 */
			*tos = orig_eip + (*tos - copy_eip);
			goto no_change;
		} else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
			   ((p->ainsn.insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
			/* eip is correct. And this is boostable */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->eip > copy_eip) &&
		    (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
			/*
			 * This instruction can be executed directly if it
			 * jumps back to the correct address.
			 */
			set_jmp_op((void *)regs->eip,
				   (void *)orig_eip + (regs->eip - copy_eip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->eip = orig_eip + (regs->eip - copy_eip);

no_change:
	return;
}
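/*
 * Illustrative sketch (not part of the original file): for the common
 * case (step 0 above), the fix-up is a single offset translation.  The
 * addresses are made-up example values: with the probed instruction at
 * 0xc0123450, its copy at 0xc09fe000, and a 2-byte instruction, the
 * trap leaves regs->eip == 0xc09fe002, which maps back to 0xc0123452.
 */
#if 0	/* example only, never compiled */
static unsigned long example_fixup_eip(unsigned long orig_eip,
				       unsigned long copy_eip,
				       unsigned long stepped_eip)
{
	return orig_eip + (stepped_eip - copy_eip);	/* as in resume_execution() */
}
#endif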
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);
	regs->eflags |= kcb->kprobe_saved_eflags;

	/* Restore the original saved kprobe variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, eflags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->eflags & TF_MASK)
		return 0;

	return 1;
}

static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch(kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the eip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->eip = (unsigned long)cur->addr;
		regs->eflags |= kcb->kprobe_old_eflags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode_vm(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_GPF:
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
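/*
 * Illustrative sketch (not part of the original file): the jprobe
 * support below is used through register_jprobe().  The entry handler
 * must mirror the probed function's signature and must end in
 * jprobe_return(); the do_fork() prototype shown is this era's, and
 * the probe target is hypothetical.
 */
#if 0	/* example only, never compiled */
static long example_jdo_fork(unsigned long clone_flags,
			     unsigned long stack_start,
			     struct pt_regs *regs,
			     unsigned long stack_size,
			     int __user *parent_tidptr,
			     int __user *child_tidptr)
{
	printk(KERN_INFO "do_fork(flags=%lx)\n", clone_flags);
	jprobe_return();	/* mandatory: never returns normally */
	return 0;		/* unreachable */
}

static struct jprobe example_jp = {
	.entry	= JPROBE_ENTRY(example_jdo_fork),
	.kp	= { .symbol_name = "do_fork" },	/* hypothetical */
};

/* register_jprobe(&example_jp) from module init */
#endif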
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_esp = &regs->esp;
	addr = (unsigned long)(kcb->jprobe_saved_esp);

	/*
	 * TBD: As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization.  So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->eflags &= ~IF_MASK;
	regs->eip = (unsigned long)(jp->entry);
	return 1;
}

void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile ("       xchgl   %%ebx,%%esp     \n"
		      "       int3			\n"
		      "       .globl jprobe_return_end	\n"
		      "       jprobe_return_end:	\n"
		      "       nop			\n"::"b"
		      (kcb->jprobe_saved_esp):"memory");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->eip - 1);
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
		if (&regs->esp != kcb->jprobe_saved_esp) {
			struct pt_regs *saved_regs =
				container_of(kcb->jprobe_saved_esp,
						struct pt_regs, esp);
			printk("current esp %p does not match saved esp %p\n",
			       &regs->esp, kcb->jprobe_saved_esp);
			printk("Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk("Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}