#endif

#if defined(sun)
#define	curcpu	CPU->cpu_id
#endif


/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, modctl_t *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

#define	DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#if defined(sun)
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#else
#define	DTRACE_TLS_THRKEY(where) { \
	solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
	uint_t intr = 0; \
	uint_t actv = _c->cpu_intr_actv; \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#endif

#define	DT_BSWAP_8(x)	((x) & 0xff)
#define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define	DT_MASK_LO 0x00000000FFFFFFFFULL

#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __x86
#define	DTRACE_ALIGNCHECK(addr, size, flags)				\
	if (addr & (size - 1)) {					\
		*flags |= CPU_DTRACE_BADALIGN;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by addr, sz.  We take care to avoid
 * problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes.  Ranges of size 0 are allowed.
 */
#define	DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz)	\
	((testaddr) - (baseaddr) < (basesz) &&			\
	(testaddr) + (testsz) - (baseaddr) <= (basesz) &&	\
	(testaddr) + (testsz) >= (testaddr))
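
/*
 * For illustration, a minimal user-land sketch of the predicate above
 * (illustrative only, not part of the implementation; inrange() is a
 * hypothetical name):
 *
 *	static int
 *	inrange(uint64_t testaddr, uint64_t testsz,
 *	    uint64_t baseaddr, uint64_t basesz)
 *	{
 *		return (testaddr - baseaddr < basesz &&
 *		    testaddr + testsz - baseaddr <= basesz &&
 *		    testaddr + testsz >= testaddr);
 *	}
 *
 * inrange(0x1000, 0x10, 0x1000, 0x100) yields 1; a wrapping request such
 * as inrange(~0ULL, 2, 0x1000, 0x100) is rejected by the final clause,
 * and a testaddr below baseaddr fails the first clause because the
 * unsigned difference becomes enormous rather than negative.
 */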

/*
 * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it.  This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range.  Allocations of size zero are allowed.
 */
#define	DTRACE_INSCRATCH(mstate, alloc_sz)	\
	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr >= (alloc_sz))

#define	DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[curcpu].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);		\
}

#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif

#define	DTRACE_DYNHASH_FREE	0
#define	DTRACE_DYNHASH_SINK	1
#define	DTRACE_DYNHASH_VALID	2

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

#define	DTRACE_FLAGS2FLT(flags)						\
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
	((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW :		\
	((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN :		\
	((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH :	\
	((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK :		\
	DTRACEFLT_UNKNOWN)

#define	DTRACEACT_ISSTRING(act)						\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static void dtrace_enabling_reap(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note:  not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.  If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors.  (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}
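
/*
 * The loop above is the standard lock-free increment:  reread, compute,
 * and retry until the compare-and-swap observes the value we computed
 * from.  A minimal sketch of the same pattern (illustrative only; bump()
 * is a hypothetical name):
 *
 *	static void
 *	bump(uint32_t *counter)
 *	{
 *		uint32_t o, n;
 *
 *		do {
 *			o = *counter;
 *			n = (o + 1 == 0) ? 1 : o + 1;
 *		} while (dtrace_cas32(counter, o, n) != o);
 *	}
 *
 * The CAS fails (and the loop retries) only if another CPU updated the
 * counter between our load and our swap; the wrap-to-1 step keeps the
 * counter non-zero once any error has been recorded.
 */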

/*
 * Use the DTRACE_LOADFUNC macro to define functions for loading a uint8_t,
 * a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];

		if (svar == NULL || svar->dtsv_size == 0)
			continue;

		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
			return (1);
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
 * region.  The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	/*
	 * First, check to see if the address is in scratch space...
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
	    mstate->dtms_scratch_size))
		return (1);

	/*
	 * Now check to see if it's a dynamic variable.  This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
	    vstate->dtvs_dynvars.dtds_size)) {
		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
		uintptr_t base = (uintptr_t)dstate->dtds_base +
		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
		uintptr_t chunkoffs;

		/*
		 * Before we assume that we can store here, we need to make
		 * sure that it isn't in our metadata -- storing to our
		 * dynamic variable metadata would corrupt our state.  For
		 * the range to not include any dynamic variable metadata,
		 * it must:
		 *
		 *	(1) Start above the hash table that is at the base of
		 *	the dynamic variable space
		 *
		 *	(2) Have a starting chunk offset that is beyond the
		 *	dtrace_dynvar_t that is at the base of every chunk
		 *
		 *	(3) Not span a chunk boundary
		 *
		 */
		if (addr < base)
			return (0);

		chunkoffs = (addr - base) % dstate->dtds_chunksize;

		if (chunkoffs < sizeof (dtrace_dynvar_t))
			return (0);

		if (chunkoffs + sz > dstate->dtds_chunksize)
			return (0);

		return (1);
	}

	/*
	 * Finally, check the static local and global variables.  These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}
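
/*
 * The dynamic variable checks above assume this layout for the dynamic
 * variable space (a sketch; the authoritative description is in
 * <sys/dtrace_impl.h>):
 *
 *	dtds_base:		[ hash table: dtds_hashsize buckets ]
 *	base:			[ chunk 0:  dtrace_dynvar_t | data ]
 *	base + chunksize:	[ chunk 1:  dtrace_dynvar_t | data ]
 *	base + 2 * chunksize:	...
 *
 * where base = dtds_base + dtds_hashsize * sizeof (dtrace_dynhash_t).
 * A store of sz bytes at addr is thus legal only if addr is at or above
 * base, (addr - base) % chunksize clears the per-chunk dtrace_dynvar_t
 * header, and the store does not spill into the following chunk --
 * precisely checks (1) through (3) above.
 */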

/*
 * Convenience routine to check to see if the address is within a memory
 * region in which a load may be issued given the user's privilege level;
 * if not, it sets the appropriate error flags and loads 'addr' into the
 * illegal value slot.
 *
 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
 * appropriate memory access protection.
 */
static int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	/*
	 * You can obviously read that which you can store.
	 */
	if (dtrace_canstore(addr, sz, mstate, vstate))
		return (1);

	/*
	 * We're allowed to read from our own string table.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
	    mstate->dtms_difo->dtdo_strlen))
		return (1);

	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
	*illval = addr;
	return (0);
}

/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
 */
static int
dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t strsz;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
	if (dtrace_canload(addr, strsz, mstate, vstate))
		return (1);

	return (0);
}

/*
 * Convenience routine to check to see if a given variable is within a memory
 * region in which a load may be issued given the user's privilege level.
 */
static int
dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t sz;
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	if (type->dtdt_kind == DIF_TYPE_STRING)
		sz = dtrace_strlen(src,
		    vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
	else
		sz = type->dtdt_size;

	return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
}

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
 * memory specified by the DIF program.  The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace.  As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed to
 * be unsafe memory specified by the DIF program.  The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the DIF
 * program.  The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to it directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

static void
dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}

/*
 * Shift the 128-bit value in a by b.  If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dtrace_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}

/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);
}
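
/*
 * A worked example of the decomposition above (illustrative):  with
 * factor1 = 2^32 + 3 and factor2 = 2^32 + 5, we have hi1 = hi2 = 1,
 * lo1 = 3 and lo2 = 5, so the partial products are
 *
 *	hi1 * hi2 << 64	 =  2^64
 *	hi1 * lo2 << 32	 =  5 * 2^32
 *	hi2 * lo1 << 32	 =  3 * 2^32
 *	lo1 * lo2	 =  15
 *
 * which sum to 2^64 + 2^35 + 15 == (2^32 + 3) * (2^32 + 5).  The carries
 * between product[0] and product[1] are handled by dtrace_add_128().
 */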

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the user credentials of the process that enabled the
 * invoking ECB match the target credentials.
 */
static int
dtrace_priv_proc_common_user(dtrace_state_t *state)
{
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_uid == cr->cr_uid &&
	    s_cr->cr_uid == cr->cr_ruid &&
	    s_cr->cr_uid == cr->cr_suid &&
	    s_cr->cr_gid == cr->cr_gid &&
	    s_cr->cr_gid == cr->cr_rgid &&
	    s_cr->cr_gid == cr->cr_sgid)
		return (1);

	return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the zone of the process that enabled the invoking ECB
 * matches the target credentials.
 */
static int
dtrace_priv_proc_common_zone(dtrace_state_t *state)
{
#if defined(sun)
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_zone == cr->cr_zone)
		return (1);

	return (0);
#else
	return (1);
#endif
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the process has not setuid or changed credentials.
 */
static int
dtrace_priv_proc_common_nocd(void)
{
	proc_t *proc;

	if ((proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	int action = state->dts_cred.dcr_action;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
	    dtrace_priv_proc_common_zone(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
	    dtrace_priv_proc_common_user(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
	    dtrace_priv_proc_common_nocd() == 0)
		goto bad;

	return (1);

bad:
	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	if (dtrace_priv_proc_common_zone(state) &&
	    dtrace_priv_proc_common_user(state) &&
	    dtrace_priv_proc_common_nocd())
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}
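
/*
 * All of the dtrace_priv_*() checks above share a convention:  on failure
 * they set CPU_DTRACE_UPRIV or CPU_DTRACE_KPRIV in the per-CPU flags and
 * return 0 rather than inducing a failure of the probe itself.  A sketch
 * of a caller (hypothetical; the real call sites are elsewhere in this
 * file):
 *
 *	if (!dtrace_priv_kernel(state))
 *		return;		(the fault flag is already set for us)
 *
 * so an unprivileged consumer sees the corresponding fault in its error
 * counters rather than an error in the enabling as a whole.
 */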

/*
 * Note:  not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context
 * to clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	int i, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		ASSERT(dcpu->dtdsc_rinsing == NULL);

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		/*
		 * If the clean list is non-NULL, then we're not going to do
		 * any work for this CPU -- it means that there has not been
		 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
		 * since the last time we cleaned house.
		 */
		if (dcpu->dtdsc_clean != NULL)
			continue;

		work = 1;

		/*
		 * Atomically move the dirty list aside.
		 */
		do {
			dirty = dcpu->dtdsc_dirty;

			/*
			 * Before we zap the dirty list, set the rinsing list.
			 * (This allows for a potential assertion in
			 * dtrace_dynvar():  if a free dynamic variable appears
			 * on a hash chain, either the dirty list or the
			 * rinsing list for some CPU must be non-NULL.)
			 */
			dcpu->dtdsc_rinsing = dirty;
			dtrace_membar_producer();
		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
		    dirty, NULL) != dirty);
	}

	if (!work) {
		/*
		 * We have no work to do; we can simply return.
		 */
		return;
	}

	dtrace_sync();

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		if (dcpu->dtdsc_rinsing == NULL)
			continue;

		/*
		 * We are now guaranteed that no hash chain contains a pointer
		 * into this dirty list; we can make it clean.
		 */
		ASSERT(dcpu->dtdsc_clean == NULL);
		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
		dcpu->dtdsc_rinsing = NULL;
	}

	/*
	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
	 * This prevents a race whereby a CPU incorrectly decides that
	 * the state should be something other than DTRACE_DSTATE_CLEAN
	 * after dtrace_dynvar_clean() has completed.
	 */
	dtrace_sync();

	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}
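
/*
 * A sketch of the per-CPU list rotation that dtrace_dynvar_clean() and
 * dtrace_dynvar() implement between them (the authoritative description
 * is in <sys/dtrace_impl.h>):
 *
 *	dirty ---(CAS aside, above)-----------------> rinsing
 *	rinsing ---(after the first dtrace_sync())--> clean
 *	clean ---(CAS in dtrace_dynvar())-----------> free
 *
 * The dtrace_sync() between the two loops guarantees that no probe still
 * holds a pointer into a rinsing list before that list is republished as
 * clean and handed back out for allocation.
 */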

/*
 * Depending on the value of the op parameter, this function looks up,
 * allocates or deallocates an arbitrarily-keyed dynamic variable.  If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated.  If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	uint64_t hashval = DTRACE_DYNHASH_VALID;
	dtrace_dynhash_t *hash = dstate->dtds_hash;
	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
	processorid_t me = curcpu, cpu = me;
	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
	size_t bucket, ksize;
	size_t chunksize = dstate->dtds_chunksize;
	uintptr_t kdata, lock, nstate;
	uint_t i;

	ASSERT(nkeys != 0);

	/*
	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
	 * algorithm.  For the by-value portions, we perform the algorithm in
	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
	 * bit, and seems to have only a minute effect on distribution.  For
	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
	 * over each referenced byte.  It's painful to do this, but it's much
	 * better than pathological hash distribution.  The efficacy of the
	 * hashing algorithm (and a comparison with other algorithms) may be
	 * found by running the ::dtrace_dynstat MDB dcmd.
	 */
	for (i = 0; i < nkeys; i++) {
		if (key[i].dttk_size == 0) {
			uint64_t val = key[i].dttk_value;

			hashval += (val >> 48) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 32) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 16) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += val & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);
		} else {
			/*
			 * This is incredibly painful, but it beats the hell
			 * out of the alternative.
			 */
			uint64_t j, size = key[i].dttk_size;
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			if (!dtrace_canload(base, size, mstate, vstate))
				break;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (NULL);

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
	 * comes out to be one of our two sentinel hash values.  If this
	 * actually happens, we set the hashval to be a value known to be a
	 * non-sentinel value.
	 */
	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
		hashval = DTRACE_DYNHASH_VALID;

	/*
	 * Yes, it's painful to do a divide here.  If the cycle count becomes
	 * important here, tricks can be pulled to reduce it.  (However, it's
	 * critical that hash collisions be kept to an absolute minimum;
	 * they're much more painful than a divide.)  It's better to have a
	 * solution that generates few collisions and still keeps things
	 * relatively simple.
	 */
	bucket = hashval % dstate->dtds_hashsize;

	if (op == DTRACE_DYNVAR_DEALLOC) {
		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

		for (;;) {
			while ((lock = *lockp) & 1)
				continue;

			if (dtrace_casptr((volatile void *)lockp,
			    (volatile void *)lock,
			    (volatile void *)(lock + 1)) == (void *)lock)
				break;
		}

		dtrace_membar_producer();
	}

top:
	prev = NULL;
	lock = hash[bucket].dtdh_lock;

	dtrace_membar_consumer();

	start = hash[bucket].dtdh_chain;
	ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
	    start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
	    op != DTRACE_DYNVAR_DEALLOC));

	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
		dtrace_key_t *dkey = &dtuple->dtt_key[0];

		if (dvar->dtdv_hashval != hashval) {
			if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
				/*
				 * We've reached the sink, and therefore the
				 * end of the hash chain; we can kick out of
				 * the loop knowing that we have seen a valid
				 * snapshot of state.
				 */
				ASSERT(dvar->dtdv_next == NULL);
				ASSERT(dvar == &dtrace_dynhash_sink);
				break;
			}

			if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
				/*
				 * We've gone off the rails:  somewhere along
				 * the line, one of the members of this hash
				 * chain was deleted.  Note that we could also
				 * detect this by simply letting this loop run
				 * to completion, as we would eventually hit
				 * the end of the dirty list.  However, we
				 * want to avoid running the length of the
				 * dirty list unnecessarily (it might be quite
				 * long), so we catch this as early as
				 * possible by detecting the hash marker.  In
				 * this case, we simply set dvar to NULL and
				 * break; the conditional after the loop will
				 * send us back to top.
				 */
				dvar = NULL;
				break;
			}

			goto next;
		}

		if (dtuple->dtt_nkeys != nkeys)
			goto next;

		for (i = 0; i < nkeys; i++, dkey++) {
			if (dkey->dttk_size != key[i].dttk_size)
				goto next;	/* size or type mismatch */

			if (dkey->dttk_size != 0) {
				if (dtrace_bcmp(
				    (void *)(uintptr_t)key[i].dttk_value,
				    (void *)(uintptr_t)dkey->dttk_value,
				    dkey->dttk_size))
					goto next;
			} else {
				if (dkey->dttk_value != key[i].dttk_value)
					goto next;
			}
		}

		if (op != DTRACE_DYNVAR_DEALLOC)
			return (dvar);

		ASSERT(dvar->dtdv_next == NULL ||
		    dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);

		if (prev != NULL) {
			ASSERT(hash[bucket].dtdh_chain != dvar);
			ASSERT(start != dvar);
			ASSERT(prev->dtdv_next == dvar);
			prev->dtdv_next = dvar->dtdv_next;
		} else {
			if (dtrace_casptr(&hash[bucket].dtdh_chain,
			    start, dvar->dtdv_next) != start) {
				/*
				 * We have failed to atomically swing the
				 * hash table head pointer, presumably because
				 * of a conflicting allocation on another CPU.
				 * We need to reread the hash chain and try
				 * again.
				 */
				goto top;
			}
		}

		dtrace_membar_producer();

		/*
		 * Now set the hash value to indicate that it's free.
		 */
		ASSERT(hash[bucket].dtdh_chain != dvar);
		dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

		dtrace_membar_producer();

		/*
		 * Set the next pointer to point at the dirty list, and
		 * atomically swing the dirty pointer to the newly freed dvar.
		 */
		do {
			next = dcpu->dtdsc_dirty;
			dvar->dtdv_next = next;
		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);

		/*
		 * Finally, unlock this hash bucket.
		 */
		ASSERT(hash[bucket].dtdh_lock == lock);
		ASSERT(lock & 1);
		hash[bucket].dtdh_lock++;

		return (NULL);
next:
		prev = dvar;
		continue;
	}

	if (dvar == NULL) {
		/*
		 * If dvar is NULL, it is because we went off the rails:
		 * one of the elements that we traversed in the hash chain
		 * was deleted while we were traversing it.  In this case,
		 * we assert that we aren't doing a dealloc (deallocs lock
		 * the hash bucket to prevent themselves from racing with
		 * one another), and retry the hash chain traversal.
		 */
		ASSERT(op != DTRACE_DYNVAR_DEALLOC);
		goto top;
	}

	if (op != DTRACE_DYNVAR_ALLOC) {
		/*
		 * If we are not to allocate a new variable, we want to
		 * return NULL now.  Before we return, check that the value
		 * of the lock word hasn't changed.  If it has, we may have
		 * seen an inconsistent snapshot.
		 */
		if (op == DTRACE_DYNVAR_NOALLOC) {
			if (hash[bucket].dtdh_lock != lock)
				goto top;
		} else {
			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
			ASSERT(hash[bucket].dtdh_lock == lock);
			ASSERT(lock & 1);
			hash[bucket].dtdh_lock++;
		}

		return (NULL);
	}

	/*
	 * We need to allocate a new dynamic variable.  The size we need is the
	 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
	 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
	 * the size of any referred-to data (dsize).  We then round the final
	 * size up to the chunksize for allocation.
	 */
	for (ksize = 0, i = 0; i < nkeys; i++)
		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));

	/*
	 * This should be pretty much impossible, but could happen if, say,
	 * strange DIF specified the tuple.  Ideally, this should be an
	 * assertion and not an error condition -- but that requires that the
	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
	 * bullet-proof.  (That is, it must not be able to be fooled by
	 * malicious DIF.)  Given the lack of backwards branches in DIF,
	 * solving this would presumably not amount to solving the Halting
	 * Problem -- but it still seems awfully hard.
	 */
	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
	    ksize + dsize > chunksize) {
		dcpu->dtdsc_drops++;
		return (NULL);
	}

	nstate = DTRACE_DSTATE_EMPTY;

	do {
retry:
		free = dcpu->dtdsc_free;

		if (free == NULL) {
			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
			void *rval;

			if (clean == NULL) {
				/*
				 * We're out of dynamic variable space on
				 * this CPU.  Unless we have tried all CPUs,
				 * we'll try to allocate from a different
				 * CPU.
				 */
				switch (dstate->dtds_state) {
				case DTRACE_DSTATE_CLEAN: {
					void *sp = &dstate->dtds_state;

					if (++cpu >= NCPU)
						cpu = 0;

					if (dcpu->dtdsc_dirty != NULL &&
					    nstate == DTRACE_DSTATE_EMPTY)
						nstate = DTRACE_DSTATE_DIRTY;

					if (dcpu->dtdsc_rinsing != NULL)
						nstate = DTRACE_DSTATE_RINSING;

					dcpu = &dstate->dtds_percpu[cpu];

					if (cpu != me)
						goto retry;

					(void) dtrace_cas32(sp,
					    DTRACE_DSTATE_CLEAN, nstate);

					/*
					 * To increment the correct bean
					 * counter, take another lap.
					 */
					goto retry;
				}

				case DTRACE_DSTATE_DIRTY:
					dcpu->dtdsc_dirty_drops++;
					break;

				case DTRACE_DSTATE_RINSING:
					dcpu->dtdsc_rinsing_drops++;
					break;

				case DTRACE_DSTATE_EMPTY:
					dcpu->dtdsc_drops++;
					break;
				}

				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
				return (NULL);
			}

			/*
			 * The clean list appears to be non-empty.  We want to
			 * move the clean list to the free list; we start by
			 * moving the clean pointer aside.
			 */
			if (dtrace_casptr(&dcpu->dtdsc_clean,
			    clean, NULL) != clean) {
				/*
				 * We are in one of two situations:
				 *
				 *  (a)	The clean list was switched to the
				 *	free list by another CPU.
				 *
				 *  (b)	The clean list was added to by the
				 *	cleansing cyclic.
				 *
				 * In either of these situations, we can
				 * just reattempt the free list allocation.
				 */
				goto retry;
			}

			ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);

			/*
			 * Now we'll move the clean list to the free list.
			 * It's impossible for this to fail:  the only way
			 * the free list can be updated is through this
			 * code path, and only one CPU can own the clean list.
			 * Thus, it would only be possible for this to fail if
			 * this code were racing with dtrace_dynvar_clean().
			 * (That is, if dtrace_dynvar_clean() updated the clean
			 * list, and we ended up racing to update the free
			 * list.)  This race is prevented by the dtrace_sync()
			 * in dtrace_dynvar_clean() -- which flushes the
			 * owners of the clean lists out before resetting
			 * the clean lists.
			 */
			rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
			ASSERT(rval == NULL);
			goto retry;
		}

		dvar = free;
		new_free = dvar->dtdv_next;
	} while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);

	/*
	 * We have now allocated a new chunk.  We copy the tuple keys into the
	 * tuple array and copy any referenced key data into the data space
	 * following the tuple array.  As we do this, we relocate dttk_value
	 * in the final tuple to point to the key data address in the chunk.
	 */
	kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
	dvar->dtdv_data = (void *)(kdata + ksize);
	dvar->dtdv_tuple.dtt_nkeys = nkeys;

	for (i = 0; i < nkeys; i++) {
		dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
		size_t kesize = key[i].dttk_size;

		if (kesize != 0) {
			dtrace_bcopy(
			    (const void *)(uintptr_t)key[i].dttk_value,
			    (void *)kdata, kesize);
			dkey->dttk_value = kdata;
			kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
		} else {
			dkey->dttk_value = key[i].dttk_value;
		}

		dkey->dttk_size = kesize;
	}

	ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
	dvar->dtdv_hashval = hashval;
	dvar->dtdv_next = start;

	if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
		return (dvar);

	/*
	 * The cas has failed.  Either another CPU is adding an element to
	 * this hash chain, or another CPU is deleting an element from this
	 * hash chain.  The simplest way to deal with both of these cases
	 * (though not necessarily the most efficient) is to free our
	 * allocated block and tail-call ourselves.  Note that the free is
	 * to the dirty list and _not_ to the free list.  This is to prevent
	 * races with allocators, above.
	 */
	dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

	dtrace_membar_producer();

	do {
		free = dcpu->dtdsc_dirty;
		dvar->dtdv_next = free;
	} while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);

	return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
}

/*ARGSUSED*/
static void
dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	if ((int64_t)nval < (int64_t)*oval)
		*oval = nval;
}

/*ARGSUSED*/
static void
dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	if ((int64_t)nval > (int64_t)*oval)
		*oval = nval;
}

static void
dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
{
	int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
	int64_t val = (int64_t)nval;

	if (val < 0) {
		for (i = 0; i < zero; i++) {
			if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
				quanta[i] += incr;
				return;
			}
		}
	} else {
		for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
			if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
				quanta[i - 1] += incr;
				return;
			}
		}

		quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
		return;
	}

	ASSERT(0);
}

static void
dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
{
	uint64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	int32_t val = (int32_t)nval, level;

	ASSERT(step != 0);
	ASSERT(levels != 0);

	if (val < base) {
		/*
		 * This is an underflow.
		 */
		lquanta[0] += incr;
		return;
	}

	level = (val - base) / step;

	if (level < levels) {
		lquanta[level + 1] += incr;
		return;
	}

	/*
	 * This is an overflow.
	 */
	lquanta[levels + 1] += incr;
}
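
/*
 * The resulting lquantize() bucket layout, for illustration:  with
 * base = 0, step = 10 and levels = 3, the array indexed above is
 *
 *	lquanta[0]	underflow	(val < 0)
 *	lquanta[1]	[ 0, 10)
 *	lquanta[2]	[10, 20)
 *	lquanta[3]	[20, 30)
 *	lquanta[4]	overflow	(val >= 30)
 *
 * i.e. levels interior buckets of width step, bracketed by an underflow
 * and an overflow bucket.
 */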

static int
dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low,
    uint16_t high, uint16_t nsteps, int64_t value)
{
	int64_t this = 1, last, next;
	int base = 1, order;

	ASSERT(factor <= nsteps);
	ASSERT(nsteps % factor == 0);

	for (order = 0; order < low; order++)
		this *= factor;

	/*
	 * If our value is less than our factor taken to the power of the
	 * low order of magnitude, it goes into the zeroth bucket.
	 */
	if (value < (last = this))
		return (0);

	for (this *= factor; order <= high; order++) {
		int nbuckets = this > nsteps ? nsteps : this;

		if ((next = this * factor) < this) {
			/*
			 * We should not generally get log/linear quantizations
			 * with a high magnitude that allows 64-bits to
			 * overflow, but we nonetheless protect against this
			 * by explicitly checking for overflow, and clamping
			 * our value accordingly.
			 */
			value = this - 1;
		}

		if (value < this) {
			/*
			 * If our value lies within this order of magnitude,
			 * determine its position by taking the offset within
			 * the order of magnitude, dividing by the bucket
			 * width, and adding to our (accumulated) base.
			 */
			return (base + (value - last) / (this / nbuckets));
		}

		base += nbuckets - (nbuckets / factor);
		last = this;
		this = next;
	}

	/*
	 * Our value is greater than or equal to our factor taken to the
	 * power of one plus the high magnitude -- return the top bucket.
	 */
	return (base);
}

static void
dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr)
{
	uint64_t arg = *llquanta++;
	uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
	uint16_t low = DTRACE_LLQUANTIZE_LOW(arg);
	uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg);
	uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);

	llquanta[dtrace_aggregate_llquantize_bucket(factor,
	    low, high, nsteps, nval)] += incr;
}

/*ARGSUSED*/
static void
dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
{
	data[0]++;
	data[1] += nval;
}

/*ARGSUSED*/
static void
dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
{
	int64_t snval = (int64_t)nval;
	uint64_t tmp[2];

	data[0]++;
	data[1] += nval;

	/*
	 * What we want to say here is:
	 *
	 * data[2] += nval * nval;
	 *
	 * But given that nval is 64-bit, we could easily overflow, so
	 * we do this as 128-bit arithmetic.
	 */
	if (snval < 0)
		snval = -snval;

	dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
	dtrace_add_128(data + 2, tmp, data + 2);
}

/*ARGSUSED*/
static void
dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	*oval = *oval + 1;
}

/*ARGSUSED*/
static void
dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	*oval += nval;
}
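
/*
 * A worked llquantize() example (illustrative):  with factor = 10,
 * low = 0, high = 2 and nsteps = 20, dtrace_aggregate_llquantize_bucket()
 * above sends values below 1 to bucket 0, splits each order of magnitude
 * [1, 10), [10, 100) and [100, 1000) into min(magnitude, nsteps) linear
 * buckets (less those that coincide with the previous order), and sends
 * everything at or above 1000 to the final, top bucket.
 */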

/*
 * Aggregate given the tuple in the principal data buffer, and the aggregating
 * action denoted by the specified dtrace_aggregation_t.  The aggregation
 * buffer is specified as the buf parameter.  This routine does not return
 * failure; if there is no space in the aggregation buffer, the data will be
 * dropped, and a corresponding counter incremented.
 */
static void
dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
    intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
{
	dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
	uint32_t i, ndx, size, fsize;
	uint32_t align = sizeof (uint64_t) - 1;
	dtrace_aggbuffer_t *agb;
	dtrace_aggkey_t *key;
	uint32_t hashval = 0, limit, isstr;
	caddr_t tomax, data, kdata;
	dtrace_actkind_t action;
	dtrace_action_t *act;
	uintptr_t offs;

	if (buf == NULL)
		return;

	if (!agg->dtag_hasarg) {
		/*
		 * Currently, only quantize() and lquantize() take additional
		 * arguments, and they have the same semantics:  an increment
		 * value that defaults to 1 when not present.  If additional
		 * aggregating actions take arguments, the setting of the
		 * default argument value will presumably have to become more
		 * sophisticated...
		 */
		arg = 1;
	}

	action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
	size = rec->dtrd_offset - agg->dtag_base;
	fsize = size + rec->dtrd_size;

	ASSERT(dbuf->dtb_tomax != NULL);
	data = dbuf->dtb_tomax + offset + agg->dtag_base;

	if ((tomax = buf->dtb_tomax) == NULL) {
		dtrace_buffer_drop(buf);
		return;
	}

	/*
	 * The metastructure is always at the bottom of the buffer.
	 */
	agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
	    sizeof (dtrace_aggbuffer_t));

	if (buf->dtb_offset == 0) {
		/*
		 * We just kludge up approximately 1/8th of the size to be
		 * buckets.  If this guess ends up being routinely
		 * off-the-mark, we may need to dynamically readjust this
		 * based on past performance.
		 */
		uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);

		if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
		    (uintptr_t)tomax || hashsize == 0) {
			/*
			 * We've been given a ludicrously small buffer;
			 * increment our drop count and leave.
			 */
			dtrace_buffer_drop(buf);
			return;
		}

		/*
		 * And now, a pathetic attempt to try to get an odd (or
		 * perchance, a prime) hash size for better hash distribution.
		 */
		if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
			hashsize -= DTRACE_AGGHASHSIZE_SLEW;

		agb->dtagb_hashsize = hashsize;
		agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
		    agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
		agb->dtagb_free = (uintptr_t)agb->dtagb_hash;

		for (i = 0; i < agb->dtagb_hashsize; i++)
			agb->dtagb_hash[i] = NULL;
	}

	ASSERT(agg->dtag_first != NULL);
	ASSERT(agg->dtag_first->dta_intuple);

	/*
	 * Calculate the hash value based on the key.  Note that we _don't_
	 * include the aggid in the hashing (but we will store it as part of
	 * the key).  The hashing algorithm is Bob Jenkins' "One-at-a-time"
	 * algorithm:  a simple, quick algorithm that has no known funnels, and
	 * gets good distribution in practice.  The efficacy of the hashing
	 * algorithm (and a comparison with other algorithms) may be found by
	 * running the ::dtrace_aggstat MDB dcmd.
	 */
	for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
		i = act->dta_rec.dtrd_offset - agg->dtag_base;
		limit = i + act->dta_rec.dtrd_size;
		ASSERT(limit <= size);
		isstr = DTRACEACT_ISSTRING(act);

		for (; i < limit; i++) {
			hashval += data[i];
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			if (isstr && data[i] == '\0')
				break;
		}
	}

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * Yes, the divide here is expensive -- but it's generally the least
	 * of the performance issues given the amount of data that we iterate
	 * over to compute hash values, compare data, etc.
	 */
	ndx = hashval % agb->dtagb_hashsize;

	for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
		ASSERT((caddr_t)key >= tomax);
		ASSERT((caddr_t)key < tomax + buf->dtb_size);

		if (hashval != key->dtak_hashval || key->dtak_size != size)
			continue;

		kdata = key->dtak_data;
		ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);

		for (act = agg->dtag_first; act->dta_intuple;
		    act = act->dta_next) {
			i = act->dta_rec.dtrd_offset - agg->dtag_base;
			limit = i + act->dta_rec.dtrd_size;
			ASSERT(limit <= size);
			isstr = DTRACEACT_ISSTRING(act);

			for (; i < limit; i++) {
				if (kdata[i] != data[i])
					goto next;

				if (isstr && data[i] == '\0')
					break;
			}
		}

		if (action != key->dtak_action) {
			/*
			 * We are aggregating on the same value in the same
			 * aggregation with two different aggregating actions.
			 * (This should have been picked up in the compiler,
			 * so we may be dealing with errant or devious DIF.)
			 * This is an error condition; we indicate as much,
			 * and return.
			 */
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return;
		}

		/*
		 * This is a hit:  we need to apply the aggregator to
		 * the value at this key.
		 */
		agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
		return;
next:
		continue;
	}

	/*
	 * We didn't find it.  We need to allocate some zero-filled space,
	 * link it into the hash table appropriately, and apply the aggregator
	 * to the (zero-filled) value.
	 */
	offs = buf->dtb_offset;
	while (offs & (align - 1))
		offs += sizeof (uint32_t);

	/*
	 * If we don't have enough room to both allocate a new key _and_
	 * its associated data, increment the drop count and return.
	 */
	if ((uintptr_t)tomax + offs + fsize >
	    agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
		dtrace_buffer_drop(buf);
		return;
	}

	/*CONSTCOND*/
	ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
	key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
	agb->dtagb_free -= sizeof (dtrace_aggkey_t);

	key->dtak_data = kdata = tomax + offs;
	buf->dtb_offset = offs + fsize;

	/*
	 * Now copy the data across.
	 */
	*((dtrace_aggid_t *)kdata) = agg->dtag_id;

	for (i = sizeof (dtrace_aggid_t); i < size; i++)
		kdata[i] = data[i];

	/*
	 * Because strings are not zeroed out by default, we need to iterate
	 * looking for actions that store strings, and we need to explicitly
	 * pad these strings out with zeroes.
	 */
	for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
		int nul;

		if (!DTRACEACT_ISSTRING(act))
			continue;

		i = act->dta_rec.dtrd_offset - agg->dtag_base;
		limit = i + act->dta_rec.dtrd_size;
		ASSERT(limit <= size);

		for (nul = 0; i < limit; i++) {
			if (nul) {
				kdata[i] = '\0';
				continue;
			}

			if (data[i] != '\0')
				continue;

			nul = 1;
		}
	}

	for (i = size; i < fsize; i++)
		kdata[i] = 0;

	key->dtak_hashval = hashval;
	key->dtak_size = size;
	key->dtak_action = action;
	key->dtak_next = agb->dtagb_hash[ndx];
	agb->dtagb_hash[ndx] = key;

	/*
	 * Finally, apply the aggregator.
	 */
	*((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
	agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
}
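
/*
 * The aggregation buffer layout that dtrace_aggregate() maintains, as a
 * sketch:
 *
 *	tomax:			key/value data, allocated upward
 *	...
 *	agb->dtagb_free:	dtrace_aggkey_t slots, allocated downward
 *	agb->dtagb_hash:	hash bucket array
 *	end of buffer:		the dtrace_aggbuffer_t metastructure
 *
 * Record data grows up from the bottom and key structures grow down from
 * the hash table; the space check above fails the allocation (and bumps
 * the drop count) when the two would meet.
 */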
2289 */ 2290 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2291 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2292} 2293 2294/* 2295 * Given consumer state, this routine finds a speculation in the INACTIVE 2296 * state and transitions it into the ACTIVE state. If there is no speculation 2297 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2298 * incremented -- it is up to the caller to take appropriate action. 2299 */ 2300static int 2301dtrace_speculation(dtrace_state_t *state) 2302{ 2303 int i = 0; 2304 dtrace_speculation_state_t current; 2305 uint32_t *stat = &state->dts_speculations_unavail, count; 2306 2307 while (i < state->dts_nspeculations) { 2308 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2309 2310 current = spec->dtsp_state; 2311 2312 if (current != DTRACESPEC_INACTIVE) { 2313 if (current == DTRACESPEC_COMMITTINGMANY || 2314 current == DTRACESPEC_COMMITTING || 2315 current == DTRACESPEC_DISCARDING) 2316 stat = &state->dts_speculations_busy; 2317 i++; 2318 continue; 2319 } 2320 2321 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2322 current, DTRACESPEC_ACTIVE) == current) 2323 return (i + 1); 2324 } 2325 2326 /* 2327 * We couldn't find a speculation. If we found as much as a single 2328 * busy speculation buffer, we'll attribute this failure as "busy" 2329 * instead of "unavail". 2330 */ 2331 do { 2332 count = *stat; 2333 } while (dtrace_cas32(stat, count, count + 1) != count); 2334 2335 return (0); 2336} 2337 2338/* 2339 * This routine commits an active speculation. If the specified speculation 2340 * is not in a valid state to perform a commit(), this routine will silently do 2341 * nothing. The state of the specified speculation is transitioned according 2342 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2343 */ 2344static void 2345dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2346 dtrace_specid_t which) 2347{ 2348 dtrace_speculation_t *spec; 2349 dtrace_buffer_t *src, *dest; 2350 uintptr_t daddr, saddr, dlimit, slimit; 2351 dtrace_speculation_state_t current, new = 0; 2352 intptr_t offs; 2353 uint64_t timestamp; 2354 2355 if (which == 0) 2356 return; 2357 2358 if (which > state->dts_nspeculations) { 2359 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2360 return; 2361 } 2362 2363 spec = &state->dts_speculations[which - 1]; 2364 src = &spec->dtsp_buffer[cpu]; 2365 dest = &state->dts_buffer[cpu]; 2366 2367 do { 2368 current = spec->dtsp_state; 2369 2370 if (current == DTRACESPEC_COMMITTINGMANY) 2371 break; 2372 2373 switch (current) { 2374 case DTRACESPEC_INACTIVE: 2375 case DTRACESPEC_DISCARDING: 2376 return; 2377 2378 case DTRACESPEC_COMMITTING: 2379 /* 2380 * This is only possible if we are (a) commit()'ing 2381 * without having done a prior speculate() on this CPU 2382 * and (b) racing with another commit() on a different 2383 * CPU. There's nothing to do -- we just assert that 2384 * our offset is 0. 2385 */ 2386 ASSERT(src->dtb_offset == 0); 2387 return; 2388 2389 case DTRACESPEC_ACTIVE: 2390 new = DTRACESPEC_COMMITTING; 2391 break; 2392 2393 case DTRACESPEC_ACTIVEONE: 2394 /* 2395 * This speculation is active on one CPU. If our 2396 * buffer offset is non-zero, we know that the one CPU 2397 * must be us. Otherwise, we are committing on a 2398 * different CPU from the speculate(), and we must 2399 * rely on being asynchronously cleaned. 
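 * (That asynchronous cleaning is performed by
 * dtrace_speculation_clean(), below, which periodically cross calls
 * all CPUs to drain buffers left in the COMMITTINGMANY state.)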
2400 */
2401 if (src->dtb_offset != 0) {
2402 new = DTRACESPEC_COMMITTING;
2403 break;
2404 }
2405 /*FALLTHROUGH*/
2406
2407 case DTRACESPEC_ACTIVEMANY:
2408 new = DTRACESPEC_COMMITTINGMANY;
2409 break;
2410
2411 default:
2412 ASSERT(0);
2413 }
2414 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2415 current, new) != current);
2416
2417 /*
2418 * We have set the state to indicate that we are committing this
2419 * speculation. Now reserve the necessary space in the destination
2420 * buffer.
2421 */
2422 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2423 sizeof (uint64_t), state, NULL)) < 0) {
2424 dtrace_buffer_drop(dest);
2425 goto out;
2426 }
2427
2428 /*
2429 * We have sufficient space to copy the speculative buffer into the
2430 * primary buffer. First, modify the speculative buffer, filling
2431 * in the timestamp of all entries with the current time. The data
2432 * must have the commit() time rather than the time it was traced,
2433 * so that all entries in the primary buffer are in timestamp order.
2434 */
2435 timestamp = dtrace_gethrtime();
2436 saddr = (uintptr_t)src->dtb_tomax;
2437 slimit = saddr + src->dtb_offset;
2438 while (saddr < slimit) {
2439 size_t size;
2440 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr;
2441
2442 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) {
2443 saddr += sizeof (dtrace_epid_t);
2444 continue;
2445 }
2446 ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs);
2447 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size;
2448
2449 ASSERT3U(saddr + size, <=, slimit);
2450 ASSERT3U(size, >=, sizeof (dtrace_rechdr_t));
2451 ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX);
2452
2453 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp);
2454
2455 saddr += size;
2456 }
2457
2458 /*
2459 * Copy the buffer across. (Note that this is a
2460 * highly suboptimal bcopy(); in the unlikely event that this becomes
2461 * a serious performance issue, a high-performance DTrace-specific
2462 * bcopy() should obviously be invented.)
2463 */
2464 daddr = (uintptr_t)dest->dtb_tomax + offs;
2465 dlimit = daddr + src->dtb_offset;
2466 saddr = (uintptr_t)src->dtb_tomax;
2467
2468 /*
2469 * First, the aligned portion.
2470 */
2471 while (dlimit - daddr >= sizeof (uint64_t)) {
2472 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2473
2474 daddr += sizeof (uint64_t);
2475 saddr += sizeof (uint64_t);
2476 }
2477
2478 /*
2479 * Now any left-over bit...
2480 */
2481 while (dlimit - daddr)
2482 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2483
2484 /*
2485 * Finally, commit the reserved space in the destination buffer.
2486 */
2487 dest->dtb_offset = offs + src->dtb_offset;
2488
2489out:
2490 /*
2491 * If we're lucky enough to be the only active CPU on this speculation
2492 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2493 */
2494 if (current == DTRACESPEC_ACTIVE ||
2495 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2496 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2497 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2498
2499 ASSERT(rval == DTRACESPEC_COMMITTING);
2500 }
2501
2502 src->dtb_offset = 0;
2503 src->dtb_xamot_drops += src->dtb_drops;
2504 src->dtb_drops = 0;
2505 }
2506
2507 /*
2508 * This routine discards an active speculation. If the specified speculation
2509 * is not in a valid state to perform a discard(), this routine will silently
2510 * do nothing.
The state of the specified speculation is transitioned
2511 * according to the state transition diagram outlined in <sys/dtrace_impl.h>
2512 */
2513 static void
2514 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2515 dtrace_specid_t which)
2516 {
2517 dtrace_speculation_t *spec;
2518 dtrace_speculation_state_t current, new = 0;
2519 dtrace_buffer_t *buf;
2520
2521 if (which == 0)
2522 return;
2523
2524 if (which > state->dts_nspeculations) {
2525 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2526 return;
2527 }
2528
2529 spec = &state->dts_speculations[which - 1];
2530 buf = &spec->dtsp_buffer[cpu];
2531
2532 do {
2533 current = spec->dtsp_state;
2534
2535 switch (current) {
2536 case DTRACESPEC_INACTIVE:
2537 case DTRACESPEC_COMMITTINGMANY:
2538 case DTRACESPEC_COMMITTING:
2539 case DTRACESPEC_DISCARDING:
2540 return;
2541
2542 case DTRACESPEC_ACTIVE:
2543 case DTRACESPEC_ACTIVEMANY:
2544 new = DTRACESPEC_DISCARDING;
2545 break;
2546
2547 case DTRACESPEC_ACTIVEONE:
2548 if (buf->dtb_offset != 0) {
2549 new = DTRACESPEC_INACTIVE;
2550 } else {
2551 new = DTRACESPEC_DISCARDING;
2552 }
2553 break;
2554
2555 default:
2556 ASSERT(0);
2557 }
2558 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2559 current, new) != current);
2560
2561 buf->dtb_offset = 0;
2562 buf->dtb_drops = 0;
2563 }
2564
2565 /*
2566 * Note: not called from probe context. This function is called
2567 * asynchronously from cross call context to clean any speculations that are
2568 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
2569 * transitioned back to the INACTIVE state until all CPUs have cleaned the
2570 * speculation.
2571 */
2572 static void
2573 dtrace_speculation_clean_here(dtrace_state_t *state)
2574 {
2575 dtrace_icookie_t cookie;
2576 processorid_t cpu = curcpu;
2577 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2578 dtrace_specid_t i;
2579
2580 cookie = dtrace_interrupt_disable();
2581
2582 if (dest->dtb_tomax == NULL) {
2583 dtrace_interrupt_enable(cookie);
2584 return;
2585 }
2586
2587 for (i = 0; i < state->dts_nspeculations; i++) {
2588 dtrace_speculation_t *spec = &state->dts_speculations[i];
2589 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2590
2591 if (src->dtb_tomax == NULL)
2592 continue;
2593
2594 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2595 src->dtb_offset = 0;
2596 continue;
2597 }
2598
2599 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2600 continue;
2601
2602 if (src->dtb_offset == 0)
2603 continue;
2604
2605 dtrace_speculation_commit(state, cpu, i + 1);
2606 }
2607
2608 dtrace_interrupt_enable(cookie);
2609 }
2610
2611 /*
2612 * Note: not called from probe context. This function is called
2613 * asynchronously (and at a regular interval) to clean any speculations that
2614 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
2615 * is work to be done, it cross calls all CPUs to perform that work;
2616 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the
2617 * INACTIVE state until they have been cleaned by all CPUs.
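 * The protocol is thus two-phase: dtsp_cleaning first marks each
 * speculation that has work pending, the cross call then drains (or
 * zeroes) every CPU's buffer, and only once the cross call has
 * completed everywhere is the state CAS'd back to INACTIVE.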
2618 */ 2619static void 2620dtrace_speculation_clean(dtrace_state_t *state) 2621{ 2622 int work = 0, rv; 2623 dtrace_specid_t i; 2624 2625 for (i = 0; i < state->dts_nspeculations; i++) { 2626 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2627 2628 ASSERT(!spec->dtsp_cleaning); 2629 2630 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2631 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2632 continue; 2633 2634 work++; 2635 spec->dtsp_cleaning = 1; 2636 } 2637 2638 if (!work) 2639 return; 2640 2641 dtrace_xcall(DTRACE_CPUALL, 2642 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2643 2644 /* 2645 * We now know that all CPUs have committed or discarded their 2646 * speculation buffers, as appropriate. We can now set the state 2647 * to inactive. 2648 */ 2649 for (i = 0; i < state->dts_nspeculations; i++) { 2650 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2651 dtrace_speculation_state_t current, new; 2652 2653 if (!spec->dtsp_cleaning) 2654 continue; 2655 2656 current = spec->dtsp_state; 2657 ASSERT(current == DTRACESPEC_DISCARDING || 2658 current == DTRACESPEC_COMMITTINGMANY); 2659 2660 new = DTRACESPEC_INACTIVE; 2661 2662 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2663 ASSERT(rv == current); 2664 spec->dtsp_cleaning = 0; 2665 } 2666} 2667 2668/* 2669 * Called as part of a speculate() to get the speculative buffer associated 2670 * with a given speculation. Returns NULL if the specified speculation is not 2671 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2672 * the active CPU is not the specified CPU -- the speculation will be 2673 * atomically transitioned into the ACTIVEMANY state. 2674 */ 2675static dtrace_buffer_t * 2676dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2677 dtrace_specid_t which) 2678{ 2679 dtrace_speculation_t *spec; 2680 dtrace_speculation_state_t current, new = 0; 2681 dtrace_buffer_t *buf; 2682 2683 if (which == 0) 2684 return (NULL); 2685 2686 if (which > state->dts_nspeculations) { 2687 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2688 return (NULL); 2689 } 2690 2691 spec = &state->dts_speculations[which - 1]; 2692 buf = &spec->dtsp_buffer[cpuid]; 2693 2694 do { 2695 current = spec->dtsp_state; 2696 2697 switch (current) { 2698 case DTRACESPEC_INACTIVE: 2699 case DTRACESPEC_COMMITTINGMANY: 2700 case DTRACESPEC_DISCARDING: 2701 return (NULL); 2702 2703 case DTRACESPEC_COMMITTING: 2704 ASSERT(buf->dtb_offset == 0); 2705 return (NULL); 2706 2707 case DTRACESPEC_ACTIVEONE: 2708 /* 2709 * This speculation is currently active on one CPU. 2710 * Check the offset in the buffer; if it's non-zero, 2711 * that CPU must be us (and we leave the state alone). 2712 * If it's zero, assume that we're starting on a new 2713 * CPU -- and change the state to indicate that the 2714 * speculation is active on more than one CPU. 2715 */ 2716 if (buf->dtb_offset != 0) 2717 return (buf); 2718 2719 new = DTRACESPEC_ACTIVEMANY; 2720 break; 2721 2722 case DTRACESPEC_ACTIVEMANY: 2723 return (buf); 2724 2725 case DTRACESPEC_ACTIVE: 2726 new = DTRACESPEC_ACTIVEONE; 2727 break; 2728 2729 default: 2730 ASSERT(0); 2731 } 2732 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2733 current, new) != current); 2734 2735 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2736 return (buf); 2737} 2738 2739/* 2740 * Return a string. 
In the event that the user lacks the privilege to access
2741 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2742 * don't fail access checking.
2743 *
2744 * dtrace_dif_variable() uses this routine as a helper for various
2745 * builtin values such as 'execname' and 'probefunc.'
2746 */
2747 uintptr_t
2748 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
2749 dtrace_mstate_t *mstate)
2750 {
2751 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
2752 uintptr_t ret;
2753 size_t strsz;
2754
2755 /*
2756 * The easy case: this probe is allowed to read all of memory, so
2757 * we can just return this as a vanilla pointer.
2758 */
2759 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
2760 return (addr);
2761
2762 /*
2763 * This is the tougher case: we copy the string in question from
2764 * kernel memory into scratch memory and return it that way: this
2765 * ensures that we won't trip up when access checking tests the
2766 * BYREF return value.
2767 */
2768 strsz = dtrace_strlen((char *)addr, size) + 1;
2769
2770 if (mstate->dtms_scratch_ptr + strsz >
2771 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2772 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2773 return (0);
2774 }
2775
2776 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2777 strsz);
2778 ret = mstate->dtms_scratch_ptr;
2779 mstate->dtms_scratch_ptr += strsz;
2780 return (ret);
2781 }
2782
2783 /*
2784 * Return a string from a memory address which is known to have one or
2785 * more concatenated, individually zero-terminated sub-strings.
2786 * In the event that the user lacks the privilege to access
2787 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2788 * don't fail access checking.
2789 *
2790 * dtrace_dif_variable() uses this routine as a helper for various
2791 * builtin values such as 'execargs'.
2792 */
2793 static uintptr_t
2794 dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state,
2795 dtrace_mstate_t *mstate)
2796 {
2797 char *p;
2798 size_t i;
2799 uintptr_t ret;
2800
2801 if (mstate->dtms_scratch_ptr + strsz >
2802 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2803 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2804 return (0);
2805 }
2806
2807 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2808 strsz);
2809
2810 /* Replace sub-string termination characters with a space. */
2811 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1;
2812 p++, i++)
2813 if (*p == '\0')
2814 *p = ' ';
2815
2816 ret = mstate->dtms_scratch_ptr;
2817 mstate->dtms_scratch_ptr += strsz;
2818 return (ret);
2819 }
2820
2821 /*
2822 * This function implements the DIF emulator's variable lookups. The emulator
2823 * passes a reserved variable identifier and optional built-in array index.
2824 */
2825 static uint64_t
2826 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
2827 uint64_t ndx)
2828 {
2829 /*
2830 * If we're accessing one of the uncached arguments, we'll turn this
2831 * into a reference in the args array.
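 * For example (purely illustrative), a reference to arg3 arrives
 * here as v == DIF_VAR_ARG3, and is rewritten below to v ==
 * DIF_VAR_ARGS with ndx == 3.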
2832 */
2833 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
2834 ndx = v - DIF_VAR_ARG0;
2835 v = DIF_VAR_ARGS;
2836 }
2837
2838 switch (v) {
2839 case DIF_VAR_ARGS:
2840 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
2841 if (ndx >= sizeof (mstate->dtms_arg) /
2842 sizeof (mstate->dtms_arg[0])) {
2843 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2844 dtrace_provider_t *pv;
2845 uint64_t val;
2846
2847 pv = mstate->dtms_probe->dtpr_provider;
2848 if (pv->dtpv_pops.dtps_getargval != NULL)
2849 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
2850 mstate->dtms_probe->dtpr_id,
2851 mstate->dtms_probe->dtpr_arg, ndx, aframes);
2852 else
2853 val = dtrace_getarg(ndx, aframes);
2854
2855 /*
2856 * This is regrettably required to keep the compiler
2857 * from tail-optimizing the call to dtrace_getarg().
2858 * The condition always evaluates to true, but the
2859 * compiler has no way of figuring that out a priori.
2860 * (None of this would be necessary if the compiler
2861 * could be relied upon to _always_ tail-optimize
2862 * the call to dtrace_getarg() -- but it can't.)
2863 */
2864 if (mstate->dtms_probe != NULL)
2865 return (val);
2866
2867 ASSERT(0);
2868 }
2869
2870 return (mstate->dtms_arg[ndx]);
2871
2872#if defined(sun)
2873 case DIF_VAR_UREGS: {
2874 klwp_t *lwp;
2875
2876 if (!dtrace_priv_proc(state))
2877 return (0);
2878
2879 if ((lwp = curthread->t_lwp) == NULL) {
2880 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2881 cpu_core[curcpu].cpuc_dtrace_illval = NULL;
2882 return (0);
2883 }
2884
2885 return (dtrace_getreg(lwp->lwp_regs, ndx));
2886
2887 }
2888#else
2889 case DIF_VAR_UREGS: {
2890 struct trapframe *tframe;
2891
2892 if (!dtrace_priv_proc(state))
2893 return (0);
2894
2895 if ((tframe = curthread->td_frame) == NULL) {
2896 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2897 cpu_core[curcpu].cpuc_dtrace_illval = 0;
2898 return (0);
2899 }
2900
2901 return (dtrace_getreg(tframe, ndx));
2902 }
2903#endif
2904
2905 case DIF_VAR_CURTHREAD:
2906 if (!dtrace_priv_kernel(state))
2907 return (0);
2908 return ((uint64_t)(uintptr_t)curthread);
2909
2910 case DIF_VAR_TIMESTAMP:
2911 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
2912 mstate->dtms_timestamp = dtrace_gethrtime();
2913 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
2914 }
2915 return (mstate->dtms_timestamp);
2916
2917 case DIF_VAR_VTIMESTAMP:
2918 ASSERT(dtrace_vtime_references != 0);
2919 return (curthread->t_dtrace_vtime);
2920
2921 case DIF_VAR_WALLTIMESTAMP:
2922 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
2923 mstate->dtms_walltimestamp = dtrace_gethrestime();
2924 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
2925 }
2926 return (mstate->dtms_walltimestamp);
2927
2928#if defined(sun)
2929 case DIF_VAR_IPL:
2930 if (!dtrace_priv_kernel(state))
2931 return (0);
2932 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
2933 mstate->dtms_ipl = dtrace_getipl();
2934 mstate->dtms_present |= DTRACE_MSTATE_IPL;
2935 }
2936 return (mstate->dtms_ipl);
2937#endif
2938
2939 case DIF_VAR_EPID:
2940 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
2941 return (mstate->dtms_epid);
2942
2943 case DIF_VAR_ID:
2944 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2945 return (mstate->dtms_probe->dtpr_id);
2946
2947 case DIF_VAR_STACKDEPTH:
2948 if (!dtrace_priv_kernel(state))
2949 return (0);
2950 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
2951 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2952
2953 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
2954
mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2955 } 2956 return (mstate->dtms_stackdepth); 2957 2958 case DIF_VAR_USTACKDEPTH: 2959 if (!dtrace_priv_proc(state)) 2960 return (0); 2961 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2962 /* 2963 * See comment in DIF_VAR_PID. 2964 */ 2965 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2966 CPU_ON_INTR(CPU)) { 2967 mstate->dtms_ustackdepth = 0; 2968 } else { 2969 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2970 mstate->dtms_ustackdepth = 2971 dtrace_getustackdepth(); 2972 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2973 } 2974 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2975 } 2976 return (mstate->dtms_ustackdepth); 2977 2978 case DIF_VAR_CALLER: 2979 if (!dtrace_priv_kernel(state)) 2980 return (0); 2981 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2982 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2983 2984 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2985 /* 2986 * If this is an unanchored probe, we are 2987 * required to go through the slow path: 2988 * dtrace_caller() only guarantees correct 2989 * results for anchored probes. 2990 */ 2991 pc_t caller[2] = {0, 0}; 2992 2993 dtrace_getpcstack(caller, 2, aframes, 2994 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2995 mstate->dtms_caller = caller[1]; 2996 } else if ((mstate->dtms_caller = 2997 dtrace_caller(aframes)) == -1) { 2998 /* 2999 * We have failed to do this the quick way; 3000 * we must resort to the slower approach of 3001 * calling dtrace_getpcstack(). 3002 */ 3003 pc_t caller = 0; 3004 3005 dtrace_getpcstack(&caller, 1, aframes, NULL); 3006 mstate->dtms_caller = caller; 3007 } 3008 3009 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 3010 } 3011 return (mstate->dtms_caller); 3012 3013 case DIF_VAR_UCALLER: 3014 if (!dtrace_priv_proc(state)) 3015 return (0); 3016 3017 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 3018 uint64_t ustack[3]; 3019 3020 /* 3021 * dtrace_getupcstack() fills in the first uint64_t 3022 * with the current PID. The second uint64_t will 3023 * be the program counter at user-level. The third 3024 * uint64_t will contain the caller, which is what 3025 * we're after. 3026 */ 3027 ustack[2] = 0; 3028 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3029 dtrace_getupcstack(ustack, 3); 3030 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3031 mstate->dtms_ucaller = ustack[2]; 3032 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 3033 } 3034 3035 return (mstate->dtms_ucaller); 3036 3037 case DIF_VAR_PROBEPROV: 3038 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3039 return (dtrace_dif_varstr( 3040 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 3041 state, mstate)); 3042 3043 case DIF_VAR_PROBEMOD: 3044 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3045 return (dtrace_dif_varstr( 3046 (uintptr_t)mstate->dtms_probe->dtpr_mod, 3047 state, mstate)); 3048 3049 case DIF_VAR_PROBEFUNC: 3050 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3051 return (dtrace_dif_varstr( 3052 (uintptr_t)mstate->dtms_probe->dtpr_func, 3053 state, mstate)); 3054 3055 case DIF_VAR_PROBENAME: 3056 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3057 return (dtrace_dif_varstr( 3058 (uintptr_t)mstate->dtms_probe->dtpr_name, 3059 state, mstate)); 3060 3061 case DIF_VAR_PID: 3062 if (!dtrace_priv_proc(state)) 3063 return (0); 3064 3065#if defined(sun) 3066 /* 3067 * Note that we are assuming that an unanchored probe is 3068 * always due to a high-level interrupt. (And we're assuming 3069 * that there is only a single high level interrupt.) 
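 * When an anchored probe fires at high interrupt level, we therefore
 * report pid 0 (that of p0) rather than the pid of whatever process
 * the interrupt happened to preempt.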
3070 */
3071 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3072 return (pid0.pid_id);
3073
3074 /*
3075 * It is always safe to dereference one's own t_procp pointer:
3076 * it always points to a valid, allocated proc structure.
3077 * Further, it is always safe to dereference the p_pidp member
3078 * of one's own proc structure. (These are truisms because
3079 * threads and processes don't clean up their own state --
3080 * they leave that task to whomever reaps them.)
3081 */
3082 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
3083#else
3084 return ((uint64_t)curproc->p_pid);
3085#endif
3086
3087 case DIF_VAR_PPID:
3088 if (!dtrace_priv_proc(state))
3089 return (0);
3090
3091#if defined(sun)
3092 /*
3093 * See comment in DIF_VAR_PID.
3094 */
3095 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3096 return (pid0.pid_id);
3097
3098 /*
3099 * It is always safe to dereference one's own t_procp pointer:
3100 * it always points to a valid, allocated proc structure.
3101 * (This is true because threads don't clean up their own
3102 * state -- they leave that task to whomever reaps them.)
3103 */
3104 return ((uint64_t)curthread->t_procp->p_ppid);
3105#else
3106 return ((uint64_t)curproc->p_pptr->p_pid);
3107#endif
3108
3109 case DIF_VAR_TID:
3110#if defined(sun)
3111 /*
3112 * See comment in DIF_VAR_PID.
3113 */
3114 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3115 return (0);
3116#endif
3117
3118 return ((uint64_t)curthread->t_tid);
3119
3120 case DIF_VAR_EXECARGS: {
3121 struct pargs *p_args = curthread->td_proc->p_args;
3122
3123 if (p_args == NULL)
3124 return (0);
3125
3126 return (dtrace_dif_varstrz(
3127 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate));
3128 }
3129
3130 case DIF_VAR_EXECNAME:
3131#if defined(sun)
3132 if (!dtrace_priv_proc(state))
3133 return (0);
3134
3135 /*
3136 * See comment in DIF_VAR_PID.
3137 */
3138 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3139 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3140
3141 /*
3142 * It is always safe to dereference one's own t_procp pointer:
3143 * it always points to a valid, allocated proc structure.
3144 * (This is true because threads don't clean up their own
3145 * state -- they leave that task to whomever reaps them.)
3146 */
3147 return (dtrace_dif_varstr(
3148 (uintptr_t)curthread->t_procp->p_user.u_comm,
3149 state, mstate));
3150#else
3151 return (dtrace_dif_varstr(
3152 (uintptr_t) curthread->td_proc->p_comm, state, mstate));
3153#endif
3154
3155 case DIF_VAR_ZONENAME:
3156#if defined(sun)
3157 if (!dtrace_priv_proc(state))
3158 return (0);
3159
3160 /*
3161 * See comment in DIF_VAR_PID.
3162 */
3163 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3164 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3165
3166 /*
3167 * It is always safe to dereference one's own t_procp pointer:
3168 * it always points to a valid, allocated proc structure.
3169 * (This is true because threads don't clean up their own
3170 * state -- they leave that task to whomever reaps them.)
3171 */
3172 return (dtrace_dif_varstr(
3173 (uintptr_t)curthread->t_procp->p_zone->zone_name,
3174 state, mstate));
3175#else
3176 return (0);
3177#endif
3178
3179 case DIF_VAR_UID:
3180 if (!dtrace_priv_proc(state))
3181 return (0);
3182
3183#if defined(sun)
3184 /*
3185 * See comment in DIF_VAR_PID.
3186 */ 3187 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3188 return ((uint64_t)p0.p_cred->cr_uid); 3189#endif 3190 3191 /* 3192 * It is always safe to dereference one's own t_procp pointer: 3193 * it always points to a valid, allocated proc structure. 3194 * (This is true because threads don't clean up their own 3195 * state -- they leave that task to whomever reaps them.) 3196 * 3197 * Additionally, it is safe to dereference one's own process 3198 * credential, since this is never NULL after process birth. 3199 */ 3200 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3201 3202 case DIF_VAR_GID: 3203 if (!dtrace_priv_proc(state)) 3204 return (0); 3205 3206#if defined(sun) 3207 /* 3208 * See comment in DIF_VAR_PID. 3209 */ 3210 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3211 return ((uint64_t)p0.p_cred->cr_gid); 3212#endif 3213 3214 /* 3215 * It is always safe to dereference one's own t_procp pointer: 3216 * it always points to a valid, allocated proc structure. 3217 * (This is true because threads don't clean up their own 3218 * state -- they leave that task to whomever reaps them.) 3219 * 3220 * Additionally, it is safe to dereference one's own process 3221 * credential, since this is never NULL after process birth. 3222 */ 3223 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3224 3225 case DIF_VAR_ERRNO: { 3226#if defined(sun) 3227 klwp_t *lwp; 3228 if (!dtrace_priv_proc(state)) 3229 return (0); 3230 3231 /* 3232 * See comment in DIF_VAR_PID. 3233 */ 3234 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3235 return (0); 3236 3237 /* 3238 * It is always safe to dereference one's own t_lwp pointer in 3239 * the event that this pointer is non-NULL. (This is true 3240 * because threads and lwps don't clean up their own state -- 3241 * they leave that task to whomever reaps them.) 3242 */ 3243 if ((lwp = curthread->t_lwp) == NULL) 3244 return (0); 3245 3246 return ((uint64_t)lwp->lwp_errno); 3247#else 3248 return (curthread->td_errno); 3249#endif 3250 } 3251#if !defined(sun) 3252 case DIF_VAR_CPU: { 3253 return curcpu; 3254 } 3255#endif 3256 default: 3257 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3258 return (0); 3259 } 3260} 3261 3262/* 3263 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 3264 * Notice that we don't bother validating the proper number of arguments or 3265 * their types in the tuple stack. This isn't needed because all argument 3266 * interpretation is safe because of our load safety -- the worst that can 3267 * happen is that a bogus program can obtain bogus results. 
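 * By convention, a subroutine's arguments arrive in tupregs[0]
 * through tupregs[nargs - 1], and its result is left in regs[rd].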
3268 */ 3269static void 3270dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 3271 dtrace_key_t *tupregs, int nargs, 3272 dtrace_mstate_t *mstate, dtrace_state_t *state) 3273{ 3274 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 3275 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 3276 dtrace_vstate_t *vstate = &state->dts_vstate; 3277 3278#if defined(sun) 3279 union { 3280 mutex_impl_t mi; 3281 uint64_t mx; 3282 } m; 3283 3284 union { 3285 krwlock_t ri; 3286 uintptr_t rw; 3287 } r; 3288#else 3289 struct thread *lowner; 3290 union { 3291 struct lock_object *li; 3292 uintptr_t lx; 3293 } l; 3294#endif 3295 3296 switch (subr) { 3297 case DIF_SUBR_RAND: 3298 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 3299 break; 3300 3301#if defined(sun) 3302 case DIF_SUBR_MUTEX_OWNED: 3303 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3304 mstate, vstate)) { 3305 regs[rd] = 0; 3306 break; 3307 } 3308 3309 m.mx = dtrace_load64(tupregs[0].dttk_value); 3310 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3311 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3312 else 3313 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 3314 break; 3315 3316 case DIF_SUBR_MUTEX_OWNER: 3317 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3318 mstate, vstate)) { 3319 regs[rd] = 0; 3320 break; 3321 } 3322 3323 m.mx = dtrace_load64(tupregs[0].dttk_value); 3324 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3325 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3326 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3327 else 3328 regs[rd] = 0; 3329 break; 3330 3331 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3332 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3333 mstate, vstate)) { 3334 regs[rd] = 0; 3335 break; 3336 } 3337 3338 m.mx = dtrace_load64(tupregs[0].dttk_value); 3339 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3340 break; 3341 3342 case DIF_SUBR_MUTEX_TYPE_SPIN: 3343 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3344 mstate, vstate)) { 3345 regs[rd] = 0; 3346 break; 3347 } 3348 3349 m.mx = dtrace_load64(tupregs[0].dttk_value); 3350 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3351 break; 3352 3353 case DIF_SUBR_RW_READ_HELD: { 3354 uintptr_t tmp; 3355 3356 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3357 mstate, vstate)) { 3358 regs[rd] = 0; 3359 break; 3360 } 3361 3362 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3363 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 3364 break; 3365 } 3366 3367 case DIF_SUBR_RW_WRITE_HELD: 3368 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3369 mstate, vstate)) { 3370 regs[rd] = 0; 3371 break; 3372 } 3373 3374 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3375 regs[rd] = _RW_WRITE_HELD(&r.ri); 3376 break; 3377 3378 case DIF_SUBR_RW_ISWRITER: 3379 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3380 mstate, vstate)) { 3381 regs[rd] = 0; 3382 break; 3383 } 3384 3385 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3386 regs[rd] = _RW_ISWRITER(&r.ri); 3387 break; 3388 3389#else 3390 case DIF_SUBR_MUTEX_OWNED: 3391 if (!dtrace_canload(tupregs[0].dttk_value, 3392 sizeof (struct lock_object), mstate, vstate)) { 3393 regs[rd] = 0; 3394 break; 3395 } 3396 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3397 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3398 break; 3399 3400 case DIF_SUBR_MUTEX_OWNER: 3401 if (!dtrace_canload(tupregs[0].dttk_value, 3402 sizeof (struct lock_object), mstate, vstate)) { 3403 regs[rd] = 0; 3404 break; 3405 } 3406 l.lx = 
dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3407 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3408 regs[rd] = (uintptr_t)lowner; 3409 break; 3410 3411 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3412 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3413 mstate, vstate)) { 3414 regs[rd] = 0; 3415 break; 3416 } 3417 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3418 /* XXX - should be only LC_SLEEPABLE? */ 3419 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & 3420 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0; 3421 break; 3422 3423 case DIF_SUBR_MUTEX_TYPE_SPIN: 3424 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3425 mstate, vstate)) { 3426 regs[rd] = 0; 3427 break; 3428 } 3429 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3430 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; 3431 break; 3432 3433 case DIF_SUBR_RW_READ_HELD: 3434 case DIF_SUBR_SX_SHARED_HELD: 3435 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3436 mstate, vstate)) { 3437 regs[rd] = 0; 3438 break; 3439 } 3440 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3441 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3442 lowner == NULL; 3443 break; 3444 3445 case DIF_SUBR_RW_WRITE_HELD: 3446 case DIF_SUBR_SX_EXCLUSIVE_HELD: 3447 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3448 mstate, vstate)) { 3449 regs[rd] = 0; 3450 break; 3451 } 3452 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3453 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3454 regs[rd] = (lowner == curthread); 3455 break; 3456 3457 case DIF_SUBR_RW_ISWRITER: 3458 case DIF_SUBR_SX_ISEXCLUSIVE: 3459 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3460 mstate, vstate)) { 3461 regs[rd] = 0; 3462 break; 3463 } 3464 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3465 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3466 lowner != NULL; 3467 break; 3468#endif /* ! defined(sun) */ 3469 3470 case DIF_SUBR_BCOPY: { 3471 /* 3472 * We need to be sure that the destination is in the scratch 3473 * region -- no other region is allowed. 3474 */ 3475 uintptr_t src = tupregs[0].dttk_value; 3476 uintptr_t dest = tupregs[1].dttk_value; 3477 size_t size = tupregs[2].dttk_value; 3478 3479 if (!dtrace_inscratch(dest, size, mstate)) { 3480 *flags |= CPU_DTRACE_BADADDR; 3481 *illval = regs[rd]; 3482 break; 3483 } 3484 3485 if (!dtrace_canload(src, size, mstate, vstate)) { 3486 regs[rd] = 0; 3487 break; 3488 } 3489 3490 dtrace_bcopy((void *)src, (void *)dest, size); 3491 break; 3492 } 3493 3494 case DIF_SUBR_ALLOCA: 3495 case DIF_SUBR_COPYIN: { 3496 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 3497 uint64_t size = 3498 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 3499 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 3500 3501 /* 3502 * This action doesn't require any credential checks since 3503 * probes will not activate in user contexts to which the 3504 * enabling user does not have permissions. 3505 */ 3506 3507 /* 3508 * Rounding up the user allocation size could have overflowed 3509 * a large, bogus allocation (like -1ULL) to 0. 
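 * For example (illustrative arithmetic): a requested size of -1ULL
 * combined with an alignment slack of, say, 4 bytes wraps
 * scratch_size around to 3, which the scratch_size < size test
 * below catches.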
3510 */
3511 if (scratch_size < size ||
3512 !DTRACE_INSCRATCH(mstate, scratch_size)) {
3513 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3514 regs[rd] = 0;
3515 break;
3516 }
3517
3518 if (subr == DIF_SUBR_COPYIN) {
3519 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3520 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3521 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3522 }
3523
3524 mstate->dtms_scratch_ptr += scratch_size;
3525 regs[rd] = dest;
3526 break;
3527 }
3528
3529 case DIF_SUBR_COPYINTO: {
3530 uint64_t size = tupregs[1].dttk_value;
3531 uintptr_t dest = tupregs[2].dttk_value;
3532
3533 /*
3534 * This action doesn't require any credential checks since
3535 * probes will not activate in user contexts to which the
3536 * enabling user does not have permissions.
3537 */
3538 if (!dtrace_inscratch(dest, size, mstate)) {
3539 *flags |= CPU_DTRACE_BADADDR;
3540 *illval = regs[rd];
3541 break;
3542 }
3543
3544 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3545 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3546 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3547 break;
3548 }
3549
3550 case DIF_SUBR_COPYINSTR: {
3551 uintptr_t dest = mstate->dtms_scratch_ptr;
3552 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3553
3554 if (nargs > 1 && tupregs[1].dttk_value < size)
3555 size = tupregs[1].dttk_value + 1;
3556
3557 /*
3558 * This action doesn't require any credential checks since
3559 * probes will not activate in user contexts to which the
3560 * enabling user does not have permissions.
3561 */
3562 if (!DTRACE_INSCRATCH(mstate, size)) {
3563 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3564 regs[rd] = 0;
3565 break;
3566 }
3567
3568 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3569 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
3570 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3571
3572 ((char *)dest)[size - 1] = '\0';
3573 mstate->dtms_scratch_ptr += size;
3574 regs[rd] = dest;
3575 break;
3576 }
3577
3578#if defined(sun)
3579 case DIF_SUBR_MSGSIZE:
3580 case DIF_SUBR_MSGDSIZE: {
3581 uintptr_t baddr = tupregs[0].dttk_value, daddr;
3582 uintptr_t wptr, rptr;
3583 size_t count = 0;
3584 int cont = 0;
3585
3586 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) {
3587
3588 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
3589 vstate)) {
3590 regs[rd] = 0;
3591 break;
3592 }
3593
3594 wptr = dtrace_loadptr(baddr +
3595 offsetof(mblk_t, b_wptr));
3596
3597 rptr = dtrace_loadptr(baddr +
3598 offsetof(mblk_t, b_rptr));
3599
3600 if (wptr < rptr) {
3601 *flags |= CPU_DTRACE_BADADDR;
3602 *illval = tupregs[0].dttk_value;
3603 break;
3604 }
3605
3606 daddr = dtrace_loadptr(baddr +
3607 offsetof(mblk_t, b_datap));
3608
3609 baddr = dtrace_loadptr(baddr +
3610 offsetof(mblk_t, b_cont));
3611
3612 /*
3613 * We want to protect against denial-of-service here,
3614 * so we're only going to search the list for
3615 * dtrace_msgdsize_max mblks.
3616 */ 3617 if (cont++ > dtrace_msgdsize_max) { 3618 *flags |= CPU_DTRACE_ILLOP; 3619 break; 3620 } 3621 3622 if (subr == DIF_SUBR_MSGDSIZE) { 3623 if (dtrace_load8(daddr + 3624 offsetof(dblk_t, db_type)) != M_DATA) 3625 continue; 3626 } 3627 3628 count += wptr - rptr; 3629 } 3630 3631 if (!(*flags & CPU_DTRACE_FAULT)) 3632 regs[rd] = count; 3633 3634 break; 3635 } 3636#endif 3637 3638 case DIF_SUBR_PROGENYOF: { 3639 pid_t pid = tupregs[0].dttk_value; 3640 proc_t *p; 3641 int rval = 0; 3642 3643 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3644 3645 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3646#if defined(sun) 3647 if (p->p_pidp->pid_id == pid) { 3648#else 3649 if (p->p_pid == pid) { 3650#endif 3651 rval = 1; 3652 break; 3653 } 3654 } 3655 3656 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3657 3658 regs[rd] = rval; 3659 break; 3660 } 3661 3662 case DIF_SUBR_SPECULATION: 3663 regs[rd] = dtrace_speculation(state); 3664 break; 3665 3666 case DIF_SUBR_COPYOUT: { 3667 uintptr_t kaddr = tupregs[0].dttk_value; 3668 uintptr_t uaddr = tupregs[1].dttk_value; 3669 uint64_t size = tupregs[2].dttk_value; 3670 3671 if (!dtrace_destructive_disallow && 3672 dtrace_priv_proc_control(state) && 3673 !dtrace_istoxic(kaddr, size)) { 3674 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3675 dtrace_copyout(kaddr, uaddr, size, flags); 3676 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3677 } 3678 break; 3679 } 3680 3681 case DIF_SUBR_COPYOUTSTR: { 3682 uintptr_t kaddr = tupregs[0].dttk_value; 3683 uintptr_t uaddr = tupregs[1].dttk_value; 3684 uint64_t size = tupregs[2].dttk_value; 3685 3686 if (!dtrace_destructive_disallow && 3687 dtrace_priv_proc_control(state) && 3688 !dtrace_istoxic(kaddr, size)) { 3689 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3690 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3691 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3692 } 3693 break; 3694 } 3695 3696 case DIF_SUBR_STRLEN: { 3697 size_t sz; 3698 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3699 sz = dtrace_strlen((char *)addr, 3700 state->dts_options[DTRACEOPT_STRSIZE]); 3701 3702 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3703 regs[rd] = 0; 3704 break; 3705 } 3706 3707 regs[rd] = sz; 3708 3709 break; 3710 } 3711 3712 case DIF_SUBR_STRCHR: 3713 case DIF_SUBR_STRRCHR: { 3714 /* 3715 * We're going to iterate over the string looking for the 3716 * specified character. We will iterate until we have reached 3717 * the string length or we have found the character. If this 3718 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3719 * of the specified character instead of the first. 3720 */ 3721 uintptr_t saddr = tupregs[0].dttk_value; 3722 uintptr_t addr = tupregs[0].dttk_value; 3723 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3724 char c, target = (char)tupregs[1].dttk_value; 3725 3726 for (regs[rd] = 0; addr < limit; addr++) { 3727 if ((c = dtrace_load8(addr)) == target) { 3728 regs[rd] = addr; 3729 3730 if (subr == DIF_SUBR_STRCHR) 3731 break; 3732 } 3733 3734 if (c == '\0') 3735 break; 3736 } 3737 3738 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3739 regs[rd] = 0; 3740 break; 3741 } 3742 3743 break; 3744 } 3745 3746 case DIF_SUBR_STRSTR: 3747 case DIF_SUBR_INDEX: 3748 case DIF_SUBR_RINDEX: { 3749 /* 3750 * We're going to iterate over the string looking for the 3751 * specified string. We will iterate until we have reached 3752 * the string length or we have found the string. 
(Yes, this 3753 * is done in the most naive way possible -- but considering 3754 * that the string we're searching for is likely to be 3755 * relatively short, the complexity of Rabin-Karp or similar 3756 * hardly seems merited.) 3757 */ 3758 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3759 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3760 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3761 size_t len = dtrace_strlen(addr, size); 3762 size_t sublen = dtrace_strlen(substr, size); 3763 char *limit = addr + len, *orig = addr; 3764 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3765 int inc = 1; 3766 3767 regs[rd] = notfound; 3768 3769 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3770 regs[rd] = 0; 3771 break; 3772 } 3773 3774 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3775 vstate)) { 3776 regs[rd] = 0; 3777 break; 3778 } 3779 3780 /* 3781 * strstr() and index()/rindex() have similar semantics if 3782 * both strings are the empty string: strstr() returns a 3783 * pointer to the (empty) string, and index() and rindex() 3784 * both return index 0 (regardless of any position argument). 3785 */ 3786 if (sublen == 0 && len == 0) { 3787 if (subr == DIF_SUBR_STRSTR) 3788 regs[rd] = (uintptr_t)addr; 3789 else 3790 regs[rd] = 0; 3791 break; 3792 } 3793 3794 if (subr != DIF_SUBR_STRSTR) { 3795 if (subr == DIF_SUBR_RINDEX) { 3796 limit = orig - 1; 3797 addr += len; 3798 inc = -1; 3799 } 3800 3801 /* 3802 * Both index() and rindex() take an optional position 3803 * argument that denotes the starting position. 3804 */ 3805 if (nargs == 3) { 3806 int64_t pos = (int64_t)tupregs[2].dttk_value; 3807 3808 /* 3809 * If the position argument to index() is 3810 * negative, Perl implicitly clamps it at 3811 * zero. This semantic is a little surprising 3812 * given the special meaning of negative 3813 * positions to similar Perl functions like 3814 * substr(), but it appears to reflect a 3815 * notion that index() can start from a 3816 * negative index and increment its way up to 3817 * the string. Given this notion, Perl's 3818 * rindex() is at least self-consistent in 3819 * that it implicitly clamps positions greater 3820 * than the string length to be the string 3821 * length. Where Perl completely loses 3822 * coherence, however, is when the specified 3823 * substring is the empty string (""). In 3824 * this case, even if the position is 3825 * negative, rindex() returns 0 -- and even if 3826 * the position is greater than the length, 3827 * index() returns the string length. These 3828 * semantics violate the notion that index() 3829 * should never return a value less than the 3830 * specified position and that rindex() should 3831 * never return a value greater than the 3832 * specified position. (One assumes that 3833 * these semantics are artifacts of Perl's 3834 * implementation and not the results of 3835 * deliberate design -- it beggars belief that 3836 * even Larry Wall could desire such oddness.) 3837 * While in the abstract one would wish for 3838 * consistent position semantics across 3839 * substr(), index() and rindex() -- or at the 3840 * very least self-consistent position 3841 * semantics for index() and rindex() -- we 3842 * instead opt to keep with the extant Perl 3843 * semantics, in all their broken glory. (Do 3844 * we have more desire to maintain Perl's 3845 * semantics than Perl does? Probably.) 
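 * Concretely, the clamping below amounts to the following
 * (illustrative summary):
 *
 *	index(s, t, pos < 0)		pos is clamped to 0
 *	index(s, t, pos >= len)		len if t is empty, else not found
 *	rindex(s, t, pos < 0)		0 if t is empty, else not found
 *	rindex(s, t, pos > len)		pos is clamped to len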
3846 */ 3847 if (subr == DIF_SUBR_RINDEX) { 3848 if (pos < 0) { 3849 if (sublen == 0) 3850 regs[rd] = 0; 3851 break; 3852 } 3853 3854 if (pos > len) 3855 pos = len; 3856 } else { 3857 if (pos < 0) 3858 pos = 0; 3859 3860 if (pos >= len) { 3861 if (sublen == 0) 3862 regs[rd] = len; 3863 break; 3864 } 3865 } 3866 3867 addr = orig + pos; 3868 } 3869 } 3870 3871 for (regs[rd] = notfound; addr != limit; addr += inc) { 3872 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3873 if (subr != DIF_SUBR_STRSTR) { 3874 /* 3875 * As D index() and rindex() are 3876 * modeled on Perl (and not on awk), 3877 * we return a zero-based (and not a 3878 * one-based) index. (For you Perl 3879 * weenies: no, we're not going to add 3880 * $[ -- and shouldn't you be at a con 3881 * or something?) 3882 */ 3883 regs[rd] = (uintptr_t)(addr - orig); 3884 break; 3885 } 3886 3887 ASSERT(subr == DIF_SUBR_STRSTR); 3888 regs[rd] = (uintptr_t)addr; 3889 break; 3890 } 3891 } 3892 3893 break; 3894 } 3895 3896 case DIF_SUBR_STRTOK: { 3897 uintptr_t addr = tupregs[0].dttk_value; 3898 uintptr_t tokaddr = tupregs[1].dttk_value; 3899 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3900 uintptr_t limit, toklimit = tokaddr + size; 3901 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 3902 char *dest = (char *)mstate->dtms_scratch_ptr; 3903 int i; 3904 3905 /* 3906 * Check both the token buffer and (later) the input buffer, 3907 * since both could be non-scratch addresses. 3908 */ 3909 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3910 regs[rd] = 0; 3911 break; 3912 } 3913 3914 if (!DTRACE_INSCRATCH(mstate, size)) { 3915 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3916 regs[rd] = 0; 3917 break; 3918 } 3919 3920 if (addr == 0) { 3921 /* 3922 * If the address specified is NULL, we use our saved 3923 * strtok pointer from the mstate. Note that this 3924 * means that the saved strtok pointer is _only_ 3925 * valid within multiple enablings of the same probe -- 3926 * it behaves like an implicit clause-local variable. 3927 */ 3928 addr = mstate->dtms_strtok; 3929 } else { 3930 /* 3931 * If the user-specified address is non-NULL we must 3932 * access check it. This is the only time we have 3933 * a chance to do so, since this address may reside 3934 * in the string table of this clause-- future calls 3935 * (when we fetch addr from mstate->dtms_strtok) 3936 * would fail this access check. 3937 */ 3938 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3939 regs[rd] = 0; 3940 break; 3941 } 3942 } 3943 3944 /* 3945 * First, zero the token map, and then process the token 3946 * string -- setting a bit in the map for every character 3947 * found in the token string. 3948 */ 3949 for (i = 0; i < sizeof (tokmap); i++) 3950 tokmap[i] = 0; 3951 3952 for (; tokaddr < toklimit; tokaddr++) { 3953 if ((c = dtrace_load8(tokaddr)) == '\0') 3954 break; 3955 3956 ASSERT((c >> 3) < sizeof (tokmap)); 3957 tokmap[c >> 3] |= (1 << (c & 0x7)); 3958 } 3959 3960 for (limit = addr + size; addr < limit; addr++) { 3961 /* 3962 * We're looking for a character that is _not_ contained 3963 * in the token string. 3964 */ 3965 if ((c = dtrace_load8(addr)) == '\0') 3966 break; 3967 3968 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3969 break; 3970 } 3971 3972 if (c == '\0') { 3973 /* 3974 * We reached the end of the string without finding 3975 * any character that was not in the token string. 3976 * We return NULL in this case, and we set the saved 3977 * address to NULL as well. 
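 * (This mirrors the familiar strtok(3C) behavior of returning NULL
 * once the input string is exhausted.)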
3978 */ 3979 regs[rd] = 0; 3980 mstate->dtms_strtok = 0; 3981 break; 3982 } 3983 3984 /* 3985 * From here on, we're copying into the destination string. 3986 */ 3987 for (i = 0; addr < limit && i < size - 1; addr++) { 3988 if ((c = dtrace_load8(addr)) == '\0') 3989 break; 3990 3991 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3992 break; 3993 3994 ASSERT(i < size); 3995 dest[i++] = c; 3996 } 3997 3998 ASSERT(i < size); 3999 dest[i] = '\0'; 4000 regs[rd] = (uintptr_t)dest; 4001 mstate->dtms_scratch_ptr += size; 4002 mstate->dtms_strtok = addr; 4003 break; 4004 } 4005 4006 case DIF_SUBR_SUBSTR: { 4007 uintptr_t s = tupregs[0].dttk_value; 4008 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4009 char *d = (char *)mstate->dtms_scratch_ptr; 4010 int64_t index = (int64_t)tupregs[1].dttk_value; 4011 int64_t remaining = (int64_t)tupregs[2].dttk_value; 4012 size_t len = dtrace_strlen((char *)s, size); 4013 int64_t i = 0; 4014 4015 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4016 regs[rd] = 0; 4017 break; 4018 } 4019 4020 if (!DTRACE_INSCRATCH(mstate, size)) { 4021 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4022 regs[rd] = 0; 4023 break; 4024 } 4025 4026 if (nargs <= 2) 4027 remaining = (int64_t)size; 4028 4029 if (index < 0) { 4030 index += len; 4031 4032 if (index < 0 && index + remaining > 0) { 4033 remaining += index; 4034 index = 0; 4035 } 4036 } 4037 4038 if (index >= len || index < 0) { 4039 remaining = 0; 4040 } else if (remaining < 0) { 4041 remaining += len - index; 4042 } else if (index + remaining > size) { 4043 remaining = size - index; 4044 } 4045 4046 for (i = 0; i < remaining; i++) { 4047 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 4048 break; 4049 } 4050 4051 d[i] = '\0'; 4052 4053 mstate->dtms_scratch_ptr += size; 4054 regs[rd] = (uintptr_t)d; 4055 break; 4056 } 4057 4058 case DIF_SUBR_TOUPPER: 4059 case DIF_SUBR_TOLOWER: { 4060 uintptr_t s = tupregs[0].dttk_value; 4061 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4062 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4063 size_t len = dtrace_strlen((char *)s, size); 4064 char lower, upper, convert; 4065 int64_t i; 4066 4067 if (subr == DIF_SUBR_TOUPPER) { 4068 lower = 'a'; 4069 upper = 'z'; 4070 convert = 'A'; 4071 } else { 4072 lower = 'A'; 4073 upper = 'Z'; 4074 convert = 'a'; 4075 } 4076 4077 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4078 regs[rd] = 0; 4079 break; 4080 } 4081 4082 if (!DTRACE_INSCRATCH(mstate, size)) { 4083 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4084 regs[rd] = 0; 4085 break; 4086 } 4087 4088 for (i = 0; i < size - 1; i++) { 4089 if ((c = dtrace_load8(s + i)) == '\0') 4090 break; 4091 4092 if (c >= lower && c <= upper) 4093 c = convert + (c - lower); 4094 4095 dest[i] = c; 4096 } 4097 4098 ASSERT(i < size); 4099 dest[i] = '\0'; 4100 regs[rd] = (uintptr_t)dest; 4101 mstate->dtms_scratch_ptr += size; 4102 break; 4103 } 4104 4105#if defined(sun) 4106 case DIF_SUBR_GETMAJOR: 4107#ifdef _LP64 4108 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 4109#else 4110 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 4111#endif 4112 break; 4113 4114 case DIF_SUBR_GETMINOR: 4115#ifdef _LP64 4116 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 4117#else 4118 regs[rd] = tupregs[0].dttk_value & MAXMIN; 4119#endif 4120 break; 4121 4122 case DIF_SUBR_DDI_PATHNAME: { 4123 /* 4124 * This one is a galactic mess. 
We are going to roughly 4125 * emulate ddi_pathname(), but it's made more complicated 4126 * by the fact that we (a) want to include the minor name and 4127 * (b) must proceed iteratively instead of recursively. 4128 */ 4129 uintptr_t dest = mstate->dtms_scratch_ptr; 4130 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4131 char *start = (char *)dest, *end = start + size - 1; 4132 uintptr_t daddr = tupregs[0].dttk_value; 4133 int64_t minor = (int64_t)tupregs[1].dttk_value; 4134 char *s; 4135 int i, len, depth = 0; 4136 4137 /* 4138 * Due to all the pointer jumping we do and context we must 4139 * rely upon, we just mandate that the user must have kernel 4140 * read privileges to use this routine. 4141 */ 4142 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 4143 *flags |= CPU_DTRACE_KPRIV; 4144 *illval = daddr; 4145 regs[rd] = 0; 4146 } 4147 4148 if (!DTRACE_INSCRATCH(mstate, size)) { 4149 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4150 regs[rd] = 0; 4151 break; 4152 } 4153 4154 *end = '\0'; 4155 4156 /* 4157 * We want to have a name for the minor. In order to do this, 4158 * we need to walk the minor list from the devinfo. We want 4159 * to be sure that we don't infinitely walk a circular list, 4160 * so we check for circularity by sending a scout pointer 4161 * ahead two elements for every element that we iterate over; 4162 * if the list is circular, these will ultimately point to the 4163 * same element. You may recognize this little trick as the 4164 * answer to a stupid interview question -- one that always 4165 * seems to be asked by those who had to have it laboriously 4166 * explained to them, and who can't even concisely describe 4167 * the conditions under which one would be forced to resort to 4168 * this technique. Needless to say, those conditions are 4169 * found here -- and probably only here. Is this the only use 4170 * of this infamous trick in shipping, production code? If it 4171 * isn't, it probably should be... 4172 */ 4173 if (minor != -1) { 4174 uintptr_t maddr = dtrace_loadptr(daddr + 4175 offsetof(struct dev_info, devi_minor)); 4176 4177 uintptr_t next = offsetof(struct ddi_minor_data, next); 4178 uintptr_t name = offsetof(struct ddi_minor_data, 4179 d_minor) + offsetof(struct ddi_minor, name); 4180 uintptr_t dev = offsetof(struct ddi_minor_data, 4181 d_minor) + offsetof(struct ddi_minor, dev); 4182 uintptr_t scout; 4183 4184 if (maddr != NULL) 4185 scout = dtrace_loadptr(maddr + next); 4186 4187 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4188 uint64_t m; 4189#ifdef _LP64 4190 m = dtrace_load64(maddr + dev) & MAXMIN64; 4191#else 4192 m = dtrace_load32(maddr + dev) & MAXMIN; 4193#endif 4194 if (m != minor) { 4195 maddr = dtrace_loadptr(maddr + next); 4196 4197 if (scout == NULL) 4198 continue; 4199 4200 scout = dtrace_loadptr(scout + next); 4201 4202 if (scout == NULL) 4203 continue; 4204 4205 scout = dtrace_loadptr(scout + next); 4206 4207 if (scout == NULL) 4208 continue; 4209 4210 if (scout == maddr) { 4211 *flags |= CPU_DTRACE_ILLOP; 4212 break; 4213 } 4214 4215 continue; 4216 } 4217 4218 /* 4219 * We have the minor data. Now we need to 4220 * copy the minor's name into the end of the 4221 * pathname. 
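 * (Note that the pathname is assembled back to front: each component
 * is prepended by walking 'end' backwards from the tail of the
 * scratch region, which is why the copy below lands at the end of
 * the buffer.)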
4222 */ 4223 s = (char *)dtrace_loadptr(maddr + name); 4224 len = dtrace_strlen(s, size); 4225 4226 if (*flags & CPU_DTRACE_FAULT) 4227 break; 4228 4229 if (len != 0) { 4230 if ((end -= (len + 1)) < start) 4231 break; 4232 4233 *end = ':'; 4234 } 4235 4236 for (i = 1; i <= len; i++) 4237 end[i] = dtrace_load8((uintptr_t)s++); 4238 break; 4239 } 4240 } 4241 4242 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4243 ddi_node_state_t devi_state; 4244 4245 devi_state = dtrace_load32(daddr + 4246 offsetof(struct dev_info, devi_node_state)); 4247 4248 if (*flags & CPU_DTRACE_FAULT) 4249 break; 4250 4251 if (devi_state >= DS_INITIALIZED) { 4252 s = (char *)dtrace_loadptr(daddr + 4253 offsetof(struct dev_info, devi_addr)); 4254 len = dtrace_strlen(s, size); 4255 4256 if (*flags & CPU_DTRACE_FAULT) 4257 break; 4258 4259 if (len != 0) { 4260 if ((end -= (len + 1)) < start) 4261 break; 4262 4263 *end = '@'; 4264 } 4265 4266 for (i = 1; i <= len; i++) 4267 end[i] = dtrace_load8((uintptr_t)s++); 4268 } 4269 4270 /* 4271 * Now for the node name... 4272 */ 4273 s = (char *)dtrace_loadptr(daddr + 4274 offsetof(struct dev_info, devi_node_name)); 4275 4276 daddr = dtrace_loadptr(daddr + 4277 offsetof(struct dev_info, devi_parent)); 4278 4279 /* 4280 * If our parent is NULL (that is, if we're the root 4281 * node), we're going to use the special path 4282 * "devices". 4283 */ 4284 if (daddr == 0) 4285 s = "devices"; 4286 4287 len = dtrace_strlen(s, size); 4288 if (*flags & CPU_DTRACE_FAULT) 4289 break; 4290 4291 if ((end -= (len + 1)) < start) 4292 break; 4293 4294 for (i = 1; i <= len; i++) 4295 end[i] = dtrace_load8((uintptr_t)s++); 4296 *end = '/'; 4297 4298 if (depth++ > dtrace_devdepth_max) { 4299 *flags |= CPU_DTRACE_ILLOP; 4300 break; 4301 } 4302 } 4303 4304 if (end < start) 4305 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4306 4307 if (daddr == 0) { 4308 regs[rd] = (uintptr_t)end; 4309 mstate->dtms_scratch_ptr += size; 4310 } 4311 4312 break; 4313 } 4314#endif 4315 4316 case DIF_SUBR_STRJOIN: { 4317 char *d = (char *)mstate->dtms_scratch_ptr; 4318 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4319 uintptr_t s1 = tupregs[0].dttk_value; 4320 uintptr_t s2 = tupregs[1].dttk_value; 4321 int i = 0; 4322 4323 if (!dtrace_strcanload(s1, size, mstate, vstate) || 4324 !dtrace_strcanload(s2, size, mstate, vstate)) { 4325 regs[rd] = 0; 4326 break; 4327 } 4328 4329 if (!DTRACE_INSCRATCH(mstate, size)) { 4330 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4331 regs[rd] = 0; 4332 break; 4333 } 4334 4335 for (;;) { 4336 if (i >= size) { 4337 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4338 regs[rd] = 0; 4339 break; 4340 } 4341 4342 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 4343 i--; 4344 break; 4345 } 4346 } 4347 4348 for (;;) { 4349 if (i >= size) { 4350 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4351 regs[rd] = 0; 4352 break; 4353 } 4354 4355 if ((d[i++] = dtrace_load8(s2++)) == '\0') 4356 break; 4357 } 4358 4359 if (i < size) { 4360 mstate->dtms_scratch_ptr += i; 4361 regs[rd] = (uintptr_t)d; 4362 } 4363 4364 break; 4365 } 4366 4367 case DIF_SUBR_LLTOSTR: { 4368 int64_t i = (int64_t)tupregs[0].dttk_value; 4369 uint64_t val, digit; 4370 uint64_t size = 65; /* enough room for 2^64 in binary */ 4371 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 4372 int base = 10; 4373 4374 if (nargs > 1) { 4375 if ((base = tupregs[1].dttk_value) <= 1 || 4376 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 4377 *flags |= CPU_DTRACE_ILLOP; 4378 break; 4379 } 4380 } 4381 4382 val = (base == 10 && i < 0) ? 
i * -1 : i; 4383 4384 if (!DTRACE_INSCRATCH(mstate, size)) { 4385 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4386 regs[rd] = 0; 4387 break; 4388 } 4389 4390 for (*end-- = '\0'; val; val /= base) { 4391 if ((digit = val % base) <= '9' - '0') { 4392 *end-- = '0' + digit; 4393 } else { 4394 *end-- = 'a' + (digit - ('9' - '0') - 1); 4395 } 4396 } 4397 4398 if (i == 0 && base == 16) 4399 *end-- = '0'; 4400 4401 if (base == 16) 4402 *end-- = 'x'; 4403 4404 if (i == 0 || base == 8 || base == 16) 4405 *end-- = '0'; 4406 4407 if (i < 0 && base == 10) 4408 *end-- = '-'; 4409 4410 regs[rd] = (uintptr_t)end + 1; 4411 mstate->dtms_scratch_ptr += size; 4412 break; 4413 } 4414 4415 case DIF_SUBR_HTONS: 4416 case DIF_SUBR_NTOHS: 4417#if BYTE_ORDER == BIG_ENDIAN 4418 regs[rd] = (uint16_t)tupregs[0].dttk_value; 4419#else 4420 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 4421#endif 4422 break; 4423 4424 4425 case DIF_SUBR_HTONL: 4426 case DIF_SUBR_NTOHL: 4427#if BYTE_ORDER == BIG_ENDIAN 4428 regs[rd] = (uint32_t)tupregs[0].dttk_value; 4429#else 4430 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 4431#endif 4432 break; 4433 4434 4435 case DIF_SUBR_HTONLL: 4436 case DIF_SUBR_NTOHLL: 4437#if BYTE_ORDER == BIG_ENDIAN 4438 regs[rd] = (uint64_t)tupregs[0].dttk_value; 4439#else 4440 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 4441#endif 4442 break; 4443 4444 4445 case DIF_SUBR_DIRNAME: 4446 case DIF_SUBR_BASENAME: { 4447 char *dest = (char *)mstate->dtms_scratch_ptr; 4448 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4449 uintptr_t src = tupregs[0].dttk_value; 4450 int i, j, len = dtrace_strlen((char *)src, size); 4451 int lastbase = -1, firstbase = -1, lastdir = -1; 4452 int start, end; 4453 4454 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 4455 regs[rd] = 0; 4456 break; 4457 } 4458 4459 if (!DTRACE_INSCRATCH(mstate, size)) { 4460 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4461 regs[rd] = 0; 4462 break; 4463 } 4464 4465 /* 4466 * The basename and dirname for a zero-length string is 4467 * defined to be "." 4468 */ 4469 if (len == 0) { 4470 len = 1; 4471 src = (uintptr_t)"."; 4472 } 4473 4474 /* 4475 * Start from the back of the string, moving back toward the 4476 * front until we see a character that isn't a slash. That 4477 * character is the last character in the basename. 4478 */ 4479 for (i = len - 1; i >= 0; i--) { 4480 if (dtrace_load8(src + i) != '/') 4481 break; 4482 } 4483 4484 if (i >= 0) 4485 lastbase = i; 4486 4487 /* 4488 * Starting from the last character in the basename, move 4489 * towards the front until we find a slash. The character 4490 * that we processed immediately before that is the first 4491 * character in the basename. 4492 */ 4493 for (; i >= 0; i--) { 4494 if (dtrace_load8(src + i) == '/') 4495 break; 4496 } 4497 4498 if (i >= 0) 4499 firstbase = i + 1; 4500 4501 /* 4502 * Now keep going until we find a non-slash character. That 4503 * character is the last character in the dirname. 4504 */ 4505 for (; i >= 0; i--) { 4506 if (dtrace_load8(src + i) != '/') 4507 break; 4508 } 4509 4510 if (i >= 0) 4511 lastdir = i; 4512 4513 ASSERT(!(lastbase == -1 && firstbase != -1)); 4514 ASSERT(!(firstbase == -1 && lastdir != -1)); 4515 4516 if (lastbase == -1) { 4517 /* 4518 * We didn't find a non-slash character. We know that 4519 * the length is non-zero, so the whole string must be 4520 * slashes. In either the dirname or the basename 4521 * case, we return '/'. 
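 * (For instance, both dirname("//") and basename("//") come out as "/": with firstbase, lastbase and lastdir all forced to 0, the copy loop below emits just that single slash.)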
4522 */ 4523 ASSERT(firstbase == -1); 4524 firstbase = lastbase = lastdir = 0; 4525 } 4526 4527 if (firstbase == -1) { 4528 /* 4529 * The entire string consists only of a basename 4530 * component. If we're looking for dirname, we need 4531 * to change our string to be just "."; if we're 4532 * looking for a basename, we'll just set the first 4533 * character of the basename to be 0. 4534 */ 4535 if (subr == DIF_SUBR_DIRNAME) { 4536 ASSERT(lastdir == -1); 4537 src = (uintptr_t)"."; 4538 lastdir = 0; 4539 } else { 4540 firstbase = 0; 4541 } 4542 } 4543 4544 if (subr == DIF_SUBR_DIRNAME) { 4545 if (lastdir == -1) { 4546 /* 4547 * We know that we have a slash in the name -- 4548 * or lastdir would be set to 0, above. And 4549 * because lastdir is -1, we know that this 4550 * slash must be the first character. (That 4551 * is, the full string must be of the form 4552 * "/basename".) In this case, the last 4553 * character of the directory name is 0. 4554 */ 4555 lastdir = 0; 4556 } 4557 4558 start = 0; 4559 end = lastdir; 4560 } else { 4561 ASSERT(subr == DIF_SUBR_BASENAME); 4562 ASSERT(firstbase != -1 && lastbase != -1); 4563 start = firstbase; 4564 end = lastbase; 4565 } 4566 4567 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 4568 dest[j] = dtrace_load8(src + i); 4569 4570 dest[j] = '\0'; 4571 regs[rd] = (uintptr_t)dest; 4572 mstate->dtms_scratch_ptr += size; 4573 break; 4574 } 4575 4576 case DIF_SUBR_CLEANPATH: { 4577 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4578 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4579 uintptr_t src = tupregs[0].dttk_value; 4580 int i = 0, j = 0; 4581 4582 if (!dtrace_strcanload(src, size, mstate, vstate)) { 4583 regs[rd] = 0; 4584 break; 4585 } 4586 4587 if (!DTRACE_INSCRATCH(mstate, size)) { 4588 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4589 regs[rd] = 0; 4590 break; 4591 } 4592 4593 /* 4594 * Move forward, loading each character. 4595 */ 4596 do { 4597 c = dtrace_load8(src + i++); 4598next: 4599 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 4600 break; 4601 4602 if (c != '/') { 4603 dest[j++] = c; 4604 continue; 4605 } 4606 4607 c = dtrace_load8(src + i++); 4608 4609 if (c == '/') { 4610 /* 4611 * We have two slashes -- we can just advance 4612 * to the next character. 4613 */ 4614 goto next; 4615 } 4616 4617 if (c != '.') { 4618 /* 4619 * This is not "." and it's not ".." -- we can 4620 * just store the "/" and this character and 4621 * drive on. 4622 */ 4623 dest[j++] = '/'; 4624 dest[j++] = c; 4625 continue; 4626 } 4627 4628 c = dtrace_load8(src + i++); 4629 4630 if (c == '/') { 4631 /* 4632 * This is a "/./" component. We're not going 4633 * to store anything in the destination buffer; 4634 * we're just going to go to the next component. 4635 */ 4636 goto next; 4637 } 4638 4639 if (c != '.') { 4640 /* 4641 * This is not ".." -- we can just store the 4642 * "/." and this character and continue 4643 * processing. 4644 */ 4645 dest[j++] = '/'; 4646 dest[j++] = '.'; 4647 dest[j++] = c; 4648 continue; 4649 } 4650 4651 c = dtrace_load8(src + i++); 4652 4653 if (c != '/' && c != '\0') { 4654 /* 4655 * This is not ".." -- it's "..[mumble]". 4656 * We'll store the "/.." and this character 4657 * and continue processing. 4658 */ 4659 dest[j++] = '/'; 4660 dest[j++] = '.'; 4661 dest[j++] = '.'; 4662 dest[j++] = c; 4663 continue; 4664 } 4665 4666 /* 4667 * This is "/../" or "/..\0". We need to back up 4668 * our destination pointer until we find a "/". 
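 * For example, cleanpath("/foo/../bar") yields "/bar": the "/../" backs the destination up past "foo" to the preceding slash, and "bar" is then emitted in its place. Note that this is a purely lexical collapse; unlike a real pathname lookup, it can't know about symbolic links.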
4669 */ 4670 i--; 4671 while (j != 0 && dest[--j] != '/') 4672 continue; 4673 4674 if (c == '\0') 4675 dest[++j] = '/'; 4676 } while (c != '\0'); 4677 4678 dest[j] = '\0'; 4679 regs[rd] = (uintptr_t)dest; 4680 mstate->dtms_scratch_ptr += size; 4681 break; 4682 } 4683 4684 case DIF_SUBR_INET_NTOA: 4685 case DIF_SUBR_INET_NTOA6: 4686 case DIF_SUBR_INET_NTOP: { 4687 size_t size; 4688 int af, argi, i; 4689 char *base, *end; 4690 4691 if (subr == DIF_SUBR_INET_NTOP) { 4692 af = (int)tupregs[0].dttk_value; 4693 argi = 1; 4694 } else { 4695 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4696 argi = 0; 4697 } 4698 4699 if (af == AF_INET) { 4700 ipaddr_t ip4; 4701 uint8_t *ptr8, val; 4702 4703 /* 4704 * Safely load the IPv4 address. 4705 */ 4706 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4707 4708 /* 4709 * Check an IPv4 string will fit in scratch. 4710 */ 4711 size = INET_ADDRSTRLEN; 4712 if (!DTRACE_INSCRATCH(mstate, size)) { 4713 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4714 regs[rd] = 0; 4715 break; 4716 } 4717 base = (char *)mstate->dtms_scratch_ptr; 4718 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4719 4720 /* 4721 * Stringify as a dotted decimal quad. 4722 */ 4723 *end-- = '\0'; 4724 ptr8 = (uint8_t *)&ip4; 4725 for (i = 3; i >= 0; i--) { 4726 val = ptr8[i]; 4727 4728 if (val == 0) { 4729 *end-- = '0'; 4730 } else { 4731 for (; val; val /= 10) { 4732 *end-- = '0' + (val % 10); 4733 } 4734 } 4735 4736 if (i > 0) 4737 *end-- = '.'; 4738 } 4739 ASSERT(end + 1 >= base); 4740 4741 } else if (af == AF_INET6) { 4742 struct in6_addr ip6; 4743 int firstzero, tryzero, numzero, v6end; 4744 uint16_t val; 4745 const char digits[] = "0123456789abcdef"; 4746 4747 /* 4748 * Stringify using RFC 1884 convention 2 - 16 bit 4749 * hexadecimal values with a zero-run compression. 4750 * Lower case hexadecimal digits are used. 4751 * eg, fe80::214:4fff:fe0b:76c8. 4752 * The IPv4 embedded form is returned for inet_ntop, 4753 * just the IPv4 string is returned for inet_ntoa6. 4754 */ 4755 4756 /* 4757 * Safely load the IPv6 address. 4758 */ 4759 dtrace_bcopy( 4760 (void *)(uintptr_t)tupregs[argi].dttk_value, 4761 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4762 4763 /* 4764 * Check an IPv6 string will fit in scratch. 4765 */ 4766 size = INET6_ADDRSTRLEN; 4767 if (!DTRACE_INSCRATCH(mstate, size)) { 4768 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4769 regs[rd] = 0; 4770 break; 4771 } 4772 base = (char *)mstate->dtms_scratch_ptr; 4773 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4774 *end-- = '\0'; 4775 4776 /* 4777 * Find the longest run of 16 bit zero values 4778 * for the single allowed zero compression - "::". 
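 * For instance, fe80:0:0:0:214:4fff:fe0b:76c8 carries its six zero bytes at offsets 2 through 7; firstzero/numzero record that run, and the loop further below replaces it with the lone "::".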
4779 */ 4780 firstzero = -1; 4781 tryzero = -1; 4782 numzero = 1; 4783 for (i = 0; i < sizeof (struct in6_addr); i++) { 4784#if defined(sun) 4785 if (ip6._S6_un._S6_u8[i] == 0 && 4786#else 4787 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4788#endif 4789 tryzero == -1 && i % 2 == 0) { 4790 tryzero = i; 4791 continue; 4792 } 4793 4794 if (tryzero != -1 && 4795#if defined(sun) 4796 (ip6._S6_un._S6_u8[i] != 0 || 4797#else 4798 (ip6.__u6_addr.__u6_addr8[i] != 0 || 4799#endif 4800 i == sizeof (struct in6_addr) - 1)) { 4801 4802 if (i - tryzero <= numzero) { 4803 tryzero = -1; 4804 continue; 4805 } 4806 4807 firstzero = tryzero; 4808 numzero = i - i % 2 - tryzero; 4809 tryzero = -1; 4810 4811#if defined(sun) 4812 if (ip6._S6_un._S6_u8[i] == 0 && 4813#else 4814 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4815#endif 4816 i == sizeof (struct in6_addr) - 1) 4817 numzero += 2; 4818 } 4819 } 4820 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 4821 4822 /* 4823 * Check for an IPv4 embedded address. 4824 */ 4825 v6end = sizeof (struct in6_addr) - 2; 4826 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 4827 IN6_IS_ADDR_V4COMPAT(&ip6)) { 4828 for (i = sizeof (struct in6_addr) - 1; 4829 i >= DTRACE_V4MAPPED_OFFSET; i--) { 4830 ASSERT(end >= base); 4831 4832#if defined(sun) 4833 val = ip6._S6_un._S6_u8[i]; 4834#else 4835 val = ip6.__u6_addr.__u6_addr8[i]; 4836#endif 4837 4838 if (val == 0) { 4839 *end-- = '0'; 4840 } else { 4841 for (; val; val /= 10) { 4842 *end-- = '0' + val % 10; 4843 } 4844 } 4845 4846 if (i > DTRACE_V4MAPPED_OFFSET) 4847 *end-- = '.'; 4848 } 4849 4850 if (subr == DIF_SUBR_INET_NTOA6) 4851 goto inetout; 4852 4853 /* 4854 * Set v6end to skip the IPv4 address that 4855 * we have already stringified. 4856 */ 4857 v6end = 10; 4858 } 4859 4860 /* 4861 * Build the IPv6 string by working through the 4862 * address in reverse. 4863 */ 4864 for (i = v6end; i >= 0; i -= 2) { 4865 ASSERT(end >= base); 4866 4867 if (i == firstzero + numzero - 2) { 4868 *end-- = ':'; 4869 *end-- = ':'; 4870 i -= numzero - 2; 4871 continue; 4872 } 4873 4874 if (i < 14 && i != firstzero - 2) 4875 *end-- = ':'; 4876 4877#if defined(sun) 4878 val = (ip6._S6_un._S6_u8[i] << 8) + 4879 ip6._S6_un._S6_u8[i + 1]; 4880#else 4881 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 4882 ip6.__u6_addr.__u6_addr8[i + 1]; 4883#endif 4884 4885 if (val == 0) { 4886 *end-- = '0'; 4887 } else { 4888 for (; val; val /= 16) { 4889 *end-- = digits[val % 16]; 4890 } 4891 } 4892 } 4893 ASSERT(end + 1 >= base); 4894 4895 } else { 4896 /* 4897 * The user didn't use AF_INET or AF_INET6.
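 * Rather than guess at an address family, we flag an illegal operation and return 0.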
4898 */ 4899 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4900 regs[rd] = 0; 4901 break; 4902 } 4903 4904inetout: regs[rd] = (uintptr_t)end + 1; 4905 mstate->dtms_scratch_ptr += size; 4906 break; 4907 } 4908 4909 case DIF_SUBR_MEMREF: { 4910 uintptr_t size = 2 * sizeof(uintptr_t); 4911 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4912 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 4913 4914 /* address and length */ 4915 memref[0] = tupregs[0].dttk_value; 4916 memref[1] = tupregs[1].dttk_value; 4917 4918 regs[rd] = (uintptr_t) memref; 4919 mstate->dtms_scratch_ptr += scratch_size; 4920 break; 4921 } 4922 4923 case DIF_SUBR_TYPEREF: { 4924 uintptr_t size = 4 * sizeof(uintptr_t); 4925 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4926 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size; 4927 4928 /* address, num_elements, type_str, type_len */ 4929 typeref[0] = tupregs[0].dttk_value; 4930 typeref[1] = tupregs[1].dttk_value; 4931 typeref[2] = tupregs[2].dttk_value; 4932 typeref[3] = tupregs[3].dttk_value; 4933 4934 regs[rd] = (uintptr_t) typeref; 4935 mstate->dtms_scratch_ptr += scratch_size; 4936 break; 4937 } 4938 } 4939} 4940 4941/* 4942 * Emulate the execution of DTrace IR instructions specified by the given 4943 * DIF object. This function is deliberately void of assertions as all of 4944 * the necessary checks are handled by a call to dtrace_difo_validate(). 4945 */ 4946static uint64_t 4947dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4948 dtrace_vstate_t *vstate, dtrace_state_t *state) 4949{ 4950 const dif_instr_t *text = difo->dtdo_buf; 4951 const uint_t textlen = difo->dtdo_len; 4952 const char *strtab = difo->dtdo_strtab; 4953 const uint64_t *inttab = difo->dtdo_inttab; 4954 4955 uint64_t rval = 0; 4956 dtrace_statvar_t *svar; 4957 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4958 dtrace_difv_t *v; 4959 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4960 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4961 4962 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4963 uint64_t regs[DIF_DIR_NREGS]; 4964 uint64_t *tmp; 4965 4966 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4967 int64_t cc_r; 4968 uint_t pc = 0, id, opc = 0; 4969 uint8_t ttop = 0; 4970 dif_instr_t instr; 4971 uint_t r1, r2, rd; 4972 4973 /* 4974 * We stash the current DIF object into the machine state: we need it 4975 * for subsequent access checking. 
4976 */ 4977 mstate->dtms_difo = difo; 4978 4979 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4980 4981 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4982 opc = pc; 4983 4984 instr = text[pc++]; 4985 r1 = DIF_INSTR_R1(instr); 4986 r2 = DIF_INSTR_R2(instr); 4987 rd = DIF_INSTR_RD(instr); 4988 4989 switch (DIF_INSTR_OP(instr)) { 4990 case DIF_OP_OR: 4991 regs[rd] = regs[r1] | regs[r2]; 4992 break; 4993 case DIF_OP_XOR: 4994 regs[rd] = regs[r1] ^ regs[r2]; 4995 break; 4996 case DIF_OP_AND: 4997 regs[rd] = regs[r1] & regs[r2]; 4998 break; 4999 case DIF_OP_SLL: 5000 regs[rd] = regs[r1] << regs[r2]; 5001 break; 5002 case DIF_OP_SRL: 5003 regs[rd] = regs[r1] >> regs[r2]; 5004 break; 5005 case DIF_OP_SUB: 5006 regs[rd] = regs[r1] - regs[r2]; 5007 break; 5008 case DIF_OP_ADD: 5009 regs[rd] = regs[r1] + regs[r2]; 5010 break; 5011 case DIF_OP_MUL: 5012 regs[rd] = regs[r1] * regs[r2]; 5013 break; 5014 case DIF_OP_SDIV: 5015 if (regs[r2] == 0) { 5016 regs[rd] = 0; 5017 *flags |= CPU_DTRACE_DIVZERO; 5018 } else { 5019 regs[rd] = (int64_t)regs[r1] / 5020 (int64_t)regs[r2]; 5021 } 5022 break; 5023 5024 case DIF_OP_UDIV: 5025 if (regs[r2] == 0) { 5026 regs[rd] = 0; 5027 *flags |= CPU_DTRACE_DIVZERO; 5028 } else { 5029 regs[rd] = regs[r1] / regs[r2]; 5030 } 5031 break; 5032 5033 case DIF_OP_SREM: 5034 if (regs[r2] == 0) { 5035 regs[rd] = 0; 5036 *flags |= CPU_DTRACE_DIVZERO; 5037 } else { 5038 regs[rd] = (int64_t)regs[r1] % 5039 (int64_t)regs[r2]; 5040 } 5041 break; 5042 5043 case DIF_OP_UREM: 5044 if (regs[r2] == 0) { 5045 regs[rd] = 0; 5046 *flags |= CPU_DTRACE_DIVZERO; 5047 } else { 5048 regs[rd] = regs[r1] % regs[r2]; 5049 } 5050 break; 5051 5052 case DIF_OP_NOT: 5053 regs[rd] = ~regs[r1]; 5054 break; 5055 case DIF_OP_MOV: 5056 regs[rd] = regs[r1]; 5057 break; 5058 case DIF_OP_CMP: 5059 cc_r = regs[r1] - regs[r2]; 5060 cc_n = cc_r < 0; 5061 cc_z = cc_r == 0; 5062 cc_v = 0; 5063 cc_c = regs[r1] < regs[r2]; 5064 break; 5065 case DIF_OP_TST: 5066 cc_n = cc_v = cc_c = 0; 5067 cc_z = regs[r1] == 0; 5068 break; 5069 case DIF_OP_BA: 5070 pc = DIF_INSTR_LABEL(instr); 5071 break; 5072 case DIF_OP_BE: 5073 if (cc_z) 5074 pc = DIF_INSTR_LABEL(instr); 5075 break; 5076 case DIF_OP_BNE: 5077 if (cc_z == 0) 5078 pc = DIF_INSTR_LABEL(instr); 5079 break; 5080 case DIF_OP_BG: 5081 if ((cc_z | (cc_n ^ cc_v)) == 0) 5082 pc = DIF_INSTR_LABEL(instr); 5083 break; 5084 case DIF_OP_BGU: 5085 if ((cc_c | cc_z) == 0) 5086 pc = DIF_INSTR_LABEL(instr); 5087 break; 5088 case DIF_OP_BGE: 5089 if ((cc_n ^ cc_v) == 0) 5090 pc = DIF_INSTR_LABEL(instr); 5091 break; 5092 case DIF_OP_BGEU: 5093 if (cc_c == 0) 5094 pc = DIF_INSTR_LABEL(instr); 5095 break; 5096 case DIF_OP_BL: 5097 if (cc_n ^ cc_v) 5098 pc = DIF_INSTR_LABEL(instr); 5099 break; 5100 case DIF_OP_BLU: 5101 if (cc_c) 5102 pc = DIF_INSTR_LABEL(instr); 5103 break; 5104 case DIF_OP_BLE: 5105 if (cc_z | (cc_n ^ cc_v)) 5106 pc = DIF_INSTR_LABEL(instr); 5107 break; 5108 case DIF_OP_BLEU: 5109 if (cc_c | cc_z) 5110 pc = DIF_INSTR_LABEL(instr); 5111 break; 5112 case DIF_OP_RLDSB: 5113 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5114 *flags |= CPU_DTRACE_KPRIV; 5115 *illval = regs[r1]; 5116 break; 5117 } 5118 /*FALLTHROUGH*/ 5119 case DIF_OP_LDSB: 5120 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 5121 break; 5122 case DIF_OP_RLDSH: 5123 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5124 *flags |= CPU_DTRACE_KPRIV; 5125 *illval = regs[r1]; 5126 break; 5127 } 5128 /*FALLTHROUGH*/ 5129 case DIF_OP_LDSH: 5130 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 5131 break; 
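		/* * The remaining RLD* cases follow the pattern of RLDSB and RLDSH above: verify that the address lies in DTrace-managed memory, then fall through to the ordinary LD* load of the same width. */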
5132 case DIF_OP_RLDSW: 5133 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5134 *flags |= CPU_DTRACE_KPRIV; 5135 *illval = regs[r1]; 5136 break; 5137 } 5138 /*FALLTHROUGH*/ 5139 case DIF_OP_LDSW: 5140 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 5141 break; 5142 case DIF_OP_RLDUB: 5143 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5144 *flags |= CPU_DTRACE_KPRIV; 5145 *illval = regs[r1]; 5146 break; 5147 } 5148 /*FALLTHROUGH*/ 5149 case DIF_OP_LDUB: 5150 regs[rd] = dtrace_load8(regs[r1]); 5151 break; 5152 case DIF_OP_RLDUH: 5153 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5154 *flags |= CPU_DTRACE_KPRIV; 5155 *illval = regs[r1]; 5156 break; 5157 } 5158 /*FALLTHROUGH*/ 5159 case DIF_OP_LDUH: 5160 regs[rd] = dtrace_load16(regs[r1]); 5161 break; 5162 case DIF_OP_RLDUW: 5163 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5164 *flags |= CPU_DTRACE_KPRIV; 5165 *illval = regs[r1]; 5166 break; 5167 } 5168 /*FALLTHROUGH*/ 5169 case DIF_OP_LDUW: 5170 regs[rd] = dtrace_load32(regs[r1]); 5171 break; 5172 case DIF_OP_RLDX: 5173 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 5174 *flags |= CPU_DTRACE_KPRIV; 5175 *illval = regs[r1]; 5176 break; 5177 } 5178 /*FALLTHROUGH*/ 5179 case DIF_OP_LDX: 5180 regs[rd] = dtrace_load64(regs[r1]); 5181 break; 5182 case DIF_OP_ULDSB: 5183 regs[rd] = (int8_t) 5184 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5185 break; 5186 case DIF_OP_ULDSH: 5187 regs[rd] = (int16_t) 5188 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5189 break; 5190 case DIF_OP_ULDSW: 5191 regs[rd] = (int32_t) 5192 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5193 break; 5194 case DIF_OP_ULDUB: 5195 regs[rd] = 5196 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5197 break; 5198 case DIF_OP_ULDUH: 5199 regs[rd] = 5200 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5201 break; 5202 case DIF_OP_ULDUW: 5203 regs[rd] = 5204 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5205 break; 5206 case DIF_OP_ULDX: 5207 regs[rd] = 5208 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 5209 break; 5210 case DIF_OP_RET: 5211 rval = regs[rd]; 5212 pc = textlen; 5213 break; 5214 case DIF_OP_NOP: 5215 break; 5216 case DIF_OP_SETX: 5217 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 5218 break; 5219 case DIF_OP_SETS: 5220 regs[rd] = (uint64_t)(uintptr_t) 5221 (strtab + DIF_INSTR_STRING(instr)); 5222 break; 5223 case DIF_OP_SCMP: { 5224 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 5225 uintptr_t s1 = regs[r1]; 5226 uintptr_t s2 = regs[r2]; 5227 5228 if (s1 != 0 && 5229 !dtrace_strcanload(s1, sz, mstate, vstate)) 5230 break; 5231 if (s2 != 0 && 5232 !dtrace_strcanload(s2, sz, mstate, vstate)) 5233 break; 5234 5235 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 5236 5237 cc_n = cc_r < 0; 5238 cc_z = cc_r == 0; 5239 cc_v = cc_c = 0; 5240 break; 5241 } 5242 case DIF_OP_LDGA: 5243 regs[rd] = dtrace_dif_variable(mstate, state, 5244 r1, regs[r2]); 5245 break; 5246 case DIF_OP_LDGS: 5247 id = DIF_INSTR_VAR(instr); 5248 5249 if (id >= DIF_VAR_OTHER_UBASE) { 5250 uintptr_t a; 5251 5252 id -= DIF_VAR_OTHER_UBASE; 5253 svar = vstate->dtvs_globals[id]; 5254 ASSERT(svar != NULL); 5255 v = &svar->dtsv_var; 5256 5257 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 5258 regs[rd] = svar->dtsv_data; 5259 break; 5260 } 5261 5262 a = (uintptr_t)svar->dtsv_data; 5263 5264 if (*(uint8_t *)a == UINT8_MAX) { 5265 /* 5266 * If the 0th byte is set to UINT8_MAX 5267 * then this is to be treated as a 5268 * reference to a NULL variable. 
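 * (By-reference statics reserve a uint64_t of metadata ahead of the payload: storing NULL sets the first byte to UINT8_MAX, so a load maps that marker back to 0 and otherwise returns the payload address at a + sizeof (uint64_t); see DIF_OP_STGS below.)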
5269 */ 5270 regs[rd] = 0; 5271 } else { 5272 regs[rd] = a + sizeof (uint64_t); 5273 } 5274 5275 break; 5276 } 5277 5278 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 5279 break; 5280 5281 case DIF_OP_STGS: 5282 id = DIF_INSTR_VAR(instr); 5283 5284 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5285 id -= DIF_VAR_OTHER_UBASE; 5286 5287 svar = vstate->dtvs_globals[id]; 5288 ASSERT(svar != NULL); 5289 v = &svar->dtsv_var; 5290 5291 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5292 uintptr_t a = (uintptr_t)svar->dtsv_data; 5293 5294 ASSERT(a != 0); 5295 ASSERT(svar->dtsv_size != 0); 5296 5297 if (regs[rd] == 0) { 5298 *(uint8_t *)a = UINT8_MAX; 5299 break; 5300 } else { 5301 *(uint8_t *)a = 0; 5302 a += sizeof (uint64_t); 5303 } 5304 if (!dtrace_vcanload( 5305 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5306 mstate, vstate)) 5307 break; 5308 5309 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5310 (void *)a, &v->dtdv_type); 5311 break; 5312 } 5313 5314 svar->dtsv_data = regs[rd]; 5315 break; 5316 5317 case DIF_OP_LDTA: 5318 /* 5319 * There are no DTrace built-in thread-local arrays at 5320 * present. This opcode is saved for future work. 5321 */ 5322 *flags |= CPU_DTRACE_ILLOP; 5323 regs[rd] = 0; 5324 break; 5325 5326 case DIF_OP_LDLS: 5327 id = DIF_INSTR_VAR(instr); 5328 5329 if (id < DIF_VAR_OTHER_UBASE) { 5330 /* 5331 * For now, this has no meaning. 5332 */ 5333 regs[rd] = 0; 5334 break; 5335 } 5336 5337 id -= DIF_VAR_OTHER_UBASE; 5338 5339 ASSERT(id < vstate->dtvs_nlocals); 5340 ASSERT(vstate->dtvs_locals != NULL); 5341 5342 svar = vstate->dtvs_locals[id]; 5343 ASSERT(svar != NULL); 5344 v = &svar->dtsv_var; 5345 5346 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5347 uintptr_t a = (uintptr_t)svar->dtsv_data; 5348 size_t sz = v->dtdv_type.dtdt_size; 5349 5350 sz += sizeof (uint64_t); 5351 ASSERT(svar->dtsv_size == NCPU * sz); 5352 a += curcpu * sz; 5353 5354 if (*(uint8_t *)a == UINT8_MAX) { 5355 /* 5356 * If the 0th byte is set to UINT8_MAX 5357 * then this is to be treated as a 5358 * reference to a NULL variable. 
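 * (Locals are replicated per CPU: dtsv_data points at NCPU copies of the metadata word plus payload, indexed by curcpu above, so probe context can use them without locks.)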
5359 */ 5360 regs[rd] = 0; 5361 } else { 5362 regs[rd] = a + sizeof (uint64_t); 5363 } 5364 5365 break; 5366 } 5367 5368 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5369 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5370 regs[rd] = tmp[curcpu]; 5371 break; 5372 5373 case DIF_OP_STLS: 5374 id = DIF_INSTR_VAR(instr); 5375 5376 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5377 id -= DIF_VAR_OTHER_UBASE; 5378 ASSERT(id < vstate->dtvs_nlocals); 5379 5380 ASSERT(vstate->dtvs_locals != NULL); 5381 svar = vstate->dtvs_locals[id]; 5382 ASSERT(svar != NULL); 5383 v = &svar->dtsv_var; 5384 5385 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5386 uintptr_t a = (uintptr_t)svar->dtsv_data; 5387 size_t sz = v->dtdv_type.dtdt_size; 5388 5389 sz += sizeof (uint64_t); 5390 ASSERT(svar->dtsv_size == NCPU * sz); 5391 a += curcpu * sz; 5392 5393 if (regs[rd] == 0) { 5394 *(uint8_t *)a = UINT8_MAX; 5395 break; 5396 } else { 5397 *(uint8_t *)a = 0; 5398 a += sizeof (uint64_t); 5399 } 5400 5401 if (!dtrace_vcanload( 5402 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5403 mstate, vstate)) 5404 break; 5405 5406 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5407 (void *)a, &v->dtdv_type); 5408 break; 5409 } 5410 5411 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5412 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5413 tmp[curcpu] = regs[rd]; 5414 break; 5415 5416 case DIF_OP_LDTS: { 5417 dtrace_dynvar_t *dvar; 5418 dtrace_key_t *key; 5419 5420 id = DIF_INSTR_VAR(instr); 5421 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5422 id -= DIF_VAR_OTHER_UBASE; 5423 v = &vstate->dtvs_tlocals[id]; 5424 5425 key = &tupregs[DIF_DTR_NREGS]; 5426 key[0].dttk_value = (uint64_t)id; 5427 key[0].dttk_size = 0; 5428 DTRACE_TLS_THRKEY(key[1].dttk_value); 5429 key[1].dttk_size = 0; 5430 5431 dvar = dtrace_dynvar(dstate, 2, key, 5432 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 5433 mstate, vstate); 5434 5435 if (dvar == NULL) { 5436 regs[rd] = 0; 5437 break; 5438 } 5439 5440 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5441 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5442 } else { 5443 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5444 } 5445 5446 break; 5447 } 5448 5449 case DIF_OP_STTS: { 5450 dtrace_dynvar_t *dvar; 5451 dtrace_key_t *key; 5452 5453 id = DIF_INSTR_VAR(instr); 5454 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5455 id -= DIF_VAR_OTHER_UBASE; 5456 5457 key = &tupregs[DIF_DTR_NREGS]; 5458 key[0].dttk_value = (uint64_t)id; 5459 key[0].dttk_size = 0; 5460 DTRACE_TLS_THRKEY(key[1].dttk_value); 5461 key[1].dttk_size = 0; 5462 v = &vstate->dtvs_tlocals[id]; 5463 5464 dvar = dtrace_dynvar(dstate, 2, key, 5465 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5466 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5467 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5468 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5469 5470 /* 5471 * Given that we're storing to thread-local data, 5472 * we need to flush our predicate cache. 
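 * (A cached predicate verdict may depend on the thread-local we are about to write, so clearing t_predcache below forces the next firing on this thread to re-evaluate the predicate rather than trust a stale result.)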
5473 */ 5474 curthread->t_predcache = 0; 5475 5476 if (dvar == NULL) 5477 break; 5478 5479 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5480 if (!dtrace_vcanload( 5481 (void *)(uintptr_t)regs[rd], 5482 &v->dtdv_type, mstate, vstate)) 5483 break; 5484 5485 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5486 dvar->dtdv_data, &v->dtdv_type); 5487 } else { 5488 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5489 } 5490 5491 break; 5492 } 5493 5494 case DIF_OP_SRA: 5495 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 5496 break; 5497 5498 case DIF_OP_CALL: 5499 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 5500 regs, tupregs, ttop, mstate, state); 5501 break; 5502 5503 case DIF_OP_PUSHTR: 5504 if (ttop == DIF_DTR_NREGS) { 5505 *flags |= CPU_DTRACE_TUPOFLOW; 5506 break; 5507 } 5508 5509 if (r1 == DIF_TYPE_STRING) { 5510 /* 5511 * If this is a string type and the size is 0, 5512 * we'll use the system-wide default string 5513 * size. Note that we are _not_ looking at 5514 * the value of the DTRACEOPT_STRSIZE option; 5515 * had this been set, we would expect to have 5516 * a non-zero size value in the "pushtr". 5517 */ 5518 tupregs[ttop].dttk_size = 5519 dtrace_strlen((char *)(uintptr_t)regs[rd], 5520 regs[r2] ? regs[r2] : 5521 dtrace_strsize_default) + 1; 5522 } else { 5523 tupregs[ttop].dttk_size = regs[r2]; 5524 } 5525 5526 tupregs[ttop++].dttk_value = regs[rd]; 5527 break; 5528 5529 case DIF_OP_PUSHTV: 5530 if (ttop == DIF_DTR_NREGS) { 5531 *flags |= CPU_DTRACE_TUPOFLOW; 5532 break; 5533 } 5534 5535 tupregs[ttop].dttk_value = regs[rd]; 5536 tupregs[ttop++].dttk_size = 0; 5537 break; 5538 5539 case DIF_OP_POPTS: 5540 if (ttop != 0) 5541 ttop--; 5542 break; 5543 5544 case DIF_OP_FLUSHTS: 5545 ttop = 0; 5546 break; 5547 5548 case DIF_OP_LDGAA: 5549 case DIF_OP_LDTAA: { 5550 dtrace_dynvar_t *dvar; 5551 dtrace_key_t *key = tupregs; 5552 uint_t nkeys = ttop; 5553 5554 id = DIF_INSTR_VAR(instr); 5555 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5556 id -= DIF_VAR_OTHER_UBASE; 5557 5558 key[nkeys].dttk_value = (uint64_t)id; 5559 key[nkeys++].dttk_size = 0; 5560 5561 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 5562 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5563 key[nkeys++].dttk_size = 0; 5564 v = &vstate->dtvs_tlocals[id]; 5565 } else { 5566 v = &vstate->dtvs_globals[id]->dtsv_var; 5567 } 5568 5569 dvar = dtrace_dynvar(dstate, nkeys, key, 5570 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5571 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5572 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 5573 5574 if (dvar == NULL) { 5575 regs[rd] = 0; 5576 break; 5577 } 5578 5579 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5580 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5581 } else { 5582 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5583 } 5584 5585 break; 5586 } 5587 5588 case DIF_OP_STGAA: 5589 case DIF_OP_STTAA: { 5590 dtrace_dynvar_t *dvar; 5591 dtrace_key_t *key = tupregs; 5592 uint_t nkeys = ttop; 5593 5594 id = DIF_INSTR_VAR(instr); 5595 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5596 id -= DIF_VAR_OTHER_UBASE; 5597 5598 key[nkeys].dttk_value = (uint64_t)id; 5599 key[nkeys++].dttk_size = 0; 5600 5601 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 5602 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5603 key[nkeys++].dttk_size = 0; 5604 v = &vstate->dtvs_tlocals[id]; 5605 } else { 5606 v = &vstate->dtvs_globals[id]->dtsv_var; 5607 } 5608 5609 dvar = dtrace_dynvar(dstate, nkeys, key, 5610 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5611 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5612 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 5613 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5614 5615 if (dvar == NULL) 5616 break; 5617 5618 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5619 if (!dtrace_vcanload( 5620 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5621 mstate, vstate)) 5622 break; 5623 5624 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5625 dvar->dtdv_data, &v->dtdv_type); 5626 } else { 5627 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5628 } 5629 5630 break; 5631 } 5632 5633 case DIF_OP_ALLOCS: { 5634 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5635 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 5636 5637 /* 5638 * Rounding up the user allocation size could have 5639 * overflowed large, bogus allocations (like -1ULL) to 5640 * 0. 5641 */ 5642 if (size < regs[r1] || 5643 !DTRACE_INSCRATCH(mstate, size)) { 5644 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5645 regs[rd] = 0; 5646 break; 5647 } 5648 5649 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 5650 mstate->dtms_scratch_ptr += size; 5651 regs[rd] = ptr; 5652 break; 5653 } 5654 5655 case DIF_OP_COPYS: 5656 if (!dtrace_canstore(regs[rd], regs[r2], 5657 mstate, vstate)) { 5658 *flags |= CPU_DTRACE_BADADDR; 5659 *illval = regs[rd]; 5660 break; 5661 } 5662 5663 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 5664 break; 5665 5666 dtrace_bcopy((void *)(uintptr_t)regs[r1], 5667 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 5668 break; 5669 5670 case DIF_OP_STB: 5671 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 5672 *flags |= CPU_DTRACE_BADADDR; 5673 *illval = regs[rd]; 5674 break; 5675 } 5676 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 5677 break; 5678 5679 case DIF_OP_STH: 5680 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 5681 *flags |= CPU_DTRACE_BADADDR; 5682 *illval = regs[rd]; 5683 break; 5684 } 5685 if (regs[rd] & 1) { 5686 *flags |= CPU_DTRACE_BADALIGN; 5687 *illval = regs[rd]; 5688 break; 5689 } 5690 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5691 break; 5692 5693 case DIF_OP_STW: 5694 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5695 *flags |= CPU_DTRACE_BADADDR; 5696 *illval = regs[rd]; 5697 break; 5698 } 5699 if (regs[rd] & 3) { 5700 *flags |= CPU_DTRACE_BADALIGN; 5701 *illval = regs[rd]; 5702 break; 5703 } 5704 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5705 break; 5706 5707 case DIF_OP_STX: 5708 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5709 *flags |= CPU_DTRACE_BADADDR; 5710 *illval = regs[rd]; 5711 break; 5712 } 5713 if (regs[rd] & 7) { 5714 *flags |= CPU_DTRACE_BADALIGN; 5715 *illval = regs[rd]; 5716 break; 5717 } 5718 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5719 break; 5720 } 5721 } 5722 5723 if (!(*flags & CPU_DTRACE_FAULT)) 5724 return (rval); 5725 5726 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5727 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5728 5729 return (0); 5730} 5731 5732static void 5733dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5734{ 5735 dtrace_probe_t *probe = ecb->dte_probe; 5736 dtrace_provider_t *prov = probe->dtpr_provider; 5737 char c[DTRACE_FULLNAMELEN + 80], *str; 5738 char *msg = "dtrace: breakpoint action at probe "; 5739 char *ecbmsg = " (ecb "; 5740 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5741 uintptr_t val = (uintptr_t)ecb; 5742 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5743 5744 if (dtrace_destructive_disallow) 5745 return; 5746 5747 /* 5748 * It's impossible to be taking action on the NULL probe. 
5749 */ 5750 ASSERT(probe != NULL); 5751 5752 /* 5753 * This is a poor man's (destitute man's?) sprintf(): we want to 5754 * print the provider name, module name, function name and name of 5755 * the probe, along with the hex address of the ECB with the breakpoint 5756 * action -- all of which we must place in the character buffer by 5757 * hand. 5758 */ 5759 while (*msg != '\0') 5760 c[i++] = *msg++; 5761 5762 for (str = prov->dtpv_name; *str != '\0'; str++) 5763 c[i++] = *str; 5764 c[i++] = ':'; 5765 5766 for (str = probe->dtpr_mod; *str != '\0'; str++) 5767 c[i++] = *str; 5768 c[i++] = ':'; 5769 5770 for (str = probe->dtpr_func; *str != '\0'; str++) 5771 c[i++] = *str; 5772 c[i++] = ':'; 5773 5774 for (str = probe->dtpr_name; *str != '\0'; str++) 5775 c[i++] = *str; 5776 5777 while (*ecbmsg != '\0') 5778 c[i++] = *ecbmsg++; 5779 5780 while (shift >= 0) { 5781 mask = (uintptr_t)0xf << shift; 5782 5783 if (val >= ((uintptr_t)1 << shift)) 5784 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5785 shift -= 4; 5786 } 5787 5788 c[i++] = ')'; 5789 c[i] = '\0'; 5790 5791#if defined(sun) 5792 debug_enter(c); 5793#else 5794 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 5795#endif 5796} 5797 5798static void 5799dtrace_action_panic(dtrace_ecb_t *ecb) 5800{ 5801 dtrace_probe_t *probe = ecb->dte_probe; 5802 5803 /* 5804 * It's impossible to be taking action on the NULL probe. 5805 */ 5806 ASSERT(probe != NULL); 5807 5808 if (dtrace_destructive_disallow) 5809 return; 5810 5811 if (dtrace_panicked != NULL) 5812 return; 5813 5814 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5815 return; 5816 5817 /* 5818 * We won the right to panic. (We want to be sure that only one 5819 * thread calls panic() from dtrace_probe(), and that panic() is 5820 * called exactly once.) 5821 */ 5822 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5823 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5824 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5825} 5826 5827static void 5828dtrace_action_raise(uint64_t sig) 5829{ 5830 if (dtrace_destructive_disallow) 5831 return; 5832 5833 if (sig >= NSIG) { 5834 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5835 return; 5836 } 5837 5838#if defined(sun) 5839 /* 5840 * raise() has a queue depth of 1 -- we ignore all subsequent 5841 * invocations of the raise() action. 
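 * (The queueing applies to the Solaris path only; in the FreeBSD case below, kern_psignal() posts the signal to the process immediately.)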
5842 */ 5843 if (curthread->t_dtrace_sig == 0) 5844 curthread->t_dtrace_sig = (uint8_t)sig; 5845 5846 curthread->t_sig_check = 1; 5847 aston(curthread); 5848#else 5849 struct proc *p = curproc; 5850 PROC_LOCK(p); 5851 kern_psignal(p, sig); 5852 PROC_UNLOCK(p); 5853#endif 5854} 5855 5856static void 5857dtrace_action_stop(void) 5858{ 5859 if (dtrace_destructive_disallow) 5860 return; 5861 5862#if defined(sun) 5863 if (!curthread->t_dtrace_stop) { 5864 curthread->t_dtrace_stop = 1; 5865 curthread->t_sig_check = 1; 5866 aston(curthread); 5867 } 5868#else 5869 struct proc *p = curproc; 5870 PROC_LOCK(p); 5871 kern_psignal(p, SIGSTOP); 5872 PROC_UNLOCK(p); 5873#endif 5874} 5875 5876static void 5877dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5878{ 5879 hrtime_t now; 5880 volatile uint16_t *flags; 5881#if defined(sun) 5882 cpu_t *cpu = CPU; 5883#else 5884 cpu_t *cpu = &solaris_cpu[curcpu]; 5885#endif 5886 5887 if (dtrace_destructive_disallow) 5888 return; 5889 5890 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5891 5892 now = dtrace_gethrtime(); 5893 5894 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5895 /* 5896 * We need to advance the mark to the current time. 5897 */ 5898 cpu->cpu_dtrace_chillmark = now; 5899 cpu->cpu_dtrace_chilled = 0; 5900 } 5901 5902 /* 5903 * Now check to see if the requested chill time would take us over 5904 * the maximum amount of time allowed in the chill interval. (Or 5905 * worse, if the calculation itself induces overflow.) 5906 */ 5907 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5908 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5909 *flags |= CPU_DTRACE_ILLOP; 5910 return; 5911 } 5912 5913 while (dtrace_gethrtime() - now < val) 5914 continue; 5915 5916 /* 5917 * Normally, we assure that the value of the variable "timestamp" does 5918 * not change within an ECB. The presence of chill() represents an 5919 * exception to this rule, however. 5920 */ 5921 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5922 cpu->cpu_dtrace_chilled += val; 5923} 5924 5925static void 5926dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5927 uint64_t *buf, uint64_t arg) 5928{ 5929 int nframes = DTRACE_USTACK_NFRAMES(arg); 5930 int strsize = DTRACE_USTACK_STRSIZE(arg); 5931 uint64_t *pcs = &buf[1], *fps; 5932 char *str = (char *)&pcs[nframes]; 5933 int size, offs = 0, i, j; 5934 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5935 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 5936 char *sym; 5937 5938 /* 5939 * Should be taking a faster path if string space has not been 5940 * allocated. 5941 */ 5942 ASSERT(strsize != 0); 5943 5944 /* 5945 * We will first allocate some temporary space for the frame pointers. 5946 */ 5947 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5948 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5949 (nframes * sizeof (uint64_t)); 5950 5951 if (!DTRACE_INSCRATCH(mstate, size)) { 5952 /* 5953 * Not enough room for our frame pointers -- need to indicate 5954 * that we ran out of scratch space. 5955 */ 5956 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5957 return; 5958 } 5959 5960 mstate->dtms_scratch_ptr += size; 5961 saved = mstate->dtms_scratch_ptr; 5962 5963 /* 5964 * Now get a stack with both program counters and frame pointers. 5965 */ 5966 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5967 dtrace_getufpstack(buf, fps, nframes + 1); 5968 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5969 5970 /* 5971 * If that faulted, we're cooked. 
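 * We bail to the out label, which restores the original scratch pointer; the fault itself is reported by the error handling in dtrace_probe() once this action returns.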
5972 */ 5973 if (*flags & CPU_DTRACE_FAULT) 5974 goto out; 5975 5976 /* 5977 * Now we want to walk up the stack, calling the USTACK helper. For 5978 * each iteration, we restore the scratch pointer. 5979 */ 5980 for (i = 0; i < nframes; i++) { 5981 mstate->dtms_scratch_ptr = saved; 5982 5983 if (offs >= strsize) 5984 break; 5985 5986 sym = (char *)(uintptr_t)dtrace_helper( 5987 DTRACE_HELPER_ACTION_USTACK, 5988 mstate, state, pcs[i], fps[i]); 5989 5990 /* 5991 * If we faulted while running the helper, we're going to 5992 * clear the fault and null out the corresponding string. 5993 */ 5994 if (*flags & CPU_DTRACE_FAULT) { 5995 *flags &= ~CPU_DTRACE_FAULT; 5996 str[offs++] = '\0'; 5997 continue; 5998 } 5999 6000 if (sym == NULL) { 6001 str[offs++] = '\0'; 6002 continue; 6003 } 6004 6005 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6006 6007 /* 6008 * Now copy in the string that the helper returned to us. 6009 */ 6010 for (j = 0; offs + j < strsize; j++) { 6011 if ((str[offs + j] = sym[j]) == '\0') 6012 break; 6013 } 6014 6015 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6016 6017 offs += j + 1; 6018 } 6019 6020 if (offs >= strsize) { 6021 /* 6022 * If we didn't have room for all of the strings, we don't 6023 * abort processing -- this needn't be a fatal error -- but we 6024 * still want to increment a counter (dts_stkstroverflows) to 6025 * allow this condition to be warned about. (If this is from 6026 * a jstack() action, it is easily tuned via jstackstrsize.) 6027 */ 6028 dtrace_error(&state->dts_stkstroverflows); 6029 } 6030 6031 while (offs < strsize) 6032 str[offs++] = '\0'; 6033 6034out: 6035 mstate->dtms_scratch_ptr = old; 6036} 6037 6038/* 6039 * If you're looking for the epicenter of DTrace, you just found it. This 6040 * is the function called by the provider to fire a probe -- from which all 6041 * subsequent probe-context DTrace activity emanates. 6042 */ 6043void 6044dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 6045 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 6046{ 6047 processorid_t cpuid; 6048 dtrace_icookie_t cookie; 6049 dtrace_probe_t *probe; 6050 dtrace_mstate_t mstate; 6051 dtrace_ecb_t *ecb; 6052 dtrace_action_t *act; 6053 intptr_t offs; 6054 size_t size; 6055 int vtime, onintr; 6056 volatile uint16_t *flags; 6057 hrtime_t now; 6058 6059 if (panicstr != NULL) 6060 return; 6061 6062#if defined(sun) 6063 /* 6064 * Kick out immediately if this CPU is still being born (in which case 6065 * curthread will be set to -1) or the current thread can't allow 6066 * probes in its current context. 6067 */ 6068 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 6069 return; 6070#endif 6071 6072 cookie = dtrace_interrupt_disable(); 6073 probe = dtrace_probes[id - 1]; 6074 cpuid = curcpu; 6075 onintr = CPU_ON_INTR(CPU); 6076 6077 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 6078 probe->dtpr_predcache == curthread->t_predcache) { 6079 /* 6080 * We have hit in the predicate cache; we know that 6081 * this predicate would evaluate to be false. 6082 */ 6083 dtrace_interrupt_enable(cookie); 6084 return; 6085 } 6086 6087#if defined(sun) 6088 if (panic_quiesce) { 6089#else 6090 if (panicstr != NULL) { 6091#endif 6092 /* 6093 * We don't trace anything if we're panicking. 
6094 */ 6095 dtrace_interrupt_enable(cookie); 6096 return; 6097 } 6098 6099 now = dtrace_gethrtime(); 6100 vtime = dtrace_vtime_references != 0; 6101 6102 if (vtime && curthread->t_dtrace_start) 6103 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 6104 6105 mstate.dtms_difo = NULL; 6106 mstate.dtms_probe = probe; 6107 mstate.dtms_strtok = 0; 6108 mstate.dtms_arg[0] = arg0; 6109 mstate.dtms_arg[1] = arg1; 6110 mstate.dtms_arg[2] = arg2; 6111 mstate.dtms_arg[3] = arg3; 6112 mstate.dtms_arg[4] = arg4; 6113 6114 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 6115 6116 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 6117 dtrace_predicate_t *pred = ecb->dte_predicate; 6118 dtrace_state_t *state = ecb->dte_state; 6119 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 6120 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 6121 dtrace_vstate_t *vstate = &state->dts_vstate; 6122 dtrace_provider_t *prov = probe->dtpr_provider; 6123 uint64_t tracememsize = 0; 6124 int committed = 0; 6125 caddr_t tomax; 6126 6127 /* 6128 * A little subtlety with the following (seemingly innocuous) 6129 * declaration of the automatic 'val': by looking at the 6130 * code, you might think that it could be declared in the 6131 * action processing loop, below. (That is, it's only used in 6132 * the action processing loop.) However, it must be declared 6133 * out of that scope because in the case of DIF expression 6134 * arguments to aggregating actions, one iteration of the 6135 * action loop will use the last iteration's value. 6136 */ 6137 uint64_t val = 0; 6138 6139 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 6140 *flags &= ~CPU_DTRACE_ERROR; 6141 6142 if (prov == dtrace_provider) { 6143 /* 6144 * If dtrace itself is the provider of this probe, 6145 * we're only going to continue processing the ECB if 6146 * arg0 (the dtrace_state_t) is equal to the ECB's 6147 * creating state. (This prevents disjoint consumers 6148 * from seeing one another's metaprobes.) 6149 */ 6150 if (arg0 != (uint64_t)(uintptr_t)state) 6151 continue; 6152 } 6153 6154 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 6155 /* 6156 * We're not currently active. If our provider isn't 6157 * the dtrace pseudo provider, we're not interested. 6158 */ 6159 if (prov != dtrace_provider) 6160 continue; 6161 6162 /* 6163 * Now we must further check if we are in the BEGIN 6164 * probe. If we are, we will only continue processing 6165 * if we're still in WARMUP -- if one BEGIN enabling 6166 * has invoked the exit() action, we don't want to 6167 * evaluate subsequent BEGIN enablings. 6168 */ 6169 if (probe->dtpr_id == dtrace_probeid_begin && 6170 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 6171 ASSERT(state->dts_activity == 6172 DTRACE_ACTIVITY_DRAINING); 6173 continue; 6174 } 6175 } 6176 6177 if (ecb->dte_cond) { 6178 /* 6179 * If the dte_cond bits indicate that this 6180 * consumer is only allowed to see user-mode firings 6181 * of this probe, call the provider's dtps_usermode() 6182 * entry point to check that the probe was fired 6183 * while in a user context. Skip this ECB if that's 6184 * not the case. 6185 */ 6186 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 6187 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 6188 probe->dtpr_id, probe->dtpr_arg) == 0) 6189 continue; 6190 6191#if defined(sun) 6192 /* 6193 * This is more subtle than it looks. 
We have to be 6194 * absolutely certain that CRED() isn't going to 6195 * change out from under us so it's only legit to 6196 * examine that structure if we're in constrained 6197 * situations. Currently, the only times we'll do this 6198 * check is if a non-super-user has enabled the 6199 * profile or syscall providers -- providers that 6200 * allow visibility of all processes. For the 6201 * profile case, the check above will ensure that 6202 * we're examining a user context. 6203 */ 6204 if (ecb->dte_cond & DTRACE_COND_OWNER) { 6205 cred_t *cr; 6206 cred_t *s_cr = 6207 ecb->dte_state->dts_cred.dcr_cred; 6208 proc_t *proc; 6209 6210 ASSERT(s_cr != NULL); 6211 6212 if ((cr = CRED()) == NULL || 6213 s_cr->cr_uid != cr->cr_uid || 6214 s_cr->cr_uid != cr->cr_ruid || 6215 s_cr->cr_uid != cr->cr_suid || 6216 s_cr->cr_gid != cr->cr_gid || 6217 s_cr->cr_gid != cr->cr_rgid || 6218 s_cr->cr_gid != cr->cr_sgid || 6219 (proc = ttoproc(curthread)) == NULL || 6220 (proc->p_flag & SNOCD)) 6221 continue; 6222 } 6223 6224 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 6225 cred_t *cr; 6226 cred_t *s_cr = 6227 ecb->dte_state->dts_cred.dcr_cred; 6228 6229 ASSERT(s_cr != NULL); 6230 6231 if ((cr = CRED()) == NULL || 6232 s_cr->cr_zone->zone_id != 6233 cr->cr_zone->zone_id) 6234 continue; 6235 } 6236#endif 6237 } 6238 6239 if (now - state->dts_alive > dtrace_deadman_timeout) { 6240 /* 6241 * We seem to be dead. Unless we (a) have kernel 6242 * destructive permissions (b) have explicitly enabled 6243 * destructive actions and (c) destructive actions have 6244 * not been disabled, we're going to transition into 6245 * the KILLED state, from which no further processing 6246 * on this state will be performed. 6247 */ 6248 if (!dtrace_priv_kernel_destructive(state) || 6249 !state->dts_cred.dcr_destructive || 6250 dtrace_destructive_disallow) { 6251 void *activity = &state->dts_activity; 6252 dtrace_activity_t current; 6253 6254 do { 6255 current = state->dts_activity; 6256 } while (dtrace_cas32(activity, current, 6257 DTRACE_ACTIVITY_KILLED) != current); 6258 6259 continue; 6260 } 6261 } 6262 6263 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 6264 ecb->dte_alignment, state, &mstate)) < 0) 6265 continue; 6266 6267 tomax = buf->dtb_tomax; 6268 ASSERT(tomax != NULL); 6269 6270 if (ecb->dte_size != 0) { 6271 dtrace_rechdr_t dtrh; 6272 if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 6273 mstate.dtms_timestamp = dtrace_gethrtime(); 6274 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP; 6275 } 6276 ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t)); 6277 dtrh.dtrh_epid = ecb->dte_epid; 6278 DTRACE_RECORD_STORE_TIMESTAMP(&dtrh, 6279 mstate.dtms_timestamp); 6280 *((dtrace_rechdr_t *)(tomax + offs)) = dtrh; 6281 } 6282 6283 mstate.dtms_epid = ecb->dte_epid; 6284 mstate.dtms_present |= DTRACE_MSTATE_EPID; 6285 6286 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 6287 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 6288 else 6289 mstate.dtms_access = 0; 6290 6291 if (pred != NULL) { 6292 dtrace_difo_t *dp = pred->dtp_difo; 6293 int rval; 6294 6295 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 6296 6297 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 6298 dtrace_cacheid_t cid = probe->dtpr_predcache; 6299 6300 if (cid != DTRACE_CACHEIDNONE && !onintr) { 6301 /* 6302 * Update the predicate cache...
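 * so that the check at the top of dtrace_probe() can skip this probe outright the next time this thread fires it while the predicate still evaluates to false.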
6303 */ 6304 ASSERT(cid == pred->dtp_cacheid); 6305 curthread->t_predcache = cid; 6306 } 6307 6308 continue; 6309 } 6310 } 6311 6312 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 6313 act != NULL; act = act->dta_next) { 6314 size_t valoffs; 6315 dtrace_difo_t *dp; 6316 dtrace_recdesc_t *rec = &act->dta_rec; 6317 6318 size = rec->dtrd_size; 6319 valoffs = offs + rec->dtrd_offset; 6320 6321 if (DTRACEACT_ISAGG(act->dta_kind)) { 6322 uint64_t v = 0xbad; 6323 dtrace_aggregation_t *agg; 6324 6325 agg = (dtrace_aggregation_t *)act; 6326 6327 if ((dp = act->dta_difo) != NULL) 6328 v = dtrace_dif_emulate(dp, 6329 &mstate, vstate, state); 6330 6331 if (*flags & CPU_DTRACE_ERROR) 6332 continue; 6333 6334 /* 6335 * Note that we always pass the expression 6336 * value from the previous iteration of the 6337 * action loop. This value will only be used 6338 * if there is an expression argument to the 6339 * aggregating action, denoted by the 6340 * dtag_hasarg field. 6341 */ 6342 dtrace_aggregate(agg, buf, 6343 offs, aggbuf, v, val); 6344 continue; 6345 } 6346 6347 switch (act->dta_kind) { 6348 case DTRACEACT_STOP: 6349 if (dtrace_priv_proc_destructive(state)) 6350 dtrace_action_stop(); 6351 continue; 6352 6353 case DTRACEACT_BREAKPOINT: 6354 if (dtrace_priv_kernel_destructive(state)) 6355 dtrace_action_breakpoint(ecb); 6356 continue; 6357 6358 case DTRACEACT_PANIC: 6359 if (dtrace_priv_kernel_destructive(state)) 6360 dtrace_action_panic(ecb); 6361 continue; 6362 6363 case DTRACEACT_STACK: 6364 if (!dtrace_priv_kernel(state)) 6365 continue; 6366 6367 dtrace_getpcstack((pc_t *)(tomax + valoffs), 6368 size / sizeof (pc_t), probe->dtpr_aframes, 6369 DTRACE_ANCHORED(probe) ? NULL : 6370 (uint32_t *)arg0); 6371 continue; 6372 6373 case DTRACEACT_JSTACK: 6374 case DTRACEACT_USTACK: 6375 if (!dtrace_priv_proc(state)) 6376 continue; 6377 6378 /* 6379 * See comment in DIF_VAR_PID. 6380 */ 6381 if (DTRACE_ANCHORED(mstate.dtms_probe) && 6382 CPU_ON_INTR(CPU)) { 6383 int depth = DTRACE_USTACK_NFRAMES( 6384 rec->dtrd_arg) + 1; 6385 6386 dtrace_bzero((void *)(tomax + valoffs), 6387 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 6388 + depth * sizeof (uint64_t)); 6389 6390 continue; 6391 } 6392 6393 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 6394 curproc->p_dtrace_helpers != NULL) { 6395 /* 6396 * This is the slow path -- we have 6397 * allocated string space, and we're 6398 * getting the stack of a process that 6399 * has helpers. Call into a separate 6400 * routine to perform this processing. 
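 * (That routine is dtrace_action_ustack(), above, which walks the frames and resolves each through the USTACK helper.)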
*/ 6402 dtrace_action_ustack(&mstate, state, 6403 (uint64_t *)(tomax + valoffs), 6404 rec->dtrd_arg); 6405 continue; 6406 } 6407 6408 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6409 dtrace_getupcstack((uint64_t *) 6410 (tomax + valoffs), 6411 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 6412 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6413 continue; 6414 6415 default: 6416 break; 6417 } 6418 6419 dp = act->dta_difo; 6420 ASSERT(dp != NULL); 6421 6422 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 6423 6424 if (*flags & CPU_DTRACE_ERROR) 6425 continue; 6426 6427 switch (act->dta_kind) { 6428 case DTRACEACT_SPECULATE: { 6429 dtrace_rechdr_t *dtrh; 6430 6431 ASSERT(buf == &state->dts_buffer[cpuid]); 6432 buf = dtrace_speculation_buffer(state, 6433 cpuid, val); 6434 6435 if (buf == NULL) { 6436 *flags |= CPU_DTRACE_DROP; 6437 continue; 6438 } 6439 6440 offs = dtrace_buffer_reserve(buf, 6441 ecb->dte_needed, ecb->dte_alignment, 6442 state, NULL); 6443 6444 if (offs < 0) { 6445 *flags |= CPU_DTRACE_DROP; 6446 continue; 6447 } 6448 6449 tomax = buf->dtb_tomax; 6450 ASSERT(tomax != NULL); 6451 6452 if (ecb->dte_size == 0) 6453 continue; 6454 6455 ASSERT3U(ecb->dte_size, >=, 6456 sizeof (dtrace_rechdr_t)); 6457 dtrh = ((void *)(tomax + offs)); 6458 dtrh->dtrh_epid = ecb->dte_epid; 6459 /* 6460 * When the speculation is committed, all of 6461 * the records in the speculative buffer will 6462 * have their timestamps set to the commit 6463 * time. Until then, it is set to a sentinel 6464 * value, for debugability. 6465 */ 6466 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX); 6467 continue; 6468 } 6469 6470 case DTRACEACT_PRINTM: { 6471 /* The DIF returns a 'memref'. */ 6472 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 6473 6474 /* Get the size from the memref. */ 6475 size = memref[1]; 6476 6477 /* 6478 * Check if the size exceeds the allocated 6479 * buffer size. 6480 */ 6481 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6482 /* Flag a drop! */ 6483 *flags |= CPU_DTRACE_DROP; 6484 continue; 6485 } 6486 6487 /* Store the size in the buffer first. */ 6488 DTRACE_STORE(uintptr_t, tomax, 6489 valoffs, size); 6490 6491 /* 6492 * Offset the buffer address to the start 6493 * of the data. 6494 */ 6495 valoffs += sizeof(uintptr_t); 6496 6497 /* 6498 * Reset to the memory address rather than 6499 * the memref array, then let the BYREF 6500 * code below do the work to store the 6501 * memory data in the buffer. 6502 */ 6503 val = memref[0]; 6504 break; 6505 } 6506 6507 case DTRACEACT_PRINTT: { 6508 /* The DIF returns a 'typeref'. */ 6509 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val; 6510 char c = '\0' + 1; 6511 size_t s; 6512 6513 /* 6514 * Get the type string length and round it 6515 * up so that the data that follows is 6516 * aligned for easy access. 6517 */ 6518 size_t typs = strlen((char *) typeref[2]) + 1; 6519 typs = roundup(typs, sizeof(uintptr_t)); 6520 6521 /* 6522 * Get the size from the typeref using the 6523 * number of elements and the type size. 6524 */ 6525 size = typeref[1] * typeref[3]; 6526 6527 /* 6528 * Check if the size exceeds the allocated 6529 * buffer size. 6530 */ 6531 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6532 /* Flag a drop! */ 6533 *flags |= CPU_DTRACE_DROP; 6534 continue; 6535 } 6536 6537 /* Store the size in the buffer first. */ 6538 DTRACE_STORE(uintptr_t, tomax, 6539 valoffs, size); 6540 valoffs += sizeof(uintptr_t); 6541 6542 /* Store the type size in the buffer. 
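 * (typeref[3] is the size of a single element; together with the element count in typeref[1] it describes the data that follows.)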
*/ 6543 DTRACE_STORE(uintptr_t, tomax, 6544 valoffs, typeref[3]); 6545 valoffs += sizeof(uintptr_t); 6546 6547 val = typeref[2]; 6548 6549 for (s = 0; s < typs; s++) { 6550 if (c != '\0') 6551 c = dtrace_load8(val++); 6552 6553 DTRACE_STORE(uint8_t, tomax, 6554 valoffs++, c); 6555 } 6556 6557 /* 6558 * Reset to the memory address rather than 6559 * the typeref array, then let the BYREF 6560 * code below do the work to store the 6561 * memory data in the buffer. 6562 */ 6563 val = typeref[0]; 6564 break; 6565 } 6566 6567 case DTRACEACT_CHILL: 6568 if (dtrace_priv_kernel_destructive(state)) 6569 dtrace_action_chill(&mstate, val); 6570 continue; 6571 6572 case DTRACEACT_RAISE: 6573 if (dtrace_priv_proc_destructive(state)) 6574 dtrace_action_raise(val); 6575 continue; 6576 6577 case DTRACEACT_COMMIT: 6578 ASSERT(!committed); 6579 6580 /* 6581 * We need to commit our buffer state. 6582 */ 6583 if (ecb->dte_size) 6584 buf->dtb_offset = offs + ecb->dte_size; 6585 buf = &state->dts_buffer[cpuid]; 6586 dtrace_speculation_commit(state, cpuid, val); 6587 committed = 1; 6588 continue; 6589 6590 case DTRACEACT_DISCARD: 6591 dtrace_speculation_discard(state, cpuid, val); 6592 continue; 6593 6594 case DTRACEACT_DIFEXPR: 6595 case DTRACEACT_LIBACT: 6596 case DTRACEACT_PRINTF: 6597 case DTRACEACT_PRINTA: 6598 case DTRACEACT_SYSTEM: 6599 case DTRACEACT_FREOPEN: 6600 case DTRACEACT_TRACEMEM: 6601 break; 6602 6603 case DTRACEACT_TRACEMEM_DYNSIZE: 6604 tracememsize = val; 6605 break; 6606 6607 case DTRACEACT_SYM: 6608 case DTRACEACT_MOD: 6609 if (!dtrace_priv_kernel(state)) 6610 continue; 6611 break; 6612 6613 case DTRACEACT_USYM: 6614 case DTRACEACT_UMOD: 6615 case DTRACEACT_UADDR: { 6616#if defined(sun) 6617 struct pid *pid = curthread->t_procp->p_pidp; 6618#endif 6619 6620 if (!dtrace_priv_proc(state)) 6621 continue; 6622 6623 DTRACE_STORE(uint64_t, tomax, 6624#if defined(sun) 6625 valoffs, (uint64_t)pid->pid_id); 6626#else 6627 valoffs, (uint64_t) curproc->p_pid); 6628#endif 6629 DTRACE_STORE(uint64_t, tomax, 6630 valoffs + sizeof (uint64_t), val); 6631 6632 continue; 6633 } 6634 6635 case DTRACEACT_EXIT: { 6636 /* 6637 * For the exit action, we are going to attempt 6638 * to atomically set our activity to be 6639 * draining. If this fails (either because 6640 * another CPU has beat us to the exit action, 6641 * or because our current activity is something 6642 * other than ACTIVE or WARMUP), we will 6643 * continue. This assures that the exit action 6644 * can be successfully recorded at most once 6645 * when we're in the ACTIVE state. If we're 6646 * encountering the exit() action while in 6647 * COOLDOWN, however, we want to honor the new 6648 * status code. (We know that we're the only 6649 * thread in COOLDOWN, so there is no race.) 
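 * The compare-and-swap below moves us from ACTIVE or WARMUP to DRAINING; if the exchange fails, another CPU changed the activity first and we drop this record.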
6650 */ 6651 void *activity = &state->dts_activity; 6652 dtrace_activity_t current = state->dts_activity; 6653 6654 if (current == DTRACE_ACTIVITY_COOLDOWN) 6655 break; 6656 6657 if (current != DTRACE_ACTIVITY_WARMUP) 6658 current = DTRACE_ACTIVITY_ACTIVE; 6659 6660 if (dtrace_cas32(activity, current, 6661 DTRACE_ACTIVITY_DRAINING) != current) { 6662 *flags |= CPU_DTRACE_DROP; 6663 continue; 6664 } 6665 6666 break; 6667 } 6668 6669 default: 6670 ASSERT(0); 6671 } 6672 6673 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 6674 uintptr_t end = valoffs + size; 6675 6676 if (tracememsize != 0 && 6677 valoffs + tracememsize < end) { 6678 end = valoffs + tracememsize; 6679 tracememsize = 0; 6680 } 6681 6682 if (!dtrace_vcanload((void *)(uintptr_t)val, 6683 &dp->dtdo_rtype, &mstate, vstate)) 6684 continue; 6685 6686 /* 6687 * If this is a string, we're going to only 6688 * load until we find the zero byte -- after 6689 * which we'll store zero bytes. 6690 */ 6691 if (dp->dtdo_rtype.dtdt_kind == 6692 DIF_TYPE_STRING) { 6693 char c = '\0' + 1; 6694 int intuple = act->dta_intuple; 6695 size_t s; 6696 6697 for (s = 0; s < size; s++) { 6698 if (c != '\0') 6699 c = dtrace_load8(val++); 6700 6701 DTRACE_STORE(uint8_t, tomax, 6702 valoffs++, c); 6703 6704 if (c == '\0' && intuple) 6705 break; 6706 } 6707 6708 continue; 6709 } 6710 6711 while (valoffs < end) { 6712 DTRACE_STORE(uint8_t, tomax, valoffs++, 6713 dtrace_load8(val++)); 6714 } 6715 6716 continue; 6717 } 6718 6719 switch (size) { 6720 case 0: 6721 break; 6722 6723 case sizeof (uint8_t): 6724 DTRACE_STORE(uint8_t, tomax, valoffs, val); 6725 break; 6726 case sizeof (uint16_t): 6727 DTRACE_STORE(uint16_t, tomax, valoffs, val); 6728 break; 6729 case sizeof (uint32_t): 6730 DTRACE_STORE(uint32_t, tomax, valoffs, val); 6731 break; 6732 case sizeof (uint64_t): 6733 DTRACE_STORE(uint64_t, tomax, valoffs, val); 6734 break; 6735 default: 6736 /* 6737 * Any other size should have been returned by 6738 * reference, not by value. 6739 */ 6740 ASSERT(0); 6741 break; 6742 } 6743 } 6744 6745 if (*flags & CPU_DTRACE_DROP) 6746 continue; 6747 6748 if (*flags & CPU_DTRACE_FAULT) { 6749 int ndx; 6750 dtrace_action_t *err; 6751 6752 buf->dtb_errors++; 6753 6754 if (probe->dtpr_id == dtrace_probeid_error) { 6755 /* 6756 * There's nothing we can do -- we had an 6757 * error on the error probe. We bump an 6758 * error counter to at least indicate that 6759 * this condition happened. 6760 */ 6761 dtrace_error(&state->dts_dblerrors); 6762 continue; 6763 } 6764 6765 if (vtime) { 6766 /* 6767 * Before recursing on dtrace_probe(), we 6768 * need to explicitly clear out our start 6769 * time to prevent it from being accumulated 6770 * into t_dtrace_vtime. 6771 */ 6772 curthread->t_dtrace_start = 0; 6773 } 6774 6775 /* 6776 * Iterate over the actions to figure out which action 6777 * we were processing when we experienced the error. 6778 * Note that act points _past_ the faulting action; if 6779 * act is ecb->dte_action, the fault was in the 6780 * predicate, if it's ecb->dte_action->dta_next it's 6781 * in action #1, and so on. 6782 */ 6783 for (err = ecb->dte_action, ndx = 0; 6784 err != act; err = err->dta_next, ndx++) 6785 continue; 6786 6787 dtrace_probe_error(state, ecb->dte_epid, ndx, 6788 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 
6789 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 6790 cpu_core[cpuid].cpuc_dtrace_illval); 6791 6792 continue; 6793 } 6794 6795 if (!committed) 6796 buf->dtb_offset = offs + ecb->dte_size; 6797 } 6798 6799 if (vtime) 6800 curthread->t_dtrace_start = dtrace_gethrtime(); 6801 6802 dtrace_interrupt_enable(cookie); 6803} 6804 6805/* 6806 * DTrace Probe Hashing Functions 6807 * 6808 * The functions in this section (and indeed, the functions in remaining 6809 * sections) are not _called_ from probe context. (Any exceptions to this are 6810 * marked with a "Note:".) Rather, they are called from elsewhere in the 6811 * DTrace framework to look-up probes in, add probes to and remove probes from 6812 * the DTrace probe hashes. (Each probe is hashed by each element of the 6813 * probe tuple -- allowing for fast lookups, regardless of what was 6814 * specified.) 6815 */ 6816static uint_t 6817dtrace_hash_str(const char *p) 6818{ 6819 unsigned int g; 6820 uint_t hval = 0; 6821 6822 while (*p) { 6823 hval = (hval << 4) + *p++; 6824 if ((g = (hval & 0xf0000000)) != 0) 6825 hval ^= g >> 24; 6826 hval &= ~g; 6827 } 6828 return (hval); 6829} 6830 6831static dtrace_hash_t * 6832dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 6833{ 6834 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 6835 6836 hash->dth_stroffs = stroffs; 6837 hash->dth_nextoffs = nextoffs; 6838 hash->dth_prevoffs = prevoffs; 6839 6840 hash->dth_size = 1; 6841 hash->dth_mask = hash->dth_size - 1; 6842 6843 hash->dth_tab = kmem_zalloc(hash->dth_size * 6844 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 6845 6846 return (hash); 6847} 6848 6849static void 6850dtrace_hash_destroy(dtrace_hash_t *hash) 6851{ 6852#ifdef DEBUG 6853 int i; 6854 6855 for (i = 0; i < hash->dth_size; i++) 6856 ASSERT(hash->dth_tab[i] == NULL); 6857#endif 6858 6859 kmem_free(hash->dth_tab, 6860 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 6861 kmem_free(hash, sizeof (dtrace_hash_t)); 6862} 6863 6864static void 6865dtrace_hash_resize(dtrace_hash_t *hash) 6866{ 6867 int size = hash->dth_size, i, ndx; 6868 int new_size = hash->dth_size << 1; 6869 int new_mask = new_size - 1; 6870 dtrace_hashbucket_t **new_tab, *bucket, *next; 6871 6872 ASSERT((new_size & new_mask) == 0); 6873 6874 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 6875 6876 for (i = 0; i < size; i++) { 6877 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 6878 dtrace_probe_t *probe = bucket->dthb_chain; 6879 6880 ASSERT(probe != NULL); 6881 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 6882 6883 next = bucket->dthb_next; 6884 bucket->dthb_next = new_tab[ndx]; 6885 new_tab[ndx] = bucket; 6886 } 6887 } 6888 6889 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 6890 hash->dth_tab = new_tab; 6891 hash->dth_size = new_size; 6892 hash->dth_mask = new_mask; 6893} 6894 6895static void 6896dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 6897{ 6898 int hashval = DTRACE_HASHSTR(hash, new); 6899 int ndx = hashval & hash->dth_mask; 6900 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6901 dtrace_probe_t **nextp, **prevp; 6902 6903 for (; bucket != NULL; bucket = bucket->dthb_next) { 6904 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 6905 goto add; 6906 } 6907 6908 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 6909 dtrace_hash_resize(hash); 6910 dtrace_hash_add(hash, new); 6911 return; 6912 } 6913 6914 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 6915 bucket->dthb_next = hash->dth_tab[ndx]; 
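		/* Make the newly allocated bucket the head of this hash chain. */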
6916 hash->dth_tab[ndx] = bucket; 6917 hash->dth_nbuckets++; 6918 6919add: 6920 nextp = DTRACE_HASHNEXT(hash, new); 6921 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6922 *nextp = bucket->dthb_chain; 6923 6924 if (bucket->dthb_chain != NULL) { 6925 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6926 ASSERT(*prevp == NULL); 6927 *prevp = new; 6928 } 6929 6930 bucket->dthb_chain = new; 6931 bucket->dthb_len++; 6932} 6933 6934static dtrace_probe_t * 6935dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6936{ 6937 int hashval = DTRACE_HASHSTR(hash, template); 6938 int ndx = hashval & hash->dth_mask; 6939 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6940 6941 for (; bucket != NULL; bucket = bucket->dthb_next) { 6942 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6943 return (bucket->dthb_chain); 6944 } 6945 6946 return (NULL); 6947} 6948 6949static int 6950dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6951{ 6952 int hashval = DTRACE_HASHSTR(hash, template); 6953 int ndx = hashval & hash->dth_mask; 6954 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6955 6956 for (; bucket != NULL; bucket = bucket->dthb_next) { 6957 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6958 return (bucket->dthb_len); 6959 } 6960 6961 return (0); 6962} 6963 6964static void 6965dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 6966{ 6967 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 6968 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6969 6970 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 6971 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 6972 6973 /* 6974 * Find the bucket that we're removing this probe from. 6975 */ 6976 for (; bucket != NULL; bucket = bucket->dthb_next) { 6977 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 6978 break; 6979 } 6980 6981 ASSERT(bucket != NULL); 6982 6983 if (*prevp == NULL) { 6984 if (*nextp == NULL) { 6985 /* 6986 * The removed probe was the only probe on this 6987 * bucket; we need to remove the bucket. 6988 */ 6989 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 6990 6991 ASSERT(bucket->dthb_chain == probe); 6992 ASSERT(b != NULL); 6993 6994 if (b == bucket) { 6995 hash->dth_tab[ndx] = bucket->dthb_next; 6996 } else { 6997 while (b->dthb_next != bucket) 6998 b = b->dthb_next; 6999 b->dthb_next = bucket->dthb_next; 7000 } 7001 7002 ASSERT(hash->dth_nbuckets > 0); 7003 hash->dth_nbuckets--; 7004 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 7005 return; 7006 } 7007 7008 bucket->dthb_chain = *nextp; 7009 } else { 7010 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 7011 } 7012 7013 if (*nextp != NULL) 7014 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 7015} 7016 7017/* 7018 * DTrace Utility Functions 7019 * 7020 * These are random utility functions that are _not_ called from probe context. 7021 */ 7022static int 7023dtrace_badattr(const dtrace_attribute_t *a) 7024{ 7025 return (a->dtat_name > DTRACE_STABILITY_MAX || 7026 a->dtat_data > DTRACE_STABILITY_MAX || 7027 a->dtat_class > DTRACE_CLASS_MAX); 7028} 7029 7030/* 7031 * Return a duplicate copy of a string. If the specified string is NULL, 7032 * this function returns a zero-length string. 7033 */ 7034static char * 7035dtrace_strdup(const char *str) 7036{ 7037 char *new = kmem_zalloc((str != NULL ? 
strlen(str) : 0) + 1, KM_SLEEP); 7038 7039 if (str != NULL) 7040 (void) strcpy(new, str); 7041 7042 return (new); 7043} 7044 7045#define DTRACE_ISALPHA(c) \ 7046 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 7047 7048static int 7049dtrace_badname(const char *s) 7050{ 7051 char c; 7052 7053 if (s == NULL || (c = *s++) == '\0') 7054 return (0); 7055 7056 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 7057 return (1); 7058 7059 while ((c = *s++) != '\0') { 7060 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 7061 c != '-' && c != '_' && c != '.' && c != '`') 7062 return (1); 7063 } 7064 7065 return (0); 7066} 7067 7068static void 7069dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 7070{ 7071 uint32_t priv; 7072 7073#if defined(sun) 7074 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 7075 /* 7076 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 7077 */ 7078 priv = DTRACE_PRIV_ALL; 7079 } else { 7080 *uidp = crgetuid(cr); 7081 *zoneidp = crgetzoneid(cr); 7082 7083 priv = 0; 7084 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 7085 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 7086 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 7087 priv |= DTRACE_PRIV_USER; 7088 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 7089 priv |= DTRACE_PRIV_PROC; 7090 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 7091 priv |= DTRACE_PRIV_OWNER; 7092 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 7093 priv |= DTRACE_PRIV_ZONEOWNER; 7094 } 7095#else 7096 priv = DTRACE_PRIV_ALL; 7097#endif 7098 7099 *privp = priv; 7100} 7101 7102#ifdef DTRACE_ERRDEBUG 7103static void 7104dtrace_errdebug(const char *str) 7105{ 7106 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 7107 int occupied = 0; 7108 7109 mutex_enter(&dtrace_errlock); 7110 dtrace_errlast = str; 7111 dtrace_errthread = curthread; 7112 7113 while (occupied++ < DTRACE_ERRHASHSZ) { 7114 if (dtrace_errhash[hval].dter_msg == str) { 7115 dtrace_errhash[hval].dter_count++; 7116 goto out; 7117 } 7118 7119 if (dtrace_errhash[hval].dter_msg != NULL) { 7120 hval = (hval + 1) % DTRACE_ERRHASHSZ; 7121 continue; 7122 } 7123 7124 dtrace_errhash[hval].dter_msg = str; 7125 dtrace_errhash[hval].dter_count = 1; 7126 goto out; 7127 } 7128 7129 panic("dtrace: undersized error hash"); 7130out: 7131 mutex_exit(&dtrace_errlock); 7132} 7133#endif 7134 7135/* 7136 * DTrace Matching Functions 7137 * 7138 * These functions are used to match groups of probes, given some elements of 7139 * a probe tuple, or some globbed expressions for elements of a probe tuple. 7140 */ 7141static int 7142dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 7143 zoneid_t zoneid) 7144{ 7145 if (priv != DTRACE_PRIV_ALL) { 7146 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 7147 uint32_t match = priv & ppriv; 7148 7149 /* 7150 * No PRIV_DTRACE_* privileges... 7151 */ 7152 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 7153 DTRACE_PRIV_KERNEL)) == 0) 7154 return (0); 7155 7156 /* 7157 * No matching bits, but there were bits to match... 7158 */ 7159 if (match == 0 && ppriv != 0) 7160 return (0); 7161 7162 /* 7163 * Need to have permissions to the process, but don't... 7164 */ 7165 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 7166 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 7167 return (0); 7168 } 7169 7170 /* 7171 * Need to be in the same zone unless we possess the 7172 * privilege to examine all zones. 
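 *
 * For example (illustrative): if the provider published its probes
 * with DTRACE_PRIV_ZONEOWNER and the consumer lacks that privilege,
 * the match succeeds only when the consumer's zoneid equals the
 * provider's dtpp_zoneid.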
7173 */ 7174 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 7175 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 7176 return (0); 7177 } 7178 } 7179 7180 return (1); 7181} 7182 7183/* 7184 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 7185 * consists of input pattern strings and an ops-vector to evaluate them. 7186 * This function returns >0 for match, 0 for no match, and <0 for error. 7187 */ 7188static int 7189dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 7190 uint32_t priv, uid_t uid, zoneid_t zoneid) 7191{ 7192 dtrace_provider_t *pvp = prp->dtpr_provider; 7193 int rv; 7194 7195 if (pvp->dtpv_defunct) 7196 return (0); 7197 7198 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 7199 return (rv); 7200 7201 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 7202 return (rv); 7203 7204 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 7205 return (rv); 7206 7207 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 7208 return (rv); 7209 7210 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 7211 return (0); 7212 7213 return (rv); 7214} 7215 7216/* 7217 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 7218 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 7219 * libc's version, the kernel version only applies to 8-bit ASCII strings. 7220 * In addition, all of the recursion cases except for '*' matching have been 7221 * unwound. For '*', we still implement recursive evaluation, but a depth 7222 * counter is maintained and matching is aborted if we recurse too deep. 7223 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 7224 */ 7225static int 7226dtrace_match_glob(const char *s, const char *p, int depth) 7227{ 7228 const char *olds; 7229 char s1, c; 7230 int gs; 7231 7232 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 7233 return (-1); 7234 7235 if (s == NULL) 7236 s = ""; /* treat NULL as empty string */ 7237 7238top: 7239 olds = s; 7240 s1 = *s++; 7241 7242 if (p == NULL) 7243 return (0); 7244 7245 if ((c = *p++) == '\0') 7246 return (s1 == '\0'); 7247 7248 switch (c) { 7249 case '[': { 7250 int ok = 0, notflag = 0; 7251 char lc = '\0'; 7252 7253 if (s1 == '\0') 7254 return (0); 7255 7256 if (*p == '!') { 7257 notflag = 1; 7258 p++; 7259 } 7260 7261 if ((c = *p++) == '\0') 7262 return (0); 7263 7264 do { 7265 if (c == '-' && lc != '\0' && *p != ']') { 7266 if ((c = *p++) == '\0') 7267 return (0); 7268 if (c == '\\' && (c = *p++) == '\0') 7269 return (0); 7270 7271 if (notflag) { 7272 if (s1 < lc || s1 > c) 7273 ok++; 7274 else 7275 return (0); 7276 } else if (lc <= s1 && s1 <= c) 7277 ok++; 7278 7279 } else if (c == '\\' && (c = *p++) == '\0') 7280 return (0); 7281 7282 lc = c; /* save left-hand 'c' for next iteration */ 7283 7284 if (notflag) { 7285 if (s1 != c) 7286 ok++; 7287 else 7288 return (0); 7289 } else if (s1 == c) 7290 ok++; 7291 7292 if ((c = *p++) == '\0') 7293 return (0); 7294 7295 } while (c != ']'); 7296 7297 if (ok) 7298 goto top; 7299 7300 return (0); 7301 } 7302 7303 case '\\': 7304 if ((c = *p++) == '\0') 7305 return (0); 7306 /*FALLTHRU*/ 7307 7308 default: 7309 if (c != s1) 7310 return (0); 7311 /*FALLTHRU*/ 7312 7313 case '?': 7314 if (s1 != '\0') 7315 goto top; 7316 return (0); 7317 7318 case '*': 7319 while (*p == '*') 7320 p++; /* consecutive *'s are identical to a single one */ 7321 7322 if (*p == '\0') 7323 return (1); 7324 7325 for (s = olds; *s != 
'\0'; s++) { 7326 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 7327 return (gs); 7328 } 7329 7330 return (0); 7331 } 7332} 7333 7334/*ARGSUSED*/ 7335static int 7336dtrace_match_string(const char *s, const char *p, int depth) 7337{ 7338 return (s != NULL && strcmp(s, p) == 0); 7339} 7340 7341/*ARGSUSED*/ 7342static int 7343dtrace_match_nul(const char *s, const char *p, int depth) 7344{ 7345 return (1); /* always match the empty pattern */ 7346} 7347 7348/*ARGSUSED*/ 7349static int 7350dtrace_match_nonzero(const char *s, const char *p, int depth) 7351{ 7352 return (s != NULL && s[0] != '\0'); 7353} 7354 7355static int 7356dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 7357 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 7358{ 7359 dtrace_probe_t template, *probe; 7360 dtrace_hash_t *hash = NULL; 7361 int len, best = INT_MAX, nmatched = 0; 7362 dtrace_id_t i; 7363 7364 ASSERT(MUTEX_HELD(&dtrace_lock)); 7365 7366 /* 7367 * If the probe ID is specified in the key, just lookup by ID and 7368 * invoke the match callback once if a matching probe is found. 7369 */ 7370 if (pkp->dtpk_id != DTRACE_IDNONE) { 7371 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 7372 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 7373 (void) (*matched)(probe, arg); 7374 nmatched++; 7375 } 7376 return (nmatched); 7377 } 7378 7379 template.dtpr_mod = (char *)pkp->dtpk_mod; 7380 template.dtpr_func = (char *)pkp->dtpk_func; 7381 template.dtpr_name = (char *)pkp->dtpk_name; 7382 7383 /* 7384 * We want to find the most distinct of the module name, function 7385 * name, and name. So for each one that is not a glob pattern or 7386 * empty string, we perform a lookup in the corresponding hash and 7387 * use the hash table with the fewest collisions to do our search. 7388 */ 7389 if (pkp->dtpk_mmatch == &dtrace_match_string && 7390 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 7391 best = len; 7392 hash = dtrace_bymod; 7393 } 7394 7395 if (pkp->dtpk_fmatch == &dtrace_match_string && 7396 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 7397 best = len; 7398 hash = dtrace_byfunc; 7399 } 7400 7401 if (pkp->dtpk_nmatch == &dtrace_match_string && 7402 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 7403 best = len; 7404 hash = dtrace_byname; 7405 } 7406 7407 /* 7408 * If we did not select a hash table, iterate over every probe and 7409 * invoke our callback for each one that matches our input probe key. 7410 */ 7411 if (hash == NULL) { 7412 for (i = 0; i < dtrace_nprobes; i++) { 7413 if ((probe = dtrace_probes[i]) == NULL || 7414 dtrace_match_probe(probe, pkp, priv, uid, 7415 zoneid) <= 0) 7416 continue; 7417 7418 nmatched++; 7419 7420 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7421 break; 7422 } 7423 7424 return (nmatched); 7425 } 7426 7427 /* 7428 * If we selected a hash table, iterate over each probe of the same key 7429 * name and invoke the callback for every probe that matches the other 7430 * attributes of our input probe key. 
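 *
 * For example (an illustrative, hypothetical description): for
 * foo:foomod:foofunc:entry, the module, function and name hashes are
 * all eligible; if "foofunc" has the fewest collisions, we walk
 * dtrace_byfunc and let dtrace_match_probe() filter on the
 * remaining fields of the key.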
7431 */
7432 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
7433 probe = *(DTRACE_HASHNEXT(hash, probe))) {
7434
7435 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
7436 continue;
7437
7438 nmatched++;
7439
7440 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
7441 break;
7442 }
7443
7444 return (nmatched);
7445}
7446
7447/*
7448 * Return the function pointer that dtrace_match_probe() should use to
7449 * compare the specified pattern with a string. For NULL or empty patterns,
7450 * we select dtrace_match_nul(). For glob pattern strings, we use
7451 * dtrace_match_glob(). For non-empty non-glob strings, we use
7452 * dtrace_match_string().
 */
7453static dtrace_probekey_f *
7454dtrace_probekey_func(const char *p)
7455{
7456 char c;
7457
7458 if (p == NULL || *p == '\0')
7459 return (&dtrace_match_nul);
7460
7461 while ((c = *p++) != '\0') {
7462 if (c == '[' || c == '?' || c == '*' || c == '\\')
7463 return (&dtrace_match_glob);
7464 }
7465
7466 return (&dtrace_match_string);
7467}
7468
7469/*
7470 * Build a probe comparison key for use with dtrace_match_probe() from the
7471 * given probe description. By convention, a null key only matches anchored
7472 * probes: if each field is the empty string, reset dtpk_fmatch to
7473 * dtrace_match_nonzero().
7474 */
7475static void
7476dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
7477{
7478 pkp->dtpk_prov = pdp->dtpd_provider;
7479 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
7480
7481 pkp->dtpk_mod = pdp->dtpd_mod;
7482 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
7483
7484 pkp->dtpk_func = pdp->dtpd_func;
7485 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
7486
7487 pkp->dtpk_name = pdp->dtpd_name;
7488 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
7489
7490 pkp->dtpk_id = pdp->dtpd_id;
7491
7492 if (pkp->dtpk_id == DTRACE_IDNONE &&
7493 pkp->dtpk_pmatch == &dtrace_match_nul &&
7494 pkp->dtpk_mmatch == &dtrace_match_nul &&
7495 pkp->dtpk_fmatch == &dtrace_match_nul &&
7496 pkp->dtpk_nmatch == &dtrace_match_nul)
7497 pkp->dtpk_fmatch = &dtrace_match_nonzero;
7498}
7499
7500/*
7501 * DTrace Provider-to-Framework API Functions
7502 *
7503 * These functions implement much of the Provider-to-Framework API, as
7504 * described in <sys/dtrace.h>. The parts of the API not in this section are
7505 * the functions in the API for probe management (found below), and
7506 * dtrace_probe() itself (found above).
7507 */
7508
7509/*
7510 * Register the calling provider with the DTrace framework. This should
7511 * generally be called by DTrace providers in their attach(9E) entry point.
7512 */
7513int
7514dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
7515 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
7516{
7517 dtrace_provider_t *provider;
7518
7519 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
7520 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7521 "arguments", name ?
name : "<NULL>"); 7522 return (EINVAL); 7523 } 7524 7525 if (name[0] == '\0' || dtrace_badname(name)) { 7526 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7527 "provider name", name); 7528 return (EINVAL); 7529 } 7530 7531 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 7532 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 7533 pops->dtps_destroy == NULL || 7534 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 7535 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7536 "provider ops", name); 7537 return (EINVAL); 7538 } 7539 7540 if (dtrace_badattr(&pap->dtpa_provider) || 7541 dtrace_badattr(&pap->dtpa_mod) || 7542 dtrace_badattr(&pap->dtpa_func) || 7543 dtrace_badattr(&pap->dtpa_name) || 7544 dtrace_badattr(&pap->dtpa_args)) { 7545 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7546 "provider attributes", name); 7547 return (EINVAL); 7548 } 7549 7550 if (priv & ~DTRACE_PRIV_ALL) { 7551 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7552 "privilege attributes", name); 7553 return (EINVAL); 7554 } 7555 7556 if ((priv & DTRACE_PRIV_KERNEL) && 7557 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 7558 pops->dtps_usermode == NULL) { 7559 cmn_err(CE_WARN, "failed to register provider '%s': need " 7560 "dtps_usermode() op for given privilege attributes", name); 7561 return (EINVAL); 7562 } 7563 7564 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 7565 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7566 (void) strcpy(provider->dtpv_name, name); 7567 7568 provider->dtpv_attr = *pap; 7569 provider->dtpv_priv.dtpp_flags = priv; 7570 if (cr != NULL) { 7571 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 7572 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 7573 } 7574 provider->dtpv_pops = *pops; 7575 7576 if (pops->dtps_provide == NULL) { 7577 ASSERT(pops->dtps_provide_module != NULL); 7578 provider->dtpv_pops.dtps_provide = 7579 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 7580 } 7581 7582 if (pops->dtps_provide_module == NULL) { 7583 ASSERT(pops->dtps_provide != NULL); 7584 provider->dtpv_pops.dtps_provide_module = 7585 (void (*)(void *, modctl_t *))dtrace_nullop; 7586 } 7587 7588 if (pops->dtps_suspend == NULL) { 7589 ASSERT(pops->dtps_resume == NULL); 7590 provider->dtpv_pops.dtps_suspend = 7591 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7592 provider->dtpv_pops.dtps_resume = 7593 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7594 } 7595 7596 provider->dtpv_arg = arg; 7597 *idp = (dtrace_provider_id_t)provider; 7598 7599 if (pops == &dtrace_provider_ops) { 7600 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7601 ASSERT(MUTEX_HELD(&dtrace_lock)); 7602 ASSERT(dtrace_anon.dta_enabling == NULL); 7603 7604 /* 7605 * We make sure that the DTrace provider is at the head of 7606 * the provider chain. 7607 */ 7608 provider->dtpv_next = dtrace_provider; 7609 dtrace_provider = provider; 7610 return (0); 7611 } 7612 7613 mutex_enter(&dtrace_provider_lock); 7614 mutex_enter(&dtrace_lock); 7615 7616 /* 7617 * If there is at least one provider registered, we'll add this 7618 * provider after the first provider. 
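 * (The first provider is always DTrace itself, which was placed at
 * the head of the chain when it registered, above.)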
7619 */ 7620 if (dtrace_provider != NULL) { 7621 provider->dtpv_next = dtrace_provider->dtpv_next; 7622 dtrace_provider->dtpv_next = provider; 7623 } else { 7624 dtrace_provider = provider; 7625 } 7626 7627 if (dtrace_retained != NULL) { 7628 dtrace_enabling_provide(provider); 7629 7630 /* 7631 * Now we need to call dtrace_enabling_matchall() -- which 7632 * will acquire cpu_lock and dtrace_lock. We therefore need 7633 * to drop all of our locks before calling into it... 7634 */ 7635 mutex_exit(&dtrace_lock); 7636 mutex_exit(&dtrace_provider_lock); 7637 dtrace_enabling_matchall(); 7638 7639 return (0); 7640 } 7641 7642 mutex_exit(&dtrace_lock); 7643 mutex_exit(&dtrace_provider_lock); 7644 7645 return (0); 7646} 7647 7648/* 7649 * Unregister the specified provider from the DTrace framework. This should 7650 * generally be called by DTrace providers in their detach(9E) entry point. 7651 */ 7652int 7653dtrace_unregister(dtrace_provider_id_t id) 7654{ 7655 dtrace_provider_t *old = (dtrace_provider_t *)id; 7656 dtrace_provider_t *prev = NULL; 7657 int i, self = 0, noreap = 0; 7658 dtrace_probe_t *probe, *first = NULL; 7659 7660 if (old->dtpv_pops.dtps_enable == 7661 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 7662 /* 7663 * If DTrace itself is the provider, we're called with locks 7664 * already held. 7665 */ 7666 ASSERT(old == dtrace_provider); 7667#if defined(sun) 7668 ASSERT(dtrace_devi != NULL); 7669#endif 7670 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7671 ASSERT(MUTEX_HELD(&dtrace_lock)); 7672 self = 1; 7673 7674 if (dtrace_provider->dtpv_next != NULL) { 7675 /* 7676 * There's another provider here; return failure. 7677 */ 7678 return (EBUSY); 7679 } 7680 } else { 7681 mutex_enter(&dtrace_provider_lock); 7682#if defined(sun) 7683 mutex_enter(&mod_lock); 7684#endif 7685 mutex_enter(&dtrace_lock); 7686 } 7687 7688 /* 7689 * If anyone has /dev/dtrace open, or if there are anonymous enabled 7690 * probes, we refuse to let providers slither away, unless this 7691 * provider has already been explicitly invalidated. 7692 */ 7693 if (!old->dtpv_defunct && 7694 (dtrace_opens || (dtrace_anon.dta_state != NULL && 7695 dtrace_anon.dta_state->dts_necbs > 0))) { 7696 if (!self) { 7697 mutex_exit(&dtrace_lock); 7698#if defined(sun) 7699 mutex_exit(&mod_lock); 7700#endif 7701 mutex_exit(&dtrace_provider_lock); 7702 } 7703 return (EBUSY); 7704 } 7705 7706 /* 7707 * Attempt to destroy the probes associated with this provider. 7708 */ 7709 for (i = 0; i < dtrace_nprobes; i++) { 7710 if ((probe = dtrace_probes[i]) == NULL) 7711 continue; 7712 7713 if (probe->dtpr_provider != old) 7714 continue; 7715 7716 if (probe->dtpr_ecb == NULL) 7717 continue; 7718 7719 /* 7720 * If we are trying to unregister a defunct provider, and the 7721 * provider was made defunct within the interval dictated by 7722 * dtrace_unregister_defunct_reap, we'll (asynchronously) 7723 * attempt to reap our enablings. To denote that the provider 7724 * should reattempt to unregister itself at some point in the 7725 * future, we will return a differentiable error code (EAGAIN 7726 * instead of EBUSY) in this case. 
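 *
 * A provider's detach(9E) routine is thus expected to fail the
 * detach and retry the unregister later; e.g. (an illustrative
 * sketch, with a hypothetical foo_id):
 *
 *	if (dtrace_unregister(foo_id) != 0)
 *		return (DDI_FAILURE);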
7727 */ 7728 if (dtrace_gethrtime() - old->dtpv_defunct > 7729 dtrace_unregister_defunct_reap) 7730 noreap = 1; 7731 7732 if (!self) { 7733 mutex_exit(&dtrace_lock); 7734#if defined(sun) 7735 mutex_exit(&mod_lock); 7736#endif 7737 mutex_exit(&dtrace_provider_lock); 7738 } 7739 7740 if (noreap) 7741 return (EBUSY); 7742 7743 (void) taskq_dispatch(dtrace_taskq, 7744 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP); 7745 7746 return (EAGAIN); 7747 } 7748 7749 /* 7750 * All of the probes for this provider are disabled; we can safely 7751 * remove all of them from their hash chains and from the probe array. 7752 */ 7753 for (i = 0; i < dtrace_nprobes; i++) { 7754 if ((probe = dtrace_probes[i]) == NULL) 7755 continue; 7756 7757 if (probe->dtpr_provider != old) 7758 continue; 7759 7760 dtrace_probes[i] = NULL; 7761 7762 dtrace_hash_remove(dtrace_bymod, probe); 7763 dtrace_hash_remove(dtrace_byfunc, probe); 7764 dtrace_hash_remove(dtrace_byname, probe); 7765 7766 if (first == NULL) { 7767 first = probe; 7768 probe->dtpr_nextmod = NULL; 7769 } else { 7770 probe->dtpr_nextmod = first; 7771 first = probe; 7772 } 7773 } 7774 7775 /* 7776 * The provider's probes have been removed from the hash chains and 7777 * from the probe array. Now issue a dtrace_sync() to be sure that 7778 * everyone has cleared out from any probe array processing. 7779 */ 7780 dtrace_sync(); 7781 7782 for (probe = first; probe != NULL; probe = first) { 7783 first = probe->dtpr_nextmod; 7784 7785 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 7786 probe->dtpr_arg); 7787 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7788 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7789 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7790#if defined(sun) 7791 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 7792#else 7793 free_unr(dtrace_arena, probe->dtpr_id); 7794#endif 7795 kmem_free(probe, sizeof (dtrace_probe_t)); 7796 } 7797 7798 if ((prev = dtrace_provider) == old) { 7799#if defined(sun) 7800 ASSERT(self || dtrace_devi == NULL); 7801 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 7802#endif 7803 dtrace_provider = old->dtpv_next; 7804 } else { 7805 while (prev != NULL && prev->dtpv_next != old) 7806 prev = prev->dtpv_next; 7807 7808 if (prev == NULL) { 7809 panic("attempt to unregister non-existent " 7810 "dtrace provider %p\n", (void *)id); 7811 } 7812 7813 prev->dtpv_next = old->dtpv_next; 7814 } 7815 7816 if (!self) { 7817 mutex_exit(&dtrace_lock); 7818#if defined(sun) 7819 mutex_exit(&mod_lock); 7820#endif 7821 mutex_exit(&dtrace_provider_lock); 7822 } 7823 7824 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 7825 kmem_free(old, sizeof (dtrace_provider_t)); 7826 7827 return (0); 7828} 7829 7830/* 7831 * Invalidate the specified provider. All subsequent probe lookups for the 7832 * specified provider will fail, but its probes will not be removed. 7833 */ 7834void 7835dtrace_invalidate(dtrace_provider_id_t id) 7836{ 7837 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 7838 7839 ASSERT(pvp->dtpv_pops.dtps_enable != 7840 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7841 7842 mutex_enter(&dtrace_provider_lock); 7843 mutex_enter(&dtrace_lock); 7844 7845 pvp->dtpv_defunct = dtrace_gethrtime(); 7846 7847 mutex_exit(&dtrace_lock); 7848 mutex_exit(&dtrace_provider_lock); 7849} 7850 7851/* 7852 * Indicate whether or not DTrace has attached. 
7853 */ 7854int 7855dtrace_attached(void) 7856{ 7857 /* 7858 * dtrace_provider will be non-NULL iff the DTrace driver has 7859 * attached. (It's non-NULL because DTrace is always itself a 7860 * provider.) 7861 */ 7862 return (dtrace_provider != NULL); 7863} 7864 7865/* 7866 * Remove all the unenabled probes for the given provider. This function is 7867 * not unlike dtrace_unregister(), except that it doesn't remove the provider 7868 * -- just as many of its associated probes as it can. 7869 */ 7870int 7871dtrace_condense(dtrace_provider_id_t id) 7872{ 7873 dtrace_provider_t *prov = (dtrace_provider_t *)id; 7874 int i; 7875 dtrace_probe_t *probe; 7876 7877 /* 7878 * Make sure this isn't the dtrace provider itself. 7879 */ 7880 ASSERT(prov->dtpv_pops.dtps_enable != 7881 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7882 7883 mutex_enter(&dtrace_provider_lock); 7884 mutex_enter(&dtrace_lock); 7885 7886 /* 7887 * Attempt to destroy the probes associated with this provider. 7888 */ 7889 for (i = 0; i < dtrace_nprobes; i++) { 7890 if ((probe = dtrace_probes[i]) == NULL) 7891 continue; 7892 7893 if (probe->dtpr_provider != prov) 7894 continue; 7895 7896 if (probe->dtpr_ecb != NULL) 7897 continue; 7898 7899 dtrace_probes[i] = NULL; 7900 7901 dtrace_hash_remove(dtrace_bymod, probe); 7902 dtrace_hash_remove(dtrace_byfunc, probe); 7903 dtrace_hash_remove(dtrace_byname, probe); 7904 7905 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7906 probe->dtpr_arg); 7907 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7908 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7909 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7910 kmem_free(probe, sizeof (dtrace_probe_t)); 7911#if defined(sun) 7912 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7913#else 7914 free_unr(dtrace_arena, i + 1); 7915#endif 7916 } 7917 7918 mutex_exit(&dtrace_lock); 7919 mutex_exit(&dtrace_provider_lock); 7920 7921 return (0); 7922} 7923 7924/* 7925 * DTrace Probe Management Functions 7926 * 7927 * The functions in this section perform the DTrace probe management, 7928 * including functions to create probes, look-up probes, and call into the 7929 * providers to request that probes be provided. Some of these functions are 7930 * in the Provider-to-Framework API; these functions can be identified by the 7931 * fact that they are not declared "static". 7932 */ 7933 7934/* 7935 * Create a probe with the specified module name, function name, and name. 
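 *
 * For example (an illustrative sketch; the "foo" identifiers are
 * hypothetical), a provider typically creates its probes from its
 * dtps_provide() entry point:
 *
 *	(void) dtrace_probe_create(foo_id, "foomod", "foofunc",
 *	    "entry", 0, NULL);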
7936 */ 7937dtrace_id_t 7938dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7939 const char *func, const char *name, int aframes, void *arg) 7940{ 7941 dtrace_probe_t *probe, **probes; 7942 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7943 dtrace_id_t id; 7944 7945 if (provider == dtrace_provider) { 7946 ASSERT(MUTEX_HELD(&dtrace_lock)); 7947 } else { 7948 mutex_enter(&dtrace_lock); 7949 } 7950 7951#if defined(sun) 7952 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7953 VM_BESTFIT | VM_SLEEP); 7954#else 7955 id = alloc_unr(dtrace_arena); 7956#endif 7957 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 7958 7959 probe->dtpr_id = id; 7960 probe->dtpr_gen = dtrace_probegen++; 7961 probe->dtpr_mod = dtrace_strdup(mod); 7962 probe->dtpr_func = dtrace_strdup(func); 7963 probe->dtpr_name = dtrace_strdup(name); 7964 probe->dtpr_arg = arg; 7965 probe->dtpr_aframes = aframes; 7966 probe->dtpr_provider = provider; 7967 7968 dtrace_hash_add(dtrace_bymod, probe); 7969 dtrace_hash_add(dtrace_byfunc, probe); 7970 dtrace_hash_add(dtrace_byname, probe); 7971 7972 if (id - 1 >= dtrace_nprobes) { 7973 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 7974 size_t nsize = osize << 1; 7975 7976 if (nsize == 0) { 7977 ASSERT(osize == 0); 7978 ASSERT(dtrace_probes == NULL); 7979 nsize = sizeof (dtrace_probe_t *); 7980 } 7981 7982 probes = kmem_zalloc(nsize, KM_SLEEP); 7983 7984 if (dtrace_probes == NULL) { 7985 ASSERT(osize == 0); 7986 dtrace_probes = probes; 7987 dtrace_nprobes = 1; 7988 } else { 7989 dtrace_probe_t **oprobes = dtrace_probes; 7990 7991 bcopy(oprobes, probes, osize); 7992 dtrace_membar_producer(); 7993 dtrace_probes = probes; 7994 7995 dtrace_sync(); 7996 7997 /* 7998 * All CPUs are now seeing the new probes array; we can 7999 * safely free the old array. 8000 */ 8001 kmem_free(oprobes, osize); 8002 dtrace_nprobes <<= 1; 8003 } 8004 8005 ASSERT(id - 1 < dtrace_nprobes); 8006 } 8007 8008 ASSERT(dtrace_probes[id - 1] == NULL); 8009 dtrace_probes[id - 1] = probe; 8010 8011 if (provider != dtrace_provider) 8012 mutex_exit(&dtrace_lock); 8013 8014 return (id); 8015} 8016 8017static dtrace_probe_t * 8018dtrace_probe_lookup_id(dtrace_id_t id) 8019{ 8020 ASSERT(MUTEX_HELD(&dtrace_lock)); 8021 8022 if (id == 0 || id > dtrace_nprobes) 8023 return (NULL); 8024 8025 return (dtrace_probes[id - 1]); 8026} 8027 8028static int 8029dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 8030{ 8031 *((dtrace_id_t *)arg) = probe->dtpr_id; 8032 8033 return (DTRACE_MATCH_DONE); 8034} 8035 8036/* 8037 * Look up a probe based on provider and one or more of module name, function 8038 * name and probe name. 8039 */ 8040dtrace_id_t 8041dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 8042 char *func, char *name) 8043{ 8044 dtrace_probekey_t pkey; 8045 dtrace_id_t id; 8046 int match; 8047 8048 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 8049 pkey.dtpk_pmatch = &dtrace_match_string; 8050 pkey.dtpk_mod = mod; 8051 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 8052 pkey.dtpk_func = func; 8053 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 8054 pkey.dtpk_name = name; 8055 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 8056 pkey.dtpk_id = DTRACE_IDNONE; 8057 8058 mutex_enter(&dtrace_lock); 8059 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 8060 dtrace_probe_lookup_match, &id); 8061 mutex_exit(&dtrace_lock); 8062 8063 ASSERT(match == 1 || match == 0); 8064 return (match ? 
id : 0);
8065}
8066
8067/*
8068 * Returns the probe argument associated with the specified probe.
8069 */
8070void *
8071dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
8072{
8073 dtrace_probe_t *probe;
8074 void *rval = NULL;
8075
8076 mutex_enter(&dtrace_lock);
8077
8078 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
8079 probe->dtpr_provider == (dtrace_provider_t *)id)
8080 rval = probe->dtpr_arg;
8081
8082 mutex_exit(&dtrace_lock);
8083
8084 return (rval);
8085}
8086
8087/*
8088 * Copy a probe into a probe description.
8089 */
8090static void
8091dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
8092{
8093 bzero(pdp, sizeof (dtrace_probedesc_t));
8094 pdp->dtpd_id = prp->dtpr_id;
8095
8096 (void) strncpy(pdp->dtpd_provider,
8097 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
8098
8099 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
8100 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
8101 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
8102}
8103
8104/*
8105 * Called to indicate that a probe -- or probes -- should be provided by a
8106 * specified provider. If the specified description is NULL, the provider will
8107 * be told to provide all of its probes. (This is done whenever a new
8108 * consumer comes along, or whenever a retained enabling is to be matched.) If
8109 * the specified description is non-NULL, the provider is given the
8110 * opportunity to dynamically provide the specified probe, allowing providers
8111 * to support the creation of probes on-the-fly. (So-called _autocreated_
8112 * probes.) If the provider is NULL, the operations will be applied to all
8113 * providers; if the provider is non-NULL the operations will only be applied
8114 * to the specified provider. The dtrace_provider_lock must be held, and the
8115 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
8116 * will need to grab the dtrace_lock when it reenters the framework through
8117 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
8118 */
8119static void
8120dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
8121{
8122#if defined(sun)
8123 modctl_t *ctl;
8124#endif
8125 int all = 0;
8126
8127 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8128
8129 if (prv == NULL) {
8130 all = 1;
8131 prv = dtrace_provider;
8132 }
8133
8134 do {
8135 /*
8136 * First, call the blanket provide operation.
8137 */
8138 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
8139
8140#if defined(sun)
8141 /*
8142 * Now call the per-module provide operation. We will grab
8143 * mod_lock to prevent the list from being modified. Note
8144 * that this also prevents the mod_busy bits from changing.
8145 * (mod_busy can only be changed with mod_lock held.)
8146 */
8147 mutex_enter(&mod_lock);
8148
8149 ctl = &modules;
8150 do {
8151 if (ctl->mod_busy || ctl->mod_mp == NULL)
8152 continue;
8153
8154 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
8155
8156 } while ((ctl = ctl->mod_next) != &modules);
8157
8158 mutex_exit(&mod_lock);
8159#endif
8160 } while (all && (prv = prv->dtpv_next) != NULL);
8161}
8162
8163#if defined(sun)
8164/*
8165 * Iterate over each probe, and call the Framework-to-Provider API function
8166 * denoted by offs.
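 *
 * For example, suspending every enabled probe amounts to:
 *
 *	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));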
8167 */ 8168static void 8169dtrace_probe_foreach(uintptr_t offs) 8170{ 8171 dtrace_provider_t *prov; 8172 void (*func)(void *, dtrace_id_t, void *); 8173 dtrace_probe_t *probe; 8174 dtrace_icookie_t cookie; 8175 int i; 8176 8177 /* 8178 * We disable interrupts to walk through the probe array. This is 8179 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 8180 * won't see stale data. 8181 */ 8182 cookie = dtrace_interrupt_disable(); 8183 8184 for (i = 0; i < dtrace_nprobes; i++) { 8185 if ((probe = dtrace_probes[i]) == NULL) 8186 continue; 8187 8188 if (probe->dtpr_ecb == NULL) { 8189 /* 8190 * This probe isn't enabled -- don't call the function. 8191 */ 8192 continue; 8193 } 8194 8195 prov = probe->dtpr_provider; 8196 func = *((void(**)(void *, dtrace_id_t, void *)) 8197 ((uintptr_t)&prov->dtpv_pops + offs)); 8198 8199 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 8200 } 8201 8202 dtrace_interrupt_enable(cookie); 8203} 8204#endif 8205 8206static int 8207dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 8208{ 8209 dtrace_probekey_t pkey; 8210 uint32_t priv; 8211 uid_t uid; 8212 zoneid_t zoneid; 8213 8214 ASSERT(MUTEX_HELD(&dtrace_lock)); 8215 dtrace_ecb_create_cache = NULL; 8216 8217 if (desc == NULL) { 8218 /* 8219 * If we're passed a NULL description, we're being asked to 8220 * create an ECB with a NULL probe. 8221 */ 8222 (void) dtrace_ecb_create_enable(NULL, enab); 8223 return (0); 8224 } 8225 8226 dtrace_probekey(desc, &pkey); 8227 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 8228 &priv, &uid, &zoneid); 8229 8230 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 8231 enab)); 8232} 8233 8234/* 8235 * DTrace Helper Provider Functions 8236 */ 8237static void 8238dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 8239{ 8240 attr->dtat_name = DOF_ATTR_NAME(dofattr); 8241 attr->dtat_data = DOF_ATTR_DATA(dofattr); 8242 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 8243} 8244 8245static void 8246dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 8247 const dof_provider_t *dofprov, char *strtab) 8248{ 8249 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 8250 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 8251 dofprov->dofpv_provattr); 8252 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 8253 dofprov->dofpv_modattr); 8254 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 8255 dofprov->dofpv_funcattr); 8256 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 8257 dofprov->dofpv_nameattr); 8258 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 8259 dofprov->dofpv_argsattr); 8260} 8261 8262static void 8263dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8264{ 8265 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8266 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8267 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 8268 dof_provider_t *provider; 8269 dof_probe_t *probe; 8270 uint32_t *off, *enoff; 8271 uint8_t *arg; 8272 char *strtab; 8273 uint_t i, nprobes; 8274 dtrace_helper_provdesc_t dhpv; 8275 dtrace_helper_probedesc_t dhpb; 8276 dtrace_meta_t *meta = dtrace_meta_pid; 8277 dtrace_mops_t *mops = &meta->dtm_mops; 8278 void *parg; 8279 8280 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8281 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8282 provider->dofpv_strtab * dof->dofh_secsize); 8283 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8284 provider->dofpv_probes * dof->dofh_secsize); 8285 arg_sec = 
(dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8286 provider->dofpv_prargs * dof->dofh_secsize); 8287 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8288 provider->dofpv_proffs * dof->dofh_secsize); 8289 8290 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8291 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 8292 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 8293 enoff = NULL; 8294 8295 /* 8296 * See dtrace_helper_provider_validate(). 8297 */ 8298 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 8299 provider->dofpv_prenoffs != DOF_SECT_NONE) { 8300 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8301 provider->dofpv_prenoffs * dof->dofh_secsize); 8302 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 8303 } 8304 8305 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 8306 8307 /* 8308 * Create the provider. 8309 */ 8310 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8311 8312 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 8313 return; 8314 8315 meta->dtm_count++; 8316 8317 /* 8318 * Create the probes. 8319 */ 8320 for (i = 0; i < nprobes; i++) { 8321 probe = (dof_probe_t *)(uintptr_t)(daddr + 8322 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 8323 8324 dhpb.dthpb_mod = dhp->dofhp_mod; 8325 dhpb.dthpb_func = strtab + probe->dofpr_func; 8326 dhpb.dthpb_name = strtab + probe->dofpr_name; 8327 dhpb.dthpb_base = probe->dofpr_addr; 8328 dhpb.dthpb_offs = off + probe->dofpr_offidx; 8329 dhpb.dthpb_noffs = probe->dofpr_noffs; 8330 if (enoff != NULL) { 8331 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 8332 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 8333 } else { 8334 dhpb.dthpb_enoffs = NULL; 8335 dhpb.dthpb_nenoffs = 0; 8336 } 8337 dhpb.dthpb_args = arg + probe->dofpr_argidx; 8338 dhpb.dthpb_nargc = probe->dofpr_nargc; 8339 dhpb.dthpb_xargc = probe->dofpr_xargc; 8340 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 8341 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 8342 8343 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 8344 } 8345} 8346 8347static void 8348dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 8349{ 8350 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8351 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8352 int i; 8353 8354 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8355 8356 for (i = 0; i < dof->dofh_secnum; i++) { 8357 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8358 dof->dofh_secoff + i * dof->dofh_secsize); 8359 8360 if (sec->dofs_type != DOF_SECT_PROVIDER) 8361 continue; 8362 8363 dtrace_helper_provide_one(dhp, sec, pid); 8364 } 8365 8366 /* 8367 * We may have just created probes, so we must now rematch against 8368 * any retained enablings. Note that this call will acquire both 8369 * cpu_lock and dtrace_lock; the fact that we are holding 8370 * dtrace_meta_lock now is what defines the ordering with respect to 8371 * these three locks. 
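 * That order is: dtrace_meta_lock, then cpu_lock, then dtrace_lock.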
8372 */
8373 dtrace_enabling_matchall();
8374}
8375
8376static void
8377dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8378{
8379 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8380 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8381 dof_sec_t *str_sec;
8382 dof_provider_t *provider;
8383 char *strtab;
8384 dtrace_helper_provdesc_t dhpv;
8385 dtrace_meta_t *meta = dtrace_meta_pid;
8386 dtrace_mops_t *mops = &meta->dtm_mops;
8387
8388 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8389 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8390 provider->dofpv_strtab * dof->dofh_secsize);
8391
8392 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8393
8394 /*
8395 * Create the provider description identifying the provider to be
 * removed.
8396 */
8397 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8398
8399 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
8400
8401 meta->dtm_count--;
8402}
8403
8404static void
8405dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
8406{
8407 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8408 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8409 int i;
8410
8411 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8412
8413 for (i = 0; i < dof->dofh_secnum; i++) {
8414 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8415 dof->dofh_secoff + i * dof->dofh_secsize);
8416
8417 if (sec->dofs_type != DOF_SECT_PROVIDER)
8418 continue;
8419
8420 dtrace_helper_provider_remove_one(dhp, sec, pid);
8421 }
8422}
8423
8424/*
8425 * DTrace Meta Provider-to-Framework API Functions
8426 *
8427 * These functions implement the Meta Provider-to-Framework API, as described
8428 * in <sys/dtrace.h>.
8429 */
8430int
8431dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
8432 dtrace_meta_provider_id_t *idp)
8433{
8434 dtrace_meta_t *meta;
8435 dtrace_helpers_t *help, *next;
8436 int i;
8437
8438 *idp = DTRACE_METAPROVNONE;
8439
8440 /*
8441 * We strictly don't need the name, but we hold onto it for
8442 * debuggability. All hail error queues!
8443 */
8444 if (name == NULL) {
8445 cmn_err(CE_WARN, "failed to register meta-provider: "
8446 "invalid name");
8447 return (EINVAL);
8448 }
8449
8450 if (mops == NULL ||
8451 mops->dtms_create_probe == NULL ||
8452 mops->dtms_provide_pid == NULL ||
8453 mops->dtms_remove_pid == NULL) {
8454 cmn_err(CE_WARN, "failed to register meta-provider %s: "
8455 "invalid ops", name);
8456 return (EINVAL);
8457 }
8458
8459 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
8460 meta->dtm_mops = *mops;
8461 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8462 (void) strcpy(meta->dtm_name, name);
8463 meta->dtm_arg = arg;
8464
8465 mutex_enter(&dtrace_meta_lock);
8466 mutex_enter(&dtrace_lock);
8467
8468 if (dtrace_meta_pid != NULL) {
8469 mutex_exit(&dtrace_lock);
8470 mutex_exit(&dtrace_meta_lock);
8471 cmn_err(CE_WARN, "failed to register meta-provider %s: "
8472 "user-land meta-provider exists", name);
8473 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
8474 kmem_free(meta, sizeof (dtrace_meta_t));
8475 return (EINVAL);
8476 }
8477
8478 dtrace_meta_pid = meta;
8479 *idp = (dtrace_meta_provider_id_t)meta;
8480
8481 /*
8482 * If there are providers and probes ready to go, pass them
8483 * off to the new meta provider now.
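 * (These are the helpers that were parked on dtrace_deferred_pid
 * because no meta provider existed when they arrived.)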
8484 */ 8485 8486 help = dtrace_deferred_pid; 8487 dtrace_deferred_pid = NULL; 8488 8489 mutex_exit(&dtrace_lock); 8490 8491 while (help != NULL) { 8492 for (i = 0; i < help->dthps_nprovs; i++) { 8493 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 8494 help->dthps_pid); 8495 } 8496 8497 next = help->dthps_next; 8498 help->dthps_next = NULL; 8499 help->dthps_prev = NULL; 8500 help->dthps_deferred = 0; 8501 help = next; 8502 } 8503 8504 mutex_exit(&dtrace_meta_lock); 8505 8506 return (0); 8507} 8508 8509int 8510dtrace_meta_unregister(dtrace_meta_provider_id_t id) 8511{ 8512 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 8513 8514 mutex_enter(&dtrace_meta_lock); 8515 mutex_enter(&dtrace_lock); 8516 8517 if (old == dtrace_meta_pid) { 8518 pp = &dtrace_meta_pid; 8519 } else { 8520 panic("attempt to unregister non-existent " 8521 "dtrace meta-provider %p\n", (void *)old); 8522 } 8523 8524 if (old->dtm_count != 0) { 8525 mutex_exit(&dtrace_lock); 8526 mutex_exit(&dtrace_meta_lock); 8527 return (EBUSY); 8528 } 8529 8530 *pp = NULL; 8531 8532 mutex_exit(&dtrace_lock); 8533 mutex_exit(&dtrace_meta_lock); 8534 8535 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 8536 kmem_free(old, sizeof (dtrace_meta_t)); 8537 8538 return (0); 8539} 8540 8541 8542/* 8543 * DTrace DIF Object Functions 8544 */ 8545static int 8546dtrace_difo_err(uint_t pc, const char *format, ...) 8547{ 8548 if (dtrace_err_verbose) { 8549 va_list alist; 8550 8551 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 8552 va_start(alist, format); 8553 (void) vuprintf(format, alist); 8554 va_end(alist); 8555 } 8556 8557#ifdef DTRACE_ERRDEBUG 8558 dtrace_errdebug(format); 8559#endif 8560 return (1); 8561} 8562 8563/* 8564 * Validate a DTrace DIF object by checking the IR instructions. The following 8565 * rules are currently enforced by dtrace_difo_validate(): 8566 * 8567 * 1. Each instruction must have a valid opcode 8568 * 2. Each register, string, variable, or subroutine reference must be valid 8569 * 3. No instruction can modify register %r0 (must be zero) 8570 * 4. All instruction reserved bits must be set to zero 8571 * 5. The last instruction must be a "ret" instruction 8572 * 6. All branch targets must reference a valid instruction _after_ the branch 8573 */ 8574static int 8575dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 8576 cred_t *cr) 8577{ 8578 int err = 0, i; 8579 int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 8580 int kcheckload; 8581 uint_t pc; 8582 8583 kcheckload = cr == NULL || 8584 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 8585 8586 dp->dtdo_destructive = 0; 8587 8588 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 8589 dif_instr_t instr = dp->dtdo_buf[pc]; 8590 8591 uint_t r1 = DIF_INSTR_R1(instr); 8592 uint_t r2 = DIF_INSTR_R2(instr); 8593 uint_t rd = DIF_INSTR_RD(instr); 8594 uint_t rs = DIF_INSTR_RS(instr); 8595 uint_t label = DIF_INSTR_LABEL(instr); 8596 uint_t v = DIF_INSTR_VAR(instr); 8597 uint_t subr = DIF_INSTR_SUBR(instr); 8598 uint_t type = DIF_INSTR_TYPE(instr); 8599 uint_t op = DIF_INSTR_OP(instr); 8600 8601 switch (op) { 8602 case DIF_OP_OR: 8603 case DIF_OP_XOR: 8604 case DIF_OP_AND: 8605 case DIF_OP_SLL: 8606 case DIF_OP_SRL: 8607 case DIF_OP_SRA: 8608 case DIF_OP_SUB: 8609 case DIF_OP_ADD: 8610 case DIF_OP_MUL: 8611 case DIF_OP_SDIV: 8612 case DIF_OP_UDIV: 8613 case DIF_OP_SREM: 8614 case DIF_OP_UREM: 8615 case DIF_OP_COPYS: 8616 if (r1 >= nregs) 8617 err += efunc(pc, "invalid register %u\n", r1); 8618 if (r2 >= nregs) 8619 err += efunc(pc, "invalid register %u\n", r2); 8620 if (rd >= nregs) 8621 err += efunc(pc, "invalid register %u\n", rd); 8622 if (rd == 0) 8623 err += efunc(pc, "cannot write to %r0\n"); 8624 break; 8625 case DIF_OP_NOT: 8626 case DIF_OP_MOV: 8627 case DIF_OP_ALLOCS: 8628 if (r1 >= nregs) 8629 err += efunc(pc, "invalid register %u\n", r1); 8630 if (r2 != 0) 8631 err += efunc(pc, "non-zero reserved bits\n"); 8632 if (rd >= nregs) 8633 err += efunc(pc, "invalid register %u\n", rd); 8634 if (rd == 0) 8635 err += efunc(pc, "cannot write to %r0\n"); 8636 break; 8637 case DIF_OP_LDSB: 8638 case DIF_OP_LDSH: 8639 case DIF_OP_LDSW: 8640 case DIF_OP_LDUB: 8641 case DIF_OP_LDUH: 8642 case DIF_OP_LDUW: 8643 case DIF_OP_LDX: 8644 if (r1 >= nregs) 8645 err += efunc(pc, "invalid register %u\n", r1); 8646 if (r2 != 0) 8647 err += efunc(pc, "non-zero reserved bits\n"); 8648 if (rd >= nregs) 8649 err += efunc(pc, "invalid register %u\n", rd); 8650 if (rd == 0) 8651 err += efunc(pc, "cannot write to %r0\n"); 8652 if (kcheckload) 8653 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 8654 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 8655 break; 8656 case DIF_OP_RLDSB: 8657 case DIF_OP_RLDSH: 8658 case DIF_OP_RLDSW: 8659 case DIF_OP_RLDUB: 8660 case DIF_OP_RLDUH: 8661 case DIF_OP_RLDUW: 8662 case DIF_OP_RLDX: 8663 if (r1 >= nregs) 8664 err += efunc(pc, "invalid register %u\n", r1); 8665 if (r2 != 0) 8666 err += efunc(pc, "non-zero reserved bits\n"); 8667 if (rd >= nregs) 8668 err += efunc(pc, "invalid register %u\n", rd); 8669 if (rd == 0) 8670 err += efunc(pc, "cannot write to %r0\n"); 8671 break; 8672 case DIF_OP_ULDSB: 8673 case DIF_OP_ULDSH: 8674 case DIF_OP_ULDSW: 8675 case DIF_OP_ULDUB: 8676 case DIF_OP_ULDUH: 8677 case DIF_OP_ULDUW: 8678 case DIF_OP_ULDX: 8679 if (r1 >= nregs) 8680 err += efunc(pc, "invalid register %u\n", r1); 8681 if (r2 != 0) 8682 err += efunc(pc, "non-zero reserved bits\n"); 8683 if (rd >= nregs) 8684 err += efunc(pc, "invalid register %u\n", rd); 8685 if (rd == 0) 8686 err += efunc(pc, "cannot write to %r0\n"); 8687 break; 8688 case DIF_OP_STB: 8689 case DIF_OP_STH: 8690 case DIF_OP_STW: 8691 case DIF_OP_STX: 8692 if (r1 >= nregs) 8693 err += efunc(pc, "invalid register %u\n", r1); 8694 if (r2 != 0) 8695 err += efunc(pc, "non-zero reserved bits\n"); 8696 if (rd >= nregs) 8697 err += efunc(pc, "invalid register %u\n", rd); 8698 if (rd == 0) 8699 err += efunc(pc, "cannot write to 0 address\n"); 8700 break; 8701 case 
DIF_OP_CMP: 8702 case DIF_OP_SCMP: 8703 if (r1 >= nregs) 8704 err += efunc(pc, "invalid register %u\n", r1); 8705 if (r2 >= nregs) 8706 err += efunc(pc, "invalid register %u\n", r2); 8707 if (rd != 0) 8708 err += efunc(pc, "non-zero reserved bits\n"); 8709 break; 8710 case DIF_OP_TST: 8711 if (r1 >= nregs) 8712 err += efunc(pc, "invalid register %u\n", r1); 8713 if (r2 != 0 || rd != 0) 8714 err += efunc(pc, "non-zero reserved bits\n"); 8715 break; 8716 case DIF_OP_BA: 8717 case DIF_OP_BE: 8718 case DIF_OP_BNE: 8719 case DIF_OP_BG: 8720 case DIF_OP_BGU: 8721 case DIF_OP_BGE: 8722 case DIF_OP_BGEU: 8723 case DIF_OP_BL: 8724 case DIF_OP_BLU: 8725 case DIF_OP_BLE: 8726 case DIF_OP_BLEU: 8727 if (label >= dp->dtdo_len) { 8728 err += efunc(pc, "invalid branch target %u\n", 8729 label); 8730 } 8731 if (label <= pc) { 8732 err += efunc(pc, "backward branch to %u\n", 8733 label); 8734 } 8735 break; 8736 case DIF_OP_RET: 8737 if (r1 != 0 || r2 != 0) 8738 err += efunc(pc, "non-zero reserved bits\n"); 8739 if (rd >= nregs) 8740 err += efunc(pc, "invalid register %u\n", rd); 8741 break; 8742 case DIF_OP_NOP: 8743 case DIF_OP_POPTS: 8744 case DIF_OP_FLUSHTS: 8745 if (r1 != 0 || r2 != 0 || rd != 0) 8746 err += efunc(pc, "non-zero reserved bits\n"); 8747 break; 8748 case DIF_OP_SETX: 8749 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 8750 err += efunc(pc, "invalid integer ref %u\n", 8751 DIF_INSTR_INTEGER(instr)); 8752 } 8753 if (rd >= nregs) 8754 err += efunc(pc, "invalid register %u\n", rd); 8755 if (rd == 0) 8756 err += efunc(pc, "cannot write to %r0\n"); 8757 break; 8758 case DIF_OP_SETS: 8759 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 8760 err += efunc(pc, "invalid string ref %u\n", 8761 DIF_INSTR_STRING(instr)); 8762 } 8763 if (rd >= nregs) 8764 err += efunc(pc, "invalid register %u\n", rd); 8765 if (rd == 0) 8766 err += efunc(pc, "cannot write to %r0\n"); 8767 break; 8768 case DIF_OP_LDGA: 8769 case DIF_OP_LDTA: 8770 if (r1 > DIF_VAR_ARRAY_MAX) 8771 err += efunc(pc, "invalid array %u\n", r1); 8772 if (r2 >= nregs) 8773 err += efunc(pc, "invalid register %u\n", r2); 8774 if (rd >= nregs) 8775 err += efunc(pc, "invalid register %u\n", rd); 8776 if (rd == 0) 8777 err += efunc(pc, "cannot write to %r0\n"); 8778 break; 8779 case DIF_OP_LDGS: 8780 case DIF_OP_LDTS: 8781 case DIF_OP_LDLS: 8782 case DIF_OP_LDGAA: 8783 case DIF_OP_LDTAA: 8784 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 8785 err += efunc(pc, "invalid variable %u\n", v); 8786 if (rd >= nregs) 8787 err += efunc(pc, "invalid register %u\n", rd); 8788 if (rd == 0) 8789 err += efunc(pc, "cannot write to %r0\n"); 8790 break; 8791 case DIF_OP_STGS: 8792 case DIF_OP_STTS: 8793 case DIF_OP_STLS: 8794 case DIF_OP_STGAA: 8795 case DIF_OP_STTAA: 8796 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 8797 err += efunc(pc, "invalid variable %u\n", v); 8798 if (rs >= nregs) 8799 err += efunc(pc, "invalid register %u\n", rs); 8800 break; 8801 case DIF_OP_CALL: 8802 if (subr > DIF_SUBR_MAX) 8803 err += efunc(pc, "invalid subr %u\n", subr); 8804 if (rd >= nregs) 8805 err += efunc(pc, "invalid register %u\n", rd); 8806 if (rd == 0) 8807 err += efunc(pc, "cannot write to %r0\n"); 8808 8809 if (subr == DIF_SUBR_COPYOUT || 8810 subr == DIF_SUBR_COPYOUTSTR) { 8811 dp->dtdo_destructive = 1; 8812 } 8813 break; 8814 case DIF_OP_PUSHTR: 8815 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 8816 err += efunc(pc, "invalid ref type %u\n", type); 8817 if (r2 >= nregs) 8818 err += efunc(pc, "invalid register %u\n", r2); 8819 if (rs >= nregs)
8820 err += efunc(pc, "invalid register %u\n", rs); 8821 break; 8822 case DIF_OP_PUSHTV: 8823 if (type != DIF_TYPE_CTF) 8824 err += efunc(pc, "invalid val type %u\n", type); 8825 if (r2 >= nregs) 8826 err += efunc(pc, "invalid register %u\n", r2); 8827 if (rs >= nregs) 8828 err += efunc(pc, "invalid register %u\n", rs); 8829 break; 8830 default: 8831 err += efunc(pc, "invalid opcode %u\n", 8832 DIF_INSTR_OP(instr)); 8833 } 8834 } 8835 8836 if (dp->dtdo_len != 0 && 8837 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 8838 err += efunc(dp->dtdo_len - 1, 8839 "expected 'ret' as last DIF instruction\n"); 8840 } 8841 8842 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 8843 /* 8844 * If we're not returning by reference, the size must be either 8845 * 0 or the size of one of the base types. 8846 */ 8847 switch (dp->dtdo_rtype.dtdt_size) { 8848 case 0: 8849 case sizeof (uint8_t): 8850 case sizeof (uint16_t): 8851 case sizeof (uint32_t): 8852 case sizeof (uint64_t): 8853 break; 8854 8855 default: 8856 err += efunc(dp->dtdo_len - 1, "bad return size"); 8857 } 8858 } 8859 8860 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 8861 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 8862 dtrace_diftype_t *vt, *et; 8863 uint_t id, ndx; 8864 8865 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 8866 v->dtdv_scope != DIFV_SCOPE_THREAD && 8867 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 8868 err += efunc(i, "unrecognized variable scope %d\n", 8869 v->dtdv_scope); 8870 break; 8871 } 8872 8873 if (v->dtdv_kind != DIFV_KIND_ARRAY && 8874 v->dtdv_kind != DIFV_KIND_SCALAR) { 8875 err += efunc(i, "unrecognized variable type %d\n", 8876 v->dtdv_kind); 8877 break; 8878 } 8879 8880 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 8881 err += efunc(i, "%d exceeds variable id limit\n", id); 8882 break; 8883 } 8884 8885 if (id < DIF_VAR_OTHER_UBASE) 8886 continue; 8887 8888 /* 8889 * For user-defined variables, we need to check that this 8890 * definition is identical to any previous definition that we 8891 * encountered. 
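 * (An enabling may carry several DIFOs -- a predicate and its actions,
 * for instance -- each of which may declare the same user-defined
 * variable; the kind, type-flags and type-size checks below are what
 * "identical" means here.)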
8892 */ 8893 ndx = id - DIF_VAR_OTHER_UBASE; 8894 8895 switch (v->dtdv_scope) { 8896 case DIFV_SCOPE_GLOBAL: 8897 if (ndx < vstate->dtvs_nglobals) { 8898 dtrace_statvar_t *svar; 8899 8900 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 8901 existing = &svar->dtsv_var; 8902 } 8903 8904 break; 8905 8906 case DIFV_SCOPE_THREAD: 8907 if (ndx < vstate->dtvs_ntlocals) 8908 existing = &vstate->dtvs_tlocals[ndx]; 8909 break; 8910 8911 case DIFV_SCOPE_LOCAL: 8912 if (ndx < vstate->dtvs_nlocals) { 8913 dtrace_statvar_t *svar; 8914 8915 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 8916 existing = &svar->dtsv_var; 8917 } 8918 8919 break; 8920 } 8921 8922 vt = &v->dtdv_type; 8923 8924 if (vt->dtdt_flags & DIF_TF_BYREF) { 8925 if (vt->dtdt_size == 0) { 8926 err += efunc(i, "zero-sized variable\n"); 8927 break; 8928 } 8929 8930 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 8931 vt->dtdt_size > dtrace_global_maxsize) { 8932 err += efunc(i, "oversized by-ref global\n"); 8933 break; 8934 } 8935 } 8936 8937 if (existing == NULL || existing->dtdv_id == 0) 8938 continue; 8939 8940 ASSERT(existing->dtdv_id == v->dtdv_id); 8941 ASSERT(existing->dtdv_scope == v->dtdv_scope); 8942 8943 if (existing->dtdv_kind != v->dtdv_kind) 8944 err += efunc(i, "%d changed variable kind\n", id); 8945 8946 et = &existing->dtdv_type; 8947 8948 if (vt->dtdt_flags != et->dtdt_flags) { 8949 err += efunc(i, "%d changed variable type flags\n", id); 8950 break; 8951 } 8952 8953 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 8954 err += efunc(i, "%d changed variable type size\n", id); 8955 break; 8956 } 8957 } 8958 8959 return (err); 8960} 8961 8962/* 8963 * Validate a DTrace DIF object that is to be used as a helper. Helpers 8964 * are much more constrained than normal DIFOs. Specifically, they may 8965 * not: 8966 * 8967 * 1. Make calls to subroutines other than copyin(), copyinstr() or 8968 * miscellaneous string routines. 8969 * 2. Access DTrace variables other than the args[] array, and the 8970 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 8971 * 3. Have thread-local variables. 8972 * 4. Have dynamic variables. 8973 */ 8974static int 8975dtrace_difo_validate_helper(dtrace_difo_t *dp) 8976{ 8977 int (*efunc)(uint_t pc, const char *, ...)
= dtrace_difo_err; 8978 int err = 0; 8979 uint_t pc; 8980 8981 for (pc = 0; pc < dp->dtdo_len; pc++) { 8982 dif_instr_t instr = dp->dtdo_buf[pc]; 8983 8984 uint_t v = DIF_INSTR_VAR(instr); 8985 uint_t subr = DIF_INSTR_SUBR(instr); 8986 uint_t op = DIF_INSTR_OP(instr); 8987 8988 switch (op) { 8989 case DIF_OP_OR: 8990 case DIF_OP_XOR: 8991 case DIF_OP_AND: 8992 case DIF_OP_SLL: 8993 case DIF_OP_SRL: 8994 case DIF_OP_SRA: 8995 case DIF_OP_SUB: 8996 case DIF_OP_ADD: 8997 case DIF_OP_MUL: 8998 case DIF_OP_SDIV: 8999 case DIF_OP_UDIV: 9000 case DIF_OP_SREM: 9001 case DIF_OP_UREM: 9002 case DIF_OP_COPYS: 9003 case DIF_OP_NOT: 9004 case DIF_OP_MOV: 9005 case DIF_OP_RLDSB: 9006 case DIF_OP_RLDSH: 9007 case DIF_OP_RLDSW: 9008 case DIF_OP_RLDUB: 9009 case DIF_OP_RLDUH: 9010 case DIF_OP_RLDUW: 9011 case DIF_OP_RLDX: 9012 case DIF_OP_ULDSB: 9013 case DIF_OP_ULDSH: 9014 case DIF_OP_ULDSW: 9015 case DIF_OP_ULDUB: 9016 case DIF_OP_ULDUH: 9017 case DIF_OP_ULDUW: 9018 case DIF_OP_ULDX: 9019 case DIF_OP_STB: 9020 case DIF_OP_STH: 9021 case DIF_OP_STW: 9022 case DIF_OP_STX: 9023 case DIF_OP_ALLOCS: 9024 case DIF_OP_CMP: 9025 case DIF_OP_SCMP: 9026 case DIF_OP_TST: 9027 case DIF_OP_BA: 9028 case DIF_OP_BE: 9029 case DIF_OP_BNE: 9030 case DIF_OP_BG: 9031 case DIF_OP_BGU: 9032 case DIF_OP_BGE: 9033 case DIF_OP_BGEU: 9034 case DIF_OP_BL: 9035 case DIF_OP_BLU: 9036 case DIF_OP_BLE: 9037 case DIF_OP_BLEU: 9038 case DIF_OP_RET: 9039 case DIF_OP_NOP: 9040 case DIF_OP_POPTS: 9041 case DIF_OP_FLUSHTS: 9042 case DIF_OP_SETX: 9043 case DIF_OP_SETS: 9044 case DIF_OP_LDGA: 9045 case DIF_OP_LDLS: 9046 case DIF_OP_STGS: 9047 case DIF_OP_STLS: 9048 case DIF_OP_PUSHTR: 9049 case DIF_OP_PUSHTV: 9050 break; 9051 9052 case DIF_OP_LDGS: 9053 if (v >= DIF_VAR_OTHER_UBASE) 9054 break; 9055 9056 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 9057 break; 9058 9059 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 9060 v == DIF_VAR_PPID || v == DIF_VAR_TID || 9061 v == DIF_VAR_EXECARGS || 9062 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 9063 v == DIF_VAR_UID || v == DIF_VAR_GID) 9064 break; 9065 9066 err += efunc(pc, "illegal variable %u\n", v); 9067 break; 9068 9069 case DIF_OP_LDTA: 9070 case DIF_OP_LDTS: 9071 case DIF_OP_LDGAA: 9072 case DIF_OP_LDTAA: 9073 err += efunc(pc, "illegal dynamic variable load\n"); 9074 break; 9075 9076 case DIF_OP_STTS: 9077 case DIF_OP_STGAA: 9078 case DIF_OP_STTAA: 9079 err += efunc(pc, "illegal dynamic variable store\n"); 9080 break; 9081 9082 case DIF_OP_CALL: 9083 if (subr == DIF_SUBR_ALLOCA || 9084 subr == DIF_SUBR_BCOPY || 9085 subr == DIF_SUBR_COPYIN || 9086 subr == DIF_SUBR_COPYINTO || 9087 subr == DIF_SUBR_COPYINSTR || 9088 subr == DIF_SUBR_INDEX || 9089 subr == DIF_SUBR_INET_NTOA || 9090 subr == DIF_SUBR_INET_NTOA6 || 9091 subr == DIF_SUBR_INET_NTOP || 9092 subr == DIF_SUBR_LLTOSTR || 9093 subr == DIF_SUBR_RINDEX || 9094 subr == DIF_SUBR_STRCHR || 9095 subr == DIF_SUBR_STRJOIN || 9096 subr == DIF_SUBR_STRRCHR || 9097 subr == DIF_SUBR_STRSTR || 9098 subr == DIF_SUBR_HTONS || 9099 subr == DIF_SUBR_HTONL || 9100 subr == DIF_SUBR_HTONLL || 9101 subr == DIF_SUBR_NTOHS || 9102 subr == DIF_SUBR_NTOHL || 9103 subr == DIF_SUBR_NTOHLL || 9104 subr == DIF_SUBR_MEMREF || 9105 subr == DIF_SUBR_TYPEREF) 9106 break; 9107 9108 err += efunc(pc, "invalid subr %u\n", subr); 9109 break; 9110 9111 default: 9112 err += efunc(pc, "invalid opcode %u\n", 9113 DIF_INSTR_OP(instr)); 9114 } 9115 } 9116 9117 return (err); 9118} 9119 9120/* 9121 * Returns 1 if the expression in the DIF object can be cached on a 
per-thread 9122 * basis; 0 if not. 9123 */ 9124static int 9125dtrace_difo_cacheable(dtrace_difo_t *dp) 9126{ 9127 int i; 9128 9129 if (dp == NULL) 9130 return (0); 9131 9132 for (i = 0; i < dp->dtdo_varlen; i++) { 9133 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9134 9135 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 9136 continue; 9137 9138 switch (v->dtdv_id) { 9139 case DIF_VAR_CURTHREAD: 9140 case DIF_VAR_PID: 9141 case DIF_VAR_TID: 9142 case DIF_VAR_EXECARGS: 9143 case DIF_VAR_EXECNAME: 9144 case DIF_VAR_ZONENAME: 9145 break; 9146 9147 default: 9148 return (0); 9149 } 9150 } 9151 9152 /* 9153 * This DIF object may be cacheable. Now we need to look for any 9154 * array loading instructions, any memory loading instructions, or 9155 * any stores to thread-local variables. 9156 */ 9157 for (i = 0; i < dp->dtdo_len; i++) { 9158 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 9159 9160 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 9161 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 9162 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 9163 op == DIF_OP_LDGA || op == DIF_OP_STTS) 9164 return (0); 9165 } 9166 9167 return (1); 9168} 9169 9170static void 9171dtrace_difo_hold(dtrace_difo_t *dp) 9172{ 9173 int i; 9174 9175 ASSERT(MUTEX_HELD(&dtrace_lock)); 9176 9177 dp->dtdo_refcnt++; 9178 ASSERT(dp->dtdo_refcnt != 0); 9179 9180 /* 9181 * We need to check this DIF object for references to the variable 9182 * DIF_VAR_VTIMESTAMP. 9183 */ 9184 for (i = 0; i < dp->dtdo_varlen; i++) { 9185 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9186 9187 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9188 continue; 9189 9190 if (dtrace_vtime_references++ == 0) 9191 dtrace_vtime_enable(); 9192 } 9193} 9194 9195/* 9196 * This routine calculates the dynamic variable chunksize for a given DIF 9197 * object. The calculation is not fool-proof, and can probably be tricked by 9198 * malicious DIF -- but it works for all compiler-generated DIF. Because this 9199 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 9200 * if a dynamic variable size exceeds the chunksize. 
9201 */ 9202static void 9203dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9204{ 9205 uint64_t sval = 0; 9206 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 9207 const dif_instr_t *text = dp->dtdo_buf; 9208 uint_t pc, srd = 0; 9209 uint_t ttop = 0; 9210 size_t size, ksize; 9211 uint_t id, i; 9212 9213 for (pc = 0; pc < dp->dtdo_len; pc++) { 9214 dif_instr_t instr = text[pc]; 9215 uint_t op = DIF_INSTR_OP(instr); 9216 uint_t rd = DIF_INSTR_RD(instr); 9217 uint_t r1 = DIF_INSTR_R1(instr); 9218 uint_t nkeys = 0; 9219 uchar_t scope = 0; 9220 9221 dtrace_key_t *key = tupregs; 9222 9223 switch (op) { 9224 case DIF_OP_SETX: 9225 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 9226 srd = rd; 9227 continue; 9228 9229 case DIF_OP_STTS: 9230 key = &tupregs[DIF_DTR_NREGS]; 9231 key[0].dttk_size = 0; 9232 key[1].dttk_size = 0; 9233 nkeys = 2; 9234 scope = DIFV_SCOPE_THREAD; 9235 break; 9236 9237 case DIF_OP_STGAA: 9238 case DIF_OP_STTAA: 9239 nkeys = ttop; 9240 9241 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 9242 key[nkeys++].dttk_size = 0; 9243 9244 key[nkeys++].dttk_size = 0; 9245 9246 if (op == DIF_OP_STTAA) { 9247 scope = DIFV_SCOPE_THREAD; 9248 } else { 9249 scope = DIFV_SCOPE_GLOBAL; 9250 } 9251 9252 break; 9253 9254 case DIF_OP_PUSHTR: 9255 if (ttop == DIF_DTR_NREGS) 9256 return; 9257 9258 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 9259 /* 9260 * If the register for the size of the "pushtr" 9261 * is %r0 (or the value is 0) and the type is 9262 * a string, we'll use the system-wide default 9263 * string size. 9264 */ 9265 tupregs[ttop++].dttk_size = 9266 dtrace_strsize_default; 9267 } else { 9268 if (srd == 0) 9269 return; 9270 9271 tupregs[ttop++].dttk_size = sval; 9272 } 9273 9274 break; 9275 9276 case DIF_OP_PUSHTV: 9277 if (ttop == DIF_DTR_NREGS) 9278 return; 9279 9280 tupregs[ttop++].dttk_size = 0; 9281 break; 9282 9283 case DIF_OP_FLUSHTS: 9284 ttop = 0; 9285 break; 9286 9287 case DIF_OP_POPTS: 9288 if (ttop != 0) 9289 ttop--; 9290 break; 9291 } 9292 9293 sval = 0; 9294 srd = 0; 9295 9296 if (nkeys == 0) 9297 continue; 9298 9299 /* 9300 * We have a dynamic variable allocation; calculate its size. 9301 */ 9302 for (ksize = 0, i = 0; i < nkeys; i++) 9303 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 9304 9305 size = sizeof (dtrace_dynvar_t); 9306 size += sizeof (dtrace_key_t) * (nkeys - 1); 9307 size += ksize; 9308 9309 /* 9310 * Now we need to determine the size of the stored data. 9311 */ 9312 id = DIF_INSTR_VAR(instr); 9313 9314 for (i = 0; i < dp->dtdo_varlen; i++) { 9315 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9316 9317 if (v->dtdv_id == id && v->dtdv_scope == scope) { 9318 size += v->dtdv_type.dtdt_size; 9319 break; 9320 } 9321 } 9322 9323 if (i == dp->dtdo_varlen) 9324 return; 9325 9326 /* 9327 * We have the size. If this is larger than the chunk size 9328 * for our dynamic variable state, reset the chunk size. 
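 * (As a worked recap of the arithmetic above: the allocation is
 *
 *	sizeof (dtrace_dynvar_t) + (nkeys - 1) * sizeof (dtrace_key_t)
 *	    + the uint64_t-rounded key sizes + the stored value's size,
 *
 * and the rounding below keeps every chunk uint64_t-aligned.)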
9329 */ 9330 size = P2ROUNDUP(size, sizeof (uint64_t)); 9331 9332 if (size > vstate->dtvs_dynvars.dtds_chunksize) 9333 vstate->dtvs_dynvars.dtds_chunksize = size; 9334 } 9335} 9336 9337static void 9338dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9339{ 9340 int i, oldsvars, osz, nsz, otlocals, ntlocals; 9341 uint_t id; 9342 9343 ASSERT(MUTEX_HELD(&dtrace_lock)); 9344 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 9345 9346 for (i = 0; i < dp->dtdo_varlen; i++) { 9347 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9348 dtrace_statvar_t *svar, ***svarp = NULL; 9349 size_t dsize = 0; 9350 uint8_t scope = v->dtdv_scope; 9351 int *np = NULL; 9352 9353 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9354 continue; 9355 9356 id -= DIF_VAR_OTHER_UBASE; 9357 9358 switch (scope) { 9359 case DIFV_SCOPE_THREAD: 9360 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 9361 dtrace_difv_t *tlocals; 9362 9363 if ((ntlocals = (otlocals << 1)) == 0) 9364 ntlocals = 1; 9365 9366 osz = otlocals * sizeof (dtrace_difv_t); 9367 nsz = ntlocals * sizeof (dtrace_difv_t); 9368 9369 tlocals = kmem_zalloc(nsz, KM_SLEEP); 9370 9371 if (osz != 0) { 9372 bcopy(vstate->dtvs_tlocals, 9373 tlocals, osz); 9374 kmem_free(vstate->dtvs_tlocals, osz); 9375 } 9376 9377 vstate->dtvs_tlocals = tlocals; 9378 vstate->dtvs_ntlocals = ntlocals; 9379 } 9380 9381 vstate->dtvs_tlocals[id] = *v; 9382 continue; 9383 9384 case DIFV_SCOPE_LOCAL: 9385 np = &vstate->dtvs_nlocals; 9386 svarp = &vstate->dtvs_locals; 9387 9388 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9389 dsize = NCPU * (v->dtdv_type.dtdt_size + 9390 sizeof (uint64_t)); 9391 else 9392 dsize = NCPU * sizeof (uint64_t); 9393 9394 break; 9395 9396 case DIFV_SCOPE_GLOBAL: 9397 np = &vstate->dtvs_nglobals; 9398 svarp = &vstate->dtvs_globals; 9399 9400 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9401 dsize = v->dtdv_type.dtdt_size + 9402 sizeof (uint64_t); 9403 9404 break; 9405 9406 default: 9407 ASSERT(0); 9408 } 9409 9410 while (id >= (oldsvars = *np)) { 9411 dtrace_statvar_t **statics; 9412 int newsvars, oldsize, newsize; 9413 9414 if ((newsvars = (oldsvars << 1)) == 0) 9415 newsvars = 1; 9416 9417 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 9418 newsize = newsvars * sizeof (dtrace_statvar_t *); 9419 9420 statics = kmem_zalloc(newsize, KM_SLEEP); 9421 9422 if (oldsize != 0) { 9423 bcopy(*svarp, statics, oldsize); 9424 kmem_free(*svarp, oldsize); 9425 } 9426 9427 *svarp = statics; 9428 *np = newsvars; 9429 } 9430 9431 if ((svar = (*svarp)[id]) == NULL) { 9432 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 9433 svar->dtsv_var = *v; 9434 9435 if ((svar->dtsv_size = dsize) != 0) { 9436 svar->dtsv_data = (uint64_t)(uintptr_t) 9437 kmem_zalloc(dsize, KM_SLEEP); 9438 } 9439 9440 (*svarp)[id] = svar; 9441 } 9442 9443 svar->dtsv_refcnt++; 9444 } 9445 9446 dtrace_difo_chunksize(dp, vstate); 9447 dtrace_difo_hold(dp); 9448} 9449 9450static dtrace_difo_t * 9451dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9452{ 9453 dtrace_difo_t *new; 9454 size_t sz; 9455 9456 ASSERT(dp->dtdo_buf != NULL); 9457 ASSERT(dp->dtdo_refcnt != 0); 9458 9459 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 9460 9461 ASSERT(dp->dtdo_buf != NULL); 9462 sz = dp->dtdo_len * sizeof (dif_instr_t); 9463 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 9464 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 9465 new->dtdo_len = dp->dtdo_len; 9466 9467 if (dp->dtdo_strtab != NULL) { 9468 ASSERT(dp->dtdo_strlen != 0); 9469 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 9470 
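	/*
	 * The string table is a flat array of bytes -- dtdo_strlen is a
	 * byte count, not an entry count -- so a straight bcopy()
	 * duplicates it.
	 */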
bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 9471 new->dtdo_strlen = dp->dtdo_strlen; 9472 } 9473 9474 if (dp->dtdo_inttab != NULL) { 9475 ASSERT(dp->dtdo_intlen != 0); 9476 sz = dp->dtdo_intlen * sizeof (uint64_t); 9477 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 9478 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 9479 new->dtdo_intlen = dp->dtdo_intlen; 9480 } 9481 9482 if (dp->dtdo_vartab != NULL) { 9483 ASSERT(dp->dtdo_varlen != 0); 9484 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 9485 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 9486 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 9487 new->dtdo_varlen = dp->dtdo_varlen; 9488 } 9489 9490 dtrace_difo_init(new, vstate); 9491 return (new); 9492} 9493 9494static void 9495dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9496{ 9497 int i; 9498 9499 ASSERT(dp->dtdo_refcnt == 0); 9500 9501 for (i = 0; i < dp->dtdo_varlen; i++) { 9502 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9503 dtrace_statvar_t *svar, **svarp = NULL; 9504 uint_t id; 9505 uint8_t scope = v->dtdv_scope; 9506 int *np = NULL; 9507 9508 switch (scope) { 9509 case DIFV_SCOPE_THREAD: 9510 continue; 9511 9512 case DIFV_SCOPE_LOCAL: 9513 np = &vstate->dtvs_nlocals; 9514 svarp = vstate->dtvs_locals; 9515 break; 9516 9517 case DIFV_SCOPE_GLOBAL: 9518 np = &vstate->dtvs_nglobals; 9519 svarp = vstate->dtvs_globals; 9520 break; 9521 9522 default: 9523 ASSERT(0); 9524 } 9525 9526 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9527 continue; 9528 9529 id -= DIF_VAR_OTHER_UBASE; 9530 ASSERT(id < *np); 9531 9532 svar = svarp[id]; 9533 ASSERT(svar != NULL); 9534 ASSERT(svar->dtsv_refcnt > 0); 9535 9536 if (--svar->dtsv_refcnt > 0) 9537 continue; 9538 9539 if (svar->dtsv_size != 0) { 9540 ASSERT(svar->dtsv_data != 0); 9541 kmem_free((void *)(uintptr_t)svar->dtsv_data, 9542 svar->dtsv_size); 9543 } 9544 9545 kmem_free(svar, sizeof (dtrace_statvar_t)); 9546 svarp[id] = NULL; 9547 } 9548 9549 if (dp->dtdo_buf != NULL) 9550 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 9551 if (dp->dtdo_inttab != NULL) 9552 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 9553 if (dp->dtdo_strtab != NULL) 9554 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 9555 if (dp->dtdo_vartab != NULL) 9556 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 9557 9558 kmem_free(dp, sizeof (dtrace_difo_t)); 9559} 9560 9561static void 9562dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9563{ 9564 int i; 9565 9566 ASSERT(MUTEX_HELD(&dtrace_lock)); 9567 ASSERT(dp->dtdo_refcnt != 0); 9568 9569 for (i = 0; i < dp->dtdo_varlen; i++) { 9570 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9571 9572 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9573 continue; 9574 9575 ASSERT(dtrace_vtime_references > 0); 9576 if (--dtrace_vtime_references == 0) 9577 dtrace_vtime_disable(); 9578 } 9579 9580 if (--dp->dtdo_refcnt == 0) 9581 dtrace_difo_destroy(dp, vstate); 9582} 9583 9584/* 9585 * DTrace Format Functions 9586 */ 9587static uint16_t 9588dtrace_format_add(dtrace_state_t *state, char *str) 9589{ 9590 char *fmt, **new; 9591 uint16_t ndx, len = strlen(str) + 1; 9592 9593 fmt = kmem_zalloc(len, KM_SLEEP); 9594 bcopy(str, fmt, len); 9595 9596 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 9597 if (state->dts_formats[ndx] == NULL) { 9598 state->dts_formats[ndx] = fmt; 9599 return (ndx + 1); 9600 } 9601 } 9602 9603 if (state->dts_nformats == USHRT_MAX) { 9604 /* 9605 * This is only likely if a denial-of-service attack is being 9606 * attempted. 
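 * (Format handles are returned to userland as 16-bit quantities, so
 * USHRT_MAX is a hard ceiling -- one that no plausible legitimate
 * consumer approaches.)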
As such, it's okay to fail silently here. 9607 */ 9608 kmem_free(fmt, len); 9609 return (0); 9610 } 9611 9612 /* 9613 * For simplicity, we always resize the formats array to be exactly the 9614 * number of formats. 9615 */ 9616 ndx = state->dts_nformats++; 9617 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 9618 9619 if (state->dts_formats != NULL) { 9620 ASSERT(ndx != 0); 9621 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 9622 kmem_free(state->dts_formats, ndx * sizeof (char *)); 9623 } 9624 9625 state->dts_formats = new; 9626 state->dts_formats[ndx] = fmt; 9627 9628 return (ndx + 1); 9629} 9630 9631static void 9632dtrace_format_remove(dtrace_state_t *state, uint16_t format) 9633{ 9634 char *fmt; 9635 9636 ASSERT(state->dts_formats != NULL); 9637 ASSERT(format <= state->dts_nformats); 9638 ASSERT(state->dts_formats[format - 1] != NULL); 9639 9640 fmt = state->dts_formats[format - 1]; 9641 kmem_free(fmt, strlen(fmt) + 1); 9642 state->dts_formats[format - 1] = NULL; 9643} 9644 9645static void 9646dtrace_format_destroy(dtrace_state_t *state) 9647{ 9648 int i; 9649 9650 if (state->dts_nformats == 0) { 9651 ASSERT(state->dts_formats == NULL); 9652 return; 9653 } 9654 9655 ASSERT(state->dts_formats != NULL); 9656 9657 for (i = 0; i < state->dts_nformats; i++) { 9658 char *fmt = state->dts_formats[i]; 9659 9660 if (fmt == NULL) 9661 continue; 9662 9663 kmem_free(fmt, strlen(fmt) + 1); 9664 } 9665 9666 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 9667 state->dts_nformats = 0; 9668 state->dts_formats = NULL; 9669} 9670 9671/* 9672 * DTrace Predicate Functions 9673 */ 9674static dtrace_predicate_t * 9675dtrace_predicate_create(dtrace_difo_t *dp) 9676{ 9677 dtrace_predicate_t *pred; 9678 9679 ASSERT(MUTEX_HELD(&dtrace_lock)); 9680 ASSERT(dp->dtdo_refcnt != 0); 9681 9682 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 9683 pred->dtp_difo = dp; 9684 pred->dtp_refcnt = 1; 9685 9686 if (!dtrace_difo_cacheable(dp)) 9687 return (pred); 9688 9689 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 9690 /* 9691 * This is only theoretically possible -- we have had 2^32 9692 * cacheable predicates on this machine. We cannot allow any 9693 * more predicates to become cacheable: as unlikely as it is, 9694 * there may be a thread caching a (now stale) predicate cache 9695 * ID. 
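 * (Specifically, dtrace_probe() skips predicate evaluation outright
 * when a thread's cached predicate ID matches the probe's
 * dtpr_predcache; a reissued ID could make that match succeed
 * spuriously.)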
(N.B.: the temptation is being successfully resisted to 9696 * have this cmn_err() "Holy shit -- we executed this code!") 9697 */ 9698 return (pred); 9699 } 9700 9701 pred->dtp_cacheid = dtrace_predcache_id++; 9702 9703 return (pred); 9704} 9705 9706static void 9707dtrace_predicate_hold(dtrace_predicate_t *pred) 9708{ 9709 ASSERT(MUTEX_HELD(&dtrace_lock)); 9710 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 9711 ASSERT(pred->dtp_refcnt > 0); 9712 9713 pred->dtp_refcnt++; 9714} 9715 9716static void 9717dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 9718{ 9719 dtrace_difo_t *dp = pred->dtp_difo; 9720 9721 ASSERT(MUTEX_HELD(&dtrace_lock)); 9722 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 9723 ASSERT(pred->dtp_refcnt > 0); 9724 9725 if (--pred->dtp_refcnt == 0) { 9726 dtrace_difo_release(pred->dtp_difo, vstate); 9727 kmem_free(pred, sizeof (dtrace_predicate_t)); 9728 } 9729} 9730 9731/* 9732 * DTrace Action Description Functions 9733 */ 9734static dtrace_actdesc_t * 9735dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 9736 uint64_t uarg, uint64_t arg) 9737{ 9738 dtrace_actdesc_t *act; 9739 9740#if defined(sun) 9741 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 9742 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 9743#endif 9744 9745 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 9746 act->dtad_kind = kind; 9747 act->dtad_ntuple = ntuple; 9748 act->dtad_uarg = uarg; 9749 act->dtad_arg = arg; 9750 act->dtad_refcnt = 1; 9751 9752 return (act); 9753} 9754 9755static void 9756dtrace_actdesc_hold(dtrace_actdesc_t *act) 9757{ 9758 ASSERT(act->dtad_refcnt >= 1); 9759 act->dtad_refcnt++; 9760} 9761 9762static void 9763dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 9764{ 9765 dtrace_actkind_t kind = act->dtad_kind; 9766 dtrace_difo_t *dp; 9767 9768 ASSERT(act->dtad_refcnt >= 1); 9769 9770 if (--act->dtad_refcnt != 0) 9771 return; 9772 9773 if ((dp = act->dtad_difo) != NULL) 9774 dtrace_difo_release(dp, vstate); 9775 9776 if (DTRACEACT_ISPRINTFLIKE(kind)) { 9777 char *str = (char *)(uintptr_t)act->dtad_arg; 9778 9779#if defined(sun) 9780 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 9781 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 9782#endif 9783 9784 if (str != NULL) 9785 kmem_free(str, strlen(str) + 1); 9786 } 9787 9788 kmem_free(act, sizeof (dtrace_actdesc_t)); 9789} 9790 9791/* 9792 * DTrace ECB Functions 9793 */ 9794static dtrace_ecb_t * 9795dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 9796{ 9797 dtrace_ecb_t *ecb; 9798 dtrace_epid_t epid; 9799 9800 ASSERT(MUTEX_HELD(&dtrace_lock)); 9801 9802 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 9803 ecb->dte_predicate = NULL; 9804 ecb->dte_probe = probe; 9805 9806 /* 9807 * The default size is the size of the default action: recording 9808 * the header. 
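 * (The header is a dtrace_rechdr_t -- the enabled probe ID plus a
 * timestamp -- which is why even an ECB with no data-recording actions
 * starts out header-sized and epid-aligned.)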
9809 */ 9810 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t); 9811 ecb->dte_alignment = sizeof (dtrace_epid_t); 9812 9813 epid = state->dts_epid++; 9814 9815 if (epid - 1 >= state->dts_necbs) { 9816 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 9817 int necbs = state->dts_necbs << 1; 9818 9819 ASSERT(epid == state->dts_necbs + 1); 9820 9821 if (necbs == 0) { 9822 ASSERT(oecbs == NULL); 9823 necbs = 1; 9824 } 9825 9826 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 9827 9828 if (oecbs != NULL) 9829 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 9830 9831 dtrace_membar_producer(); 9832 state->dts_ecbs = ecbs; 9833 9834 if (oecbs != NULL) { 9835 /* 9836 * If this state is active, we must dtrace_sync() 9837 * before we can free the old dts_ecbs array: we're 9838 * coming in hot, and there may be active ring 9839 * buffer processing (which indexes into the dts_ecbs 9840 * array) on another CPU. 9841 */ 9842 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 9843 dtrace_sync(); 9844 9845 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 9846 } 9847 9848 dtrace_membar_producer(); 9849 state->dts_necbs = necbs; 9850 } 9851 9852 ecb->dte_state = state; 9853 9854 ASSERT(state->dts_ecbs[epid - 1] == NULL); 9855 dtrace_membar_producer(); 9856 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 9857 9858 return (ecb); 9859} 9860 9861static void 9862dtrace_ecb_enable(dtrace_ecb_t *ecb) 9863{ 9864 dtrace_probe_t *probe = ecb->dte_probe; 9865 9866 ASSERT(MUTEX_HELD(&cpu_lock)); 9867 ASSERT(MUTEX_HELD(&dtrace_lock)); 9868 ASSERT(ecb->dte_next == NULL); 9869 9870 if (probe == NULL) { 9871 /* 9872 * This is the NULL probe -- there's nothing to do. 9873 */ 9874 return; 9875 } 9876 9877 if (probe->dtpr_ecb == NULL) { 9878 dtrace_provider_t *prov = probe->dtpr_provider; 9879 9880 /* 9881 * We're the first ECB on this probe. 9882 */ 9883 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 9884 9885 if (ecb->dte_predicate != NULL) 9886 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 9887 9888 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 9889 probe->dtpr_id, probe->dtpr_arg); 9890 } else { 9891 /* 9892 * This probe is already active. Swing the last pointer to 9893 * point to the new ECB, and issue a dtrace_sync() to assure 9894 * that all CPUs have seen the change. 9895 */ 9896 ASSERT(probe->dtpr_ecb_last != NULL); 9897 probe->dtpr_ecb_last->dte_next = ecb; 9898 probe->dtpr_ecb_last = ecb; 9899 probe->dtpr_predcache = 0; 9900 9901 dtrace_sync(); 9902 } 9903} 9904 9905static void 9906dtrace_ecb_resize(dtrace_ecb_t *ecb) 9907{ 9908 dtrace_action_t *act; 9909 uint32_t curneeded = UINT32_MAX; 9910 uint32_t aggbase = UINT32_MAX; 9911 9912 /* 9913 * If we record anything, we always record the dtrace_rechdr_t. (And 9914 * we always record it first.) 
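 * As an illustrative sketch (the concrete sizes are assumptions, not
 * normative): an ECB whose sole action records one 8-byte value with
 * 8-byte alignment ends up with
 *
 *	rec->dtrd_offset = P2ROUNDUP(sizeof (dtrace_rechdr_t), 8);
 *	ecb->dte_size = P2ROUNDUP(rec->dtrd_offset + 8,
 *	    sizeof (dtrace_epid_t));
 *
 * which is exactly the arithmetic that the loop below performs.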
9915 */ 9916 ecb->dte_size = sizeof (dtrace_rechdr_t); 9917 ecb->dte_alignment = sizeof (dtrace_epid_t); 9918 9919 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9920 dtrace_recdesc_t *rec = &act->dta_rec; 9921 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1); 9922 9923 ecb->dte_alignment = MAX(ecb->dte_alignment, 9924 rec->dtrd_alignment); 9925 9926 if (DTRACEACT_ISAGG(act->dta_kind)) { 9927 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9928 9929 ASSERT(rec->dtrd_size != 0); 9930 ASSERT(agg->dtag_first != NULL); 9931 ASSERT(act->dta_prev->dta_intuple); 9932 ASSERT(aggbase != UINT32_MAX); 9933 ASSERT(curneeded != UINT32_MAX); 9934 9935 agg->dtag_base = aggbase; 9936 9937 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 9938 rec->dtrd_offset = curneeded; 9939 curneeded += rec->dtrd_size; 9940 ecb->dte_needed = MAX(ecb->dte_needed, curneeded); 9941 9942 aggbase = UINT32_MAX; 9943 curneeded = UINT32_MAX; 9944 } else if (act->dta_intuple) { 9945 if (curneeded == UINT32_MAX) { 9946 /* 9947 * This is the first record in a tuple. Align 9948 * curneeded to be at offset 4 in an 8-byte 9949 * aligned block. 9950 */ 9951 ASSERT(act->dta_prev == NULL || 9952 !act->dta_prev->dta_intuple); 9953 ASSERT3U(aggbase, ==, UINT32_MAX); 9954 curneeded = P2PHASEUP(ecb->dte_size, 9955 sizeof (uint64_t), sizeof (dtrace_aggid_t)); 9956 9957 aggbase = curneeded - sizeof (dtrace_aggid_t); 9958 ASSERT(IS_P2ALIGNED(aggbase, 9959 sizeof (uint64_t))); 9960 } 9961 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 9962 rec->dtrd_offset = curneeded; 9963 curneeded += rec->dtrd_size; 9964 } else { 9965 /* tuples must be followed by an aggregation */ 9966 ASSERT(act->dta_prev == NULL || 9967 !act->dta_prev->dta_intuple); 9968 9969 ecb->dte_size = P2ROUNDUP(ecb->dte_size, 9970 rec->dtrd_alignment); 9971 rec->dtrd_offset = ecb->dte_size; 9972 ecb->dte_size += rec->dtrd_size; 9973 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size); 9974 } 9975 } 9976 9977 if ((act = ecb->dte_action) != NULL && 9978 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 9979 ecb->dte_size == sizeof (dtrace_rechdr_t)) { 9980 /* 9981 * If the size is still sizeof (dtrace_rechdr_t), then all 9982 * actions store no data; set the size to 0. 
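 * (The lone exception, per the test above: an ECB whose only action
 * is a speculate deliberately keeps its header-sized record.)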
9983 */ 9984 ecb->dte_size = 0; 9985 } 9986 9987 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t)); 9988 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t))); 9989 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed, 9990 ecb->dte_needed); 9991} 9992 9993static dtrace_action_t * 9994dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9995{ 9996 dtrace_aggregation_t *agg; 9997 size_t size = sizeof (uint64_t); 9998 int ntuple = desc->dtad_ntuple; 9999 dtrace_action_t *act; 10000 dtrace_recdesc_t *frec; 10001 dtrace_aggid_t aggid; 10002 dtrace_state_t *state = ecb->dte_state; 10003 10004 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 10005 agg->dtag_ecb = ecb; 10006 10007 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 10008 10009 switch (desc->dtad_kind) { 10010 case DTRACEAGG_MIN: 10011 agg->dtag_initial = INT64_MAX; 10012 agg->dtag_aggregate = dtrace_aggregate_min; 10013 break; 10014 10015 case DTRACEAGG_MAX: 10016 agg->dtag_initial = INT64_MIN; 10017 agg->dtag_aggregate = dtrace_aggregate_max; 10018 break; 10019 10020 case DTRACEAGG_COUNT: 10021 agg->dtag_aggregate = dtrace_aggregate_count; 10022 break; 10023 10024 case DTRACEAGG_QUANTIZE: 10025 agg->dtag_aggregate = dtrace_aggregate_quantize; 10026 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 10027 sizeof (uint64_t); 10028 break; 10029 10030 case DTRACEAGG_LQUANTIZE: { 10031 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 10032 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 10033 10034 agg->dtag_initial = desc->dtad_arg; 10035 agg->dtag_aggregate = dtrace_aggregate_lquantize; 10036 10037 if (step == 0 || levels == 0) 10038 goto err; 10039 10040 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 10041 break; 10042 } 10043 10044 case DTRACEAGG_LLQUANTIZE: { 10045 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 10046 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 10047 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 10048 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 10049 int64_t v; 10050 10051 agg->dtag_initial = desc->dtad_arg; 10052 agg->dtag_aggregate = dtrace_aggregate_llquantize; 10053 10054 if (factor < 2 || low >= high || nsteps < factor) 10055 goto err; 10056 10057 /* 10058 * Now check that the number of steps evenly divides a power 10059 * of the factor. (This assures both integer bucket size and 10060 * linearity within each magnitude.) 10061 */ 10062 for (v = factor; v < nsteps; v *= factor) 10063 continue; 10064 10065 if ((v % nsteps) || (nsteps % factor)) 10066 goto err; 10067 10068 size = (dtrace_aggregate_llquantize_bucket(factor, 10069 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 10070 break; 10071 } 10072 10073 case DTRACEAGG_AVG: 10074 agg->dtag_aggregate = dtrace_aggregate_avg; 10075 size = sizeof (uint64_t) * 2; 10076 break; 10077 10078 case DTRACEAGG_STDDEV: 10079 agg->dtag_aggregate = dtrace_aggregate_stddev; 10080 size = sizeof (uint64_t) * 4; 10081 break; 10082 10083 case DTRACEAGG_SUM: 10084 agg->dtag_aggregate = dtrace_aggregate_sum; 10085 break; 10086 10087 default: 10088 goto err; 10089 } 10090 10091 agg->dtag_action.dta_rec.dtrd_size = size; 10092 10093 if (ntuple == 0) 10094 goto err; 10095 10096 /* 10097 * We must make sure that we have enough actions for the n-tuple. 
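 * For example, a hypothetical enabling along the lines of
 *
 *	@a[pid, execname] = count();
 *
 * arrives as two tuple-forming actions followed by the aggregating
 * action itself; the backward walk below must find all ntuple of them
 * before running into another aggregation.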
10098 */ 10099 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 10100 if (DTRACEACT_ISAGG(act->dta_kind)) 10101 break; 10102 10103 if (--ntuple == 0) { 10104 /* 10105 * This is the action with which our n-tuple begins. 10106 */ 10107 agg->dtag_first = act; 10108 goto success; 10109 } 10110 } 10111 10112 /* 10113 * This n-tuple is short by ntuple elements. Return failure. 10114 */ 10115 ASSERT(ntuple != 0); 10116err: 10117 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10118 return (NULL); 10119 10120success: 10121 /* 10122 * If the last action in the tuple has a size of zero, it's actually 10123 * an expression argument for the aggregating action. 10124 */ 10125 ASSERT(ecb->dte_action_last != NULL); 10126 act = ecb->dte_action_last; 10127 10128 if (act->dta_kind == DTRACEACT_DIFEXPR) { 10129 ASSERT(act->dta_difo != NULL); 10130 10131 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 10132 agg->dtag_hasarg = 1; 10133 } 10134 10135 /* 10136 * We need to allocate an id for this aggregation. 10137 */ 10138#if defined(sun) 10139 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 10140 VM_BESTFIT | VM_SLEEP); 10141#else 10142 aggid = alloc_unr(state->dts_aggid_arena); 10143#endif 10144 10145 if (aggid - 1 >= state->dts_naggregations) { 10146 dtrace_aggregation_t **oaggs = state->dts_aggregations; 10147 dtrace_aggregation_t **aggs; 10148 int naggs = state->dts_naggregations << 1; 10149 int onaggs = state->dts_naggregations; 10150 10151 ASSERT(aggid == state->dts_naggregations + 1); 10152 10153 if (naggs == 0) { 10154 ASSERT(oaggs == NULL); 10155 naggs = 1; 10156 } 10157 10158 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 10159 10160 if (oaggs != NULL) { 10161 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 10162 kmem_free(oaggs, onaggs * sizeof (*aggs)); 10163 } 10164 10165 state->dts_aggregations = aggs; 10166 state->dts_naggregations = naggs; 10167 } 10168 10169 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 10170 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 10171 10172 frec = &agg->dtag_first->dta_rec; 10173 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 10174 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 10175 10176 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 10177 ASSERT(!act->dta_intuple); 10178 act->dta_intuple = 1; 10179 } 10180 10181 return (&agg->dtag_action); 10182} 10183 10184static void 10185dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 10186{ 10187 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 10188 dtrace_state_t *state = ecb->dte_state; 10189 dtrace_aggid_t aggid = agg->dtag_id; 10190 10191 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 10192#if defined(sun) 10193 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 10194#else 10195 free_unr(state->dts_aggid_arena, aggid); 10196#endif 10197 10198 ASSERT(state->dts_aggregations[aggid - 1] == agg); 10199 state->dts_aggregations[aggid - 1] = NULL; 10200 10201 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10202} 10203 10204static int 10205dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 10206{ 10207 dtrace_action_t *action, *last; 10208 dtrace_difo_t *dp = desc->dtad_difo; 10209 uint32_t size = 0, align = sizeof (uint8_t), mask; 10210 uint16_t format = 0; 10211 dtrace_recdesc_t *rec; 10212 dtrace_state_t *state = ecb->dte_state; 10213 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 10214 uint64_t arg = desc->dtad_arg; 10215 10216 ASSERT(MUTEX_HELD(&dtrace_lock)); 10217 
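	/*
	 * If this ECB were sharing a cached action list (that is, if
	 * dta_refcnt were greater than one), grafting new actions onto
	 * it would corrupt the other ECBs using the list; the assertion
	 * below states that we own the list outright.
	 */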
ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 10218 10219 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 10220 /* 10221 * If this is an aggregating action, there must be neither 10222 * a speculate nor a commit on the action chain. 10223 */ 10224 dtrace_action_t *act; 10225 10226 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 10227 if (act->dta_kind == DTRACEACT_COMMIT) 10228 return (EINVAL); 10229 10230 if (act->dta_kind == DTRACEACT_SPECULATE) 10231 return (EINVAL); 10232 } 10233 10234 action = dtrace_ecb_aggregation_create(ecb, desc); 10235 10236 if (action == NULL) 10237 return (EINVAL); 10238 } else { 10239 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 10240 (desc->dtad_kind == DTRACEACT_DIFEXPR && 10241 dp != NULL && dp->dtdo_destructive)) { 10242 state->dts_destructive = 1; 10243 } 10244 10245 switch (desc->dtad_kind) { 10246 case DTRACEACT_PRINTF: 10247 case DTRACEACT_PRINTA: 10248 case DTRACEACT_SYSTEM: 10249 case DTRACEACT_FREOPEN: 10250 case DTRACEACT_DIFEXPR: 10251 /* 10252 * We know that our arg is a string -- turn it into a 10253 * format. 10254 */ 10255 if (arg == 0) { 10256 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA || 10257 desc->dtad_kind == DTRACEACT_DIFEXPR); 10258 format = 0; 10259 } else { 10260 ASSERT(arg != 0); 10261#if defined(sun) 10262 ASSERT(arg > KERNELBASE); 10263#endif 10264 format = dtrace_format_add(state, 10265 (char *)(uintptr_t)arg); 10266 } 10267 10268 /*FALLTHROUGH*/ 10269 case DTRACEACT_LIBACT: 10270 case DTRACEACT_TRACEMEM: 10271 case DTRACEACT_TRACEMEM_DYNSIZE: 10272 if (dp == NULL) 10273 return (EINVAL); 10274 10275 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 10276 break; 10277 10278 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 10279 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10280 return (EINVAL); 10281 10282 size = opt[DTRACEOPT_STRSIZE]; 10283 } 10284 10285 break; 10286 10287 case DTRACEACT_STACK: 10288 if ((nframes = arg) == 0) { 10289 nframes = opt[DTRACEOPT_STACKFRAMES]; 10290 ASSERT(nframes > 0); 10291 arg = nframes; 10292 } 10293 10294 size = nframes * sizeof (pc_t); 10295 break; 10296 10297 case DTRACEACT_JSTACK: 10298 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 10299 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 10300 10301 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 10302 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 10303 10304 arg = DTRACE_USTACK_ARG(nframes, strsize); 10305 10306 /*FALLTHROUGH*/ 10307 case DTRACEACT_USTACK: 10308 if (desc->dtad_kind != DTRACEACT_JSTACK && 10309 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 10310 strsize = DTRACE_USTACK_STRSIZE(arg); 10311 nframes = opt[DTRACEOPT_USTACKFRAMES]; 10312 ASSERT(nframes > 0); 10313 arg = DTRACE_USTACK_ARG(nframes, strsize); 10314 } 10315 10316 /* 10317 * Save a slot for the pid. 10318 */ 10319 size = (nframes + 1) * sizeof (uint64_t); 10320 size += DTRACE_USTACK_STRSIZE(arg); 10321 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 10322 10323 break; 10324 10325 case DTRACEACT_SYM: 10326 case DTRACEACT_MOD: 10327 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 10328 sizeof (uint64_t)) || 10329 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10330 return (EINVAL); 10331 break; 10332 10333 case DTRACEACT_USYM: 10334 case DTRACEACT_UMOD: 10335 case DTRACEACT_UADDR: 10336 if (dp == NULL || 10337 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 10338 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10339 return (EINVAL); 10340 10341 /* 10342 * We have a slot for the pid, plus a slot for the 10343 * argument. 
To keep things simple (aligned with 10344 * bitness-neutral sizing), we store each as a 64-bit 10345 * quantity. 10346 */ 10347 size = 2 * sizeof (uint64_t); 10348 break; 10349 10350 case DTRACEACT_STOP: 10351 case DTRACEACT_BREAKPOINT: 10352 case DTRACEACT_PANIC: 10353 break; 10354 10355 case DTRACEACT_CHILL: 10356 case DTRACEACT_DISCARD: 10357 case DTRACEACT_RAISE: 10358 if (dp == NULL) 10359 return (EINVAL); 10360 break; 10361 10362 case DTRACEACT_EXIT: 10363 if (dp == NULL || 10364 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 10365 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10366 return (EINVAL); 10367 break; 10368 10369 case DTRACEACT_SPECULATE: 10370 if (ecb->dte_size > sizeof (dtrace_rechdr_t)) 10371 return (EINVAL); 10372 10373 if (dp == NULL) 10374 return (EINVAL); 10375 10376 state->dts_speculates = 1; 10377 break; 10378 10379 case DTRACEACT_PRINTM: 10380 size = dp->dtdo_rtype.dtdt_size; 10381 break; 10382 10383 case DTRACEACT_PRINTT: 10384 size = dp->dtdo_rtype.dtdt_size; 10385 break; 10386 10387 case DTRACEACT_COMMIT: { 10388 dtrace_action_t *act = ecb->dte_action; 10389 10390 for (; act != NULL; act = act->dta_next) { 10391 if (act->dta_kind == DTRACEACT_COMMIT) 10392 return (EINVAL); 10393 } 10394 10395 if (dp == NULL) 10396 return (EINVAL); 10397 break; 10398 } 10399 10400 default: 10401 return (EINVAL); 10402 } 10403 10404 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 10405 /* 10406 * If this is a data-storing action or a speculate, 10407 * we must be sure that there isn't a commit on the 10408 * action chain. 10409 */ 10410 dtrace_action_t *act = ecb->dte_action; 10411 10412 for (; act != NULL; act = act->dta_next) { 10413 if (act->dta_kind == DTRACEACT_COMMIT) 10414 return (EINVAL); 10415 } 10416 } 10417 10418 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 10419 action->dta_rec.dtrd_size = size; 10420 } 10421 10422 action->dta_refcnt = 1; 10423 rec = &action->dta_rec; 10424 size = rec->dtrd_size; 10425 10426 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 10427 if (!(size & mask)) { 10428 align = mask + 1; 10429 break; 10430 } 10431 } 10432 10433 action->dta_kind = desc->dtad_kind; 10434 10435 if ((action->dta_difo = dp) != NULL) 10436 dtrace_difo_hold(dp); 10437 10438 rec->dtrd_action = action->dta_kind; 10439 rec->dtrd_arg = arg; 10440 rec->dtrd_uarg = desc->dtad_uarg; 10441 rec->dtrd_alignment = (uint16_t)align; 10442 rec->dtrd_format = format; 10443 10444 if ((last = ecb->dte_action_last) != NULL) { 10445 ASSERT(ecb->dte_action != NULL); 10446 action->dta_prev = last; 10447 last->dta_next = action; 10448 } else { 10449 ASSERT(ecb->dte_action == NULL); 10450 ecb->dte_action = action; 10451 } 10452 10453 ecb->dte_action_last = action; 10454 10455 return (0); 10456} 10457 10458static void 10459dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 10460{ 10461 dtrace_action_t *act = ecb->dte_action, *next; 10462 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 10463 dtrace_difo_t *dp; 10464 uint16_t format; 10465 10466 if (act != NULL && act->dta_refcnt > 1) { 10467 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 10468 act->dta_refcnt--; 10469 } else { 10470 for (; act != NULL; act = next) { 10471 next = act->dta_next; 10472 ASSERT(next != NULL || act == ecb->dte_action_last); 10473 ASSERT(act->dta_refcnt == 1); 10474 10475 if ((format = act->dta_rec.dtrd_format) != 0) 10476 dtrace_format_remove(ecb->dte_state, format); 10477 10478 if ((dp = act->dta_difo) != NULL) 10479 dtrace_difo_release(dp, vstate); 
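			/*
			 * Aggregating actions are embedded in a larger
			 * dtrace_aggregation_t (and own an aggregation
			 * ID), so they go through their own destructor
			 * rather than a bare kmem_free().
			 */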
10480 10481 if (DTRACEACT_ISAGG(act->dta_kind)) { 10482 dtrace_ecb_aggregation_destroy(ecb, act); 10483 } else { 10484 kmem_free(act, sizeof (dtrace_action_t)); 10485 } 10486 } 10487 } 10488 10489 ecb->dte_action = NULL; 10490 ecb->dte_action_last = NULL; 10491 ecb->dte_size = 0; 10492} 10493 10494static void 10495dtrace_ecb_disable(dtrace_ecb_t *ecb) 10496{ 10497 /* 10498 * We disable the ECB by removing it from its probe. 10499 */ 10500 dtrace_ecb_t *pecb, *prev = NULL; 10501 dtrace_probe_t *probe = ecb->dte_probe; 10502 10503 ASSERT(MUTEX_HELD(&dtrace_lock)); 10504 10505 if (probe == NULL) { 10506 /* 10507 * This is the NULL probe; there is nothing to disable. 10508 */ 10509 return; 10510 } 10511 10512 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 10513 if (pecb == ecb) 10514 break; 10515 prev = pecb; 10516 } 10517 10518 ASSERT(pecb != NULL); 10519 10520 if (prev == NULL) { 10521 probe->dtpr_ecb = ecb->dte_next; 10522 } else { 10523 prev->dte_next = ecb->dte_next; 10524 } 10525 10526 if (ecb == probe->dtpr_ecb_last) { 10527 ASSERT(ecb->dte_next == NULL); 10528 probe->dtpr_ecb_last = prev; 10529 } 10530 10531 /* 10532 * The ECB has been disconnected from the probe; now sync to assure 10533 * that all CPUs have seen the change before returning. 10534 */ 10535 dtrace_sync(); 10536 10537 if (probe->dtpr_ecb == NULL) { 10538 /* 10539 * That was the last ECB on the probe; clear the predicate 10540 * cache ID for the probe, disable it and sync one more time 10541 * to assure that we'll never hit it again. 10542 */ 10543 dtrace_provider_t *prov = probe->dtpr_provider; 10544 10545 ASSERT(ecb->dte_next == NULL); 10546 ASSERT(probe->dtpr_ecb_last == NULL); 10547 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 10548 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 10549 probe->dtpr_id, probe->dtpr_arg); 10550 dtrace_sync(); 10551 } else { 10552 /* 10553 * There is at least one ECB remaining on the probe. If there 10554 * is _exactly_ one, set the probe's predicate cache ID to be 10555 * the predicate cache ID of the remaining ECB. 
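 * (With two or more ECBs remaining, the cache must stay disabled:
 * dtpr_predcache can only soundly summarize a probe that is governed
 * by a single predicate.)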
10556 */ 10557 ASSERT(probe->dtpr_ecb_last != NULL); 10558 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 10559 10560 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 10561 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 10562 10563 ASSERT(probe->dtpr_ecb->dte_next == NULL); 10564 10565 if (p != NULL) 10566 probe->dtpr_predcache = p->dtp_cacheid; 10567 } 10568 10569 ecb->dte_next = NULL; 10570 } 10571} 10572 10573static void 10574dtrace_ecb_destroy(dtrace_ecb_t *ecb) 10575{ 10576 dtrace_state_t *state = ecb->dte_state; 10577 dtrace_vstate_t *vstate = &state->dts_vstate; 10578 dtrace_predicate_t *pred; 10579 dtrace_epid_t epid = ecb->dte_epid; 10580 10581 ASSERT(MUTEX_HELD(&dtrace_lock)); 10582 ASSERT(ecb->dte_next == NULL); 10583 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 10584 10585 if ((pred = ecb->dte_predicate) != NULL) 10586 dtrace_predicate_release(pred, vstate); 10587 10588 dtrace_ecb_action_remove(ecb); 10589 10590 ASSERT(state->dts_ecbs[epid - 1] == ecb); 10591 state->dts_ecbs[epid - 1] = NULL; 10592 10593 kmem_free(ecb, sizeof (dtrace_ecb_t)); 10594} 10595 10596static dtrace_ecb_t * 10597dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 10598 dtrace_enabling_t *enab) 10599{ 10600 dtrace_ecb_t *ecb; 10601 dtrace_predicate_t *pred; 10602 dtrace_actdesc_t *act; 10603 dtrace_provider_t *prov; 10604 dtrace_ecbdesc_t *desc = enab->dten_current; 10605 10606 ASSERT(MUTEX_HELD(&dtrace_lock)); 10607 ASSERT(state != NULL); 10608 10609 ecb = dtrace_ecb_add(state, probe); 10610 ecb->dte_uarg = desc->dted_uarg; 10611 10612 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 10613 dtrace_predicate_hold(pred); 10614 ecb->dte_predicate = pred; 10615 } 10616 10617 if (probe != NULL) { 10618 /* 10619 * If the provider shows more leg than the consumer is old 10620 * enough to see, we need to enable the appropriate implicit 10621 * predicate bits to prevent the ecb from activating at 10622 * revealing times. 10623 * 10624 * Providers specifying DTRACE_PRIV_USER at register time 10625 * are stating that they need the /proc-style privilege 10626 * model to be enforced, and this is what DTRACE_COND_OWNER 10627 * and DTRACE_COND_ZONEOWNER will then do at probe time. 10628 */ 10629 prov = probe->dtpr_provider; 10630 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 10631 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10632 ecb->dte_cond |= DTRACE_COND_OWNER; 10633 10634 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 10635 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10636 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 10637 10638 /* 10639 * If the provider shows us kernel innards and the user 10640 * is lacking sufficient privilege, enable the 10641 * DTRACE_COND_USERMODE implicit predicate. 10642 */ 10643 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 10644 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 10645 ecb->dte_cond |= DTRACE_COND_USERMODE; 10646 } 10647 10648 if (dtrace_ecb_create_cache != NULL) { 10649 /* 10650 * If we have a cached ecb, we'll use its action list instead 10651 * of creating our own (saving both time and space). 
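 * (This is sound because the cache is reset before each enabling
 * description is matched: every probe matched by the same description
 * wants an identical action list, so the list is built once and shared
 * via dta_refcnt.)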
10652 */ 10653 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 10654 dtrace_action_t *act = cached->dte_action; 10655 10656 if (act != NULL) { 10657 ASSERT(act->dta_refcnt > 0); 10658 act->dta_refcnt++; 10659 ecb->dte_action = act; 10660 ecb->dte_action_last = cached->dte_action_last; 10661 ecb->dte_needed = cached->dte_needed; 10662 ecb->dte_size = cached->dte_size; 10663 ecb->dte_alignment = cached->dte_alignment; 10664 } 10665 10666 return (ecb); 10667 } 10668 10669 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 10670 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 10671 dtrace_ecb_destroy(ecb); 10672 return (NULL); 10673 } 10674 } 10675 10676 dtrace_ecb_resize(ecb); 10677 10678 return (dtrace_ecb_create_cache = ecb); 10679} 10680 10681static int 10682dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 10683{ 10684 dtrace_ecb_t *ecb; 10685 dtrace_enabling_t *enab = arg; 10686 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 10687 10688 ASSERT(state != NULL); 10689 10690 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 10691 /* 10692 * This probe was created in a generation for which this 10693 * enabling has previously created ECBs; we don't want to 10694 * enable it again, so just kick out. 10695 */ 10696 return (DTRACE_MATCH_NEXT); 10697 } 10698 10699 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 10700 return (DTRACE_MATCH_DONE); 10701 10702 dtrace_ecb_enable(ecb); 10703 return (DTRACE_MATCH_NEXT); 10704} 10705 10706static dtrace_ecb_t * 10707dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 10708{ 10709 dtrace_ecb_t *ecb; 10710 10711 ASSERT(MUTEX_HELD(&dtrace_lock)); 10712 10713 if (id == 0 || id > state->dts_necbs) 10714 return (NULL); 10715 10716 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 10717 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 10718 10719 return (state->dts_ecbs[id - 1]); 10720} 10721 10722static dtrace_aggregation_t * 10723dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 10724{ 10725 dtrace_aggregation_t *agg; 10726 10727 ASSERT(MUTEX_HELD(&dtrace_lock)); 10728 10729 if (id == 0 || id > state->dts_naggregations) 10730 return (NULL); 10731 10732 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 10733 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 10734 agg->dtag_id == id); 10735 10736 return (state->dts_aggregations[id - 1]); 10737} 10738 10739/* 10740 * DTrace Buffer Functions 10741 * 10742 * The following functions manipulate DTrace buffers. Most of these functions 10743 * are called in the context of establishing or processing consumer state; 10744 * exceptions are explicitly noted. 10745 */ 10746 10747/* 10748 * Note: called from cross call context. This function switches the two 10749 * buffers on a given CPU. The atomicity of this operation is assured by 10750 * disabling interrupts while the actual switch takes place; the disabling of 10751 * interrupts serializes the execution with any execution of dtrace_probe() on 10752 * the same CPU. 
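 * (Schematically: dtb_tomax and dtb_xamot trade places; the offset,
 * drop and error counters latch into their dtb_xamot_* shadows and
 * reset; and dtb_interval records the time since the previous switch,
 * which consumers can use to gauge their switching rate.)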
10753 */ 10754static void 10755dtrace_buffer_switch(dtrace_buffer_t *buf) 10756{ 10757 caddr_t tomax = buf->dtb_tomax; 10758 caddr_t xamot = buf->dtb_xamot; 10759 dtrace_icookie_t cookie; 10760 hrtime_t now; 10761 10762 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10763 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 10764 10765 cookie = dtrace_interrupt_disable(); 10766 now = dtrace_gethrtime(); 10767 buf->dtb_tomax = xamot; 10768 buf->dtb_xamot = tomax; 10769 buf->dtb_xamot_drops = buf->dtb_drops; 10770 buf->dtb_xamot_offset = buf->dtb_offset; 10771 buf->dtb_xamot_errors = buf->dtb_errors; 10772 buf->dtb_xamot_flags = buf->dtb_flags; 10773 buf->dtb_offset = 0; 10774 buf->dtb_drops = 0; 10775 buf->dtb_errors = 0; 10776 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 10777 buf->dtb_interval = now - buf->dtb_switched; 10778 buf->dtb_switched = now; 10779 dtrace_interrupt_enable(cookie); 10780} 10781 10782/* 10783 * Note: called from cross call context. This function activates a buffer 10784 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 10785 * is guaranteed by the disabling of interrupts. 10786 */ 10787static void 10788dtrace_buffer_activate(dtrace_state_t *state) 10789{ 10790 dtrace_buffer_t *buf; 10791 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 10792 10793 buf = &state->dts_buffer[curcpu]; 10794 10795 if (buf->dtb_tomax != NULL) { 10796 /* 10797 * We might like to assert that the buffer is marked inactive, 10798 * but this isn't necessarily true: the buffer for the CPU 10799 * that processes the BEGIN probe has its buffer activated 10800 * manually. In this case, we take the (harmless) action of 10801 * re-clearing the INACTIVE bit. 10802 */ 10803 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 10804 } 10805 10806 dtrace_interrupt_enable(cookie); 10807} 10808 10809static int 10810dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 10811 processorid_t cpu) 10812{ 10813#if defined(sun) 10814 cpu_t *cp; 10815#endif 10816 dtrace_buffer_t *buf; 10817 10818#if defined(sun) 10819 ASSERT(MUTEX_HELD(&cpu_lock)); 10820 ASSERT(MUTEX_HELD(&dtrace_lock)); 10821 10822 if (size > dtrace_nonroot_maxsize && 10823 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 10824 return (EFBIG); 10825 10826 cp = cpu_list; 10827 10828 do { 10829 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10830 continue; 10831 10832 buf = &bufs[cp->cpu_id]; 10833 10834 /* 10835 * If there is already a buffer allocated for this CPU, it 10836 * is only possible that this is a DR event.
In this case, the buffer size must match our specified size. 10837 */ 10838 if (buf->dtb_tomax != NULL) { 10839 ASSERT(buf->dtb_size == size); 10840 continue; 10841 } 10842 10843 ASSERT(buf->dtb_xamot == NULL); 10844 10845 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10846 goto err; 10847 10848 buf->dtb_size = size; 10849 buf->dtb_flags = flags; 10850 buf->dtb_offset = 0; 10851 buf->dtb_drops = 0; 10852 10853 if (flags & DTRACEBUF_NOSWITCH) 10854 continue; 10855 10856 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10857 goto err; 10858 } while ((cp = cp->cpu_next) != cpu_list); 10859 10860 return (0); 10861 10862err: 10863 cp = cpu_list; 10864 10865 do { 10866 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10867 continue; 10868 10869 buf = &bufs[cp->cpu_id]; 10870 10871 if (buf->dtb_xamot != NULL) { 10872 ASSERT(buf->dtb_tomax != NULL); 10873 ASSERT(buf->dtb_size == size); 10874 kmem_free(buf->dtb_xamot, size); 10875 } 10876 10877 if (buf->dtb_tomax != NULL) { 10878 ASSERT(buf->dtb_size == size); 10879 kmem_free(buf->dtb_tomax, size); 10880 } 10881 10882 buf->dtb_tomax = NULL; 10883 buf->dtb_xamot = NULL; 10884 buf->dtb_size = 0; 10885 } while ((cp = cp->cpu_next) != cpu_list); 10886 10887 return (ENOMEM); 10888#else 10889 int i; 10890 10891#if defined(__amd64__) || defined(__mips__) || defined(__powerpc__) 10892 /* 10893 * FreeBSD isn't good at limiting the amount of memory we 10894 * ask to malloc, so let's place a limit here before trying 10895 * to do something that might well end in tears at bedtime. 10896 */ 10897 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 10898 return (ENOMEM); 10899#endif 10900 10901 ASSERT(MUTEX_HELD(&dtrace_lock)); 10902 CPU_FOREACH(i) { 10903 if (cpu != DTRACE_CPUALL && cpu != i) 10904 continue; 10905 10906 buf = &bufs[i]; 10907 10908 /* 10909 * If there is already a buffer allocated for this CPU, it 10910 * is only possible that this is a DR event. In this case, 10911 * the buffer size must match our specified size. 10912 */ 10913 if (buf->dtb_tomax != NULL) { 10914 ASSERT(buf->dtb_size == size); 10915 continue; 10916 } 10917 10918 ASSERT(buf->dtb_xamot == NULL); 10919 10920 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10921 goto err; 10922 10923 buf->dtb_size = size; 10924 buf->dtb_flags = flags; 10925 buf->dtb_offset = 0; 10926 buf->dtb_drops = 0; 10927 10928 if (flags & DTRACEBUF_NOSWITCH) 10929 continue; 10930 10931 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10932 goto err; 10933 } 10934 10935 return (0); 10936 10937err: 10938 /* 10939 * Error allocating memory, so free the buffers that were 10940 * allocated before the failed allocation. 10941 */ 10942 CPU_FOREACH(i) { 10943 if (cpu != DTRACE_CPUALL && cpu != i) 10944 continue; 10945 10946 buf = &bufs[i]; 10947 10948 if (buf->dtb_xamot != NULL) { 10949 ASSERT(buf->dtb_tomax != NULL); 10950 ASSERT(buf->dtb_size == size); 10951 kmem_free(buf->dtb_xamot, size); 10952 } 10953 10954 if (buf->dtb_tomax != NULL) { 10955 ASSERT(buf->dtb_size == size); 10956 kmem_free(buf->dtb_tomax, size); 10957 } 10958 10959 buf->dtb_tomax = NULL; 10960 buf->dtb_xamot = NULL; 10961 buf->dtb_size = 0; 10962 10963 } 10964 10965 return (ENOMEM); 10966#endif 10967} 10968 10969/* 10970 * Note: called from probe context. This function just increments the drop 10971 * count on a buffer. It has been made a function to allow for the 10972 * possibility of understanding the source of mysterious drop counts.
(A 10973 * problem for which one may be particularly disappointed that DTrace cannot 10974 * be used to understand DTrace.) 10975 */ 10976static void 10977dtrace_buffer_drop(dtrace_buffer_t *buf) 10978{ 10979 buf->dtb_drops++; 10980} 10981 10982/* 10983 * Note: called from probe context. This function is called to reserve space 10984 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 10985 * mstate. Returns the new offset in the buffer, or a negative value if an 10986 * error has occurred. 10987 */ 10988static intptr_t 10989dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 10990 dtrace_state_t *state, dtrace_mstate_t *mstate) 10991{ 10992 intptr_t offs = buf->dtb_offset, soffs; 10993 intptr_t woffs; 10994 caddr_t tomax; 10995 size_t total; 10996 10997 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 10998 return (-1); 10999 11000 if ((tomax = buf->dtb_tomax) == NULL) { 11001 dtrace_buffer_drop(buf); 11002 return (-1); 11003 } 11004 11005 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 11006 while (offs & (align - 1)) { 11007 /* 11008 * Assert that our alignment is off by a number which 11009 * is itself sizeof (uint32_t) aligned. 11010 */ 11011 ASSERT(!((align - (offs & (align - 1))) & 11012 (sizeof (uint32_t) - 1))); 11013 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 11014 offs += sizeof (uint32_t); 11015 } 11016 11017 if ((soffs = offs + needed) > buf->dtb_size) { 11018 dtrace_buffer_drop(buf); 11019 return (-1); 11020 } 11021 11022 if (mstate == NULL) 11023 return (offs); 11024 11025 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 11026 mstate->dtms_scratch_size = buf->dtb_size - soffs; 11027 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 11028 11029 return (offs); 11030 } 11031 11032 if (buf->dtb_flags & DTRACEBUF_FILL) { 11033 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 11034 (buf->dtb_flags & DTRACEBUF_FULL)) 11035 return (-1); 11036 goto out; 11037 } 11038 11039 total = needed + (offs & (align - 1)); 11040 11041 /* 11042 * For a ring buffer, life is quite a bit more complicated. Before 11043 * we can store any padding, we need to adjust our wrapping offset. 11044 * (If we've never before wrapped or we're not about to, no adjustment 11045 * is required.) 11046 */ 11047 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 11048 offs + total > buf->dtb_size) { 11049 woffs = buf->dtb_xamot_offset; 11050 11051 if (offs + total > buf->dtb_size) { 11052 /* 11053 * We can't fit in the end of the buffer. First, a 11054 * sanity check that we can fit in the buffer at all. 11055 */ 11056 if (total > buf->dtb_size) { 11057 dtrace_buffer_drop(buf); 11058 return (-1); 11059 } 11060 11061 /* 11062 * We're going to be storing at the top of the buffer, 11063 * so now we need to deal with the wrapped offset. We 11064 * only reset our wrapped offset to 0 if it is 11065 * currently greater than the current offset. If it 11066 * is less than the current offset, it is because a 11067 * previous allocation induced a wrap -- but the 11068 * allocation didn't subsequently take the space due 11069 * to an error or false predicate evaluation. In this 11070 * case, we'll just leave the wrapped offset alone: if 11071 * the wrapped offset hasn't been advanced far enough 11072 * for this allocation, it will be adjusted in the 11073 * lower loop. 
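 *
 * In other words (a sketch of the two sub-cases handled below): if the
 * wrapped offset is at or beyond the current offset, the records we are
 * about to store at the top of the buffer will overtake it, so it is
 * reset to 0; if it is below the current offset, a previous reservation
 * already induced the wrap, and the reclamation loop below will advance
 * it only if this allocation requires it.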
11074 */ 11075 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 11076 if (woffs >= offs) 11077 woffs = 0; 11078 } else { 11079 woffs = 0; 11080 } 11081 11082 /* 11083 * Now we know that we're going to be storing to the 11084 * top of the buffer and that there is room for us 11085 * there. We need to clear the buffer from the current 11086 * offset to the end (there may be old gunk there). 11087 */ 11088 while (offs < buf->dtb_size) 11089 tomax[offs++] = 0; 11090 11091 /* 11092 * We need to set our offset to zero. And because we 11093 * are wrapping, we need to set the bit indicating as 11094 * much. We can also adjust our needed space back 11095 * down to the space required by the ECB -- we know 11096 * that the top of the buffer is aligned. 11097 */ 11098 offs = 0; 11099 total = needed; 11100 buf->dtb_flags |= DTRACEBUF_WRAPPED; 11101 } else { 11102 /* 11103 * There is room for us in the buffer, so we simply 11104 * need to check the wrapped offset. 11105 */ 11106 if (woffs < offs) { 11107 /* 11108 * The wrapped offset is less than the offset. 11109 * This can happen if we allocated buffer space 11110 * that induced a wrap, but then we didn't 11111 * subsequently take the space due to an error 11112 * or false predicate evaluation. This is 11113 * okay; we know that _this_ allocation isn't 11114 * going to induce a wrap. We still can't 11115 * reset the wrapped offset to be zero, 11116 * however: the space may have been trashed in 11117 * the previous failed probe attempt. But at 11118 * least the wrapped offset doesn't need to 11119 * be adjusted at all... 11120 */ 11121 goto out; 11122 } 11123 } 11124 11125 while (offs + total > woffs) { 11126 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 11127 size_t size; 11128 11129 if (epid == DTRACE_EPIDNONE) { 11130 size = sizeof (uint32_t); 11131 } else { 11132 ASSERT3U(epid, <=, state->dts_necbs); 11133 ASSERT(state->dts_ecbs[epid - 1] != NULL); 11134 11135 size = state->dts_ecbs[epid - 1]->dte_size; 11136 } 11137 11138 ASSERT(woffs + size <= buf->dtb_size); 11139 ASSERT(size != 0); 11140 11141 if (woffs + size == buf->dtb_size) { 11142 /* 11143 * We've reached the end of the buffer; we want 11144 * to set the wrapped offset to 0 and break 11145 * out. However, if the offs is 0, then we're 11146 * in a strange edge-condition: the amount of 11147 * space that we want to reserve plus the size 11148 * of the record that we're overwriting is 11149 * greater than the size of the buffer. This 11150 * is problematic because if we reserve the 11151 * space but subsequently don't consume it (due 11152 * to a failed predicate or error) the wrapped 11153 * offset will be 0 -- yet the EPID at offset 0 11154 * will not be committed. This situation is 11155 * relatively easy to deal with: if we're in 11156 * this case, the buffer is indistinguishable 11157 * from one that hasn't wrapped; we need only 11158 * finish the job by clearing the wrapped bit, 11159 * explicitly setting the offset to be 0, and 11160 * zero'ing out the old data in the buffer. 11161 */ 11162 if (offs == 0) { 11163 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 11164 buf->dtb_offset = 0; 11165 woffs = total; 11166 11167 while (woffs < buf->dtb_size) 11168 tomax[woffs++] = 0; 11169 } 11170 11171 woffs = 0; 11172 break; 11173 } 11174 11175 woffs += size; 11176 } 11177 11178 /* 11179 * We have a wrapped offset. It may be that the wrapped offset 11180 * has become zero -- that's okay. 
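 *
 * (Note that for ring buffers dtb_xamot_offset is repurposed to hold
 * this wrapped offset; since ring buffers never switch, the field is
 * otherwise unused.)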
11181 */ 11182 buf->dtb_xamot_offset = woffs; 11183 } 11184 11185out: 11186 /* 11187 * Now we can plow the buffer with any necessary padding. 11188 */ 11189 while (offs & (align - 1)) { 11190 /* 11191 * Assert that our alignment is off by a number which 11192 * is itself sizeof (uint32_t) aligned. 11193 */ 11194 ASSERT(!((align - (offs & (align - 1))) & 11195 (sizeof (uint32_t) - 1))); 11196 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 11197 offs += sizeof (uint32_t); 11198 } 11199 11200 if (buf->dtb_flags & DTRACEBUF_FILL) { 11201 if (offs + needed > buf->dtb_size - state->dts_reserve) { 11202 buf->dtb_flags |= DTRACEBUF_FULL; 11203 return (-1); 11204 } 11205 } 11206 11207 if (mstate == NULL) 11208 return (offs); 11209 11210 /* 11211 * For ring buffers and fill buffers, the scratch space is always 11212 * the inactive buffer. 11213 */ 11214 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 11215 mstate->dtms_scratch_size = buf->dtb_size; 11216 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 11217 11218 return (offs); 11219} 11220 11221static void 11222dtrace_buffer_polish(dtrace_buffer_t *buf) 11223{ 11224 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 11225 ASSERT(MUTEX_HELD(&dtrace_lock)); 11226 11227 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 11228 return; 11229 11230 /* 11231 * We need to polish the ring buffer. There are three cases: 11232 * 11233 * - The first (and presumably most common) is that there is no gap 11234 * between the buffer offset and the wrapped offset. In this case, 11235 * there is nothing in the buffer that isn't valid data; we can 11236 * mark the buffer as polished and return. 11237 * 11238 * - The second (less common than the first but still more common 11239 * than the third) is that there is a gap between the buffer offset 11240 * and the wrapped offset, and the wrapped offset is larger than the 11241 * buffer offset. This can happen because of an alignment issue, or 11242 * can happen because of a call to dtrace_buffer_reserve() that 11243 * didn't subsequently consume the buffer space. In this case, 11244 * we need to zero the data from the buffer offset to the wrapped 11245 * offset. 11246 * 11247 * - The third (and least common) is that there is a gap between the 11248 * buffer offset and the wrapped offset, but the wrapped offset is 11249 * _less_ than the buffer offset. This can only happen because a 11250 * call to dtrace_buffer_reserve() induced a wrap, but the space 11251 * was not subsequently consumed. In this case, we need to zero the 11252 * space from the offset to the end of the buffer _and_ from the 11253 * top of the buffer to the wrapped offset. 11254 */ 11255 if (buf->dtb_offset < buf->dtb_xamot_offset) { 11256 bzero(buf->dtb_tomax + buf->dtb_offset, 11257 buf->dtb_xamot_offset - buf->dtb_offset); 11258 } 11259 11260 if (buf->dtb_offset > buf->dtb_xamot_offset) { 11261 bzero(buf->dtb_tomax + buf->dtb_offset, 11262 buf->dtb_size - buf->dtb_offset); 11263 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 11264 } 11265} 11266 11267/* 11268 * This routine determines if data generated at the specified time has likely 11269 * been entirely consumed at user-level. This routine is called to determine 11270 * if an ECB on a defunct probe (but for an active enabling) can be safely 11271 * disabled and destroyed. 
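 *
 * Concretely (a sketch of the checks below): a CPU's data from time
 * 'when' is considered consumed only if the buffer has been switched
 * twice since then -- that is, if the switch that began the current
 * interval (dtb_switched - dtb_interval) is no earlier than 'when'.
 * Ring buffers are never considered consumed, as they are read in place.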
11272 */ 11273static int 11274dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when) 11275{ 11276 int i; 11277 11278 for (i = 0; i < NCPU; i++) { 11279 dtrace_buffer_t *buf = &bufs[i]; 11280 11281 if (buf->dtb_size == 0) 11282 continue; 11283 11284 if (buf->dtb_flags & DTRACEBUF_RING) 11285 return (0); 11286 11287 if (!buf->dtb_switched && buf->dtb_offset != 0) 11288 return (0); 11289 11290 if (buf->dtb_switched - buf->dtb_interval < when) 11291 return (0); 11292 } 11293 11294 return (1); 11295} 11296 11297static void 11298dtrace_buffer_free(dtrace_buffer_t *bufs) 11299{ 11300 int i; 11301 11302 for (i = 0; i < NCPU; i++) { 11303 dtrace_buffer_t *buf = &bufs[i]; 11304 11305 if (buf->dtb_tomax == NULL) { 11306 ASSERT(buf->dtb_xamot == NULL); 11307 ASSERT(buf->dtb_size == 0); 11308 continue; 11309 } 11310 11311 if (buf->dtb_xamot != NULL) { 11312 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11313 kmem_free(buf->dtb_xamot, buf->dtb_size); 11314 } 11315 11316 kmem_free(buf->dtb_tomax, buf->dtb_size); 11317 buf->dtb_size = 0; 11318 buf->dtb_tomax = NULL; 11319 buf->dtb_xamot = NULL; 11320 } 11321} 11322 11323/* 11324 * DTrace Enabling Functions 11325 */ 11326static dtrace_enabling_t * 11327dtrace_enabling_create(dtrace_vstate_t *vstate) 11328{ 11329 dtrace_enabling_t *enab; 11330 11331 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 11332 enab->dten_vstate = vstate; 11333 11334 return (enab); 11335} 11336 11337static void 11338dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 11339{ 11340 dtrace_ecbdesc_t **ndesc; 11341 size_t osize, nsize; 11342 11343 /* 11344 * We can't add to enablings after we've enabled them, or after we've 11345 * retained them. 11346 */ 11347 ASSERT(enab->dten_probegen == 0); 11348 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11349 11350 if (enab->dten_ndesc < enab->dten_maxdesc) { 11351 enab->dten_desc[enab->dten_ndesc++] = ecb; 11352 return; 11353 } 11354 11355 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11356 11357 if (enab->dten_maxdesc == 0) { 11358 enab->dten_maxdesc = 1; 11359 } else { 11360 enab->dten_maxdesc <<= 1; 11361 } 11362 11363 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 11364 11365 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11366 ndesc = kmem_zalloc(nsize, KM_SLEEP); 11367 bcopy(enab->dten_desc, ndesc, osize); 11368 if (enab->dten_desc != NULL) 11369 kmem_free(enab->dten_desc, osize); 11370 11371 enab->dten_desc = ndesc; 11372 enab->dten_desc[enab->dten_ndesc++] = ecb; 11373} 11374 11375static void 11376dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 11377 dtrace_probedesc_t *pd) 11378{ 11379 dtrace_ecbdesc_t *new; 11380 dtrace_predicate_t *pred; 11381 dtrace_actdesc_t *act; 11382 11383 /* 11384 * We're going to create a new ECB description that matches the 11385 * specified ECB in every way, but has the specified probe description. 
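 *
 * (The predicate and action descriptions are not copied: the new
 * description takes a hold on each and shares them with the original.)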
11386 */ 11387 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11388 11389 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 11390 dtrace_predicate_hold(pred); 11391 11392 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 11393 dtrace_actdesc_hold(act); 11394 11395 new->dted_action = ecb->dted_action; 11396 new->dted_pred = ecb->dted_pred; 11397 new->dted_probe = *pd; 11398 new->dted_uarg = ecb->dted_uarg; 11399 11400 dtrace_enabling_add(enab, new); 11401} 11402 11403static void 11404dtrace_enabling_dump(dtrace_enabling_t *enab) 11405{ 11406 int i; 11407 11408 for (i = 0; i < enab->dten_ndesc; i++) { 11409 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 11410 11411 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 11412 desc->dtpd_provider, desc->dtpd_mod, 11413 desc->dtpd_func, desc->dtpd_name); 11414 } 11415} 11416 11417static void 11418dtrace_enabling_destroy(dtrace_enabling_t *enab) 11419{ 11420 int i; 11421 dtrace_ecbdesc_t *ep; 11422 dtrace_vstate_t *vstate = enab->dten_vstate; 11423 11424 ASSERT(MUTEX_HELD(&dtrace_lock)); 11425 11426 for (i = 0; i < enab->dten_ndesc; i++) { 11427 dtrace_actdesc_t *act, *next; 11428 dtrace_predicate_t *pred; 11429 11430 ep = enab->dten_desc[i]; 11431 11432 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 11433 dtrace_predicate_release(pred, vstate); 11434 11435 for (act = ep->dted_action; act != NULL; act = next) { 11436 next = act->dtad_next; 11437 dtrace_actdesc_release(act, vstate); 11438 } 11439 11440 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11441 } 11442 11443 if (enab->dten_desc != NULL) 11444 kmem_free(enab->dten_desc, 11445 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 11446 11447 /* 11448 * If this was a retained enabling, decrement the dts_nretained count 11449 * and take it off of the dtrace_retained list. 11450 */ 11451 if (enab->dten_prev != NULL || enab->dten_next != NULL || 11452 dtrace_retained == enab) { 11453 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11454 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 11455 enab->dten_vstate->dtvs_state->dts_nretained--; 11456 } 11457 11458 if (enab->dten_prev == NULL) { 11459 if (dtrace_retained == enab) { 11460 dtrace_retained = enab->dten_next; 11461 11462 if (dtrace_retained != NULL) 11463 dtrace_retained->dten_prev = NULL; 11464 } 11465 } else { 11466 ASSERT(enab != dtrace_retained); 11467 ASSERT(dtrace_retained != NULL); 11468 enab->dten_prev->dten_next = enab->dten_next; 11469 } 11470 11471 if (enab->dten_next != NULL) { 11472 ASSERT(dtrace_retained != NULL); 11473 enab->dten_next->dten_prev = enab->dten_prev; 11474 } 11475 11476 kmem_free(enab, sizeof (dtrace_enabling_t)); 11477} 11478 11479static int 11480dtrace_enabling_retain(dtrace_enabling_t *enab) 11481{ 11482 dtrace_state_t *state; 11483 11484 ASSERT(MUTEX_HELD(&dtrace_lock)); 11485 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11486 ASSERT(enab->dten_vstate != NULL); 11487 11488 state = enab->dten_vstate->dtvs_state; 11489 ASSERT(state != NULL); 11490 11491 /* 11492 * We only allow each state to retain dtrace_retain_max enablings. 
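 *
 * (Retained enablings live on the global dtrace_retained list; the code
 * below pushes new entries at the head and bumps the per-state
 * dts_nretained count, which dtrace_enabling_destroy() later decrements.)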
11493 */ 11494 if (state->dts_nretained >= dtrace_retain_max) 11495 return (ENOSPC); 11496 11497 state->dts_nretained++; 11498 11499 if (dtrace_retained == NULL) { 11500 dtrace_retained = enab; 11501 return (0); 11502 } 11503 11504 enab->dten_next = dtrace_retained; 11505 dtrace_retained->dten_prev = enab; 11506 dtrace_retained = enab; 11507 11508 return (0); 11509} 11510 11511static int 11512dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 11513 dtrace_probedesc_t *create) 11514{ 11515 dtrace_enabling_t *new, *enab; 11516 int found = 0, err = ENOENT; 11517 11518 ASSERT(MUTEX_HELD(&dtrace_lock)); 11519 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 11520 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 11521 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 11522 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 11523 11524 new = dtrace_enabling_create(&state->dts_vstate); 11525 11526 /* 11527 * Iterate over all retained enablings, looking for enablings that 11528 * match the specified state. 11529 */ 11530 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11531 int i; 11532 11533 /* 11534 * dtvs_state can only be NULL for helper enablings -- and 11535 * helper enablings can't be retained. 11536 */ 11537 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11538 11539 if (enab->dten_vstate->dtvs_state != state) 11540 continue; 11541 11542 /* 11543 * Now iterate over each probe description; we're looking for 11544 * an exact match to the specified probe description. 11545 */ 11546 for (i = 0; i < enab->dten_ndesc; i++) { 11547 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11548 dtrace_probedesc_t *pd = &ep->dted_probe; 11549 11550 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 11551 continue; 11552 11553 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 11554 continue; 11555 11556 if (strcmp(pd->dtpd_func, match->dtpd_func)) 11557 continue; 11558 11559 if (strcmp(pd->dtpd_name, match->dtpd_name)) 11560 continue; 11561 11562 /* 11563 * We have a winning probe! Add it to our growing 11564 * enabling. 11565 */ 11566 found = 1; 11567 dtrace_enabling_addlike(new, ep, create); 11568 } 11569 } 11570 11571 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 11572 dtrace_enabling_destroy(new); 11573 return (err); 11574 } 11575 11576 return (0); 11577} 11578 11579static void 11580dtrace_enabling_retract(dtrace_state_t *state) 11581{ 11582 dtrace_enabling_t *enab, *next; 11583 11584 ASSERT(MUTEX_HELD(&dtrace_lock)); 11585 11586 /* 11587 * Iterate over all retained enablings, destroying the enablings 11588 * retained for the specified state. 11589 */ 11590 for (enab = dtrace_retained; enab != NULL; enab = next) { 11591 next = enab->dten_next; 11592 11593 /* 11594 * dtvs_state can only be NULL for helper enablings -- and 11595 * helper enablings can't be retained.
11596 */ 11597 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11598 11599 if (enab->dten_vstate->dtvs_state == state) { 11600 ASSERT(state->dts_nretained > 0); 11601 dtrace_enabling_destroy(enab); 11602 } 11603 } 11604 11605 ASSERT(state->dts_nretained == 0); 11606} 11607 11608static int 11609dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 11610{ 11611 int i = 0; 11612 int matched = 0; 11613 11614 ASSERT(MUTEX_HELD(&cpu_lock)); 11615 ASSERT(MUTEX_HELD(&dtrace_lock)); 11616 11617 for (i = 0; i < enab->dten_ndesc; i++) { 11618 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11619 11620 enab->dten_current = ep; 11621 enab->dten_error = 0; 11622 11623 matched += dtrace_probe_enable(&ep->dted_probe, enab); 11624 11625 if (enab->dten_error != 0) { 11626 /* 11627 * If we get an error half-way through enabling the 11628 * probes, we kick out -- perhaps with some number of 11629 * them enabled. Leaving enabled probes enabled may 11630 * be slightly confusing for user-level, but we expect 11631 * that no one will attempt to actually drive on in 11632 * the face of such errors. If this is an anonymous 11633 * enabling (indicated with a NULL nmatched pointer), 11634 * we cmn_err() a message. We aren't expecting to 11635 * get such an error -- to the extent that it can exist 11636 * at all, it would be a result of corrupted DOF in the 11637 * driver properties. 11638 */ 11639 if (nmatched == NULL) { 11640 cmn_err(CE_WARN, "dtrace_enabling_match() " 11641 "error on %p: %d", (void *)ep, 11642 enab->dten_error); 11643 } 11644 11645 return (enab->dten_error); 11646 } 11647 } 11648 11649 enab->dten_probegen = dtrace_probegen; 11650 if (nmatched != NULL) 11651 *nmatched = matched; 11652 11653 return (0); 11654} 11655 11656static void 11657dtrace_enabling_matchall(void) 11658{ 11659 dtrace_enabling_t *enab; 11660 11661 mutex_enter(&cpu_lock); 11662 mutex_enter(&dtrace_lock); 11663 11664 /* 11665 * Iterate over all retained enablings to see if any probes match 11666 * against them. We only perform this operation on enablings for which 11667 * we have sufficient permissions by virtue of being in the global zone 11668 * or in the same zone as the DTrace client. Because we can be called 11669 * after dtrace_detach() has been called, we cannot assert that there 11670 * are retained enablings. We can safely load from dtrace_retained, 11671 * however: the taskq_destroy() at the end of dtrace_detach() will 11672 * block pending our completion. 11673 */ 11674 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11675#if defined(sun) 11676 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; 11677 11678 if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr)) 11679#endif 11680 (void) dtrace_enabling_match(enab, NULL); 11681 } 11682 11683 mutex_exit(&dtrace_lock); 11684 mutex_exit(&cpu_lock); 11685} 11686 11687/* 11688 * If an enabling is to be enabled without having matched probes (that is, if 11689 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 11690 * enabling must be _primed_ by creating an ECB for every ECB description. 11691 * This must be done to assure that we know the number of speculations, the 11692 * number of aggregations, the minimum buffer size needed, etc. before we 11693 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 11694 * enabling any probes, we create ECBs for every ECB description, but with a 11695 * NULL probe -- which is exactly what this function does.
11696 */ 11697static void 11698dtrace_enabling_prime(dtrace_state_t *state) 11699{ 11700 dtrace_enabling_t *enab; 11701 int i; 11702 11703 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11704 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11705 11706 if (enab->dten_vstate->dtvs_state != state) 11707 continue; 11708 11709 /* 11710 * We don't want to prime an enabling more than once, lest 11711 * we allow a malicious user to induce resource exhaustion. 11712 * (The ECBs that result from priming an enabling aren't 11713 * leaked -- but they also aren't deallocated until the 11714 * consumer state is destroyed.) 11715 */ 11716 if (enab->dten_primed) 11717 continue; 11718 11719 for (i = 0; i < enab->dten_ndesc; i++) { 11720 enab->dten_current = enab->dten_desc[i]; 11721 (void) dtrace_probe_enable(NULL, enab); 11722 } 11723 11724 enab->dten_primed = 1; 11725 } 11726} 11727 11728/* 11729 * Called to indicate that probes should be provided due to retained 11730 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 11731 * must take an initial lap through the enabling calling the dtps_provide() 11732 * entry point explicitly to allow for autocreated probes. 11733 */ 11734static void 11735dtrace_enabling_provide(dtrace_provider_t *prv) 11736{ 11737 int i, all = 0; 11738 dtrace_probedesc_t desc; 11739 11740 ASSERT(MUTEX_HELD(&dtrace_lock)); 11741 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 11742 11743 if (prv == NULL) { 11744 all = 1; 11745 prv = dtrace_provider; 11746 } 11747 11748 do { 11749 dtrace_enabling_t *enab = dtrace_retained; 11750 void *parg = prv->dtpv_arg; 11751 11752 for (; enab != NULL; enab = enab->dten_next) { 11753 for (i = 0; i < enab->dten_ndesc; i++) { 11754 desc = enab->dten_desc[i]->dted_probe; 11755 mutex_exit(&dtrace_lock); 11756 prv->dtpv_pops.dtps_provide(parg, &desc); 11757 mutex_enter(&dtrace_lock); 11758 } 11759 } 11760 } while (all && (prv = prv->dtpv_next) != NULL); 11761 11762 mutex_exit(&dtrace_lock); 11763 dtrace_probe_provide(NULL, all ? NULL : prv); 11764 mutex_enter(&dtrace_lock); 11765} 11766 11767/* 11768 * Called to reap ECBs that are attached to probes from defunct providers. 11769 */ 11770static void 11771dtrace_enabling_reap(void) 11772{ 11773 dtrace_provider_t *prov; 11774 dtrace_probe_t *probe; 11775 dtrace_ecb_t *ecb; 11776 hrtime_t when; 11777 int i; 11778 11779 mutex_enter(&cpu_lock); 11780 mutex_enter(&dtrace_lock); 11781 11782 for (i = 0; i < dtrace_nprobes; i++) { 11783 if ((probe = dtrace_probes[i]) == NULL) 11784 continue; 11785 11786 if (probe->dtpr_ecb == NULL) 11787 continue; 11788 11789 prov = probe->dtpr_provider; 11790 11791 if ((when = prov->dtpv_defunct) == 0) 11792 continue; 11793 11794 /* 11795 * We have ECBs on a defunct provider: we want to reap these 11796 * ECBs to allow the provider to unregister. The destruction 11797 * of these ECBs must be done carefully: if we destroy the ECB 11798 * and the consumer later wishes to consume an EPID that 11799 * corresponds to the destroyed ECB (and if the EPID metadata 11800 * has not been previously consumed), the consumer will abort 11801 * processing on the unknown EPID. To reduce (but not, sadly, 11802 * eliminate) the possibility of this, we will only destroy an 11803 * ECB for a defunct provider if, for the state that 11804 * corresponds to the ECB: 11805 * 11806 * (a) There is no speculative tracing (which can effectively 11807 * cache an EPID for an arbitrary amount of time). 
11808 * 11809 * (b) The principal buffers have been switched twice since the 11810 * provider became defunct. 11811 * 11812 * (c) The aggregation buffers are of zero size or have been 11813 * switched twice since the provider became defunct. 11814 * 11815 * We use dts_speculates to determine (a) and call a function 11816 * (dtrace_buffer_consumed()) to determine (b) and (c). Note 11817 * that as soon as we've been unable to destroy one of the ECBs 11818 * associated with the probe, we quit trying -- reaping is only 11819 * fruitful in as much as we can destroy all ECBs associated 11820 * with the defunct provider's probes. 11821 */ 11822 while ((ecb = probe->dtpr_ecb) != NULL) { 11823 dtrace_state_t *state = ecb->dte_state; 11824 dtrace_buffer_t *buf = state->dts_buffer; 11825 dtrace_buffer_t *aggbuf = state->dts_aggbuffer; 11826 11827 if (state->dts_speculates) 11828 break; 11829 11830 if (!dtrace_buffer_consumed(buf, when)) 11831 break; 11832 11833 if (!dtrace_buffer_consumed(aggbuf, when)) 11834 break; 11835 11836 dtrace_ecb_disable(ecb); 11837 ASSERT(probe->dtpr_ecb != ecb); 11838 dtrace_ecb_destroy(ecb); 11839 } 11840 } 11841 11842 mutex_exit(&dtrace_lock); 11843 mutex_exit(&cpu_lock); 11844} 11845 11846/* 11847 * DTrace DOF Functions 11848 */ 11849/*ARGSUSED*/ 11850static void 11851dtrace_dof_error(dof_hdr_t *dof, const char *str) 11852{ 11853 if (dtrace_err_verbose) 11854 cmn_err(CE_WARN, "failed to process DOF: %s", str); 11855 11856#ifdef DTRACE_ERRDEBUG 11857 dtrace_errdebug(str); 11858#endif 11859} 11860 11861/* 11862 * Create DOF out of a currently enabled state. Right now, we only create 11863 * DOF containing the run-time options -- but this could be expanded to create 11864 * complete DOF representing the enabled state. 11865 */ 11866static dof_hdr_t * 11867dtrace_dof_create(dtrace_state_t *state) 11868{ 11869 dof_hdr_t *dof; 11870 dof_sec_t *sec; 11871 dof_optdesc_t *opt; 11872 int i, len = sizeof (dof_hdr_t) + 11873 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 11874 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11875 11876 ASSERT(MUTEX_HELD(&dtrace_lock)); 11877 11878 dof = kmem_zalloc(len, KM_SLEEP); 11879 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 11880 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 11881 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 11882 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 11883 11884 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 11885 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 11886 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 11887 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 11888 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 11889 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 11890 11891 dof->dofh_flags = 0; 11892 dof->dofh_hdrsize = sizeof (dof_hdr_t); 11893 dof->dofh_secsize = sizeof (dof_sec_t); 11894 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 11895 dof->dofh_secoff = sizeof (dof_hdr_t); 11896 dof->dofh_loadsz = len; 11897 dof->dofh_filesz = len; 11898 dof->dofh_pad = 0; 11899 11900 /* 11901 * Fill in the option section header... 
11902 */ 11903 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 11904 sec->dofs_type = DOF_SECT_OPTDESC; 11905 sec->dofs_align = sizeof (uint64_t); 11906 sec->dofs_flags = DOF_SECF_LOAD; 11907 sec->dofs_entsize = sizeof (dof_optdesc_t); 11908 11909 opt = (dof_optdesc_t *)((uintptr_t)sec + 11910 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 11911 11912 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 11913 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11914 11915 for (i = 0; i < DTRACEOPT_MAX; i++) { 11916 opt[i].dofo_option = i; 11917 opt[i].dofo_strtab = DOF_SECIDX_NONE; 11918 opt[i].dofo_value = state->dts_options[i]; 11919 } 11920 11921 return (dof); 11922} 11923 11924static dof_hdr_t * 11925dtrace_dof_copyin(uintptr_t uarg, int *errp) 11926{ 11927 dof_hdr_t hdr, *dof; 11928 11929 ASSERT(!MUTEX_HELD(&dtrace_lock)); 11930 11931 /* 11932 * First, we're going to copyin() the sizeof (dof_hdr_t). 11933 */ 11934 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 11935 dtrace_dof_error(NULL, "failed to copyin DOF header"); 11936 *errp = EFAULT; 11937 return (NULL); 11938 } 11939 11940 /* 11941 * Now we'll allocate the entire DOF and copy it in -- provided 11942 * that the length isn't outrageous. (After the copy we re-check the load size against the header we validated: the user could have changed it between the two copyin()s.) 11943 */ 11944 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 11945 dtrace_dof_error(&hdr, "load size exceeds maximum"); 11946 *errp = E2BIG; 11947 return (NULL); 11948 } 11949 11950 if (hdr.dofh_loadsz < sizeof (hdr)) { 11951 dtrace_dof_error(&hdr, "invalid load size"); 11952 *errp = EINVAL; 11953 return (NULL); 11954 } 11955 11956 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 11957 11958 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 || dof->dofh_loadsz != hdr.dofh_loadsz) { 11959 kmem_free(dof, hdr.dofh_loadsz); 11960 *errp = EFAULT; 11961 return (NULL); 11962 } 11963 11964 return (dof); 11965} 11966 11967#if !defined(sun) 11968static __inline uchar_t 11969dtrace_dof_char(char c) { 11970 switch (c) { 11971 case '0': 11972 case '1': 11973 case '2': 11974 case '3': 11975 case '4': 11976 case '5': 11977 case '6': 11978 case '7': 11979 case '8': 11980 case '9': 11981 return (c - '0'); 11982 case 'A': 11983 case 'B': 11984 case 'C': 11985 case 'D': 11986 case 'E': 11987 case 'F': 11988 return (c - 'A' + 10); 11989 case 'a': 11990 case 'b': 11991 case 'c': 11992 case 'd': 11993 case 'e': 11994 case 'f': 11995 return (c - 'a' + 10); 11996 } 11997 /* Should not reach here. */ 11998 return (0); 11999} 12000#endif 12001 12002static dof_hdr_t * 12003dtrace_dof_property(const char *name) 12004{ 12005 uchar_t *buf; 12006 uint64_t loadsz; 12007 unsigned int len, i; 12008 dof_hdr_t *dof; 12009 12010#if defined(sun) 12011 /* 12012 * Unfortunately, arrays of values in .conf files are always (and 12013 * only) interpreted to be integer arrays. We must read our DOF 12014 * as an integer array, and then squeeze it into a byte array.
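 *
 * (The squeeze below is safe to perform in place: byte i is read from
 * int i, so for i > 0 the source always lies ahead of the bytes already
 * written.)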
12015 */ 12016 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 12017 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 12018 return (NULL); 12019 12020 for (i = 0; i < len; i++) 12021 buf[i] = (uchar_t)(((int *)buf)[i]); 12022 12023 if (len < sizeof (dof_hdr_t)) { 12024 ddi_prop_free(buf); 12025 dtrace_dof_error(NULL, "truncated header"); 12026 return (NULL); 12027 } 12028 12029 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 12030 ddi_prop_free(buf); 12031 dtrace_dof_error(NULL, "truncated DOF"); 12032 return (NULL); 12033 } 12034 12035 if (loadsz >= dtrace_dof_maxsize) { 12036 ddi_prop_free(buf); 12037 dtrace_dof_error(NULL, "oversized DOF"); 12038 return (NULL); 12039 } 12040 12041 dof = kmem_alloc(loadsz, KM_SLEEP); 12042 bcopy(buf, dof, loadsz); 12043 ddi_prop_free(buf); 12044#else 12045 char *p; 12046 char *p_env; 12047 12048 if ((p_env = getenv(name)) == NULL) 12049 return (NULL); 12050 12051 len = strlen(p_env) / 2; 12052 12053 buf = kmem_alloc(len, KM_SLEEP); 12054 12055 dof = (dof_hdr_t *) buf; 12056 12057 p = p_env; 12058 12059 for (i = 0; i < len; i++) { 12060 buf[i] = (dtrace_dof_char(p[0]) << 4) | 12061 dtrace_dof_char(p[1]); 12062 p += 2; 12063 } 12064 12065 freeenv(p_env); 12066 12067 if (len < sizeof (dof_hdr_t)) { 12068 kmem_free(buf, 0); 12069 dtrace_dof_error(NULL, "truncated header"); 12070 return (NULL); 12071 } 12072 12073 if (len < (loadsz = dof->dofh_loadsz)) { 12074 kmem_free(buf, 0); 12075 dtrace_dof_error(NULL, "truncated DOF"); 12076 return (NULL); 12077 } 12078 12079 if (loadsz >= dtrace_dof_maxsize) { 12080 kmem_free(buf, 0); 12081 dtrace_dof_error(NULL, "oversized DOF"); 12082 return (NULL); 12083 } 12084#endif 12085 12086 return (dof); 12087} 12088 12089static void 12090dtrace_dof_destroy(dof_hdr_t *dof) 12091{ 12092 kmem_free(dof, dof->dofh_loadsz); 12093} 12094 12095/* 12096 * Return the dof_sec_t pointer corresponding to a given section index. If the 12097 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 12098 * a type other than DOF_SECT_NONE is specified, the header is checked against 12099 * this type and NULL is returned if the types do not match. 
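 *
 * For example (as used by dtrace_dof_probedesc() below to resolve the
 * string table that a probe section references):
 *
 *	strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);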
12100 */ 12101static dof_sec_t * 12102dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 12103{ 12104 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 12105 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 12106 12107 if (i >= dof->dofh_secnum) { 12108 dtrace_dof_error(dof, "referenced section index is invalid"); 12109 return (NULL); 12110 } 12111 12112 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 12113 dtrace_dof_error(dof, "referenced section is not loadable"); 12114 return (NULL); 12115 } 12116 12117 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 12118 dtrace_dof_error(dof, "referenced section is the wrong type"); 12119 return (NULL); 12120 } 12121 12122 return (sec); 12123} 12124 12125static dtrace_probedesc_t * 12126dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 12127{ 12128 dof_probedesc_t *probe; 12129 dof_sec_t *strtab; 12130 uintptr_t daddr = (uintptr_t)dof; 12131 uintptr_t str; 12132 size_t size; 12133 12134 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 12135 dtrace_dof_error(dof, "invalid probe section"); 12136 return (NULL); 12137 } 12138 12139 if (sec->dofs_align != sizeof (dof_secidx_t)) { 12140 dtrace_dof_error(dof, "bad alignment in probe description"); 12141 return (NULL); 12142 } 12143 12144 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 12145 dtrace_dof_error(dof, "truncated probe description"); 12146 return (NULL); 12147 } 12148 12149 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 12150 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 12151 12152 if (strtab == NULL) 12153 return (NULL); 12154 12155 str = daddr + strtab->dofs_offset; 12156 size = strtab->dofs_size; 12157 12158 if (probe->dofp_provider >= strtab->dofs_size) { 12159 dtrace_dof_error(dof, "corrupt probe provider"); 12160 return (NULL); 12161 } 12162 12163 (void) strncpy(desc->dtpd_provider, 12164 (char *)(str + probe->dofp_provider), 12165 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 12166 12167 if (probe->dofp_mod >= strtab->dofs_size) { 12168 dtrace_dof_error(dof, "corrupt probe module"); 12169 return (NULL); 12170 } 12171 12172 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 12173 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 12174 12175 if (probe->dofp_func >= strtab->dofs_size) { 12176 dtrace_dof_error(dof, "corrupt probe function"); 12177 return (NULL); 12178 } 12179 12180 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 12181 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 12182 12183 if (probe->dofp_name >= strtab->dofs_size) { 12184 dtrace_dof_error(dof, "corrupt probe name"); 12185 return (NULL); 12186 } 12187 12188 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 12189 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 12190 12191 return (desc); 12192} 12193 12194static dtrace_difo_t * 12195dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12196 cred_t *cr) 12197{ 12198 dtrace_difo_t *dp; 12199 size_t ttl = 0; 12200 dof_difohdr_t *dofd; 12201 uintptr_t daddr = (uintptr_t)dof; 12202 size_t max = dtrace_difo_maxsize; 12203 int i, l, n; 12204 12205 static const struct { 12206 int section; 12207 int bufoffs; 12208 int lenoffs; 12209 int entsize; 12210 int align; 12211 const char *msg; 12212 } difo[] = { 12213 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 12214 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 12215 sizeof (dif_instr_t), "multiple DIF sections" }, 12216 12217 { 
DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 12218 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 12219 sizeof (uint64_t), "multiple integer tables" }, 12220 12221 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 12222 offsetof(dtrace_difo_t, dtdo_strlen), 0, 12223 sizeof (char), "multiple string tables" }, 12224 12225 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 12226 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 12227 sizeof (uint_t), "multiple variable tables" }, 12228 12229 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 12230 }; 12231 12232 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 12233 dtrace_dof_error(dof, "invalid DIFO header section"); 12234 return (NULL); 12235 } 12236 12237 if (sec->dofs_align != sizeof (dof_secidx_t)) { 12238 dtrace_dof_error(dof, "bad alignment in DIFO header"); 12239 return (NULL); 12240 } 12241 12242 if (sec->dofs_size < sizeof (dof_difohdr_t) || 12243 sec->dofs_size % sizeof (dof_secidx_t)) { 12244 dtrace_dof_error(dof, "bad size in DIFO header"); 12245 return (NULL); 12246 } 12247 12248 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12249 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 12250 12251 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 12252 dp->dtdo_rtype = dofd->dofd_rtype; 12253 12254 for (l = 0; l < n; l++) { 12255 dof_sec_t *subsec; 12256 void **bufp; 12257 uint32_t *lenp; 12258 12259 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 12260 dofd->dofd_links[l])) == NULL) 12261 goto err; /* invalid section link */ 12262 12263 if (ttl + subsec->dofs_size > max) { 12264 dtrace_dof_error(dof, "exceeds maximum size"); 12265 goto err; 12266 } 12267 12268 ttl += subsec->dofs_size; 12269 12270 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 12271 if (subsec->dofs_type != difo[i].section) 12272 continue; 12273 12274 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 12275 dtrace_dof_error(dof, "section not loaded"); 12276 goto err; 12277 } 12278 12279 if (subsec->dofs_align != difo[i].align) { 12280 dtrace_dof_error(dof, "bad alignment"); 12281 goto err; 12282 } 12283 12284 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 12285 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 12286 12287 if (*bufp != NULL) { 12288 dtrace_dof_error(dof, difo[i].msg); 12289 goto err; 12290 } 12291 12292 if (difo[i].entsize != subsec->dofs_entsize) { 12293 dtrace_dof_error(dof, "entry size mismatch"); 12294 goto err; 12295 } 12296 12297 if (subsec->dofs_entsize != 0 && 12298 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 12299 dtrace_dof_error(dof, "corrupt entry size"); 12300 goto err; 12301 } 12302 12303 *lenp = subsec->dofs_size; 12304 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 12305 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 12306 *bufp, subsec->dofs_size); 12307 12308 if (subsec->dofs_entsize != 0) 12309 *lenp /= subsec->dofs_entsize; 12310 12311 break; 12312 } 12313 12314 /* 12315 * If we encounter a loadable DIFO sub-section that is not 12316 * known to us, assume this is a broken program and fail. 12317 */ 12318 if (difo[i].section == DOF_SECT_NONE && 12319 (subsec->dofs_flags & DOF_SECF_LOAD)) { 12320 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 12321 goto err; 12322 } 12323 } 12324 12325 if (dp->dtdo_buf == NULL) { 12326 /* 12327 * We can't have a DIF object without DIF text. 
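 *
 * (The table-driven loop above copies each recognized sub-section into
 * the corresponding dtrace_difo_t buffer; for sections with a nonzero
 * entry size, the byte length is then converted to an element count.)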
12328 */ 12329 dtrace_dof_error(dof, "missing DIF text"); 12330 goto err; 12331 } 12332 12333 /* 12334 * Before we validate the DIF object, run through the variable table 12335 * looking for the strings -- if any of them have a size of zero, we'll 12336 * set it to the system-wide default string size. Note that 12337 * this should _not_ happen if the "strsize" option has been set -- 12338 * in this case, the compiler should have set the size to reflect the 12339 * setting of the option. 12340 */ 12341 for (i = 0; i < dp->dtdo_varlen; i++) { 12342 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 12343 dtrace_diftype_t *t = &v->dtdv_type; 12344 12345 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 12346 continue; 12347 12348 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 12349 t->dtdt_size = dtrace_strsize_default; 12350 } 12351 12352 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 12353 goto err; 12354 12355 dtrace_difo_init(dp, vstate); 12356 return (dp); 12357 12358err: 12359 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 12360 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 12361 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 12362 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 12363 12364 kmem_free(dp, sizeof (dtrace_difo_t)); 12365 return (NULL); 12366} 12367 12368static dtrace_predicate_t * 12369dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12370 cred_t *cr) 12371{ 12372 dtrace_difo_t *dp; 12373 12374 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 12375 return (NULL); 12376 12377 return (dtrace_predicate_create(dp)); 12378} 12379 12380static dtrace_actdesc_t * 12381dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12382 cred_t *cr) 12383{ 12384 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 12385 dof_actdesc_t *desc; 12386 dof_sec_t *difosec; 12387 size_t offs; 12388 uintptr_t daddr = (uintptr_t)dof; 12389 uint64_t arg; 12390 dtrace_actkind_t kind; 12391 12392 if (sec->dofs_type != DOF_SECT_ACTDESC) { 12393 dtrace_dof_error(dof, "invalid action section"); 12394 return (NULL); 12395 } 12396 12397 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 12398 dtrace_dof_error(dof, "truncated action description"); 12399 return (NULL); 12400 } 12401 12402 if (sec->dofs_align != sizeof (uint64_t)) { 12403 dtrace_dof_error(dof, "bad alignment in action description"); 12404 return (NULL); 12405 } 12406 12407 if (sec->dofs_size < sec->dofs_entsize) { 12408 dtrace_dof_error(dof, "section entry size exceeds total size"); 12409 return (NULL); 12410 } 12411 12412 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 12413 dtrace_dof_error(dof, "bad entry size in action description"); 12414 return (NULL); 12415 } 12416 12417 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 12418 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 12419 return (NULL); 12420 } 12421 12422 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 12423 desc = (dof_actdesc_t *)(daddr + 12424 (uintptr_t)sec->dofs_offset + offs); 12425 kind = (dtrace_actkind_t)desc->dofa_kind; 12426 12427 if ((DTRACEACT_ISPRINTFLIKE(kind) && 12428 (kind != DTRACEACT_PRINTA || 12429 desc->dofa_strtab != DOF_SECIDX_NONE)) || 12430 (kind == DTRACEACT_DIFEXPR && 12431 desc->dofa_strtab != DOF_SECIDX_NONE)) { 12432 dof_sec_t *strtab; 12433 char *str, *fmt; 12434 uint64_t i; 12435 12436 /* 12437 * The argument to these actions is an index into the 12438 * DOF
string table. For printf()-like actions, this 12439 * is the format string. For print(), this is the 12440 * CTF type of the expression result. 12441 */ 12442 if ((strtab = dtrace_dof_sect(dof, 12443 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 12444 goto err; 12445 12446 str = (char *)((uintptr_t)dof + 12447 (uintptr_t)strtab->dofs_offset); 12448 12449 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 12450 if (str[i] == '\0') 12451 break; 12452 } 12453 12454 if (i >= strtab->dofs_size) { 12455 dtrace_dof_error(dof, "bogus format string"); 12456 goto err; 12457 } 12458 12459 if (i == desc->dofa_arg) { 12460 dtrace_dof_error(dof, "empty format string"); 12461 goto err; 12462 } 12463 12464 i -= desc->dofa_arg; 12465 fmt = kmem_alloc(i + 1, KM_SLEEP); 12466 bcopy(&str[desc->dofa_arg], fmt, i + 1); 12467 arg = (uint64_t)(uintptr_t)fmt; 12468 } else { 12469 if (kind == DTRACEACT_PRINTA) { 12470 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 12471 arg = 0; 12472 } else { 12473 arg = desc->dofa_arg; 12474 } 12475 } 12476 12477 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 12478 desc->dofa_uarg, arg); 12479 12480 if (last != NULL) { 12481 last->dtad_next = act; 12482 } else { 12483 first = act; 12484 } 12485 12486 last = act; 12487 12488 if (desc->dofa_difo == DOF_SECIDX_NONE) 12489 continue; 12490 12491 if ((difosec = dtrace_dof_sect(dof, 12492 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 12493 goto err; 12494 12495 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 12496 12497 if (act->dtad_difo == NULL) 12498 goto err; 12499 } 12500 12501 ASSERT(first != NULL); 12502 return (first); 12503 12504err: 12505 for (act = first; act != NULL; act = next) { 12506 next = act->dtad_next; 12507 dtrace_actdesc_release(act, vstate); 12508 } 12509 12510 return (NULL); 12511} 12512 12513static dtrace_ecbdesc_t * 12514dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12515 cred_t *cr) 12516{ 12517 dtrace_ecbdesc_t *ep; 12518 dof_ecbdesc_t *ecb; 12519 dtrace_probedesc_t *desc; 12520 dtrace_predicate_t *pred = NULL; 12521 12522 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 12523 dtrace_dof_error(dof, "truncated ECB description"); 12524 return (NULL); 12525 } 12526 12527 if (sec->dofs_align != sizeof (uint64_t)) { 12528 dtrace_dof_error(dof, "bad alignment in ECB description"); 12529 return (NULL); 12530 } 12531 12532 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 12533 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 12534 12535 if (sec == NULL) 12536 return (NULL); 12537 12538 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12539 ep->dted_uarg = ecb->dofe_uarg; 12540 desc = &ep->dted_probe; 12541 12542 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 12543 goto err; 12544 12545 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 12546 if ((sec = dtrace_dof_sect(dof, 12547 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 12548 goto err; 12549 12550 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 12551 goto err; 12552 12553 ep->dted_pred.dtpdd_predicate = pred; 12554 } 12555 12556 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 12557 if ((sec = dtrace_dof_sect(dof, 12558 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 12559 goto err; 12560 12561 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 12562 12563 if (ep->dted_action == NULL) 12564 goto err; 12565 } 12566 12567 return (ep); 12568 12569err: 12570 if (pred != NULL) 12571 dtrace_predicate_release(pred, vstate); 12572 kmem_free(ep, sizeof 
(dtrace_ecbdesc_t)); 12573 return (NULL); 12574} 12575 12576/* 12577 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 12578 * specified DOF. At present, this amounts to simply adding 'ubase' to the 12579 * site of any user SETX relocations to account for load object base address. 12580 * In the future, if we need other relocations, this function can be extended. 12581 */ 12582static int 12583dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 12584{ 12585 uintptr_t daddr = (uintptr_t)dof; 12586 dof_relohdr_t *dofr = 12587 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12588 dof_sec_t *ss, *rs, *ts; 12589 dof_relodesc_t *r; 12590 uint_t i, n; 12591 12592 if (sec->dofs_size < sizeof (dof_relohdr_t) || 12593 sec->dofs_align != sizeof (dof_secidx_t)) { 12594 dtrace_dof_error(dof, "invalid relocation header"); 12595 return (-1); 12596 } 12597 12598 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 12599 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 12600 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 12601 12602 if (ss == NULL || rs == NULL || ts == NULL) 12603 return (-1); /* dtrace_dof_error() has been called already */ 12604 12605 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 12606 rs->dofs_align != sizeof (uint64_t)) { 12607 dtrace_dof_error(dof, "invalid relocation section"); 12608 return (-1); 12609 } 12610 12611 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 12612 n = rs->dofs_size / rs->dofs_entsize; 12613 12614 for (i = 0; i < n; i++) { 12615 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 12616 12617 switch (r->dofr_type) { 12618 case DOF_RELO_NONE: 12619 break; 12620 case DOF_RELO_SETX: 12621 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 12622 sizeof (uint64_t) > ts->dofs_size) { 12623 dtrace_dof_error(dof, "bad relocation offset"); 12624 return (-1); 12625 } 12626 12627 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 12628 dtrace_dof_error(dof, "misaligned setx relo"); 12629 return (-1); 12630 } 12631 12632 *(uint64_t *)taddr += ubase; 12633 break; 12634 default: 12635 dtrace_dof_error(dof, "invalid relocation type"); 12636 return (-1); 12637 } 12638 12639 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 12640 } 12641 12642 return (0); 12643} 12644 12645/* 12646 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 12647 * header: it should be at the front of a memory region that is at least 12648 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 12649 * size. It need not be validated in any other way. 12650 */ 12651static int 12652dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 12653 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 12654{ 12655 uint64_t len = dof->dofh_loadsz, seclen; 12656 uintptr_t daddr = (uintptr_t)dof; 12657 dtrace_ecbdesc_t *ep; 12658 dtrace_enabling_t *enab; 12659 uint_t i; 12660 12661 ASSERT(MUTEX_HELD(&dtrace_lock)); 12662 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 12663 12664 /* 12665 * Check the DOF header identification bytes. In addition to checking 12666 * valid settings, we also verify that unused bits/bytes are zeroed so 12667 * we can use them later without fear of regressing existing binaries. 
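 *
 * In order, the checks below cover: the magic string, the data model
 * (ILP32/LP64), the encoding, the DOF and DIF versions, the integer and
 * tuple register counts, the pad bytes, the flag bits, and the section
 * header size.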
12668 */ 12669 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 12670 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 12671 dtrace_dof_error(dof, "DOF magic string mismatch"); 12672 return (-1); 12673 } 12674 12675 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 12676 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 12677 dtrace_dof_error(dof, "DOF has invalid data model"); 12678 return (-1); 12679 } 12680 12681 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 12682 dtrace_dof_error(dof, "DOF encoding mismatch"); 12683 return (-1); 12684 } 12685 12686 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 12687 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 12688 dtrace_dof_error(dof, "DOF version mismatch"); 12689 return (-1); 12690 } 12691 12692 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 12693 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 12694 return (-1); 12695 } 12696 12697 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 12698 dtrace_dof_error(dof, "DOF uses too many integer registers"); 12699 return (-1); 12700 } 12701 12702 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 12703 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 12704 return (-1); 12705 } 12706 12707 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 12708 if (dof->dofh_ident[i] != 0) { 12709 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 12710 return (-1); 12711 } 12712 } 12713 12714 if (dof->dofh_flags & ~DOF_FL_VALID) { 12715 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 12716 return (-1); 12717 } 12718 12719 if (dof->dofh_secsize == 0) { 12720 dtrace_dof_error(dof, "zero section header size"); 12721 return (-1); 12722 } 12723 12724 /* 12725 * Check that the section headers don't exceed the amount of DOF 12726 * data. Note that we cast the section size and number of sections 12727 * to uint64_t's to prevent possible overflow in the multiplication. 12728 */ 12729 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 12730 12731 if (dof->dofh_secoff > len || seclen > len || 12732 dof->dofh_secoff + seclen > len) { 12733 dtrace_dof_error(dof, "truncated section headers"); 12734 return (-1); 12735 } 12736 12737 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 12738 dtrace_dof_error(dof, "misaligned section headers"); 12739 return (-1); 12740 } 12741 12742 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 12743 dtrace_dof_error(dof, "misaligned section size"); 12744 return (-1); 12745 } 12746 12747 /* 12748 * Take an initial pass through the section headers to be sure that 12749 * the headers don't have stray offsets. If the 'noprobes' flag is 12750 * set, do not permit sections relating to providers, probes, or args. 
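 *
 * As with the section-header check above, each per-section bounds check
 * below tests the offset and the size against the total length
 * individually before testing their sum, so that a sum that wraps
 * around cannot pass.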
12751 */ 12752 for (i = 0; i < dof->dofh_secnum; i++) { 12753 dof_sec_t *sec = (dof_sec_t *)(daddr + 12754 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12755 12756 if (noprobes) { 12757 switch (sec->dofs_type) { 12758 case DOF_SECT_PROVIDER: 12759 case DOF_SECT_PROBES: 12760 case DOF_SECT_PRARGS: 12761 case DOF_SECT_PROFFS: 12762 dtrace_dof_error(dof, "illegal sections " 12763 "for enabling"); 12764 return (-1); 12765 } 12766 } 12767 12768 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12769 continue; /* just ignore non-loadable sections */ 12770 12771 if (sec->dofs_align & (sec->dofs_align - 1)) { 12772 dtrace_dof_error(dof, "bad section alignment"); 12773 return (-1); 12774 } 12775 12776 if (sec->dofs_offset & (sec->dofs_align - 1)) { 12777 dtrace_dof_error(dof, "misaligned section"); 12778 return (-1); 12779 } 12780 12781 if (sec->dofs_offset > len || sec->dofs_size > len || 12782 sec->dofs_offset + sec->dofs_size > len) { 12783 dtrace_dof_error(dof, "corrupt section header"); 12784 return (-1); 12785 } 12786 12787 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 12788 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 12789 dtrace_dof_error(dof, "non-terminating string table"); 12790 return (-1); 12791 } 12792 } 12793 12794 /* 12795 * Take a second pass through the sections and locate and perform any 12796 * relocations that are present. We do this after the first pass to 12797 * be sure that all sections have had their headers validated. 12798 */ 12799 for (i = 0; i < dof->dofh_secnum; i++) { 12800 dof_sec_t *sec = (dof_sec_t *)(daddr + 12801 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12802 12803 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12804 continue; /* skip sections that are not loadable */ 12805 12806 switch (sec->dofs_type) { 12807 case DOF_SECT_URELHDR: 12808 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 12809 return (-1); 12810 break; 12811 } 12812 } 12813 12814 if ((enab = *enabp) == NULL) 12815 enab = *enabp = dtrace_enabling_create(vstate); 12816 12817 for (i = 0; i < dof->dofh_secnum; i++) { 12818 dof_sec_t *sec = (dof_sec_t *)(daddr + 12819 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12820 12821 if (sec->dofs_type != DOF_SECT_ECBDESC) 12822 continue; 12823 12824 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 12825 dtrace_enabling_destroy(enab); 12826 *enabp = NULL; 12827 return (-1); 12828 } 12829 12830 dtrace_enabling_add(enab, ep); 12831 } 12832 12833 return (0); 12834} 12835 12836/* 12837 * Process DOF for any options. This routine assumes that the DOF has been 12838 * at least processed by dtrace_dof_slurp(). 
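 *
 * Each entry in a DOF_SECT_OPTDESC section is a dof_optdesc_t; a minimal
 * sketch of one entry as it is consumed below (values are hypothetical):
 *
 *      desc->dofo_option = DTRACEOPT_BUFSIZE;  (option to set)
 *      desc->dofo_strtab = DOF_SECIDX_NONE;    (no string payload)
 *      desc->dofo_value  = 4 * 1024 * 1024;    (a 4m principal buffer)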
12839 */ 12840static int 12841dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 12842{ 12843 int i, rval; 12844 uint32_t entsize; 12845 size_t offs; 12846 dof_optdesc_t *desc; 12847 12848 for (i = 0; i < dof->dofh_secnum; i++) { 12849 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 12850 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12851 12852 if (sec->dofs_type != DOF_SECT_OPTDESC) 12853 continue; 12854 12855 if (sec->dofs_align != sizeof (uint64_t)) { 12856 dtrace_dof_error(dof, "bad alignment in " 12857 "option description"); 12858 return (EINVAL); 12859 } 12860 12861 if ((entsize = sec->dofs_entsize) == 0) { 12862 dtrace_dof_error(dof, "zeroed option entry size"); 12863 return (EINVAL); 12864 } 12865 12866 if (entsize < sizeof (dof_optdesc_t)) { 12867 dtrace_dof_error(dof, "bad option entry size"); 12868 return (EINVAL); 12869 } 12870 12871 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 12872 desc = (dof_optdesc_t *)((uintptr_t)dof + 12873 (uintptr_t)sec->dofs_offset + offs); 12874 12875 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 12876 dtrace_dof_error(dof, "non-zero option string"); 12877 return (EINVAL); 12878 } 12879 12880 if (desc->dofo_value == DTRACEOPT_UNSET) { 12881 dtrace_dof_error(dof, "unset option"); 12882 return (EINVAL); 12883 } 12884 12885 if ((rval = dtrace_state_option(state, 12886 desc->dofo_option, desc->dofo_value)) != 0) { 12887 dtrace_dof_error(dof, "rejected option"); 12888 return (rval); 12889 } 12890 } 12891 } 12892 12893 return (0); 12894} 12895 12896/* 12897 * DTrace Consumer State Functions 12898 */ 12899static int 12900dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 12901{ 12902 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 12903 void *base; 12904 uintptr_t limit; 12905 dtrace_dynvar_t *dvar, *next, *start; 12906 int i; 12907 12908 ASSERT(MUTEX_HELD(&dtrace_lock)); 12909 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 12910 12911 bzero(dstate, sizeof (dtrace_dstate_t)); 12912 12913 if ((dstate->dtds_chunksize = chunksize) == 0) 12914 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 12915 12916 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 12917 size = min; 12918 12919 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 12920 return (ENOMEM); 12921 12922 dstate->dtds_size = size; 12923 dstate->dtds_base = base; 12924 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 12925 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 12926 12927 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 12928 12929 if (hashsize != 1 && (hashsize & 1)) 12930 hashsize--; 12931 12932 dstate->dtds_hashsize = hashsize; 12933 dstate->dtds_hash = dstate->dtds_base; 12934 12935 /* 12936 * Set all of our hash buckets to point to the single sink, and (if 12937 * it hasn't already been set), set the sink's hash value to be the 12938 * sink sentinel value. The sink is needed for dynamic variable 12939 * lookups to know that they have iterated over an entire, valid hash 12940 * chain. 12941 */ 12942 for (i = 0; i < hashsize; i++) 12943 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 12944 12945 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 12946 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 12947 12948 /* 12949 * Determine number of active CPUs. Divide free list evenly among 12950 * active CPUs. 
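 *
 * As a worked example (numbers are hypothetical): with roughly 1m of
 * space left after the hash table, a 256-byte chunksize and four CPUs,
 * each CPU receives about 256k, rounded down to a multiple of the
 * chunksize so that every per-CPU free list holds only whole chunks.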
12951 */ 12952 start = (dtrace_dynvar_t *) 12953 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 12954 limit = (uintptr_t)base + size; 12955 12956 maxper = (limit - (uintptr_t)start) / NCPU; 12957 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 12958 12959#if !defined(sun) 12960 CPU_FOREACH(i) { 12961#else 12962 for (i = 0; i < NCPU; i++) { 12963#endif 12964 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 12965 12966 /* 12967 * If we don't even have enough chunks to make it once through 12968 * NCPUs, we're just going to allocate everything to the first 12969 * CPU. And if we're on the last CPU, we're going to allocate 12970 * whatever is left over. In either case, we set the limit to 12971 * be the limit of the dynamic variable space. 12972 */ 12973 if (maxper == 0 || i == NCPU - 1) { 12974 limit = (uintptr_t)base + size; 12975 start = NULL; 12976 } else { 12977 limit = (uintptr_t)start + maxper; 12978 start = (dtrace_dynvar_t *)limit; 12979 } 12980 12981 ASSERT(limit <= (uintptr_t)base + size); 12982 12983 for (;;) { 12984 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 12985 dstate->dtds_chunksize); 12986 12987 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 12988 break; 12989 12990 dvar->dtdv_next = next; 12991 dvar = next; 12992 } 12993 12994 if (maxper == 0) 12995 break; 12996 } 12997 12998 return (0); 12999} 13000 13001static void 13002dtrace_dstate_fini(dtrace_dstate_t *dstate) 13003{ 13004 ASSERT(MUTEX_HELD(&cpu_lock)); 13005 13006 if (dstate->dtds_base == NULL) 13007 return; 13008 13009 kmem_free(dstate->dtds_base, dstate->dtds_size); 13010 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 13011} 13012 13013static void 13014dtrace_vstate_fini(dtrace_vstate_t *vstate) 13015{ 13016 /* 13017 * Logical XOR, where are you? 13018 */ 13019 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 13020 13021 if (vstate->dtvs_nglobals > 0) { 13022 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 13023 sizeof (dtrace_statvar_t *)); 13024 } 13025 13026 if (vstate->dtvs_ntlocals > 0) { 13027 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 13028 sizeof (dtrace_difv_t)); 13029 } 13030 13031 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 13032 13033 if (vstate->dtvs_nlocals > 0) { 13034 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 13035 sizeof (dtrace_statvar_t *)); 13036 } 13037} 13038 13039#if defined(sun) 13040static void 13041dtrace_state_clean(dtrace_state_t *state) 13042{ 13043 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 13044 return; 13045 13046 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 13047 dtrace_speculation_clean(state); 13048} 13049 13050static void 13051dtrace_state_deadman(dtrace_state_t *state) 13052{ 13053 hrtime_t now; 13054 13055 dtrace_sync(); 13056 13057 now = dtrace_gethrtime(); 13058 13059 if (state != dtrace_anon.dta_state && 13060 now - state->dts_laststatus >= dtrace_deadman_user) 13061 return; 13062 13063 /* 13064 * We must be sure that dts_alive never appears to be less than the 13065 * value upon entry to dtrace_state_deadman(), and because we lack a 13066 * dtrace_cas64(), we cannot store to it atomically. We thus instead 13067 * store INT64_MAX to it, followed by a memory barrier, followed by 13068 * the new value. This assures that dts_alive never appears to be 13069 * less than its true value, regardless of the order in which the 13070 * stores to the underlying storage are issued. 
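 *
 * Were a 64-bit compare-and-swap available, the update could instead be
 * written directly (a sketch only -- no such dtrace_cas64() exists):
 *
 *      do {
 *              old = state->dts_alive;
 *      } while (dtrace_cas64(&state->dts_alive, old, now) != old);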
13071 */ 13072 state->dts_alive = INT64_MAX; 13073 dtrace_membar_producer(); 13074 state->dts_alive = now; 13075} 13076#else 13077static void 13078dtrace_state_clean(void *arg) 13079{ 13080 dtrace_state_t *state = arg; 13081 dtrace_optval_t *opt = state->dts_options; 13082 13083 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 13084 return; 13085 13086 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 13087 dtrace_speculation_clean(state); 13088 13089 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13090 dtrace_state_clean, state); 13091} 13092 13093static void 13094dtrace_state_deadman(void *arg) 13095{ 13096 dtrace_state_t *state = arg; 13097 hrtime_t now; 13098 13099 dtrace_sync(); 13100 13101 dtrace_debug_output(); 13102 13103 now = dtrace_gethrtime(); 13104 13105 if (state != dtrace_anon.dta_state && 13106 now - state->dts_laststatus >= dtrace_deadman_user) 13107 return; 13108 13109 /* 13110 * We must be sure that dts_alive never appears to be less than the 13111 * value upon entry to dtrace_state_deadman(), and because we lack a 13112 * dtrace_cas64(), we cannot store to it atomically. We thus instead 13113 * store INT64_MAX to it, followed by a memory barrier, followed by 13114 * the new value. This assures that dts_alive never appears to be 13115 * less than its true value, regardless of the order in which the 13116 * stores to the underlying storage are issued. 13117 */ 13118 state->dts_alive = INT64_MAX; 13119 dtrace_membar_producer(); 13120 state->dts_alive = now; 13121 13122 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13123 dtrace_state_deadman, state); 13124} 13125#endif 13126 13127static dtrace_state_t * 13128#if defined(sun) 13129dtrace_state_create(dev_t *devp, cred_t *cr) 13130#else 13131dtrace_state_create(struct cdev *dev) 13132#endif 13133{ 13134#if defined(sun) 13135 minor_t minor; 13136 major_t major; 13137#else 13138 cred_t *cr = NULL; 13139 int m = 0; 13140#endif 13141 char c[30]; 13142 dtrace_state_t *state; 13143 dtrace_optval_t *opt; 13144 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 13145 13146 ASSERT(MUTEX_HELD(&dtrace_lock)); 13147 ASSERT(MUTEX_HELD(&cpu_lock)); 13148 13149#if defined(sun) 13150 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 13151 VM_BESTFIT | VM_SLEEP); 13152 13153 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 13154 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13155 return (NULL); 13156 } 13157 13158 state = ddi_get_soft_state(dtrace_softstate, minor); 13159#else 13160 if (dev != NULL) { 13161 cr = dev->si_cred; 13162 m = dev2unit(dev); 13163 } 13164 13165 /* Allocate memory for the state. */ 13166 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 13167#endif 13168 13169 state->dts_epid = DTRACE_EPIDNONE + 1; 13170 13171 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 13172#if defined(sun) 13173 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 13174 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 13175 13176 if (devp != NULL) { 13177 major = getemajor(*devp); 13178 } else { 13179 major = ddi_driver_major(dtrace_devi); 13180 } 13181 13182 state->dts_dev = makedevice(major, minor); 13183 13184 if (devp != NULL) 13185 *devp = state->dts_dev; 13186#else 13187 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 13188 state->dts_dev = dev; 13189#endif 13190 13191 /* 13192 * We allocate NCPU buffers. 
On the one hand, this can be quite 13193 * a bit of memory per instance (nearly 36K on a Starcat). On the 13194 * other hand, it saves an additional memory reference in the probe 13195 * path. 13196 */ 13197 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 13198 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 13199 13200#if defined(sun) 13201 state->dts_cleaner = CYCLIC_NONE; 13202 state->dts_deadman = CYCLIC_NONE; 13203#else 13204 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE); 13205 callout_init(&state->dts_deadman, CALLOUT_MPSAFE); 13206#endif 13207 state->dts_vstate.dtvs_state = state; 13208 13209 for (i = 0; i < DTRACEOPT_MAX; i++) 13210 state->dts_options[i] = DTRACEOPT_UNSET; 13211 13212 /* 13213 * Set the default options. 13214 */ 13215 opt = state->dts_options; 13216 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 13217 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 13218 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 13219 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 13220 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 13221 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 13222 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 13223 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 13224 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 13225 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 13226 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 13227 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 13228 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 13229 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 13230 13231 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 13232 13233 /* 13234 * Depending on the user credentials, we set flag bits which alter probe 13235 * visibility or the amount of destructiveness allowed. In the case of 13236 * actual anonymous tracing, or the possession of all privileges, all of 13237 * the normal checks are bypassed. 13238 */ 13239 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 13240 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 13241 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 13242 } else { 13243 /* 13244 * Set up the credentials for this instantiation. We take a 13245 * hold on the credential to prevent it from disappearing on 13246 * us; this in turn prevents the zone_t referenced by this 13247 * credential from disappearing. This means that we can 13248 * examine the credential and the zone from probe context. 13249 */ 13250 crhold(cr); 13251 state->dts_cred.dcr_cred = cr; 13252 13253 /* 13254 * CRA_PROC means "we have *some* privilege for dtrace" and 13255 * unlocks the use of variables like pid, zonename, etc. 13256 */ 13257 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 13258 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13259 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 13260 } 13261 13262 /* 13263 * dtrace_user allows use of syscall and profile providers. 13264 * If the user also has proc_owner and/or proc_zone, we 13265 * extend the scope to include additional visibility and 13266 * destructive power. 
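 *
 * Schematically, the checks below apply this mapping (a summary, not an
 * exhaustive statement of the privilege model):
 *
 *      dtrace_user + proc_owner => DTRACE_CRV_ALLPROC visibility plus
 *          DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER
 *      dtrace_user + proc_zone  => DTRACE_CRV_ALLZONE visibility plus
 *          DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE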
13267 */
13268 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
13269 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
13270 state->dts_cred.dcr_visible |=
13271 DTRACE_CRV_ALLPROC;
13272
13273 state->dts_cred.dcr_action |=
13274 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13275 }
13276
13277 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
13278 state->dts_cred.dcr_visible |=
13279 DTRACE_CRV_ALLZONE;
13280
13281 state->dts_cred.dcr_action |=
13282 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13283 }
13284
13285 /*
13286 * If we have all privs in whatever zone this is,
13287 * we can do destructive things to processes which
13288 * have altered credentials.
13289 */
13290#if defined(sun)
13291 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13292 cr->cr_zone->zone_privset)) {
13293 state->dts_cred.dcr_action |=
13294 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13295 }
13296#endif
13297 }
13298
13299 /*
13300 * Holding the dtrace_kernel privilege also implies that
13301 * the user has the dtrace_user privilege from a visibility
13302 * perspective. But without further privileges, some
13303 * destructive actions are not available.
13304 */
13305 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
13306 /*
13307 * Make all probes in all zones visible. However,
13308 * this doesn't mean that all actions become available
13309 * to all zones.
13310 */
13311 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
13312 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
13313
13314 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
13315 DTRACE_CRA_PROC;
13316 /*
13317 * Holding proc_owner means that destructive actions
13318 * for *this* zone are allowed.
13319 */
13320 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13321 state->dts_cred.dcr_action |=
13322 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13323
13324 /*
13325 * Holding proc_zone means that destructive actions
13326 * for this user/group ID in all zones are allowed.
13327 */
13328 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13329 state->dts_cred.dcr_action |=
13330 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13331
13332#if defined(sun)
13333 /*
13334 * If we have all privs in whatever zone this is,
13335 * we can do destructive things to processes which
13336 * have altered credentials.
13337 */
13338 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13339 cr->cr_zone->zone_privset)) {
13340 state->dts_cred.dcr_action |=
13341 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13342 }
13343#endif
13344 }
13345
13346 /*
13347 * Holding the dtrace_proc privilege gives control over fasttrap
13348 * and pid providers. We need to grant wider destructive
13349 * privileges in the event that the user has proc_owner and/or
13350 * proc_zone. 
13351 */
13352 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13353 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13354 state->dts_cred.dcr_action |=
13355 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13356
13357 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13358 state->dts_cred.dcr_action |=
13359 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13360 }
13361 }
13362
13363 return (state);
13364}
13365
13366static int
13367dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
13368{
13369 dtrace_optval_t *opt = state->dts_options, size;
13370 processorid_t cpu = 0;
13371 int flags = 0, rval;
13372
13373 ASSERT(MUTEX_HELD(&dtrace_lock));
13374 ASSERT(MUTEX_HELD(&cpu_lock));
13375 ASSERT(which < DTRACEOPT_MAX);
13376 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
13377 (state == dtrace_anon.dta_state &&
13378 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
13379
13380 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
13381 return (0);
13382
13383 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
13384 cpu = opt[DTRACEOPT_CPU];
13385
13386 if (which == DTRACEOPT_SPECSIZE)
13387 flags |= DTRACEBUF_NOSWITCH;
13388
13389 if (which == DTRACEOPT_BUFSIZE) {
13390 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
13391 flags |= DTRACEBUF_RING;
13392
13393 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
13394 flags |= DTRACEBUF_FILL;
13395
13396 if (state != dtrace_anon.dta_state ||
13397 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
13398 flags |= DTRACEBUF_INACTIVE;
13399 }
13400
13401 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) {
13402 /*
13403 * The size must be 8-byte aligned. If the size is not 8-byte
13404 * aligned, drop it down by the difference.
13405 */
13406 if (size & (sizeof (uint64_t) - 1))
13407 size -= size & (sizeof (uint64_t) - 1);
13408
13409 if (size < state->dts_reserve) {
13410 /*
13411 * Buffers must always be large enough to accommodate
13412 * their prereserved space. We return E2BIG instead
13413 * of ENOMEM in this case to allow user-level
13414 * software to differentiate the cases.
13415 */
13416 return (E2BIG);
13417 }
13418
13419 rval = dtrace_buffer_alloc(buf, size, flags, cpu);
13420
13421 if (rval != ENOMEM) {
13422 opt[which] = size;
13423 return (rval);
13424 }
13425
13426 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13427 return (rval);
13428 }
13429
13430 return (ENOMEM);
13431}
13432
13433static int
13434dtrace_state_buffers(dtrace_state_t *state)
13435{
13436 dtrace_speculation_t *spec = state->dts_speculations;
13437 int rval, i;
13438
13439 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
13440 DTRACEOPT_BUFSIZE)) != 0)
13441 return (rval);
13442
13443 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
13444 DTRACEOPT_AGGSIZE)) != 0)
13445 return (rval);
13446
13447 for (i = 0; i < state->dts_nspeculations; i++) {
13448 if ((rval = dtrace_state_buffer(state,
13449 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
13450 return (rval);
13451 }
13452
13453 return (0);
13454}
13455
13456static void
13457dtrace_state_prereserve(dtrace_state_t *state)
13458{
13459 dtrace_ecb_t *ecb;
13460 dtrace_probe_t *probe;
13461
13462 state->dts_reserve = 0;
13463
13464 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
13465 return;
13466
13467 /*
13468 * If our buffer policy is a "fill" buffer policy, we need to set the
13469 * prereserved space to be the space required by the END probes. 
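 *
 * For example (hypothetical sizes): two END-probe ECBs belonging to this
 * state, needing 64 and 32 bytes with 8-byte alignment each, yield a
 * reserve of (64 + 8) + (32 + 8) = 112 bytes.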
13470 */
13471 probe = dtrace_probes[dtrace_probeid_end - 1];
13472 ASSERT(probe != NULL);
13473
13474 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
13475 if (ecb->dte_state != state)
13476 continue;
13477
13478 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
13479 }
13480}
13481
13482static int
13483dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
13484{
13485 dtrace_optval_t *opt = state->dts_options, sz, nspec;
13486 dtrace_speculation_t *spec;
13487 dtrace_buffer_t *buf;
13488#if defined(sun)
13489 cyc_handler_t hdlr;
13490 cyc_time_t when;
13491#endif
13492 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13493 dtrace_icookie_t cookie;
13494
13495 mutex_enter(&cpu_lock);
13496 mutex_enter(&dtrace_lock);
13497
13498 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
13499 rval = EBUSY;
13500 goto out;
13501 }
13502
13503 /*
13504 * Before we can perform any checks, we must prime all of the
13505 * retained enablings that correspond to this state.
13506 */
13507 dtrace_enabling_prime(state);
13508
13509 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
13510 rval = EACCES;
13511 goto out;
13512 }
13513
13514 dtrace_state_prereserve(state);
13515
13516 /*
13517 * Now we want to try to allocate our speculations.
13518 * We do not automatically resize the number of speculations; if
13519 * this fails, we will fail the operation.
13520 */
13521 nspec = opt[DTRACEOPT_NSPEC];
13522 ASSERT(nspec != DTRACEOPT_UNSET);
13523
13524 if (nspec > INT_MAX) {
13525 rval = ENOMEM;
13526 goto out;
13527 }
13528
13529 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP);
13530
13531 if (spec == NULL) {
13532 rval = ENOMEM;
13533 goto out;
13534 }
13535
13536 state->dts_speculations = spec;
13537 state->dts_nspeculations = (int)nspec;
13538
13539 for (i = 0; i < nspec; i++) {
13540 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) {
13541 rval = ENOMEM;
13542 goto err;
13543 }
13544
13545 spec[i].dtsp_buffer = buf;
13546 }
13547
13548 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
13549 if (dtrace_anon.dta_state == NULL) {
13550 rval = ENOENT;
13551 goto out;
13552 }
13553
13554 if (state->dts_necbs != 0) {
13555 rval = EALREADY;
13556 goto out;
13557 }
13558
13559 state->dts_anon = dtrace_anon_grab();
13560 ASSERT(state->dts_anon != NULL);
13561 state = state->dts_anon;
13562
13563 /*
13564 * We want "grabanon" to be set in the grabbed state, so we'll
13565 * copy that option value from the grabbing state into the
13566 * grabbed state.
13567 */
13568 state->dts_options[DTRACEOPT_GRABANON] =
13569 opt[DTRACEOPT_GRABANON];
13570
13571 *cpu = dtrace_anon.dta_beganon;
13572
13573 /*
13574 * If the anonymous state is active (as it almost certainly
13575 * is if the anonymous enabling ultimately matched anything),
13576 * we don't allow any further option processing -- but we
13577 * don't return failure.
13578 */
13579 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13580 goto out;
13581 }
13582
13583 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
13584 opt[DTRACEOPT_AGGSIZE] != 0) {
13585 if (state->dts_aggregations == NULL) {
13586 /*
13587 * We're not going to create an aggregation buffer
13588 * because we don't have any ECBs that contain
13589 * aggregations -- set this option to 0.
13590 */
13591 opt[DTRACEOPT_AGGSIZE] = 0;
13592 } else {
13593 /*
13594 * If we have an aggregation buffer, we must also have
13595 * a buffer to use as scratch. 
13596 */ 13597 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 13598 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 13599 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 13600 } 13601 } 13602 } 13603 13604 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 13605 opt[DTRACEOPT_SPECSIZE] != 0) { 13606 if (!state->dts_speculates) { 13607 /* 13608 * We're not going to create speculation buffers 13609 * because we don't have any ECBs that actually 13610 * speculate -- set the speculation size to 0. 13611 */ 13612 opt[DTRACEOPT_SPECSIZE] = 0; 13613 } 13614 } 13615 13616 /* 13617 * The bare minimum size for any buffer that we're actually going to 13618 * do anything to is sizeof (uint64_t). 13619 */ 13620 sz = sizeof (uint64_t); 13621 13622 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 13623 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 13624 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 13625 /* 13626 * A buffer size has been explicitly set to 0 (or to a size 13627 * that will be adjusted to 0) and we need the space -- we 13628 * need to return failure. We return ENOSPC to differentiate 13629 * it from failing to allocate a buffer due to failure to meet 13630 * the reserve (for which we return E2BIG). 13631 */ 13632 rval = ENOSPC; 13633 goto out; 13634 } 13635 13636 if ((rval = dtrace_state_buffers(state)) != 0) 13637 goto err; 13638 13639 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 13640 sz = dtrace_dstate_defsize; 13641 13642 do { 13643 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 13644 13645 if (rval == 0) 13646 break; 13647 13648 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13649 goto err; 13650 } while (sz >>= 1); 13651 13652 opt[DTRACEOPT_DYNVARSIZE] = sz; 13653 13654 if (rval != 0) 13655 goto err; 13656 13657 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 13658 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 13659 13660 if (opt[DTRACEOPT_CLEANRATE] == 0) 13661 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13662 13663 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 13664 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 13665 13666 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 13667 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13668 13669 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 13670#if defined(sun) 13671 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 13672 hdlr.cyh_arg = state; 13673 hdlr.cyh_level = CY_LOW_LEVEL; 13674 13675 when.cyt_when = 0; 13676 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 13677 13678 state->dts_cleaner = cyclic_add(&hdlr, &when); 13679 13680 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 13681 hdlr.cyh_arg = state; 13682 hdlr.cyh_level = CY_LOW_LEVEL; 13683 13684 when.cyt_when = 0; 13685 when.cyt_interval = dtrace_deadman_interval; 13686 13687 state->dts_deadman = cyclic_add(&hdlr, &when); 13688#else 13689 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13690 dtrace_state_clean, state); 13691 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13692 dtrace_state_deadman, state); 13693#endif 13694 13695 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 13696 13697 /* 13698 * Now it's time to actually fire the BEGIN probe. We need to disable 13699 * interrupts here both to record the CPU on which we fired the BEGIN 13700 * probe (the data from this CPU will be processed first at user 13701 * level) and to manually activate the buffer for this CPU. 
13702 */
13703 cookie = dtrace_interrupt_disable();
13704 *cpu = curcpu;
13705 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
13706 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
13707
13708 dtrace_probe(dtrace_probeid_begin,
13709 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13710 dtrace_interrupt_enable(cookie);
13711 /*
13712 * We may have had an exit action from a BEGIN probe; only change our
13713 * state to ACTIVE if we're still in WARMUP.
13714 */
13715 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
13716 state->dts_activity == DTRACE_ACTIVITY_DRAINING);
13717
13718 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
13719 state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
13720
13721 /*
13722 * Regardless of whether we're now in ACTIVE or DRAINING, we
13723 * want each CPU to transition its principal buffer out of the
13724 * INACTIVE state. Doing this assures that no CPU will suddenly begin
13725 * processing an ECB halfway down a probe's ECB chain; all CPUs will
13726 * atomically transition from processing none of a state's ECBs to
13727 * processing all of them.
13728 */
13729 dtrace_xcall(DTRACE_CPUALL,
13730 (dtrace_xcall_t)dtrace_buffer_activate, state);
13731 goto out;
13732
13733err:
13734 dtrace_buffer_free(state->dts_buffer);
13735 dtrace_buffer_free(state->dts_aggbuffer);
13736
13737 if ((nspec = state->dts_nspeculations) == 0) {
13738 ASSERT(state->dts_speculations == NULL);
13739 goto out;
13740 }
13741
13742 spec = state->dts_speculations;
13743 ASSERT(spec != NULL);
13744
13745 for (i = 0; i < state->dts_nspeculations; i++) {
13746 if ((buf = spec[i].dtsp_buffer) == NULL)
13747 break;
13748
13749 dtrace_buffer_free(buf);
13750 kmem_free(buf, bufsize);
13751 }
13752
13753 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13754 state->dts_nspeculations = 0;
13755 state->dts_speculations = NULL;
13756
13757out:
13758 mutex_exit(&dtrace_lock);
13759 mutex_exit(&cpu_lock);
13760
13761 return (rval);
13762}
13763
13764static int
13765dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
13766{
13767 dtrace_icookie_t cookie;
13768
13769 ASSERT(MUTEX_HELD(&dtrace_lock));
13770
13771 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
13772 state->dts_activity != DTRACE_ACTIVITY_DRAINING)
13773 return (EINVAL);
13774
13775 /*
13776 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
13777 * to be sure that every CPU has seen it. See below for the details
13778 * on why this is done.
13779 */
13780 state->dts_activity = DTRACE_ACTIVITY_DRAINING;
13781 dtrace_sync();
13782
13783 /*
13784 * By this point, it is impossible for any CPU to be still processing
13785 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
13786 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
13787 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
13788 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
13789 * iff we're in the END probe.
13790 */
13791 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
13792 dtrace_sync();
13793 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
13794
13795 /*
13796 * Finally, we can release the reserve and call the END probe. We
13797 * disable interrupts across calling the END probe to allow us to
13798 * return the CPU on which we actually called the END probe. This
13799 * allows user-land to be sure that this CPU's principal buffer is
13800 * processed last. 
13801 */
13802 state->dts_reserve = 0;
13803
13804 cookie = dtrace_interrupt_disable();
13805 *cpu = curcpu;
13806 dtrace_probe(dtrace_probeid_end,
13807 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13808 dtrace_interrupt_enable(cookie);
13809
13810 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
13811 dtrace_sync();
13812
13813 return (0);
13814}
13815
13816static int
13817dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
13818 dtrace_optval_t val)
13819{
13820 ASSERT(MUTEX_HELD(&dtrace_lock));
13821
13822 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13823 return (EBUSY);
13824
13825 if (option >= DTRACEOPT_MAX)
13826 return (EINVAL);
13827
13828 if (option != DTRACEOPT_CPU && val < 0)
13829 return (EINVAL);
13830
13831 switch (option) {
13832 case DTRACEOPT_DESTRUCTIVE:
13833 if (dtrace_destructive_disallow)
13834 return (EACCES);
13835
13836 state->dts_cred.dcr_destructive = 1;
13837 break;
13838
13839 case DTRACEOPT_BUFSIZE:
13840 case DTRACEOPT_DYNVARSIZE:
13841 case DTRACEOPT_AGGSIZE:
13842 case DTRACEOPT_SPECSIZE:
13843 case DTRACEOPT_STRSIZE:
13844 if (val < 0)
13845 return (EINVAL);
13846
13847 if (val >= LONG_MAX) {
13848 /*
13849 * If this is an otherwise negative value, set it to
13850 * the highest multiple of 128m less than LONG_MAX.
13851 * Technically, we're adjusting the size without
13852 * regard to the buffer resizing policy, but in fact,
13853 * this has no effect -- if we set the buffer size to
13854 * ~LONG_MAX and the buffer policy is ultimately set to
13855 * be "manual", the buffer allocation is guaranteed to
13856 * fail, if only because the allocation requires two
13857 * buffers. (We set the size to the highest
13858 * multiple of 128m because it ensures that the size
13859 * will remain a multiple of a megabyte when
13860 * repeatedly halved -- all the way down to 15m.)
13861 */
13862 val = LONG_MAX - (1 << 27) + 1;
13863 }
13864 }
13865
13866 state->dts_options[option] = val;
13867
13868 return (0);
13869}
13870
13871static void
13872dtrace_state_destroy(dtrace_state_t *state)
13873{
13874 dtrace_ecb_t *ecb;
13875 dtrace_vstate_t *vstate = &state->dts_vstate;
13876#if defined(sun)
13877 minor_t minor = getminor(state->dts_dev);
13878#endif
13879 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13880 dtrace_speculation_t *spec = state->dts_speculations;
13881 int nspec = state->dts_nspeculations;
13882 uint32_t match;
13883
13884 ASSERT(MUTEX_HELD(&dtrace_lock));
13885 ASSERT(MUTEX_HELD(&cpu_lock));
13886
13887 /*
13888 * First, retract any retained enablings for this state.
13889 */
13890 dtrace_enabling_retract(state);
13891 ASSERT(state->dts_nretained == 0);
13892
13893 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
13894 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
13895 /*
13896 * We have managed to come into dtrace_state_destroy() on a
13897 * hot enabling -- almost certainly because of a disorderly
13898 * shutdown of a consumer. (That is, a consumer that is
13899 * exiting without having called dtrace_stop().) In this case,
13900 * we're going to set our activity to be KILLED, and then
13901 * issue a sync to be sure that everyone is out of probe
13902 * context before we start blowing away ECBs.
13903 */
13904 state->dts_activity = DTRACE_ACTIVITY_KILLED;
13905 dtrace_sync();
13906 }
13907
13908 /*
13909 * Release the credential hold we took in dtrace_state_create(). 
13910 */ 13911 if (state->dts_cred.dcr_cred != NULL) 13912 crfree(state->dts_cred.dcr_cred); 13913 13914 /* 13915 * Now we can safely disable and destroy any enabled probes. Because 13916 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 13917 * (especially if they're all enabled), we take two passes through the 13918 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 13919 * in the second we disable whatever is left over. 13920 */ 13921 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 13922 for (i = 0; i < state->dts_necbs; i++) { 13923 if ((ecb = state->dts_ecbs[i]) == NULL) 13924 continue; 13925 13926 if (match && ecb->dte_probe != NULL) { 13927 dtrace_probe_t *probe = ecb->dte_probe; 13928 dtrace_provider_t *prov = probe->dtpr_provider; 13929 13930 if (!(prov->dtpv_priv.dtpp_flags & match)) 13931 continue; 13932 } 13933 13934 dtrace_ecb_disable(ecb); 13935 dtrace_ecb_destroy(ecb); 13936 } 13937 13938 if (!match) 13939 break; 13940 } 13941 13942 /* 13943 * Before we free the buffers, perform one more sync to assure that 13944 * every CPU is out of probe context. 13945 */ 13946 dtrace_sync(); 13947 13948 dtrace_buffer_free(state->dts_buffer); 13949 dtrace_buffer_free(state->dts_aggbuffer); 13950 13951 for (i = 0; i < nspec; i++) 13952 dtrace_buffer_free(spec[i].dtsp_buffer); 13953 13954#if defined(sun) 13955 if (state->dts_cleaner != CYCLIC_NONE) 13956 cyclic_remove(state->dts_cleaner); 13957 13958 if (state->dts_deadman != CYCLIC_NONE) 13959 cyclic_remove(state->dts_deadman); 13960#else 13961 callout_stop(&state->dts_cleaner); 13962 callout_drain(&state->dts_cleaner); 13963 callout_stop(&state->dts_deadman); 13964 callout_drain(&state->dts_deadman); 13965#endif 13966 13967 dtrace_dstate_fini(&vstate->dtvs_dynvars); 13968 dtrace_vstate_fini(vstate); 13969 if (state->dts_ecbs != NULL) 13970 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 13971 13972 if (state->dts_aggregations != NULL) { 13973#ifdef DEBUG 13974 for (i = 0; i < state->dts_naggregations; i++) 13975 ASSERT(state->dts_aggregations[i] == NULL); 13976#endif 13977 ASSERT(state->dts_naggregations > 0); 13978 kmem_free(state->dts_aggregations, 13979 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 13980 } 13981 13982 kmem_free(state->dts_buffer, bufsize); 13983 kmem_free(state->dts_aggbuffer, bufsize); 13984 13985 for (i = 0; i < nspec; i++) 13986 kmem_free(spec[i].dtsp_buffer, bufsize); 13987 13988 if (spec != NULL) 13989 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13990 13991 dtrace_format_destroy(state); 13992 13993 if (state->dts_aggid_arena != NULL) { 13994#if defined(sun) 13995 vmem_destroy(state->dts_aggid_arena); 13996#else 13997 delete_unrhdr(state->dts_aggid_arena); 13998#endif 13999 state->dts_aggid_arena = NULL; 14000 } 14001#if defined(sun) 14002 ddi_soft_state_free(dtrace_softstate, minor); 14003 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 14004#endif 14005} 14006 14007/* 14008 * DTrace Anonymous Enabling Functions 14009 */ 14010static dtrace_state_t * 14011dtrace_anon_grab(void) 14012{ 14013 dtrace_state_t *state; 14014 14015 ASSERT(MUTEX_HELD(&dtrace_lock)); 14016 14017 if ((state = dtrace_anon.dta_state) == NULL) { 14018 ASSERT(dtrace_anon.dta_enabling == NULL); 14019 return (NULL); 14020 } 14021 14022 ASSERT(dtrace_anon.dta_enabling != NULL); 14023 ASSERT(dtrace_retained != NULL); 14024 14025 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 14026 dtrace_anon.dta_enabling = NULL; 14027 dtrace_anon.dta_state = NULL; 
14028 14029 return (state); 14030} 14031 14032static void 14033dtrace_anon_property(void) 14034{ 14035 int i, rv; 14036 dtrace_state_t *state; 14037 dof_hdr_t *dof; 14038 char c[32]; /* enough for "dof-data-" + digits */ 14039 14040 ASSERT(MUTEX_HELD(&dtrace_lock)); 14041 ASSERT(MUTEX_HELD(&cpu_lock)); 14042 14043 for (i = 0; ; i++) { 14044 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 14045 14046 dtrace_err_verbose = 1; 14047 14048 if ((dof = dtrace_dof_property(c)) == NULL) { 14049 dtrace_err_verbose = 0; 14050 break; 14051 } 14052 14053#if defined(sun) 14054 /* 14055 * We want to create anonymous state, so we need to transition 14056 * the kernel debugger to indicate that DTrace is active. If 14057 * this fails (e.g. because the debugger has modified text in 14058 * some way), we won't continue with the processing. 14059 */ 14060 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 14061 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 14062 "enabling ignored."); 14063 dtrace_dof_destroy(dof); 14064 break; 14065 } 14066#endif 14067 14068 /* 14069 * If we haven't allocated an anonymous state, we'll do so now. 14070 */ 14071 if ((state = dtrace_anon.dta_state) == NULL) { 14072#if defined(sun) 14073 state = dtrace_state_create(NULL, NULL); 14074#else 14075 state = dtrace_state_create(NULL); 14076#endif 14077 dtrace_anon.dta_state = state; 14078 14079 if (state == NULL) { 14080 /* 14081 * This basically shouldn't happen: the only 14082 * failure mode from dtrace_state_create() is a 14083 * failure of ddi_soft_state_zalloc() that 14084 * itself should never happen. Still, the 14085 * interface allows for a failure mode, and 14086 * we want to fail as gracefully as possible: 14087 * we'll emit an error message and cease 14088 * processing anonymous state in this case. 14089 */ 14090 cmn_err(CE_WARN, "failed to create " 14091 "anonymous state"); 14092 dtrace_dof_destroy(dof); 14093 break; 14094 } 14095 } 14096 14097 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 14098 &dtrace_anon.dta_enabling, 0, B_TRUE); 14099 14100 if (rv == 0) 14101 rv = dtrace_dof_options(dof, state); 14102 14103 dtrace_err_verbose = 0; 14104 dtrace_dof_destroy(dof); 14105 14106 if (rv != 0) { 14107 /* 14108 * This is malformed DOF; chuck any anonymous state 14109 * that we created. 14110 */ 14111 ASSERT(dtrace_anon.dta_enabling == NULL); 14112 dtrace_state_destroy(state); 14113 dtrace_anon.dta_state = NULL; 14114 break; 14115 } 14116 14117 ASSERT(dtrace_anon.dta_enabling != NULL); 14118 } 14119 14120 if (dtrace_anon.dta_enabling != NULL) { 14121 int rval; 14122 14123 /* 14124 * dtrace_enabling_retain() can only fail because we are 14125 * trying to retain more enablings than are allowed -- but 14126 * we only have one anonymous enabling, and we are guaranteed 14127 * to be allowed at least one retained enabling; we assert 14128 * that dtrace_enabling_retain() returns success. 
14129 */ 14130 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 14131 ASSERT(rval == 0); 14132 14133 dtrace_enabling_dump(dtrace_anon.dta_enabling); 14134 } 14135} 14136 14137/* 14138 * DTrace Helper Functions 14139 */ 14140static void 14141dtrace_helper_trace(dtrace_helper_action_t *helper, 14142 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 14143{ 14144 uint32_t size, next, nnext, i; 14145 dtrace_helptrace_t *ent; 14146 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 14147 14148 if (!dtrace_helptrace_enabled) 14149 return; 14150 14151 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 14152 14153 /* 14154 * What would a tracing framework be without its own tracing 14155 * framework? (Well, a hell of a lot simpler, for starters...) 14156 */ 14157 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 14158 sizeof (uint64_t) - sizeof (uint64_t); 14159 14160 /* 14161 * Iterate until we can allocate a slot in the trace buffer. 14162 */ 14163 do { 14164 next = dtrace_helptrace_next; 14165 14166 if (next + size < dtrace_helptrace_bufsize) { 14167 nnext = next + size; 14168 } else { 14169 nnext = size; 14170 } 14171 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 14172 14173 /* 14174 * We have our slot; fill it in. 14175 */ 14176 if (nnext == size) 14177 next = 0; 14178 14179 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 14180 ent->dtht_helper = helper; 14181 ent->dtht_where = where; 14182 ent->dtht_nlocals = vstate->dtvs_nlocals; 14183 14184 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 14185 mstate->dtms_fltoffs : -1; 14186 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 14187 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 14188 14189 for (i = 0; i < vstate->dtvs_nlocals; i++) { 14190 dtrace_statvar_t *svar; 14191 14192 if ((svar = vstate->dtvs_locals[i]) == NULL) 14193 continue; 14194 14195 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 14196 ent->dtht_locals[i] = 14197 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 14198 } 14199} 14200 14201static uint64_t 14202dtrace_helper(int which, dtrace_mstate_t *mstate, 14203 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 14204{ 14205 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 14206 uint64_t sarg0 = mstate->dtms_arg[0]; 14207 uint64_t sarg1 = mstate->dtms_arg[1]; 14208 uint64_t rval = 0; 14209 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 14210 dtrace_helper_action_t *helper; 14211 dtrace_vstate_t *vstate; 14212 dtrace_difo_t *pred; 14213 int i, trace = dtrace_helptrace_enabled; 14214 14215 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 14216 14217 if (helpers == NULL) 14218 return (0); 14219 14220 if ((helper = helpers->dthps_actions[which]) == NULL) 14221 return (0); 14222 14223 vstate = &helpers->dthps_vstate; 14224 mstate->dtms_arg[0] = arg0; 14225 mstate->dtms_arg[1] = arg1; 14226 14227 /* 14228 * Now iterate over each helper. If its predicate evaluates to 'true', 14229 * we'll call the corresponding actions. Note that the below calls 14230 * to dtrace_dif_emulate() may set faults in machine state. This is 14231 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 14232 * the stored DIF offset with its own (which is the desired behavior). 14233 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 14234 * from machine state; this is okay, too. 
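 *
 * Schematically, the loop below behaves as follows (a sketch):
 *
 *      for each helper on the chain:
 *              if a predicate exists and emulates to zero, skip to the
 *                  next helper;
 *              if a fault was raised, bail out and return zero;
 *              otherwise emulate each action in turn -- the value of the
 *                  last action of the last matching helper becomes rval.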
14235 */
14236 for (; helper != NULL; helper = helper->dtha_next) {
14237 if ((pred = helper->dtha_predicate) != NULL) {
14238 if (trace)
14239 dtrace_helper_trace(helper, mstate, vstate, 0);
14240
14241 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
14242 goto next;
14243
14244 if (*flags & CPU_DTRACE_FAULT)
14245 goto err;
14246 }
14247
14248 for (i = 0; i < helper->dtha_nactions; i++) {
14249 if (trace)
14250 dtrace_helper_trace(helper,
14251 mstate, vstate, i + 1);
14252
14253 rval = dtrace_dif_emulate(helper->dtha_actions[i],
14254 mstate, vstate, state);
14255
14256 if (*flags & CPU_DTRACE_FAULT)
14257 goto err;
14258 }
14259
14260next:
14261 if (trace)
14262 dtrace_helper_trace(helper, mstate, vstate,
14263 DTRACE_HELPTRACE_NEXT);
14264 }
14265
14266 if (trace)
14267 dtrace_helper_trace(helper, mstate, vstate,
14268 DTRACE_HELPTRACE_DONE);
14269
14270 /*
14271 * Restore the args that we saved upon entry.
14272 */
14273 mstate->dtms_arg[0] = sarg0;
14274 mstate->dtms_arg[1] = sarg1;
14275
14276 return (rval);
14277
14278err:
14279 if (trace)
14280 dtrace_helper_trace(helper, mstate, vstate,
14281 DTRACE_HELPTRACE_ERR);
14282
14283 /*
14284 * Restore the args that we saved upon entry.
14285 */
14286 mstate->dtms_arg[0] = sarg0;
14287 mstate->dtms_arg[1] = sarg1;
14288
14289 return (0);
14290}
14291
14292static void
14293dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
14294 dtrace_vstate_t *vstate)
14295{
14296 int i;
14297
14298 if (helper->dtha_predicate != NULL)
14299 dtrace_difo_release(helper->dtha_predicate, vstate);
14300
14301 for (i = 0; i < helper->dtha_nactions; i++) {
14302 ASSERT(helper->dtha_actions[i] != NULL);
14303 dtrace_difo_release(helper->dtha_actions[i], vstate);
14304 }
14305
14306 kmem_free(helper->dtha_actions,
14307 helper->dtha_nactions * sizeof (dtrace_difo_t *));
14308 kmem_free(helper, sizeof (dtrace_helper_action_t));
14309}
14310
14311static int
14312dtrace_helper_destroygen(int gen)
14313{
14314 proc_t *p = curproc;
14315 dtrace_helpers_t *help = p->p_dtrace_helpers;
14316 dtrace_vstate_t *vstate;
14317 int i;
14318
14319 ASSERT(MUTEX_HELD(&dtrace_lock));
14320
14321 if (help == NULL || gen > help->dthps_generation)
14322 return (EINVAL);
14323
14324 vstate = &help->dthps_vstate;
14325
14326 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14327 dtrace_helper_action_t *last = NULL, *h, *next;
14328
14329 for (h = help->dthps_actions[i]; h != NULL; h = next) {
14330 next = h->dtha_next;
14331
14332 if (h->dtha_generation == gen) {
14333 if (last != NULL) {
14334 last->dtha_next = next;
14335 } else {
14336 help->dthps_actions[i] = next;
14337 }
14338
14339 dtrace_helper_action_destroy(h, vstate);
14340 } else {
14341 last = h;
14342 }
14343 }
14344 }
14345
14346 /*
14347 * Iterate until we've cleared out all helper providers with the
14348 * given generation number.
14349 */
14350 for (;;) {
14351 dtrace_helper_provider_t *prov;
14352
14353 /*
14354 * Look for a helper provider with the right generation. We
14355 * have to start back at the beginning of the list each time
14356 * because we drop dtrace_lock. It's unlikely that we'll make
14357 * more than two passes.
14358 */
14359 for (i = 0; i < help->dthps_nprovs; i++) {
14360 prov = help->dthps_provs[i];
14361
14362 if (prov->dthp_generation == gen)
14363 break;
14364 }
14365
14366 /*
14367 * If there were no matches, we're done.
14368 */
14369 if (i == help->dthps_nprovs)
14370 break;
14371
14372 /*
14373 * Move the last helper provider into this slot. 
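 *
 * That is, a constant-time unordered removal -- the ordering of helper
 * providers is insignificant here:
 *
 *      provs[i] = provs[--nprovs];
 *      provs[nprovs] = NULL;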
14374 */ 14375 help->dthps_nprovs--; 14376 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 14377 help->dthps_provs[help->dthps_nprovs] = NULL; 14378 14379 mutex_exit(&dtrace_lock); 14380 14381 /* 14382 * If we have a meta provider, remove this helper provider. 14383 */ 14384 mutex_enter(&dtrace_meta_lock); 14385 if (dtrace_meta_pid != NULL) { 14386 ASSERT(dtrace_deferred_pid == NULL); 14387 dtrace_helper_provider_remove(&prov->dthp_prov, 14388 p->p_pid); 14389 } 14390 mutex_exit(&dtrace_meta_lock); 14391 14392 dtrace_helper_provider_destroy(prov); 14393 14394 mutex_enter(&dtrace_lock); 14395 } 14396 14397 return (0); 14398} 14399 14400static int 14401dtrace_helper_validate(dtrace_helper_action_t *helper) 14402{ 14403 int err = 0, i; 14404 dtrace_difo_t *dp; 14405 14406 if ((dp = helper->dtha_predicate) != NULL) 14407 err += dtrace_difo_validate_helper(dp); 14408 14409 for (i = 0; i < helper->dtha_nactions; i++) 14410 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 14411 14412 return (err == 0); 14413} 14414 14415static int 14416dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 14417{ 14418 dtrace_helpers_t *help; 14419 dtrace_helper_action_t *helper, *last; 14420 dtrace_actdesc_t *act; 14421 dtrace_vstate_t *vstate; 14422 dtrace_predicate_t *pred; 14423 int count = 0, nactions = 0, i; 14424 14425 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 14426 return (EINVAL); 14427 14428 help = curproc->p_dtrace_helpers; 14429 last = help->dthps_actions[which]; 14430 vstate = &help->dthps_vstate; 14431 14432 for (count = 0; last != NULL; last = last->dtha_next) { 14433 count++; 14434 if (last->dtha_next == NULL) 14435 break; 14436 } 14437 14438 /* 14439 * If we already have dtrace_helper_actions_max helper actions for this 14440 * helper action type, we'll refuse to add a new one. 
14441 */
14442 if (count >= dtrace_helper_actions_max)
14443 return (ENOSPC);
14444
14445 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
14446 helper->dtha_generation = help->dthps_generation;
14447
14448 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
14449 ASSERT(pred->dtp_difo != NULL);
14450 dtrace_difo_hold(pred->dtp_difo);
14451 helper->dtha_predicate = pred->dtp_difo;
14452 }
14453
14454 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
14455 if (act->dtad_kind != DTRACEACT_DIFEXPR)
14456 goto err;
14457
14458 if (act->dtad_difo == NULL)
14459 goto err;
14460
14461 nactions++;
14462 }
14463
14464 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
14465 (helper->dtha_nactions = nactions), KM_SLEEP);
14466
14467 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
14468 dtrace_difo_hold(act->dtad_difo);
14469 helper->dtha_actions[i++] = act->dtad_difo;
14470 }
14471
14472 if (!dtrace_helper_validate(helper))
14473 goto err;
14474
14475 if (last == NULL) {
14476 help->dthps_actions[which] = helper;
14477 } else {
14478 last->dtha_next = helper;
14479 }
14480
14481 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
14482 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
14483 dtrace_helptrace_next = 0;
14484 }
14485
14486 return (0);
14487err:
14488 dtrace_helper_action_destroy(helper, vstate);
14489 return (EINVAL);
14490}
14491
14492static void
14493dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
14494 dof_helper_t *dofhp)
14495{
14496 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
14497
14498 mutex_enter(&dtrace_meta_lock);
14499 mutex_enter(&dtrace_lock);
14500
14501 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
14502 /*
14503 * If the dtrace module is loaded but not attached, or if
14504 * there isn't a meta provider registered to deal with
14505 * these provider descriptions, we need to postpone creating
14506 * the actual providers until later.
14507 */
14508
14509 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
14510 dtrace_deferred_pid != help) {
14511 help->dthps_deferred = 1;
14512 help->dthps_pid = p->p_pid;
14513 help->dthps_next = dtrace_deferred_pid;
14514 help->dthps_prev = NULL;
14515 if (dtrace_deferred_pid != NULL)
14516 dtrace_deferred_pid->dthps_prev = help;
14517 dtrace_deferred_pid = help;
14518 }
14519
14520 mutex_exit(&dtrace_lock);
14521
14522 } else if (dofhp != NULL) {
14523 /*
14524 * If the dtrace module is loaded and we have a particular
14525 * helper provider description, pass that off to the
14526 * meta provider.
14527 */
14528
14529 mutex_exit(&dtrace_lock);
14530
14531 dtrace_helper_provide(dofhp, p->p_pid);
14532
14533 } else {
14534 /*
14535 * Otherwise, just pass all the helper provider descriptions
14536 * off to the meta provider.
14537 */
14538
14539 int i;
14540 mutex_exit(&dtrace_lock);
14541
14542 for (i = 0; i < help->dthps_nprovs; i++) {
14543 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
14544 p->p_pid);
14545 }
14546 }
14547
14548 mutex_exit(&dtrace_meta_lock);
14549}
14550
14551static int
14552dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
14553{
14554 dtrace_helpers_t *help;
14555 dtrace_helper_provider_t *hprov, **tmp_provs;
14556 uint_t tmp_maxprovs, i;
14557
14558 ASSERT(MUTEX_HELD(&dtrace_lock));
14559
14560 help = curproc->p_dtrace_helpers;
14561 ASSERT(help != NULL);
14562
14563 /*
14564 * If we already have dtrace_helper_providers_max helper providers,
14565 * we'll refuse to add a new one. 
14566 */ 14567 if (help->dthps_nprovs >= dtrace_helper_providers_max) 14568 return (ENOSPC); 14569 14570 /* 14571 * Check to make sure this isn't a duplicate. 14572 */ 14573 for (i = 0; i < help->dthps_nprovs; i++) { 14574 if (dofhp->dofhp_dof == 14575 help->dthps_provs[i]->dthp_prov.dofhp_dof) 14576 return (EALREADY); 14577 } 14578 14579 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 14580 hprov->dthp_prov = *dofhp; 14581 hprov->dthp_ref = 1; 14582 hprov->dthp_generation = gen; 14583 14584 /* 14585 * Allocate a bigger table for helper providers if it's already full. 14586 */ 14587 if (help->dthps_maxprovs == help->dthps_nprovs) { 14588 tmp_maxprovs = help->dthps_maxprovs; 14589 tmp_provs = help->dthps_provs; 14590 14591 if (help->dthps_maxprovs == 0) 14592 help->dthps_maxprovs = 2; 14593 else 14594 help->dthps_maxprovs *= 2; 14595 if (help->dthps_maxprovs > dtrace_helper_providers_max) 14596 help->dthps_maxprovs = dtrace_helper_providers_max; 14597 14598 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 14599 14600 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 14601 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14602 14603 if (tmp_provs != NULL) { 14604 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 14605 sizeof (dtrace_helper_provider_t *)); 14606 kmem_free(tmp_provs, tmp_maxprovs * 14607 sizeof (dtrace_helper_provider_t *)); 14608 } 14609 } 14610 14611 help->dthps_provs[help->dthps_nprovs] = hprov; 14612 help->dthps_nprovs++; 14613 14614 return (0); 14615} 14616 14617static void 14618dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 14619{ 14620 mutex_enter(&dtrace_lock); 14621 14622 if (--hprov->dthp_ref == 0) { 14623 dof_hdr_t *dof; 14624 mutex_exit(&dtrace_lock); 14625 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 14626 dtrace_dof_destroy(dof); 14627 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 14628 } else { 14629 mutex_exit(&dtrace_lock); 14630 } 14631} 14632 14633static int 14634dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 14635{ 14636 uintptr_t daddr = (uintptr_t)dof; 14637 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 14638 dof_provider_t *provider; 14639 dof_probe_t *probe; 14640 uint8_t *arg; 14641 char *strtab, *typestr; 14642 dof_stridx_t typeidx; 14643 size_t typesz; 14644 uint_t nprobes, j, k; 14645 14646 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 14647 14648 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 14649 dtrace_dof_error(dof, "misaligned section offset"); 14650 return (-1); 14651 } 14652 14653 /* 14654 * The section needs to be large enough to contain the DOF provider 14655 * structure appropriate for the given version. 14656 */ 14657 if (sec->dofs_size < 14658 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
14659 offsetof(dof_provider_t, dofpv_prenoffs) : 14660 sizeof (dof_provider_t))) { 14661 dtrace_dof_error(dof, "provider section too small"); 14662 return (-1); 14663 } 14664 14665 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 14666 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 14667 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 14668 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 14669 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 14670 14671 if (str_sec == NULL || prb_sec == NULL || 14672 arg_sec == NULL || off_sec == NULL) 14673 return (-1); 14674 14675 enoff_sec = NULL; 14676 14677 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14678 provider->dofpv_prenoffs != DOF_SECT_NONE && 14679 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 14680 provider->dofpv_prenoffs)) == NULL) 14681 return (-1); 14682 14683 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 14684 14685 if (provider->dofpv_name >= str_sec->dofs_size || 14686 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 14687 dtrace_dof_error(dof, "invalid provider name"); 14688 return (-1); 14689 } 14690 14691 if (prb_sec->dofs_entsize == 0 || 14692 prb_sec->dofs_entsize > prb_sec->dofs_size) { 14693 dtrace_dof_error(dof, "invalid entry size"); 14694 return (-1); 14695 } 14696 14697 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 14698 dtrace_dof_error(dof, "misaligned entry size"); 14699 return (-1); 14700 } 14701 14702 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 14703 dtrace_dof_error(dof, "invalid entry size"); 14704 return (-1); 14705 } 14706 14707 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 14708 dtrace_dof_error(dof, "misaligned section offset"); 14709 return (-1); 14710 } 14711 14712 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 14713 dtrace_dof_error(dof, "invalid entry size"); 14714 return (-1); 14715 } 14716 14717 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 14718 14719 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 14720 14721 /* 14722 * Take a pass through the probes to check for errors. 14723 */ 14724 for (j = 0; j < nprobes; j++) { 14725 probe = (dof_probe_t *)(uintptr_t)(daddr + 14726 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 14727 14728 if (probe->dofpr_func >= str_sec->dofs_size) { 14729 dtrace_dof_error(dof, "invalid function name"); 14730 return (-1); 14731 } 14732 14733 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 14734 dtrace_dof_error(dof, "function name too long"); 14735 return (-1); 14736 } 14737 14738 if (probe->dofpr_name >= str_sec->dofs_size || 14739 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 14740 dtrace_dof_error(dof, "invalid probe name"); 14741 return (-1); 14742 } 14743 14744 /* 14745 * The offset count must not wrap the index, and the offsets 14746 * must also not overflow the section's data. 14747 */ 14748 if (probe->dofpr_offidx + probe->dofpr_noffs < 14749 probe->dofpr_offidx || 14750 (probe->dofpr_offidx + probe->dofpr_noffs) * 14751 off_sec->dofs_entsize > off_sec->dofs_size) { 14752 dtrace_dof_error(dof, "invalid probe offset"); 14753 return (-1); 14754 } 14755 14756 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 14757 /* 14758 * If there's no is-enabled offset section, make sure 14759 * there aren't any is-enabled offsets. Otherwise 14760 * perform the same checks as for probe offsets 14761 * (immediately above). 
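			 *
			 * As a concrete illustration of the wrap check
			 * (assuming the 32-bit offset fields used by DOF):
			 * an index of 0xfffffffe plus a count of 4 wraps
			 * around to 2, which is less than the original
			 * index and is therefore rejected by the first
			 * comparison before the scaled size comparison
			 * could be fooled.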
14762 */ 14763 if (enoff_sec == NULL) { 14764 if (probe->dofpr_enoffidx != 0 || 14765 probe->dofpr_nenoffs != 0) { 14766 dtrace_dof_error(dof, "is-enabled " 14767 "offsets with null section"); 14768 return (-1); 14769 } 14770 } else if (probe->dofpr_enoffidx + 14771 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 14772 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 14773 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 14774 dtrace_dof_error(dof, "invalid is-enabled " 14775 "offset"); 14776 return (-1); 14777 } 14778 14779 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 14780 dtrace_dof_error(dof, "zero probe and " 14781 "is-enabled offsets"); 14782 return (-1); 14783 } 14784 } else if (probe->dofpr_noffs == 0) { 14785 dtrace_dof_error(dof, "zero probe offsets"); 14786 return (-1); 14787 } 14788 14789 if (probe->dofpr_argidx + probe->dofpr_xargc < 14790 probe->dofpr_argidx || 14791 (probe->dofpr_argidx + probe->dofpr_xargc) * 14792 arg_sec->dofs_entsize > arg_sec->dofs_size) { 14793 dtrace_dof_error(dof, "invalid args"); 14794 return (-1); 14795 } 14796 14797 typeidx = probe->dofpr_nargv; 14798 typestr = strtab + probe->dofpr_nargv; 14799 for (k = 0; k < probe->dofpr_nargc; k++) { 14800 if (typeidx >= str_sec->dofs_size) { 14801 dtrace_dof_error(dof, "bad " 14802 "native argument type"); 14803 return (-1); 14804 } 14805 14806 typesz = strlen(typestr) + 1; 14807 if (typesz > DTRACE_ARGTYPELEN) { 14808 dtrace_dof_error(dof, "native " 14809 "argument type too long"); 14810 return (-1); 14811 } 14812 typeidx += typesz; 14813 typestr += typesz; 14814 } 14815 14816 typeidx = probe->dofpr_xargv; 14817 typestr = strtab + probe->dofpr_xargv; 14818 for (k = 0; k < probe->dofpr_xargc; k++) { 14819 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 14820 dtrace_dof_error(dof, "bad " 14821 "native argument index"); 14822 return (-1); 14823 } 14824 14825 if (typeidx >= str_sec->dofs_size) { 14826 dtrace_dof_error(dof, "bad " 14827 "translated argument type"); 14828 return (-1); 14829 } 14830 14831 typesz = strlen(typestr) + 1; 14832 if (typesz > DTRACE_ARGTYPELEN) { 14833 dtrace_dof_error(dof, "translated argument " 14834 "type too long"); 14835 return (-1); 14836 } 14837 14838 typeidx += typesz; 14839 typestr += typesz; 14840 } 14841 } 14842 14843 return (0); 14844} 14845 14846static int 14847dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 14848{ 14849 dtrace_helpers_t *help; 14850 dtrace_vstate_t *vstate; 14851 dtrace_enabling_t *enab = NULL; 14852 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 14853 uintptr_t daddr = (uintptr_t)dof; 14854 14855 ASSERT(MUTEX_HELD(&dtrace_lock)); 14856 14857 if ((help = curproc->p_dtrace_helpers) == NULL) 14858 help = dtrace_helpers_create(curproc); 14859 14860 vstate = &help->dthps_vstate; 14861 14862 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 14863 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 14864 dtrace_dof_destroy(dof); 14865 return (rv); 14866 } 14867 14868 /* 14869 * Look for helper providers and validate their descriptions. 
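	 * Validation is performed before any helper state is committed:
	 * a single malformed DOF_SECT_PROVIDER section below causes both
	 * the enabling and the DOF itself to be destroyed.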
 */
	if (dhp != NULL) {
		for (i = 0; i < dof->dofh_secnum; i++) {
			dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
			    dof->dofh_secoff + i * dof->dofh_secsize);

			if (sec->dofs_type != DOF_SECT_PROVIDER)
				continue;

			if (dtrace_helper_provider_validate(dof, sec) != 0) {
				dtrace_enabling_destroy(enab);
				dtrace_dof_destroy(dof);
				return (-1);
			}

			nprovs++;
		}
	}

	/*
	 * Now we need to walk through the ECB descriptions in the enabling.
	 */
	for (i = 0; i < enab->dten_ndesc; i++) {
		dtrace_ecbdesc_t *ep = enab->dten_desc[i];
		dtrace_probedesc_t *desc = &ep->dted_probe;

		if (strcmp(desc->dtpd_provider, "dtrace") != 0)
			continue;

		if (strcmp(desc->dtpd_mod, "helper") != 0)
			continue;

		if (strcmp(desc->dtpd_func, "ustack") != 0)
			continue;

		if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
		    ep)) != 0) {
			/*
			 * Adding this helper action failed -- we are now going
			 * to rip out the entire generation and return failure.
			 */
			(void) dtrace_helper_destroygen(help->dthps_generation);
			dtrace_enabling_destroy(enab);
			dtrace_dof_destroy(dof);
			return (-1);
		}

		nhelpers++;
	}

	if (nhelpers < enab->dten_ndesc)
		dtrace_dof_error(dof, "unmatched helpers");

	gen = help->dthps_generation++;
	dtrace_enabling_destroy(enab);

	if (dhp != NULL && nprovs > 0) {
		dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
		if (dtrace_helper_provider_add(dhp, gen) == 0) {
			mutex_exit(&dtrace_lock);
			dtrace_helper_provider_register(curproc, help, dhp);
			mutex_enter(&dtrace_lock);

			destroy = 0;
		}
	}

	if (destroy)
		dtrace_dof_destroy(dof);

	return (gen);
}

static dtrace_helpers_t *
dtrace_helpers_create(proc_t *p)
{
	dtrace_helpers_t *help;

	ASSERT(MUTEX_HELD(&dtrace_lock));
	ASSERT(p->p_dtrace_helpers == NULL);

	help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
	help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
	    DTRACE_NHELPER_ACTIONS, KM_SLEEP);

	p->p_dtrace_helpers = help;
	dtrace_helpers++;

	return (help);
}

#if defined(sun)
static
#endif
void
#if defined(sun)
dtrace_helpers_destroy(void)
#else
dtrace_helpers_destroy(proc_t *p)
#endif
{
	dtrace_helpers_t *help;
	dtrace_vstate_t *vstate;
#if defined(sun)
	proc_t *p = curproc;
#endif
	int i;

	mutex_enter(&dtrace_lock);

	ASSERT(p->p_dtrace_helpers != NULL);
	ASSERT(dtrace_helpers > 0);

	help = p->p_dtrace_helpers;
	vstate = &help->dthps_vstate;

	/*
	 * We're now going to lose the help from this process.
	 */
	p->p_dtrace_helpers = NULL;
	dtrace_sync();

	/*
	 * Destroy the helper actions.
	 */
	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
		dtrace_helper_action_t *h, *next;

		for (h = help->dthps_actions[i]; h != NULL; h = next) {
			next = h->dtha_next;
			dtrace_helper_action_destroy(h, vstate);
		}
	}

	mutex_exit(&dtrace_lock);

	/*
	 * Destroy the helper providers.
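	 *
	 * Helper providers are reference-counted: dtrace_helpers_duplicate()
	 * shares them with a forked child by taking an extra dthp_ref, so
	 * dtrace_helper_provider_destroy() below frees the underlying DOF
	 * only when the final reference goes away.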
15005 */ 15006 if (help->dthps_maxprovs > 0) { 15007 mutex_enter(&dtrace_meta_lock); 15008 if (dtrace_meta_pid != NULL) { 15009 ASSERT(dtrace_deferred_pid == NULL); 15010 15011 for (i = 0; i < help->dthps_nprovs; i++) { 15012 dtrace_helper_provider_remove( 15013 &help->dthps_provs[i]->dthp_prov, p->p_pid); 15014 } 15015 } else { 15016 mutex_enter(&dtrace_lock); 15017 ASSERT(help->dthps_deferred == 0 || 15018 help->dthps_next != NULL || 15019 help->dthps_prev != NULL || 15020 help == dtrace_deferred_pid); 15021 15022 /* 15023 * Remove the helper from the deferred list. 15024 */ 15025 if (help->dthps_next != NULL) 15026 help->dthps_next->dthps_prev = help->dthps_prev; 15027 if (help->dthps_prev != NULL) 15028 help->dthps_prev->dthps_next = help->dthps_next; 15029 if (dtrace_deferred_pid == help) { 15030 dtrace_deferred_pid = help->dthps_next; 15031 ASSERT(help->dthps_prev == NULL); 15032 } 15033 15034 mutex_exit(&dtrace_lock); 15035 } 15036 15037 mutex_exit(&dtrace_meta_lock); 15038 15039 for (i = 0; i < help->dthps_nprovs; i++) { 15040 dtrace_helper_provider_destroy(help->dthps_provs[i]); 15041 } 15042 15043 kmem_free(help->dthps_provs, help->dthps_maxprovs * 15044 sizeof (dtrace_helper_provider_t *)); 15045 } 15046 15047 mutex_enter(&dtrace_lock); 15048 15049 dtrace_vstate_fini(&help->dthps_vstate); 15050 kmem_free(help->dthps_actions, 15051 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 15052 kmem_free(help, sizeof (dtrace_helpers_t)); 15053 15054 --dtrace_helpers; 15055 mutex_exit(&dtrace_lock); 15056} 15057 15058#if defined(sun) 15059static 15060#endif 15061void 15062dtrace_helpers_duplicate(proc_t *from, proc_t *to) 15063{ 15064 dtrace_helpers_t *help, *newhelp; 15065 dtrace_helper_action_t *helper, *new, *last; 15066 dtrace_difo_t *dp; 15067 dtrace_vstate_t *vstate; 15068 int i, j, sz, hasprovs = 0; 15069 15070 mutex_enter(&dtrace_lock); 15071 ASSERT(from->p_dtrace_helpers != NULL); 15072 ASSERT(dtrace_helpers > 0); 15073 15074 help = from->p_dtrace_helpers; 15075 newhelp = dtrace_helpers_create(to); 15076 ASSERT(to->p_dtrace_helpers != NULL); 15077 15078 newhelp->dthps_generation = help->dthps_generation; 15079 vstate = &newhelp->dthps_vstate; 15080 15081 /* 15082 * Duplicate the helper actions. 15083 */ 15084 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 15085 if ((helper = help->dthps_actions[i]) == NULL) 15086 continue; 15087 15088 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 15089 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 15090 KM_SLEEP); 15091 new->dtha_generation = helper->dtha_generation; 15092 15093 if ((dp = helper->dtha_predicate) != NULL) { 15094 dp = dtrace_difo_duplicate(dp, vstate); 15095 new->dtha_predicate = dp; 15096 } 15097 15098 new->dtha_nactions = helper->dtha_nactions; 15099 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 15100 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 15101 15102 for (j = 0; j < new->dtha_nactions; j++) { 15103 dtrace_difo_t *dp = helper->dtha_actions[j]; 15104 15105 ASSERT(dp != NULL); 15106 dp = dtrace_difo_duplicate(dp, vstate); 15107 new->dtha_actions[j] = dp; 15108 } 15109 15110 if (last != NULL) { 15111 last->dtha_next = new; 15112 } else { 15113 newhelp->dthps_actions[i] = new; 15114 } 15115 15116 last = new; 15117 } 15118 } 15119 15120 /* 15121 * Duplicate the helper providers and register them with the 15122 * DTrace framework. 
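	 *
	 * Duplication is shallow with respect to the provider
	 * descriptions: rather than copying the DOF, each
	 * dtrace_helper_provider_t simply gains a dthp_ref, which is
	 * released when the child's helpers are destroyed.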
 */
	if (help->dthps_nprovs > 0) {
		newhelp->dthps_nprovs = help->dthps_nprovs;
		newhelp->dthps_maxprovs = help->dthps_nprovs;
		newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
		    sizeof (dtrace_helper_provider_t *), KM_SLEEP);
		for (i = 0; i < newhelp->dthps_nprovs; i++) {
			newhelp->dthps_provs[i] = help->dthps_provs[i];
			newhelp->dthps_provs[i]->dthp_ref++;
		}

		hasprovs = 1;
	}

	mutex_exit(&dtrace_lock);

	if (hasprovs)
		dtrace_helper_provider_register(to, newhelp, NULL);
}

/*
 * DTrace Hook Functions
 */
static void
dtrace_module_loaded(modctl_t *ctl)
{
	dtrace_provider_t *prv;

	mutex_enter(&dtrace_provider_lock);
#if defined(sun)
	mutex_enter(&mod_lock);
#endif

#if defined(sun)
	ASSERT(ctl->mod_busy);
#endif

	/*
	 * We're going to call each provider's per-module provide operation,
	 * specifying only this module.
	 */
	for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
		prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);

#if defined(sun)
	mutex_exit(&mod_lock);
#endif
	mutex_exit(&dtrace_provider_lock);

	/*
	 * If we have any retained enablings, we need to match against them.
	 * Enabling probes requires that cpu_lock be held, and we cannot hold
	 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
	 * module. (In particular, this happens when loading scheduling
	 * classes.) So if we have any retained enablings, we need to dispatch
	 * our task queue to do the match for us.
	 */
	mutex_enter(&dtrace_lock);

	if (dtrace_retained == NULL) {
		mutex_exit(&dtrace_lock);
		return;
	}

	(void) taskq_dispatch(dtrace_taskq,
	    (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);

	mutex_exit(&dtrace_lock);

	/*
	 * And now, for a little heuristic sleaze: in general, we want to
	 * match modules as soon as they load. However, we cannot guarantee
	 * this, because it would lead us to the lock ordering violation
	 * outlined above. The common case, of course, is that cpu_lock is
	 * _not_ held -- so we delay here for a clock tick, hoping that that's
	 * long enough for the task queue to do its work. If it's not, it's
	 * not a serious problem -- it just means that the module that we
	 * just loaded may not be immediately instrumentable.
	 */
	delay(1);
}

static void
#if defined(sun)
dtrace_module_unloaded(modctl_t *ctl)
#else
dtrace_module_unloaded(modctl_t *ctl, int *error)
#endif
{
	dtrace_probe_t template, *probe, *first, *next;
	dtrace_provider_t *prov;
#if !defined(sun)
	char modname[DTRACE_MODNAMELEN];
	size_t len;
#endif

#if defined(sun)
	template.dtpr_mod = ctl->mod_modname;
#else
	/* Handle the fact that ctl->filename may end in ".ko".
*/ 15223 strlcpy(modname, ctl->filename, sizeof(modname)); 15224 len = strlen(ctl->filename); 15225 if (len > 3 && strcmp(modname + len - 3, ".ko") == 0) 15226 modname[len - 3] = '\0'; 15227 template.dtpr_mod = modname; 15228#endif 15229 15230 mutex_enter(&dtrace_provider_lock); 15231#if defined(sun) 15232 mutex_enter(&mod_lock); 15233#endif 15234 mutex_enter(&dtrace_lock); 15235 15236#if !defined(sun) 15237 if (ctl->nenabled > 0) { 15238 /* Don't allow unloads if a probe is enabled. */ 15239 mutex_exit(&dtrace_provider_lock); 15240 mutex_exit(&dtrace_lock); 15241 *error = -1; 15242 printf( 15243 "kldunload: attempt to unload module that has DTrace probes enabled\n"); 15244 return; 15245 } 15246#endif 15247 15248 if (dtrace_bymod == NULL) { 15249 /* 15250 * The DTrace module is loaded (obviously) but not attached; 15251 * we don't have any work to do. 15252 */ 15253 mutex_exit(&dtrace_provider_lock); 15254#if defined(sun) 15255 mutex_exit(&mod_lock); 15256#endif 15257 mutex_exit(&dtrace_lock); 15258 return; 15259 } 15260 15261 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 15262 probe != NULL; probe = probe->dtpr_nextmod) { 15263 if (probe->dtpr_ecb != NULL) { 15264 mutex_exit(&dtrace_provider_lock); 15265#if defined(sun) 15266 mutex_exit(&mod_lock); 15267#endif 15268 mutex_exit(&dtrace_lock); 15269 15270 /* 15271 * This shouldn't _actually_ be possible -- we're 15272 * unloading a module that has an enabled probe in it. 15273 * (It's normally up to the provider to make sure that 15274 * this can't happen.) However, because dtps_enable() 15275 * doesn't have a failure mode, there can be an 15276 * enable/unload race. Upshot: we don't want to 15277 * assert, but we're not going to disable the 15278 * probe, either. 15279 */ 15280 if (dtrace_err_verbose) { 15281#if defined(sun) 15282 cmn_err(CE_WARN, "unloaded module '%s' had " 15283 "enabled probes", ctl->mod_modname); 15284#else 15285 cmn_err(CE_WARN, "unloaded module '%s' had " 15286 "enabled probes", modname); 15287#endif 15288 } 15289 15290 return; 15291 } 15292 } 15293 15294 probe = first; 15295 15296 for (first = NULL; probe != NULL; probe = next) { 15297 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 15298 15299 dtrace_probes[probe->dtpr_id - 1] = NULL; 15300 15301 next = probe->dtpr_nextmod; 15302 dtrace_hash_remove(dtrace_bymod, probe); 15303 dtrace_hash_remove(dtrace_byfunc, probe); 15304 dtrace_hash_remove(dtrace_byname, probe); 15305 15306 if (first == NULL) { 15307 first = probe; 15308 probe->dtpr_nextmod = NULL; 15309 } else { 15310 probe->dtpr_nextmod = first; 15311 first = probe; 15312 } 15313 } 15314 15315 /* 15316 * We've removed all of the module's probes from the hash chains and 15317 * from the probe array. Now issue a dtrace_sync() to be sure that 15318 * everyone has cleared out from any probe array processing. 
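	 *
	 * (dtrace_sync() returns only once every CPU has been observed
	 * outside of probe context, so no CPU can still hold a stale
	 * pointer into dtrace_probes[] by the time the probes are freed
	 * below.)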
15319 */ 15320 dtrace_sync(); 15321 15322 for (probe = first; probe != NULL; probe = first) { 15323 first = probe->dtpr_nextmod; 15324 prov = probe->dtpr_provider; 15325 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 15326 probe->dtpr_arg); 15327 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 15328 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 15329 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 15330#if defined(sun) 15331 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 15332#else 15333 free_unr(dtrace_arena, probe->dtpr_id); 15334#endif 15335 kmem_free(probe, sizeof (dtrace_probe_t)); 15336 } 15337 15338 mutex_exit(&dtrace_lock); 15339#if defined(sun) 15340 mutex_exit(&mod_lock); 15341#endif 15342 mutex_exit(&dtrace_provider_lock); 15343} 15344 15345#if !defined(sun) 15346static void 15347dtrace_kld_load(void *arg __unused, linker_file_t lf) 15348{ 15349 15350 dtrace_module_loaded(lf); 15351} 15352 15353static void 15354dtrace_kld_unload_try(void *arg __unused, linker_file_t lf, int *error) 15355{ 15356 15357 if (*error != 0) 15358 /* We already have an error, so don't do anything. */ 15359 return; 15360 dtrace_module_unloaded(lf, error); 15361} 15362#endif 15363 15364#if defined(sun) 15365static void 15366dtrace_suspend(void) 15367{ 15368 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 15369} 15370 15371static void 15372dtrace_resume(void) 15373{ 15374 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 15375} 15376#endif 15377 15378static int 15379dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 15380{ 15381 ASSERT(MUTEX_HELD(&cpu_lock)); 15382 mutex_enter(&dtrace_lock); 15383 15384 switch (what) { 15385 case CPU_CONFIG: { 15386 dtrace_state_t *state; 15387 dtrace_optval_t *opt, rs, c; 15388 15389 /* 15390 * For now, we only allocate a new buffer for anonymous state. 15391 */ 15392 if ((state = dtrace_anon.dta_state) == NULL) 15393 break; 15394 15395 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 15396 break; 15397 15398 opt = state->dts_options; 15399 c = opt[DTRACEOPT_CPU]; 15400 15401 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 15402 break; 15403 15404 /* 15405 * Regardless of what the actual policy is, we're going to 15406 * temporarily set our resize policy to be manual. We're 15407 * also going to temporarily set our CPU option to denote 15408 * the newly configured CPU. 15409 */ 15410 rs = opt[DTRACEOPT_BUFRESIZE]; 15411 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 15412 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 15413 15414 (void) dtrace_state_buffers(state); 15415 15416 opt[DTRACEOPT_BUFRESIZE] = rs; 15417 opt[DTRACEOPT_CPU] = c; 15418 15419 break; 15420 } 15421 15422 case CPU_UNCONFIG: 15423 /* 15424 * We don't free the buffer in the CPU_UNCONFIG case. (The 15425 * buffer will be freed when the consumer exits.) 
15426 */ 15427 break; 15428 15429 default: 15430 break; 15431 } 15432 15433 mutex_exit(&dtrace_lock); 15434 return (0); 15435} 15436 15437#if defined(sun) 15438static void 15439dtrace_cpu_setup_initial(processorid_t cpu) 15440{ 15441 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 15442} 15443#endif 15444 15445static void 15446dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 15447{ 15448 if (dtrace_toxranges >= dtrace_toxranges_max) { 15449 int osize, nsize; 15450 dtrace_toxrange_t *range; 15451 15452 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15453 15454 if (osize == 0) { 15455 ASSERT(dtrace_toxrange == NULL); 15456 ASSERT(dtrace_toxranges_max == 0); 15457 dtrace_toxranges_max = 1; 15458 } else { 15459 dtrace_toxranges_max <<= 1; 15460 } 15461 15462 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15463 range = kmem_zalloc(nsize, KM_SLEEP); 15464 15465 if (dtrace_toxrange != NULL) { 15466 ASSERT(osize != 0); 15467 bcopy(dtrace_toxrange, range, osize); 15468 kmem_free(dtrace_toxrange, osize); 15469 } 15470 15471 dtrace_toxrange = range; 15472 } 15473 15474 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 15475 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 15476 15477 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 15478 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 15479 dtrace_toxranges++; 15480} 15481 15482/* 15483 * DTrace Driver Cookbook Functions 15484 */ 15485#if defined(sun) 15486/*ARGSUSED*/ 15487static int 15488dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 15489{ 15490 dtrace_provider_id_t id; 15491 dtrace_state_t *state = NULL; 15492 dtrace_enabling_t *enab; 15493 15494 mutex_enter(&cpu_lock); 15495 mutex_enter(&dtrace_provider_lock); 15496 mutex_enter(&dtrace_lock); 15497 15498 if (ddi_soft_state_init(&dtrace_softstate, 15499 sizeof (dtrace_state_t), 0) != 0) { 15500 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 15501 mutex_exit(&cpu_lock); 15502 mutex_exit(&dtrace_provider_lock); 15503 mutex_exit(&dtrace_lock); 15504 return (DDI_FAILURE); 15505 } 15506 15507 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 15508 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 15509 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 15510 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 15511 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 15512 ddi_remove_minor_node(devi, NULL); 15513 ddi_soft_state_fini(&dtrace_softstate); 15514 mutex_exit(&cpu_lock); 15515 mutex_exit(&dtrace_provider_lock); 15516 mutex_exit(&dtrace_lock); 15517 return (DDI_FAILURE); 15518 } 15519 15520 ddi_report_dev(devi); 15521 dtrace_devi = devi; 15522 15523 dtrace_modload = dtrace_module_loaded; 15524 dtrace_modunload = dtrace_module_unloaded; 15525 dtrace_cpu_init = dtrace_cpu_setup_initial; 15526 dtrace_helpers_cleanup = dtrace_helpers_destroy; 15527 dtrace_helpers_fork = dtrace_helpers_duplicate; 15528 dtrace_cpustart_init = dtrace_suspend; 15529 dtrace_cpustart_fini = dtrace_resume; 15530 dtrace_debugger_init = dtrace_suspend; 15531 dtrace_debugger_fini = dtrace_resume; 15532 15533 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 15534 15535 ASSERT(MUTEX_HELD(&cpu_lock)); 15536 15537 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 15538 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 15539 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 15540 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 15541 VM_SLEEP | VMC_IDENTIFIER); 15542 
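	/*
	 * A single-threaded task queue, used (among other things) by
	 * dtrace_module_loaded() to defer enabling matches that cannot be
	 * performed while cpu_lock may be held by the module-load path.
	 */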
dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 15543 1, INT_MAX, 0); 15544 15545 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 15546 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 15547 NULL, NULL, NULL, NULL, NULL, 0); 15548 15549 ASSERT(MUTEX_HELD(&cpu_lock)); 15550 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 15551 offsetof(dtrace_probe_t, dtpr_nextmod), 15552 offsetof(dtrace_probe_t, dtpr_prevmod)); 15553 15554 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 15555 offsetof(dtrace_probe_t, dtpr_nextfunc), 15556 offsetof(dtrace_probe_t, dtpr_prevfunc)); 15557 15558 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 15559 offsetof(dtrace_probe_t, dtpr_nextname), 15560 offsetof(dtrace_probe_t, dtpr_prevname)); 15561 15562 if (dtrace_retain_max < 1) { 15563 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 15564 "setting to 1", dtrace_retain_max); 15565 dtrace_retain_max = 1; 15566 } 15567 15568 /* 15569 * Now discover our toxic ranges. 15570 */ 15571 dtrace_toxic_ranges(dtrace_toxrange_add); 15572 15573 /* 15574 * Before we register ourselves as a provider to our own framework, 15575 * we would like to assert that dtrace_provider is NULL -- but that's 15576 * not true if we were loaded as a dependency of a DTrace provider. 15577 * Once we've registered, we can assert that dtrace_provider is our 15578 * pseudo provider. 15579 */ 15580 (void) dtrace_register("dtrace", &dtrace_provider_attr, 15581 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 15582 15583 ASSERT(dtrace_provider != NULL); 15584 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 15585 15586 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 15587 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 15588 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 15589 dtrace_provider, NULL, NULL, "END", 0, NULL); 15590 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 15591 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 15592 15593 dtrace_anon_property(); 15594 mutex_exit(&cpu_lock); 15595 15596 /* 15597 * If DTrace helper tracing is enabled, we need to allocate the 15598 * trace buffer and initialize the values. 15599 */ 15600 if (dtrace_helptrace_enabled) { 15601 ASSERT(dtrace_helptrace_buffer == NULL); 15602 dtrace_helptrace_buffer = 15603 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 15604 dtrace_helptrace_next = 0; 15605 } 15606 15607 /* 15608 * If there are already providers, we must ask them to provide their 15609 * probes, and then match any anonymous enabling against them. Note 15610 * that there should be no other retained enablings at this time: 15611 * the only retained enablings at this time should be the anonymous 15612 * enabling. 15613 */ 15614 if (dtrace_anon.dta_enabling != NULL) { 15615 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 15616 15617 dtrace_enabling_provide(NULL); 15618 state = dtrace_anon.dta_state; 15619 15620 /* 15621 * We couldn't hold cpu_lock across the above call to 15622 * dtrace_enabling_provide(), but we must hold it to actually 15623 * enable the probes. We have to drop all of our locks, pick 15624 * up cpu_lock, and regain our locks before matching the 15625 * retained anonymous enabling. 
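		 *
		 * (The required ordering is cpu_lock, then
		 * dtrace_provider_lock, then dtrace_lock -- the same order
		 * in which they are reacquired below -- so acquiring
		 * cpu_lock while the other two are held would invert it.)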
15626 */ 15627 mutex_exit(&dtrace_lock); 15628 mutex_exit(&dtrace_provider_lock); 15629 15630 mutex_enter(&cpu_lock); 15631 mutex_enter(&dtrace_provider_lock); 15632 mutex_enter(&dtrace_lock); 15633 15634 if ((enab = dtrace_anon.dta_enabling) != NULL) 15635 (void) dtrace_enabling_match(enab, NULL); 15636 15637 mutex_exit(&cpu_lock); 15638 } 15639 15640 mutex_exit(&dtrace_lock); 15641 mutex_exit(&dtrace_provider_lock); 15642 15643 if (state != NULL) { 15644 /* 15645 * If we created any anonymous state, set it going now. 15646 */ 15647 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 15648 } 15649 15650 return (DDI_SUCCESS); 15651} 15652#endif 15653 15654#if !defined(sun) 15655#if __FreeBSD_version >= 800039 15656static void dtrace_dtr(void *); 15657#endif 15658#endif 15659 15660/*ARGSUSED*/ 15661static int 15662#if defined(sun) 15663dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 15664#else 15665dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 15666#endif 15667{ 15668 dtrace_state_t *state; 15669 uint32_t priv; 15670 uid_t uid; 15671 zoneid_t zoneid; 15672 15673#if defined(sun) 15674 if (getminor(*devp) == DTRACEMNRN_HELPER) 15675 return (0); 15676 15677 /* 15678 * If this wasn't an open with the "helper" minor, then it must be 15679 * the "dtrace" minor. 15680 */ 15681 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 15682#else 15683 cred_t *cred_p = NULL; 15684 15685#if __FreeBSD_version < 800039 15686 /* 15687 * The first minor device is the one that is cloned so there is 15688 * nothing more to do here. 15689 */ 15690 if (dev2unit(dev) == 0) 15691 return 0; 15692 15693 /* 15694 * Devices are cloned, so if the DTrace state has already 15695 * been allocated, that means this device belongs to a 15696 * different client. Each client should open '/dev/dtrace' 15697 * to get a cloned device. 15698 */ 15699 if (dev->si_drv1 != NULL) 15700 return (EBUSY); 15701#endif 15702 15703 cred_p = dev->si_cred; 15704#endif 15705 15706 /* 15707 * If no DTRACE_PRIV_* bits are set in the credential, then the 15708 * caller lacks sufficient permission to do anything with DTrace. 15709 */ 15710 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 15711 if (priv == DTRACE_PRIV_NONE) { 15712#if !defined(sun) 15713#if __FreeBSD_version < 800039 15714 /* Destroy the cloned device. */ 15715 destroy_dev(dev); 15716#endif 15717#endif 15718 15719 return (EACCES); 15720 } 15721 15722 /* 15723 * Ask all providers to provide all their probes. 15724 */ 15725 mutex_enter(&dtrace_provider_lock); 15726 dtrace_probe_provide(NULL, NULL); 15727 mutex_exit(&dtrace_provider_lock); 15728 15729 mutex_enter(&cpu_lock); 15730 mutex_enter(&dtrace_lock); 15731 dtrace_opens++; 15732 dtrace_membar_producer(); 15733 15734#if defined(sun) 15735 /* 15736 * If the kernel debugger is active (that is, if the kernel debugger 15737 * modified text in some way), we won't allow the open. 
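	 * The complement of this is in the close path: when dtrace_opens
	 * drops back to zero, KDI_DTSET_DTRACE_DEACTIVATE informs the
	 * debugger that DTrace is inactive again.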
15738 */ 15739 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15740 dtrace_opens--; 15741 mutex_exit(&cpu_lock); 15742 mutex_exit(&dtrace_lock); 15743 return (EBUSY); 15744 } 15745 15746 state = dtrace_state_create(devp, cred_p); 15747#else 15748 state = dtrace_state_create(dev); 15749#if __FreeBSD_version < 800039 15750 dev->si_drv1 = state; 15751#else 15752 devfs_set_cdevpriv(state, dtrace_dtr); 15753#endif 15754#endif 15755 15756 mutex_exit(&cpu_lock); 15757 15758 if (state == NULL) { 15759#if defined(sun) 15760 if (--dtrace_opens == 0) 15761 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15762#else 15763 --dtrace_opens; 15764#endif 15765 mutex_exit(&dtrace_lock); 15766#if !defined(sun) 15767#if __FreeBSD_version < 800039 15768 /* Destroy the cloned device. */ 15769 destroy_dev(dev); 15770#endif 15771#endif 15772 return (EAGAIN); 15773 } 15774 15775 mutex_exit(&dtrace_lock); 15776 15777 return (0); 15778} 15779 15780/*ARGSUSED*/ 15781#if defined(sun) 15782static int 15783dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 15784#elif __FreeBSD_version < 800039 15785static int 15786dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td) 15787#else 15788static void 15789dtrace_dtr(void *data) 15790#endif 15791{ 15792#if defined(sun) 15793 minor_t minor = getminor(dev); 15794 dtrace_state_t *state; 15795 15796 if (minor == DTRACEMNRN_HELPER) 15797 return (0); 15798 15799 state = ddi_get_soft_state(dtrace_softstate, minor); 15800#else 15801#if __FreeBSD_version < 800039 15802 dtrace_state_t *state = dev->si_drv1; 15803 15804 /* Check if this is not a cloned device. */ 15805 if (dev2unit(dev) == 0) 15806 return (0); 15807#else 15808 dtrace_state_t *state = data; 15809#endif 15810 15811#endif 15812 15813 mutex_enter(&cpu_lock); 15814 mutex_enter(&dtrace_lock); 15815 15816 if (state != NULL) { 15817 if (state->dts_anon) { 15818 /* 15819 * There is anonymous state. Destroy that first. 15820 */ 15821 ASSERT(dtrace_anon.dta_state == NULL); 15822 dtrace_state_destroy(state->dts_anon); 15823 } 15824 15825 dtrace_state_destroy(state); 15826 15827#if !defined(sun) 15828 kmem_free(state, 0); 15829#if __FreeBSD_version < 800039 15830 dev->si_drv1 = NULL; 15831#endif 15832#endif 15833 } 15834 15835 ASSERT(dtrace_opens > 0); 15836#if defined(sun) 15837 if (--dtrace_opens == 0) 15838 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15839#else 15840 --dtrace_opens; 15841#endif 15842 15843 mutex_exit(&dtrace_lock); 15844 mutex_exit(&cpu_lock); 15845 15846#if __FreeBSD_version < 800039 15847 /* Schedule this cloned device to be destroyed. */ 15848 destroy_dev_sched(dev); 15849#endif 15850 15851#if defined(sun) || __FreeBSD_version < 800039 15852 return (0); 15853#endif 15854} 15855 15856#if defined(sun) 15857/*ARGSUSED*/ 15858static int 15859dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 15860{ 15861 int rval; 15862 dof_helper_t help, *dhp = NULL; 15863 15864 switch (cmd) { 15865 case DTRACEHIOC_ADDDOF: 15866 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 15867 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 15868 return (EFAULT); 15869 } 15870 15871 dhp = &help; 15872 arg = (intptr_t)help.dofhp_dof; 15873 /*FALLTHROUGH*/ 15874 15875 case DTRACEHIOC_ADD: { 15876 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 15877 15878 if (dof == NULL) 15879 return (rval); 15880 15881 mutex_enter(&dtrace_lock); 15882 15883 /* 15884 * dtrace_helper_slurp() takes responsibility for the dof -- 15885 * it may free it now or it may save it and free it later. 
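		 *
		 * A hypothetical userland sketch of what reaches this code
		 * (the buffer "my_dof" and the descriptor setup are
		 * illustrative, matching the copyin above):
		 *
		 *	dof_helper_t dh;
		 *	bzero(&dh, sizeof (dh));
		 *	dh.dofhp_dof = (uint64_t)(uintptr_t)my_dof;
		 *	(void) ioctl(helper_fd, DTRACEHIOC_ADDDOF, &dh);
		 *
		 * The generation number passed back through *rv is the
		 * value a later DTRACEHIOC_REMOVE expects as its argument.
		 */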
15886 */ 15887 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 15888 *rv = rval; 15889 rval = 0; 15890 } else { 15891 rval = EINVAL; 15892 } 15893 15894 mutex_exit(&dtrace_lock); 15895 return (rval); 15896 } 15897 15898 case DTRACEHIOC_REMOVE: { 15899 mutex_enter(&dtrace_lock); 15900 rval = dtrace_helper_destroygen(arg); 15901 mutex_exit(&dtrace_lock); 15902 15903 return (rval); 15904 } 15905 15906 default: 15907 break; 15908 } 15909 15910 return (ENOTTY); 15911} 15912 15913/*ARGSUSED*/ 15914static int 15915dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 15916{ 15917 minor_t minor = getminor(dev); 15918 dtrace_state_t *state; 15919 int rval; 15920 15921 if (minor == DTRACEMNRN_HELPER) 15922 return (dtrace_ioctl_helper(cmd, arg, rv)); 15923 15924 state = ddi_get_soft_state(dtrace_softstate, minor); 15925 15926 if (state->dts_anon) { 15927 ASSERT(dtrace_anon.dta_state == NULL); 15928 state = state->dts_anon; 15929 } 15930 15931 switch (cmd) { 15932 case DTRACEIOC_PROVIDER: { 15933 dtrace_providerdesc_t pvd; 15934 dtrace_provider_t *pvp; 15935 15936 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 15937 return (EFAULT); 15938 15939 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 15940 mutex_enter(&dtrace_provider_lock); 15941 15942 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 15943 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 15944 break; 15945 } 15946 15947 mutex_exit(&dtrace_provider_lock); 15948 15949 if (pvp == NULL) 15950 return (ESRCH); 15951 15952 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 15953 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 15954 15955 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 15956 return (EFAULT); 15957 15958 return (0); 15959 } 15960 15961 case DTRACEIOC_EPROBE: { 15962 dtrace_eprobedesc_t epdesc; 15963 dtrace_ecb_t *ecb; 15964 dtrace_action_t *act; 15965 void *buf; 15966 size_t size; 15967 uintptr_t dest; 15968 int nrecs; 15969 15970 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 15971 return (EFAULT); 15972 15973 mutex_enter(&dtrace_lock); 15974 15975 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 15976 mutex_exit(&dtrace_lock); 15977 return (EINVAL); 15978 } 15979 15980 if (ecb->dte_probe == NULL) { 15981 mutex_exit(&dtrace_lock); 15982 return (EINVAL); 15983 } 15984 15985 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 15986 epdesc.dtepd_uarg = ecb->dte_uarg; 15987 epdesc.dtepd_size = ecb->dte_size; 15988 15989 nrecs = epdesc.dtepd_nrecs; 15990 epdesc.dtepd_nrecs = 0; 15991 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15992 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15993 continue; 15994 15995 epdesc.dtepd_nrecs++; 15996 } 15997 15998 /* 15999 * Now that we have the size, we need to allocate a temporary 16000 * buffer in which to store the complete description. We need 16001 * the temporary buffer to be able to drop dtrace_lock() 16002 * across the copyout(), below. 
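		 *
		 * (copyout() may fault on the user address and sleep; it
		 * therefore cannot be called with dtrace_lock held.)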
16003 */ 16004 size = sizeof (dtrace_eprobedesc_t) + 16005 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 16006 16007 buf = kmem_alloc(size, KM_SLEEP); 16008 dest = (uintptr_t)buf; 16009 16010 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 16011 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 16012 16013 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 16014 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 16015 continue; 16016 16017 if (nrecs-- == 0) 16018 break; 16019 16020 bcopy(&act->dta_rec, (void *)dest, 16021 sizeof (dtrace_recdesc_t)); 16022 dest += sizeof (dtrace_recdesc_t); 16023 } 16024 16025 mutex_exit(&dtrace_lock); 16026 16027 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 16028 kmem_free(buf, size); 16029 return (EFAULT); 16030 } 16031 16032 kmem_free(buf, size); 16033 return (0); 16034 } 16035 16036 case DTRACEIOC_AGGDESC: { 16037 dtrace_aggdesc_t aggdesc; 16038 dtrace_action_t *act; 16039 dtrace_aggregation_t *agg; 16040 int nrecs; 16041 uint32_t offs; 16042 dtrace_recdesc_t *lrec; 16043 void *buf; 16044 size_t size; 16045 uintptr_t dest; 16046 16047 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 16048 return (EFAULT); 16049 16050 mutex_enter(&dtrace_lock); 16051 16052 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 16053 mutex_exit(&dtrace_lock); 16054 return (EINVAL); 16055 } 16056 16057 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 16058 16059 nrecs = aggdesc.dtagd_nrecs; 16060 aggdesc.dtagd_nrecs = 0; 16061 16062 offs = agg->dtag_base; 16063 lrec = &agg->dtag_action.dta_rec; 16064 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 16065 16066 for (act = agg->dtag_first; ; act = act->dta_next) { 16067 ASSERT(act->dta_intuple || 16068 DTRACEACT_ISAGG(act->dta_kind)); 16069 16070 /* 16071 * If this action has a record size of zero, it 16072 * denotes an argument to the aggregating action. 16073 * Because the presence of this record doesn't (or 16074 * shouldn't) affect the way the data is interpreted, 16075 * we don't copy it out to save user-level the 16076 * confusion of dealing with a zero-length record. 16077 */ 16078 if (act->dta_rec.dtrd_size == 0) { 16079 ASSERT(agg->dtag_hasarg); 16080 continue; 16081 } 16082 16083 aggdesc.dtagd_nrecs++; 16084 16085 if (act == &agg->dtag_action) 16086 break; 16087 } 16088 16089 /* 16090 * Now that we have the size, we need to allocate a temporary 16091 * buffer in which to store the complete description. We need 16092 * the temporary buffer to be able to drop dtrace_lock() 16093 * across the copyout(), below. 16094 */ 16095 size = sizeof (dtrace_aggdesc_t) + 16096 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 16097 16098 buf = kmem_alloc(size, KM_SLEEP); 16099 dest = (uintptr_t)buf; 16100 16101 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 16102 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 16103 16104 for (act = agg->dtag_first; ; act = act->dta_next) { 16105 dtrace_recdesc_t rec = act->dta_rec; 16106 16107 /* 16108 * See the comment in the above loop for why we pass 16109 * over zero-length records. 
16110 */ 16111 if (rec.dtrd_size == 0) { 16112 ASSERT(agg->dtag_hasarg); 16113 continue; 16114 } 16115 16116 if (nrecs-- == 0) 16117 break; 16118 16119 rec.dtrd_offset -= offs; 16120 bcopy(&rec, (void *)dest, sizeof (rec)); 16121 dest += sizeof (dtrace_recdesc_t); 16122 16123 if (act == &agg->dtag_action) 16124 break; 16125 } 16126 16127 mutex_exit(&dtrace_lock); 16128 16129 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 16130 kmem_free(buf, size); 16131 return (EFAULT); 16132 } 16133 16134 kmem_free(buf, size); 16135 return (0); 16136 } 16137 16138 case DTRACEIOC_ENABLE: { 16139 dof_hdr_t *dof; 16140 dtrace_enabling_t *enab = NULL; 16141 dtrace_vstate_t *vstate; 16142 int err = 0; 16143 16144 *rv = 0; 16145 16146 /* 16147 * If a NULL argument has been passed, we take this as our 16148 * cue to reevaluate our enablings. 16149 */ 16150 if (arg == NULL) { 16151 dtrace_enabling_matchall(); 16152 16153 return (0); 16154 } 16155 16156 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 16157 return (rval); 16158 16159 mutex_enter(&cpu_lock); 16160 mutex_enter(&dtrace_lock); 16161 vstate = &state->dts_vstate; 16162 16163 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 16164 mutex_exit(&dtrace_lock); 16165 mutex_exit(&cpu_lock); 16166 dtrace_dof_destroy(dof); 16167 return (EBUSY); 16168 } 16169 16170 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 16171 mutex_exit(&dtrace_lock); 16172 mutex_exit(&cpu_lock); 16173 dtrace_dof_destroy(dof); 16174 return (EINVAL); 16175 } 16176 16177 if ((rval = dtrace_dof_options(dof, state)) != 0) { 16178 dtrace_enabling_destroy(enab); 16179 mutex_exit(&dtrace_lock); 16180 mutex_exit(&cpu_lock); 16181 dtrace_dof_destroy(dof); 16182 return (rval); 16183 } 16184 16185 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 16186 err = dtrace_enabling_retain(enab); 16187 } else { 16188 dtrace_enabling_destroy(enab); 16189 } 16190 16191 mutex_exit(&cpu_lock); 16192 mutex_exit(&dtrace_lock); 16193 dtrace_dof_destroy(dof); 16194 16195 return (err); 16196 } 16197 16198 case DTRACEIOC_REPLICATE: { 16199 dtrace_repldesc_t desc; 16200 dtrace_probedesc_t *match = &desc.dtrpd_match; 16201 dtrace_probedesc_t *create = &desc.dtrpd_create; 16202 int err; 16203 16204 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16205 return (EFAULT); 16206 16207 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 16208 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 16209 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 16210 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 16211 16212 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 16213 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 16214 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 16215 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 16216 16217 mutex_enter(&dtrace_lock); 16218 err = dtrace_enabling_replicate(state, match, create); 16219 mutex_exit(&dtrace_lock); 16220 16221 return (err); 16222 } 16223 16224 case DTRACEIOC_PROBEMATCH: 16225 case DTRACEIOC_PROBES: { 16226 dtrace_probe_t *probe = NULL; 16227 dtrace_probedesc_t desc; 16228 dtrace_probekey_t pkey; 16229 dtrace_id_t i; 16230 int m = 0; 16231 uint32_t priv; 16232 uid_t uid; 16233 zoneid_t zoneid; 16234 16235 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16236 return (EFAULT); 16237 16238 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 16239 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 16240 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 16241 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 16242 16243 /* 16244 * Before we attempt to 
match this probe, we want to give 16245 * all providers the opportunity to provide it. 16246 */ 16247 if (desc.dtpd_id == DTRACE_IDNONE) { 16248 mutex_enter(&dtrace_provider_lock); 16249 dtrace_probe_provide(&desc, NULL); 16250 mutex_exit(&dtrace_provider_lock); 16251 desc.dtpd_id++; 16252 } 16253 16254 if (cmd == DTRACEIOC_PROBEMATCH) { 16255 dtrace_probekey(&desc, &pkey); 16256 pkey.dtpk_id = DTRACE_IDNONE; 16257 } 16258 16259 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 16260 16261 mutex_enter(&dtrace_lock); 16262 16263 if (cmd == DTRACEIOC_PROBEMATCH) { 16264 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 16265 if ((probe = dtrace_probes[i - 1]) != NULL && 16266 (m = dtrace_match_probe(probe, &pkey, 16267 priv, uid, zoneid)) != 0) 16268 break; 16269 } 16270 16271 if (m < 0) { 16272 mutex_exit(&dtrace_lock); 16273 return (EINVAL); 16274 } 16275 16276 } else { 16277 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 16278 if ((probe = dtrace_probes[i - 1]) != NULL && 16279 dtrace_match_priv(probe, priv, uid, zoneid)) 16280 break; 16281 } 16282 } 16283 16284 if (probe == NULL) { 16285 mutex_exit(&dtrace_lock); 16286 return (ESRCH); 16287 } 16288 16289 dtrace_probe_description(probe, &desc); 16290 mutex_exit(&dtrace_lock); 16291 16292 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16293 return (EFAULT); 16294 16295 return (0); 16296 } 16297 16298 case DTRACEIOC_PROBEARG: { 16299 dtrace_argdesc_t desc; 16300 dtrace_probe_t *probe; 16301 dtrace_provider_t *prov; 16302 16303 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16304 return (EFAULT); 16305 16306 if (desc.dtargd_id == DTRACE_IDNONE) 16307 return (EINVAL); 16308 16309 if (desc.dtargd_ndx == DTRACE_ARGNONE) 16310 return (EINVAL); 16311 16312 mutex_enter(&dtrace_provider_lock); 16313 mutex_enter(&mod_lock); 16314 mutex_enter(&dtrace_lock); 16315 16316 if (desc.dtargd_id > dtrace_nprobes) { 16317 mutex_exit(&dtrace_lock); 16318 mutex_exit(&mod_lock); 16319 mutex_exit(&dtrace_provider_lock); 16320 return (EINVAL); 16321 } 16322 16323 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 16324 mutex_exit(&dtrace_lock); 16325 mutex_exit(&mod_lock); 16326 mutex_exit(&dtrace_provider_lock); 16327 return (EINVAL); 16328 } 16329 16330 mutex_exit(&dtrace_lock); 16331 16332 prov = probe->dtpr_provider; 16333 16334 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 16335 /* 16336 * There isn't any typed information for this probe. 16337 * Set the argument number to DTRACE_ARGNONE. 
16338 */ 16339 desc.dtargd_ndx = DTRACE_ARGNONE; 16340 } else { 16341 desc.dtargd_native[0] = '\0'; 16342 desc.dtargd_xlate[0] = '\0'; 16343 desc.dtargd_mapping = desc.dtargd_ndx; 16344 16345 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 16346 probe->dtpr_id, probe->dtpr_arg, &desc); 16347 } 16348 16349 mutex_exit(&mod_lock); 16350 mutex_exit(&dtrace_provider_lock); 16351 16352 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16353 return (EFAULT); 16354 16355 return (0); 16356 } 16357 16358 case DTRACEIOC_GO: { 16359 processorid_t cpuid; 16360 rval = dtrace_state_go(state, &cpuid); 16361 16362 if (rval != 0) 16363 return (rval); 16364 16365 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 16366 return (EFAULT); 16367 16368 return (0); 16369 } 16370 16371 case DTRACEIOC_STOP: { 16372 processorid_t cpuid; 16373 16374 mutex_enter(&dtrace_lock); 16375 rval = dtrace_state_stop(state, &cpuid); 16376 mutex_exit(&dtrace_lock); 16377 16378 if (rval != 0) 16379 return (rval); 16380 16381 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 16382 return (EFAULT); 16383 16384 return (0); 16385 } 16386 16387 case DTRACEIOC_DOFGET: { 16388 dof_hdr_t hdr, *dof; 16389 uint64_t len; 16390 16391 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 16392 return (EFAULT); 16393 16394 mutex_enter(&dtrace_lock); 16395 dof = dtrace_dof_create(state); 16396 mutex_exit(&dtrace_lock); 16397 16398 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 16399 rval = copyout(dof, (void *)arg, len); 16400 dtrace_dof_destroy(dof); 16401 16402 return (rval == 0 ? 0 : EFAULT); 16403 } 16404 16405 case DTRACEIOC_AGGSNAP: 16406 case DTRACEIOC_BUFSNAP: { 16407 dtrace_bufdesc_t desc; 16408 caddr_t cached; 16409 dtrace_buffer_t *buf; 16410 16411 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16412 return (EFAULT); 16413 16414 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 16415 return (EINVAL); 16416 16417 mutex_enter(&dtrace_lock); 16418 16419 if (cmd == DTRACEIOC_BUFSNAP) { 16420 buf = &state->dts_buffer[desc.dtbd_cpu]; 16421 } else { 16422 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 16423 } 16424 16425 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 16426 size_t sz = buf->dtb_offset; 16427 16428 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 16429 mutex_exit(&dtrace_lock); 16430 return (EBUSY); 16431 } 16432 16433 /* 16434 * If this buffer has already been consumed, we're 16435 * going to indicate that there's nothing left here 16436 * to consume. 16437 */ 16438 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 16439 mutex_exit(&dtrace_lock); 16440 16441 desc.dtbd_size = 0; 16442 desc.dtbd_drops = 0; 16443 desc.dtbd_errors = 0; 16444 desc.dtbd_oldest = 0; 16445 sz = sizeof (desc); 16446 16447 if (copyout(&desc, (void *)arg, sz) != 0) 16448 return (EFAULT); 16449 16450 return (0); 16451 } 16452 16453 /* 16454 * If this is a ring buffer that has wrapped, we want 16455 * to copy the whole thing out. 
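			 * The consumer can then find the oldest record via
			 * dtbd_oldest, which is set from dtb_xamot_offset
			 * below.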
16456 */ 16457 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 16458 dtrace_buffer_polish(buf); 16459 sz = buf->dtb_size; 16460 } 16461 16462 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 16463 mutex_exit(&dtrace_lock); 16464 return (EFAULT); 16465 } 16466 16467 desc.dtbd_size = sz; 16468 desc.dtbd_drops = buf->dtb_drops; 16469 desc.dtbd_errors = buf->dtb_errors; 16470 desc.dtbd_oldest = buf->dtb_xamot_offset; 16471 desc.dtbd_timestamp = dtrace_gethrtime(); 16472 16473 mutex_exit(&dtrace_lock); 16474 16475 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16476 return (EFAULT); 16477 16478 buf->dtb_flags |= DTRACEBUF_CONSUMED; 16479 16480 return (0); 16481 } 16482 16483 if (buf->dtb_tomax == NULL) { 16484 ASSERT(buf->dtb_xamot == NULL); 16485 mutex_exit(&dtrace_lock); 16486 return (ENOENT); 16487 } 16488 16489 cached = buf->dtb_tomax; 16490 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 16491 16492 dtrace_xcall(desc.dtbd_cpu, 16493 (dtrace_xcall_t)dtrace_buffer_switch, buf); 16494 16495 state->dts_errors += buf->dtb_xamot_errors; 16496 16497 /* 16498 * If the buffers did not actually switch, then the cross call 16499 * did not take place -- presumably because the given CPU is 16500 * not in the ready set. If this is the case, we'll return 16501 * ENOENT. 16502 */ 16503 if (buf->dtb_tomax == cached) { 16504 ASSERT(buf->dtb_xamot != cached); 16505 mutex_exit(&dtrace_lock); 16506 return (ENOENT); 16507 } 16508 16509 ASSERT(cached == buf->dtb_xamot); 16510 16511 /* 16512 * We have our snapshot; now copy it out. 16513 */ 16514 if (copyout(buf->dtb_xamot, desc.dtbd_data, 16515 buf->dtb_xamot_offset) != 0) { 16516 mutex_exit(&dtrace_lock); 16517 return (EFAULT); 16518 } 16519 16520 desc.dtbd_size = buf->dtb_xamot_offset; 16521 desc.dtbd_drops = buf->dtb_xamot_drops; 16522 desc.dtbd_errors = buf->dtb_xamot_errors; 16523 desc.dtbd_oldest = 0; 16524 desc.dtbd_timestamp = buf->dtb_switched; 16525 16526 mutex_exit(&dtrace_lock); 16527 16528 /* 16529 * Finally, copy out the buffer description. 16530 */ 16531 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16532 return (EFAULT); 16533 16534 return (0); 16535 } 16536 16537 case DTRACEIOC_CONF: { 16538 dtrace_conf_t conf; 16539 16540 bzero(&conf, sizeof (conf)); 16541 conf.dtc_difversion = DIF_VERSION; 16542 conf.dtc_difintregs = DIF_DIR_NREGS; 16543 conf.dtc_diftupregs = DIF_DTR_NREGS; 16544 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 16545 16546 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 16547 return (EFAULT); 16548 16549 return (0); 16550 } 16551 16552 case DTRACEIOC_STATUS: { 16553 dtrace_status_t stat; 16554 dtrace_dstate_t *dstate; 16555 int i, j; 16556 uint64_t nerrs; 16557 16558 /* 16559 * See the comment in dtrace_state_deadman() for the reason 16560 * for setting dts_laststatus to INT64_MAX before setting 16561 * it to the correct value. 
16562 */ 16563 state->dts_laststatus = INT64_MAX; 16564 dtrace_membar_producer(); 16565 state->dts_laststatus = dtrace_gethrtime(); 16566 16567 bzero(&stat, sizeof (stat)); 16568 16569 mutex_enter(&dtrace_lock); 16570 16571 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 16572 mutex_exit(&dtrace_lock); 16573 return (ENOENT); 16574 } 16575 16576 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 16577 stat.dtst_exiting = 1; 16578 16579 nerrs = state->dts_errors; 16580 dstate = &state->dts_vstate.dtvs_dynvars; 16581 16582 for (i = 0; i < NCPU; i++) { 16583 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 16584 16585 stat.dtst_dyndrops += dcpu->dtdsc_drops; 16586 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 16587 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 16588 16589 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 16590 stat.dtst_filled++; 16591 16592 nerrs += state->dts_buffer[i].dtb_errors; 16593 16594 for (j = 0; j < state->dts_nspeculations; j++) { 16595 dtrace_speculation_t *spec; 16596 dtrace_buffer_t *buf; 16597 16598 spec = &state->dts_speculations[j]; 16599 buf = &spec->dtsp_buffer[i]; 16600 stat.dtst_specdrops += buf->dtb_xamot_drops; 16601 } 16602 } 16603 16604 stat.dtst_specdrops_busy = state->dts_speculations_busy; 16605 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 16606 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 16607 stat.dtst_dblerrors = state->dts_dblerrors; 16608 stat.dtst_killed = 16609 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 16610 stat.dtst_errors = nerrs; 16611 16612 mutex_exit(&dtrace_lock); 16613 16614 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 16615 return (EFAULT); 16616 16617 return (0); 16618 } 16619 16620 case DTRACEIOC_FORMAT: { 16621 dtrace_fmtdesc_t fmt; 16622 char *str; 16623 int len; 16624 16625 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 16626 return (EFAULT); 16627 16628 mutex_enter(&dtrace_lock); 16629 16630 if (fmt.dtfd_format == 0 || 16631 fmt.dtfd_format > state->dts_nformats) { 16632 mutex_exit(&dtrace_lock); 16633 return (EINVAL); 16634 } 16635 16636 /* 16637 * Format strings are allocated contiguously and they are 16638 * never freed; if a format index is less than the number 16639 * of formats, we can assert that the format map is non-NULL 16640 * and that the format for the specified index is non-NULL. 
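		 *
		 * The length handling below implies a two-pass consumer
		 * protocol: call DTRACEIOC_FORMAT once with dtfd_length of
		 * zero to learn the required size, then again with
		 * dtfd_string pointing at a buffer of at least that size.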
16641 */ 16642 ASSERT(state->dts_formats != NULL); 16643 str = state->dts_formats[fmt.dtfd_format - 1]; 16644 ASSERT(str != NULL); 16645 16646 len = strlen(str) + 1; 16647 16648 if (len > fmt.dtfd_length) { 16649 fmt.dtfd_length = len; 16650 16651 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { 16652 mutex_exit(&dtrace_lock); 16653 return (EINVAL); 16654 } 16655 } else { 16656 if (copyout(str, fmt.dtfd_string, len) != 0) { 16657 mutex_exit(&dtrace_lock); 16658 return (EINVAL); 16659 } 16660 } 16661 16662 mutex_exit(&dtrace_lock); 16663 return (0); 16664 } 16665 16666 default: 16667 break; 16668 } 16669 16670 return (ENOTTY); 16671} 16672 16673/*ARGSUSED*/ 16674static int 16675dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 16676{ 16677 dtrace_state_t *state; 16678 16679 switch (cmd) { 16680 case DDI_DETACH: 16681 break; 16682 16683 case DDI_SUSPEND: 16684 return (DDI_SUCCESS); 16685 16686 default: 16687 return (DDI_FAILURE); 16688 } 16689 16690 mutex_enter(&cpu_lock); 16691 mutex_enter(&dtrace_provider_lock); 16692 mutex_enter(&dtrace_lock); 16693 16694 ASSERT(dtrace_opens == 0); 16695 16696 if (dtrace_helpers > 0) { 16697 mutex_exit(&dtrace_provider_lock); 16698 mutex_exit(&dtrace_lock); 16699 mutex_exit(&cpu_lock); 16700 return (DDI_FAILURE); 16701 } 16702 16703 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) { 16704 mutex_exit(&dtrace_provider_lock); 16705 mutex_exit(&dtrace_lock); 16706 mutex_exit(&cpu_lock); 16707 return (DDI_FAILURE); 16708 } 16709 16710 dtrace_provider = NULL; 16711 16712 if ((state = dtrace_anon_grab()) != NULL) { 16713 /* 16714 * If there were ECBs on this state, the provider should 16715 * have not been allowed to detach; assert that there is 16716 * none. 16717 */ 16718 ASSERT(state->dts_necbs == 0); 16719 dtrace_state_destroy(state); 16720 16721 /* 16722 * If we're being detached with anonymous state, we need to 16723 * indicate to the kernel debugger that DTrace is now inactive. 
16724 */ 16725 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 16726 } 16727 16728 bzero(&dtrace_anon, sizeof (dtrace_anon_t)); 16729 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 16730 dtrace_cpu_init = NULL; 16731 dtrace_helpers_cleanup = NULL; 16732 dtrace_helpers_fork = NULL; 16733 dtrace_cpustart_init = NULL; 16734 dtrace_cpustart_fini = NULL; 16735 dtrace_debugger_init = NULL; 16736 dtrace_debugger_fini = NULL; 16737 dtrace_modload = NULL; 16738 dtrace_modunload = NULL; 16739 16740 mutex_exit(&cpu_lock); 16741 16742 if (dtrace_helptrace_enabled) { 16743 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize); 16744 dtrace_helptrace_buffer = NULL; 16745 } 16746 16747 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *)); 16748 dtrace_probes = NULL; 16749 dtrace_nprobes = 0; 16750 16751 dtrace_hash_destroy(dtrace_bymod); 16752 dtrace_hash_destroy(dtrace_byfunc); 16753 dtrace_hash_destroy(dtrace_byname); 16754 dtrace_bymod = NULL; 16755 dtrace_byfunc = NULL; 16756 dtrace_byname = NULL; 16757 16758 kmem_cache_destroy(dtrace_state_cache); 16759 vmem_destroy(dtrace_minor); 16760 vmem_destroy(dtrace_arena); 16761 16762 if (dtrace_toxrange != NULL) { 16763 kmem_free(dtrace_toxrange, 16764 dtrace_toxranges_max * sizeof (dtrace_toxrange_t)); 16765 dtrace_toxrange = NULL; 16766 dtrace_toxranges = 0; 16767 dtrace_toxranges_max = 0; 16768 } 16769 16770 ddi_remove_minor_node(dtrace_devi, NULL); 16771 dtrace_devi = NULL; 16772 16773 ddi_soft_state_fini(&dtrace_softstate); 16774 16775 ASSERT(dtrace_vtime_references == 0); 16776 ASSERT(dtrace_opens == 0); 16777 ASSERT(dtrace_retained == NULL); 16778 16779 mutex_exit(&dtrace_lock); 16780 mutex_exit(&dtrace_provider_lock); 16781 16782 /* 16783 * We don't destroy the task queue until after we have dropped our 16784 * locks (taskq_destroy() may block on running tasks). To prevent 16785 * attempting to do work after we have effectively detached but before 16786 * the task queue has been destroyed, all tasks dispatched via the 16787 * task queue must check that DTrace is still attached before 16788 * performing any operation. 
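 * (As a sketch only -- each task performs its own check -- a dispatched
 * task might begin:
 *
 *	mutex_enter(&dtrace_lock);
 *	if (dtrace_devi == NULL) {
 *		mutex_exit(&dtrace_lock);
 *		return;
 *	}
 *
 * abandoning its work once the device, cleared above, is gone.)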
16789 */ 16790 taskq_destroy(dtrace_taskq); 16791 dtrace_taskq = NULL; 16792 16793 return (DDI_SUCCESS); 16794} 16795#endif 16796 16797#if defined(sun) 16798/*ARGSUSED*/ 16799static int 16800dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 16801{ 16802 int error; 16803 16804 switch (infocmd) { 16805 case DDI_INFO_DEVT2DEVINFO: 16806 *result = (void *)dtrace_devi; 16807 error = DDI_SUCCESS; 16808 break; 16809 case DDI_INFO_DEVT2INSTANCE: 16810 *result = (void *)0; 16811 error = DDI_SUCCESS; 16812 break; 16813 default: 16814 error = DDI_FAILURE; 16815 } 16816 return (error); 16817} 16818#endif 16819 16820#if defined(sun) 16821static struct cb_ops dtrace_cb_ops = { 16822 dtrace_open, /* open */ 16823 dtrace_close, /* close */ 16824 nulldev, /* strategy */ 16825 nulldev, /* print */ 16826 nodev, /* dump */ 16827 nodev, /* read */ 16828 nodev, /* write */ 16829 dtrace_ioctl, /* ioctl */ 16830 nodev, /* devmap */ 16831 nodev, /* mmap */ 16832 nodev, /* segmap */ 16833 nochpoll, /* poll */ 16834 ddi_prop_op, /* cb_prop_op */ 16835 0, /* streamtab */ 16836 D_NEW | D_MP /* Driver compatibility flag */ 16837}; 16838 16839static struct dev_ops dtrace_ops = { 16840 DEVO_REV, /* devo_rev */ 16841 0, /* refcnt */ 16842 dtrace_info, /* get_dev_info */ 16843 nulldev, /* identify */ 16844 nulldev, /* probe */ 16845 dtrace_attach, /* attach */ 16846 dtrace_detach, /* detach */ 16847 nodev, /* reset */ 16848 &dtrace_cb_ops, /* driver operations */ 16849 NULL, /* bus operations */ 16850 nodev /* dev power */ 16851}; 16852 16853static struct modldrv modldrv = { 16854 &mod_driverops, /* module type (this is a pseudo driver) */ 16855 "Dynamic Tracing", /* name of module */ 16856 &dtrace_ops, /* driver ops */ 16857}; 16858 16859static struct modlinkage modlinkage = { 16860 MODREV_1, 16861 (void *)&modldrv, 16862 NULL 16863}; 16864 16865int 16866_init(void) 16867{ 16868 return (mod_install(&modlinkage)); 16869} 16870 16871int 16872_info(struct modinfo *modinfop) 16873{ 16874 return (mod_info(&modlinkage, modinfop)); 16875} 16876 16877int 16878_fini(void) 16879{ 16880 return (mod_remove(&modlinkage)); 16881} 16882#else 16883 16884static d_ioctl_t dtrace_ioctl; 16885static d_ioctl_t dtrace_ioctl_helper; 16886static void dtrace_load(void *); 16887static int dtrace_unload(void); 16888#if __FreeBSD_version < 800039 16889static void dtrace_clone(void *, struct ucred *, char *, int , struct cdev **); 16890static struct clonedevs *dtrace_clones; /* Ptr to the array of cloned devices. */ 16891static eventhandler_tag eh_tag; /* Event handler tag. 
*/ 16892#else 16893static struct cdev *dtrace_dev; 16894static struct cdev *helper_dev; 16895#endif 16896 16897void dtrace_invop_init(void); 16898void dtrace_invop_uninit(void); 16899 16900static struct cdevsw dtrace_cdevsw = { 16901 .d_version = D_VERSION, 16902#if __FreeBSD_version < 800039 16903 .d_flags = D_TRACKCLOSE | D_NEEDMINOR, 16904 .d_close = dtrace_close, 16905#endif 16906 .d_ioctl = dtrace_ioctl, 16907 .d_open = dtrace_open, 16908 .d_name = "dtrace", 16909}; 16910 16911static struct cdevsw helper_cdevsw = { 16912 .d_version = D_VERSION, 16913 .d_ioctl = dtrace_ioctl_helper, 16914 .d_name = "helper", 16915}; 16916 16917#include <dtrace_anon.c> 16918#if __FreeBSD_version < 800039 16919#include <dtrace_clone.c> 16920#endif 16921#include <dtrace_ioctl.c> 16922#include <dtrace_load.c> 16923#include <dtrace_modevent.c> 16924#include <dtrace_sysctl.c> 16925#include <dtrace_unload.c> 16926#include <dtrace_vtime.c> 16927#include <dtrace_hacks.c> 16928#include <dtrace_isa.c> 16929 16930SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL); 16931SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL); 16932SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL); 16933 16934DEV_MODULE(dtrace, dtrace_modevent, NULL); 16935MODULE_VERSION(dtrace, 1); 16936MODULE_DEPEND(dtrace, cyclic, 1, 1, 1); 16937MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1); 16938#endif
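/*
 * For illustration (not part of the module itself): once this module is
 * loaded -- e.g. via "kldload dtraceall" on FreeBSD -- the control and
 * helper devices registered above become available, and consumers such as
 * dtrace(1) drive them through the DTRACEIOC_* ioctls handled earlier in
 * this file.
 */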
479#define DTRACE_INSCRATCH(mstate, alloc_sz) \ 480 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \ 481 (mstate)->dtms_scratch_ptr >= (alloc_sz)) 482 483#define DTRACE_LOADFUNC(bits) \ 484/*CSTYLED*/ \ 485uint##bits##_t \ 486dtrace_load##bits(uintptr_t addr) \ 487{ \ 488 size_t size = bits / NBBY; \ 489 /*CSTYLED*/ \ 490 uint##bits##_t rval; \ 491 int i; \ 492 volatile uint16_t *flags = (volatile uint16_t *) \ 493 &cpu_core[curcpu].cpuc_dtrace_flags; \ 494 \ 495 DTRACE_ALIGNCHECK(addr, size, flags); \ 496 \ 497 for (i = 0; i < dtrace_toxranges; i++) { \ 498 if (addr >= dtrace_toxrange[i].dtt_limit) \ 499 continue; \ 500 \ 501 if (addr + size <= dtrace_toxrange[i].dtt_base) \ 502 continue; \ 503 \ 504 /* \ 505 * This address falls within a toxic region; return 0. \ 506 */ \ 507 *flags |= CPU_DTRACE_BADADDR; \ 508 cpu_core[curcpu].cpuc_dtrace_illval = addr; \ 509 return (0); \ 510 } \ 511 \ 512 *flags |= CPU_DTRACE_NOFAULT; \ 513 /*CSTYLED*/ \ 514 rval = *((volatile uint##bits##_t *)addr); \ 515 *flags &= ~CPU_DTRACE_NOFAULT; \ 516 \ 517 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \ 518} 519 520#ifdef _LP64 521#define dtrace_loadptr dtrace_load64 522#else 523#define dtrace_loadptr dtrace_load32 524#endif 525 526#define DTRACE_DYNHASH_FREE 0 527#define DTRACE_DYNHASH_SINK 1 528#define DTRACE_DYNHASH_VALID 2 529 530#define DTRACE_MATCH_NEXT 0 531#define DTRACE_MATCH_DONE 1 532#define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0') 533#define DTRACE_STATE_ALIGN 64 534 535#define DTRACE_FLAGS2FLT(flags) \ 536 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \ 537 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \ 538 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \ 539 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \ 540 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \ 541 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \ 542 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \ 543 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \ 544 ((flags) & CPU_DTRACE_BADSTACK) ?
DTRACEFLT_BADSTACK : \ 545 DTRACEFLT_UNKNOWN) 546 547#define DTRACEACT_ISSTRING(act) \ 548 ((act)->dta_kind == DTRACEACT_DIFEXPR && \ 549 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) 550 551/* Function prototype definitions: */ 552static size_t dtrace_strlen(const char *, size_t); 553static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id); 554static void dtrace_enabling_provide(dtrace_provider_t *); 555static int dtrace_enabling_match(dtrace_enabling_t *, int *); 556static void dtrace_enabling_matchall(void); 557static void dtrace_enabling_reap(void); 558static dtrace_state_t *dtrace_anon_grab(void); 559static uint64_t dtrace_helper(int, dtrace_mstate_t *, 560 dtrace_state_t *, uint64_t, uint64_t); 561static dtrace_helpers_t *dtrace_helpers_create(proc_t *); 562static void dtrace_buffer_drop(dtrace_buffer_t *); 563static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when); 564static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t, 565 dtrace_state_t *, dtrace_mstate_t *); 566static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t, 567 dtrace_optval_t); 568static int dtrace_ecb_create_enable(dtrace_probe_t *, void *); 569static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *); 570uint16_t dtrace_load16(uintptr_t); 571uint32_t dtrace_load32(uintptr_t); 572uint64_t dtrace_load64(uintptr_t); 573uint8_t dtrace_load8(uintptr_t); 574void dtrace_dynvar_clean(dtrace_dstate_t *); 575dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *, 576 size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *); 577uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *); 578 579/* 580 * DTrace Probe Context Functions 581 * 582 * These functions are called from probe context. Because probe context is 583 * any context in which C may be called, arbitrary locks may be held, 584 * interrupts may be disabled, we may be in arbitrary dispatched state, etc. 585 * As a result, functions called from probe context may only call other DTrace 586 * support functions -- they may not interact at all with the system at large. 587 * (Note that the ASSERT macro is made probe-context safe by redefining it in 588 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary 589 * loads are to be performed from probe context, they _must_ be in terms of 590 * the safe dtrace_load*() variants. 591 * 592 * Some functions in this block are not actually called from probe context; 593 * for these functions, there will be a comment above the function reading 594 * "Note: not called from probe context." 595 */ 596void 597dtrace_panic(const char *format, ...) 598{ 599 va_list alist; 600 601 va_start(alist, format); 602 dtrace_vpanic(format, alist); 603 va_end(alist); 604} 605 606int 607dtrace_assfail(const char *a, const char *f, int l) 608{ 609 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l); 610 611 /* 612 * We just need something here that even the most clever compiler 613 * cannot optimize away. 614 */ 615 return (a[(uintptr_t)f]); 616} 617 618/* 619 * Atomically increment a specified error counter from probe context. 620 */ 621static void 622dtrace_error(uint32_t *counter) 623{ 624 /* 625 * Most counters stored to in probe context are per-CPU counters. 626 * However, there are some error conditions that are sufficiently 627 * arcane that they don't merit per-CPU storage.
If these counters 628 * are incremented concurrently on different CPUs, scalability will be 629 * adversely affected -- but we don't expect them to be white-hot in a 630 * correctly constructed enabling... 631 */ 632 uint32_t oval, nval; 633 634 do { 635 oval = *counter; 636 637 if ((nval = oval + 1) == 0) { 638 /* 639 * If the counter would wrap, set it to 1 -- assuring 640 * that the counter is never zero when we have seen 641 * errors. (The counter must be 32-bits because we 642 * aren't guaranteed a 64-bit compare&swap operation.) 643 * To save this code both the infamy of being fingered 644 * by a priggish news story and the indignity of being 645 * the target of a neo-puritan witch trial, we're 646 * carefully avoiding any colorful description of the 647 * likelihood of this condition -- but suffice it to 648 * say that it is only slightly more likely than the 649 * overflow of predicate cache IDs, as discussed in 650 * dtrace_predicate_create(). 651 */ 652 nval = 1; 653 } 654 } while (dtrace_cas32(counter, oval, nval) != oval); 655} 656 657/* 658 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a 659 * uint8_t, a uint16_t, a uint32_t and a uint64_t. 660 */ 661DTRACE_LOADFUNC(8) 662DTRACE_LOADFUNC(16) 663DTRACE_LOADFUNC(32) 664DTRACE_LOADFUNC(64) 665 666static int 667dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate) 668{ 669 if (dest < mstate->dtms_scratch_base) 670 return (0); 671 672 if (dest + size < dest) 673 return (0); 674 675 if (dest + size > mstate->dtms_scratch_ptr) 676 return (0); 677 678 return (1); 679} 680 681static int 682dtrace_canstore_statvar(uint64_t addr, size_t sz, 683 dtrace_statvar_t **svars, int nsvars) 684{ 685 int i; 686 687 for (i = 0; i < nsvars; i++) { 688 dtrace_statvar_t *svar = svars[i]; 689 690 if (svar == NULL || svar->dtsv_size == 0) 691 continue; 692 693 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size)) 694 return (1); 695 } 696 697 return (0); 698} 699 700/* 701 * Check to see if the address is within a memory region to which a store may 702 * be issued. This includes the DTrace scratch areas, and any DTrace variable 703 * region. The caller of dtrace_canstore() is responsible for performing any 704 * alignment checks that are needed before stores are actually executed. 705 */ 706static int 707dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 708 dtrace_vstate_t *vstate) 709{ 710 /* 711 * First, check to see if the address is in scratch space... 712 */ 713 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base, 714 mstate->dtms_scratch_size)) 715 return (1); 716 717 /* 718 * Now check to see if it's a dynamic variable. This check will pick 719 * up both thread-local variables and any global dynamically-allocated 720 * variables. 721 */ 722 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base, 723 vstate->dtvs_dynvars.dtds_size)) { 724 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 725 uintptr_t base = (uintptr_t)dstate->dtds_base + 726 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t)); 727 uintptr_t chunkoffs; 728 729 /* 730 * Before we assume that we can store here, we need to make 731 * sure that it isn't in our metadata -- storing to our 732 * dynamic variable metadata would corrupt our state. 
For 733 * the range to not include any dynamic variable metadata, 734 * it must: 735 * 736 * (1) Start above the hash table that is at the base of 737 * the dynamic variable space 738 * 739 * (2) Have a starting chunk offset that is beyond the 740 * dtrace_dynvar_t that is at the base of every chunk 741 * 742 * (3) Not span a chunk boundary 743 * 744 */ 745 if (addr < base) 746 return (0); 747 748 chunkoffs = (addr - base) % dstate->dtds_chunksize; 749 750 if (chunkoffs < sizeof (dtrace_dynvar_t)) 751 return (0); 752 753 if (chunkoffs + sz > dstate->dtds_chunksize) 754 return (0); 755 756 return (1); 757 } 758 759 /* 760 * Finally, check the static local and global variables. These checks 761 * take the longest, so we perform them last. 762 */ 763 if (dtrace_canstore_statvar(addr, sz, 764 vstate->dtvs_locals, vstate->dtvs_nlocals)) 765 return (1); 766 767 if (dtrace_canstore_statvar(addr, sz, 768 vstate->dtvs_globals, vstate->dtvs_nglobals)) 769 return (1); 770 771 return (0); 772} 773 774 775/* 776 * Convenience routine to check to see if the address is within a memory 777 * region in which a load may be issued given the user's privilege level; 778 * if not, it sets the appropriate error flags and loads 'addr' into the 779 * illegal value slot. 780 * 781 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement 782 * appropriate memory access protection. 783 */ 784static int 785dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 786 dtrace_vstate_t *vstate) 787{ 788 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 789 790 /* 791 * If we hold the privilege to read from kernel memory, then 792 * everything is readable. 793 */ 794 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 795 return (1); 796 797 /* 798 * You can obviously read that which you can store. 799 */ 800 if (dtrace_canstore(addr, sz, mstate, vstate)) 801 return (1); 802 803 /* 804 * We're allowed to read from our own string table. 805 */ 806 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab, 807 mstate->dtms_difo->dtdo_strlen)) 808 return (1); 809 810 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV); 811 *illval = addr; 812 return (0); 813} 814 815/* 816 * Convenience routine to check to see if a given string is within a memory 817 * region in which a load may be issued given the user's privilege level; 818 * this exists so that we don't need to issue unnecessary dtrace_strlen() 819 * calls in the event that the user has all privileges. 820 */ 821static int 822dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 823 dtrace_vstate_t *vstate) 824{ 825 size_t strsz; 826 827 /* 828 * If we hold the privilege to read from kernel memory, then 829 * everything is readable. 830 */ 831 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 832 return (1); 833 834 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz); 835 if (dtrace_canload(addr, strsz, mstate, vstate)) 836 return (1); 837 838 return (0); 839} 840 841/* 842 * Convenience routine to check to see if a given variable is within a memory 843 * region in which a load may be issued given the user's privilege level. 844 */ 845static int 846dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate, 847 dtrace_vstate_t *vstate) 848{ 849 size_t sz; 850 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 851 852 /* 853 * If we hold the privilege to read from kernel memory, then 854 * everything is readable. 
855 */ 856 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 857 return (1); 858 859 if (type->dtdt_kind == DIF_TYPE_STRING) 860 sz = dtrace_strlen(src, 861 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1; 862 else 863 sz = type->dtdt_size; 864 865 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate)); 866} 867 868/* 869 * Compare two strings using safe loads. 870 */ 871static int 872dtrace_strncmp(char *s1, char *s2, size_t limit) 873{ 874 uint8_t c1, c2; 875 volatile uint16_t *flags; 876 877 if (s1 == s2 || limit == 0) 878 return (0); 879 880 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 881 882 do { 883 if (s1 == NULL) { 884 c1 = '\0'; 885 } else { 886 c1 = dtrace_load8((uintptr_t)s1++); 887 } 888 889 if (s2 == NULL) { 890 c2 = '\0'; 891 } else { 892 c2 = dtrace_load8((uintptr_t)s2++); 893 } 894 895 if (c1 != c2) 896 return (c1 - c2); 897 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT)); 898 899 return (0); 900} 901 902/* 903 * Compute strlen(s) for a string using safe memory accesses. The additional 904 * len parameter is used to specify a maximum length to ensure completion. 905 */ 906static size_t 907dtrace_strlen(const char *s, size_t lim) 908{ 909 uint_t len; 910 911 for (len = 0; len != lim; len++) { 912 if (dtrace_load8((uintptr_t)s++) == '\0') 913 break; 914 } 915 916 return (len); 917} 918 919/* 920 * Check if an address falls within a toxic region. 921 */ 922static int 923dtrace_istoxic(uintptr_t kaddr, size_t size) 924{ 925 uintptr_t taddr, tsize; 926 int i; 927 928 for (i = 0; i < dtrace_toxranges; i++) { 929 taddr = dtrace_toxrange[i].dtt_base; 930 tsize = dtrace_toxrange[i].dtt_limit - taddr; 931 932 if (kaddr - taddr < tsize) { 933 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 934 cpu_core[curcpu].cpuc_dtrace_illval = kaddr; 935 return (1); 936 } 937 938 if (taddr - kaddr < size) { 939 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 940 cpu_core[curcpu].cpuc_dtrace_illval = taddr; 941 return (1); 942 } 943 } 944 945 return (0); 946} 947 948/* 949 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe 950 * memory specified by the DIF program. The dst is assumed to be safe memory 951 * that we can store to directly because it is managed by DTrace. As with 952 * standard bcopy, overlapping copies are handled properly. 953 */ 954static void 955dtrace_bcopy(const void *src, void *dst, size_t len) 956{ 957 if (len != 0) { 958 uint8_t *s1 = dst; 959 const uint8_t *s2 = src; 960 961 if (s1 <= s2) { 962 do { 963 *s1++ = dtrace_load8((uintptr_t)s2++); 964 } while (--len != 0); 965 } else { 966 s2 += len; 967 s1 += len; 968 969 do { 970 *--s1 = dtrace_load8((uintptr_t)--s2); 971 } while (--len != 0); 972 } 973 } 974} 975 976/* 977 * Copy src to dst using safe memory accesses, up to either the specified 978 * length, or the point that a nul byte is encountered. The src is assumed to 979 * be unsafe memory specified by the DIF program. The dst is assumed to be 980 * safe memory that we can store to directly because it is managed by DTrace. 981 * Unlike dtrace_bcopy(), overlapping regions are not handled. 982 */ 983static void 984dtrace_strcpy(const void *src, void *dst, size_t len) 985{ 986 if (len != 0) { 987 uint8_t *s1 = dst, c; 988 const uint8_t *s2 = src; 989 990 do { 991 *s1++ = c = dtrace_load8((uintptr_t)s2++); 992 } while (--len != 0 && c != '\0'); 993 } 994} 995 996/* 997 * Copy src to dst, deriving the size and type from the specified (BYREF) 998 * variable type. 
The src is assumed to be unsafe memory specified by the DIF 999 * program. The dst is assumed to be DTrace variable memory that is of the 1000 * specified type; we assume that we can store to directly. 1001 */ 1002static void 1003dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type) 1004{ 1005 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 1006 1007 if (type->dtdt_kind == DIF_TYPE_STRING) { 1008 dtrace_strcpy(src, dst, type->dtdt_size); 1009 } else { 1010 dtrace_bcopy(src, dst, type->dtdt_size); 1011 } 1012} 1013 1014/* 1015 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be 1016 * unsafe memory specified by the DIF program. The s2 data is assumed to be 1017 * safe memory that we can access directly because it is managed by DTrace. 1018 */ 1019static int 1020dtrace_bcmp(const void *s1, const void *s2, size_t len) 1021{ 1022 volatile uint16_t *flags; 1023 1024 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 1025 1026 if (s1 == s2) 1027 return (0); 1028 1029 if (s1 == NULL || s2 == NULL) 1030 return (1); 1031 1032 if (s1 != s2 && len != 0) { 1033 const uint8_t *ps1 = s1; 1034 const uint8_t *ps2 = s2; 1035 1036 do { 1037 if (dtrace_load8((uintptr_t)ps1++) != *ps2++) 1038 return (1); 1039 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT)); 1040 } 1041 return (0); 1042} 1043 1044/* 1045 * Zero the specified region using a simple byte-by-byte loop. Note that this 1046 * is for safe DTrace-managed memory only. 1047 */ 1048static void 1049dtrace_bzero(void *dst, size_t len) 1050{ 1051 uchar_t *cp; 1052 1053 for (cp = dst; len != 0; len--) 1054 *cp++ = 0; 1055} 1056 1057static void 1058dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum) 1059{ 1060 uint64_t result[2]; 1061 1062 result[0] = addend1[0] + addend2[0]; 1063 result[1] = addend1[1] + addend2[1] + 1064 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0); 1065 1066 sum[0] = result[0]; 1067 sum[1] = result[1]; 1068} 1069 1070/* 1071 * Shift the 128-bit value in a by b. If b is positive, shift left. 1072 * If b is negative, shift right. 1073 */ 1074static void 1075dtrace_shift_128(uint64_t *a, int b) 1076{ 1077 uint64_t mask; 1078 1079 if (b == 0) 1080 return; 1081 1082 if (b < 0) { 1083 b = -b; 1084 if (b >= 64) { 1085 a[0] = a[1] >> (b - 64); 1086 a[1] = 0; 1087 } else { 1088 a[0] >>= b; 1089 mask = 1LL << (64 - b); 1090 mask -= 1; 1091 a[0] |= ((a[1] & mask) << (64 - b)); 1092 a[1] >>= b; 1093 } 1094 } else { 1095 if (b >= 64) { 1096 a[1] = a[0] << (b - 64); 1097 a[0] = 0; 1098 } else { 1099 a[1] <<= b; 1100 mask = a[0] >> (64 - b); 1101 a[1] |= mask; 1102 a[0] <<= b; 1103 } 1104 } 1105} 1106 1107/* 1108 * The basic idea is to break the 2 64-bit values into 4 32-bit values, 1109 * use native multiplication on those, and then re-combine into the 1110 * resulting 128-bit value. 
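 * Concretely, writing each 64-bit factor in terms of its 32-bit halves,
 * the product expands as: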
1111 * 1112 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) = 1113 * hi1 * hi2 << 64 + 1114 * hi1 * lo2 << 32 + 1115 * hi2 * lo1 << 32 + 1116 * lo1 * lo2 1117 */ 1118static void 1119dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product) 1120{ 1121 uint64_t hi1, hi2, lo1, lo2; 1122 uint64_t tmp[2]; 1123 1124 hi1 = factor1 >> 32; 1125 hi2 = factor2 >> 32; 1126 1127 lo1 = factor1 & DT_MASK_LO; 1128 lo2 = factor2 & DT_MASK_LO; 1129 1130 product[0] = lo1 * lo2; 1131 product[1] = hi1 * hi2; 1132 1133 tmp[0] = hi1 * lo2; 1134 tmp[1] = 0; 1135 dtrace_shift_128(tmp, 32); 1136 dtrace_add_128(product, tmp, product); 1137 1138 tmp[0] = hi2 * lo1; 1139 tmp[1] = 0; 1140 dtrace_shift_128(tmp, 32); 1141 dtrace_add_128(product, tmp, product); 1142} 1143 1144/* 1145 * This privilege check should be used by actions and subroutines to 1146 * verify that the user credentials of the process that enabled the 1147 * invoking ECB match the target credentials 1148 */ 1149static int 1150dtrace_priv_proc_common_user(dtrace_state_t *state) 1151{ 1152 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1153 1154 /* 1155 * We should always have a non-NULL state cred here, since if cred 1156 * is null (anonymous tracing), we fast-path bypass this routine. 1157 */ 1158 ASSERT(s_cr != NULL); 1159 1160 if ((cr = CRED()) != NULL && 1161 s_cr->cr_uid == cr->cr_uid && 1162 s_cr->cr_uid == cr->cr_ruid && 1163 s_cr->cr_uid == cr->cr_suid && 1164 s_cr->cr_gid == cr->cr_gid && 1165 s_cr->cr_gid == cr->cr_rgid && 1166 s_cr->cr_gid == cr->cr_sgid) 1167 return (1); 1168 1169 return (0); 1170} 1171 1172/* 1173 * This privilege check should be used by actions and subroutines to 1174 * verify that the zone of the process that enabled the invoking ECB 1175 * matches the target credentials 1176 */ 1177static int 1178dtrace_priv_proc_common_zone(dtrace_state_t *state) 1179{ 1180#if defined(sun) 1181 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1182 1183 /* 1184 * We should always have a non-NULL state cred here, since if cred 1185 * is null (anonymous tracing), we fast-path bypass this routine. 1186 */ 1187 ASSERT(s_cr != NULL); 1188 1189 if ((cr = CRED()) != NULL && 1190 s_cr->cr_zone == cr->cr_zone) 1191 return (1); 1192 1193 return (0); 1194#else 1195 return (1); 1196#endif 1197} 1198 1199/* 1200 * This privilege check should be used by actions and subroutines to 1201 * verify that the process has not setuid or changed credentials. 
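 * (On illumos, such a credential change, e.g. the exec of a setuid
 * binary, is recorded as the SNOCD flag in p_flag; that flag is exactly
 * what the check below tests.)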
1202 */ 1203static int 1204dtrace_priv_proc_common_nocd(void) 1205{ 1206 proc_t *proc; 1207 1208 if ((proc = ttoproc(curthread)) != NULL && 1209 !(proc->p_flag & SNOCD)) 1210 return (1); 1211 1212 return (0); 1213} 1214 1215static int 1216dtrace_priv_proc_destructive(dtrace_state_t *state) 1217{ 1218 int action = state->dts_cred.dcr_action; 1219 1220 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) && 1221 dtrace_priv_proc_common_zone(state) == 0) 1222 goto bad; 1223 1224 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) && 1225 dtrace_priv_proc_common_user(state) == 0) 1226 goto bad; 1227 1228 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) && 1229 dtrace_priv_proc_common_nocd() == 0) 1230 goto bad; 1231 1232 return (1); 1233 1234bad: 1235 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1236 1237 return (0); 1238} 1239 1240static int 1241dtrace_priv_proc_control(dtrace_state_t *state) 1242{ 1243 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL) 1244 return (1); 1245 1246 if (dtrace_priv_proc_common_zone(state) && 1247 dtrace_priv_proc_common_user(state) && 1248 dtrace_priv_proc_common_nocd()) 1249 return (1); 1250 1251 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1252 1253 return (0); 1254} 1255 1256static int 1257dtrace_priv_proc(dtrace_state_t *state) 1258{ 1259 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC) 1260 return (1); 1261 1262 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1263 1264 return (0); 1265} 1266 1267static int 1268dtrace_priv_kernel(dtrace_state_t *state) 1269{ 1270 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL) 1271 return (1); 1272 1273 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1274 1275 return (0); 1276} 1277 1278static int 1279dtrace_priv_kernel_destructive(dtrace_state_t *state) 1280{ 1281 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE) 1282 return (1); 1283 1284 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1285 1286 return (0); 1287} 1288 1289/* 1290 * Note: not called from probe context. This function is called 1291 * asynchronously (and at a regular interval) from outside of probe context to 1292 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable 1293 * cleaning is explained in detail in <sys/dtrace_impl.h>. 1294 */ 1295void 1296dtrace_dynvar_clean(dtrace_dstate_t *dstate) 1297{ 1298 dtrace_dynvar_t *dirty; 1299 dtrace_dstate_percpu_t *dcpu; 1300 int i, work = 0; 1301 1302 for (i = 0; i < NCPU; i++) { 1303 dcpu = &dstate->dtds_percpu[i]; 1304 1305 ASSERT(dcpu->dtdsc_rinsing == NULL); 1306 1307 /* 1308 * If the dirty list is NULL, there is no dirty work to do. 1309 */ 1310 if (dcpu->dtdsc_dirty == NULL) 1311 continue; 1312 1313 /* 1314 * If the clean list is non-NULL, then we're not going to do 1315 * any work for this CPU -- it means that there has not been 1316 * a dtrace_dynvar() allocation on this CPU (or from this CPU) 1317 * since the last time we cleaned house. 1318 */ 1319 if (dcpu->dtdsc_clean != NULL) 1320 continue; 1321 1322 work = 1; 1323 1324 /* 1325 * Atomically move the dirty list aside. 1326 */ 1327 do { 1328 dirty = dcpu->dtdsc_dirty; 1329 1330 /* 1331 * Before we zap the dirty list, set the rinsing list. 1332 * (This allows for a potential assertion in 1333 * dtrace_dynvar(): if a free dynamic variable appears 1334 * on a hash chain, either the dirty list or the 1335 * rinsing list for some CPU must be non-NULL.) 
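 * (To summarize the lifecycle detailed in <sys/dtrace_impl.h>: chunks are
 * allocated from a CPU's free list, deallocated onto its dirty list,
 * swung from dirty to rinsing here, promoted from rinsing to clean once
 * the dtrace_sync() below has completed, and switched from clean back to
 * free by an allocating CPU in dtrace_dynvar().)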
1336 */ 1337 dcpu->dtdsc_rinsing = dirty; 1338 dtrace_membar_producer(); 1339 } while (dtrace_casptr(&dcpu->dtdsc_dirty, 1340 dirty, NULL) != dirty); 1341 } 1342 1343 if (!work) { 1344 /* 1345 * We have no work to do; we can simply return. 1346 */ 1347 return; 1348 } 1349 1350 dtrace_sync(); 1351 1352 for (i = 0; i < NCPU; i++) { 1353 dcpu = &dstate->dtds_percpu[i]; 1354 1355 if (dcpu->dtdsc_rinsing == NULL) 1356 continue; 1357 1358 /* 1359 * We are now guaranteed that no hash chain contains a pointer 1360 * into this dirty list; we can make it clean. 1361 */ 1362 ASSERT(dcpu->dtdsc_clean == NULL); 1363 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing; 1364 dcpu->dtdsc_rinsing = NULL; 1365 } 1366 1367 /* 1368 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make 1369 * sure that all CPUs have seen all of the dtdsc_clean pointers. 1370 * This prevents a race whereby a CPU incorrectly decides that 1371 * the state should be something other than DTRACE_DSTATE_CLEAN 1372 * after dtrace_dynvar_clean() has completed. 1373 */ 1374 dtrace_sync(); 1375 1376 dstate->dtds_state = DTRACE_DSTATE_CLEAN; 1377} 1378 1379/* 1380 * Depending on the value of the op parameter, this function looks-up, 1381 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an 1382 * allocation is requested, this function will return a pointer to a 1383 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no 1384 * variable can be allocated. If NULL is returned, the appropriate counter 1385 * will be incremented. 1386 */ 1387dtrace_dynvar_t * 1388dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys, 1389 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op, 1390 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 1391{ 1392 uint64_t hashval = DTRACE_DYNHASH_VALID; 1393 dtrace_dynhash_t *hash = dstate->dtds_hash; 1394 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL; 1395 processorid_t me = curcpu, cpu = me; 1396 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me]; 1397 size_t bucket, ksize; 1398 size_t chunksize = dstate->dtds_chunksize; 1399 uintptr_t kdata, lock, nstate; 1400 uint_t i; 1401 1402 ASSERT(nkeys != 0); 1403 1404 /* 1405 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time" 1406 * algorithm. For the by-value portions, we perform the algorithm in 1407 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a 1408 * bit, and seems to have only a minute effect on distribution. For 1409 * the by-reference data, we perform "One-at-a-time" iterating (safely) 1410 * over each referenced byte. It's painful to do this, but it's much 1411 * better than pathological hash distribution. The efficacy of the 1412 * hashing algorithm (and a comparison with other algorithms) may be 1413 * found by running the ::dtrace_dynstat MDB dcmd. 1414 */ 1415 for (i = 0; i < nkeys; i++) { 1416 if (key[i].dttk_size == 0) { 1417 uint64_t val = key[i].dttk_value; 1418 1419 hashval += (val >> 48) & 0xffff; 1420 hashval += (hashval << 10); 1421 hashval ^= (hashval >> 6); 1422 1423 hashval += (val >> 32) & 0xffff; 1424 hashval += (hashval << 10); 1425 hashval ^= (hashval >> 6); 1426 1427 hashval += (val >> 16) & 0xffff; 1428 hashval += (hashval << 10); 1429 hashval ^= (hashval >> 6); 1430 1431 hashval += val & 0xffff; 1432 hashval += (hashval << 10); 1433 hashval ^= (hashval >> 6); 1434 } else { 1435 /* 1436 * This is incredibly painful, but it beats the hell 1437 * out of the alternative. 
1438 */ 1439 uint64_t j, size = key[i].dttk_size; 1440 uintptr_t base = (uintptr_t)key[i].dttk_value; 1441 1442 if (!dtrace_canload(base, size, mstate, vstate)) 1443 break; 1444 1445 for (j = 0; j < size; j++) { 1446 hashval += dtrace_load8(base + j); 1447 hashval += (hashval << 10); 1448 hashval ^= (hashval >> 6); 1449 } 1450 } 1451 } 1452 1453 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) 1454 return (NULL); 1455 1456 hashval += (hashval << 3); 1457 hashval ^= (hashval >> 11); 1458 hashval += (hashval << 15); 1459 1460 /* 1461 * There is a remote chance (ideally, 1 in 2^31) that our hashval 1462 * comes out to be one of our two sentinel hash values. If this 1463 * actually happens, we set the hashval to be a value known to be a 1464 * non-sentinel value. 1465 */ 1466 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK) 1467 hashval = DTRACE_DYNHASH_VALID; 1468 1469 /* 1470 * Yes, it's painful to do a divide here. If the cycle count becomes 1471 * important here, tricks can be pulled to reduce it. (However, it's 1472 * critical that hash collisions be kept to an absolute minimum; 1473 * they're much more painful than a divide.) It's better to have a 1474 * solution that generates few collisions and still keeps things 1475 * relatively simple. 1476 */ 1477 bucket = hashval % dstate->dtds_hashsize; 1478 1479 if (op == DTRACE_DYNVAR_DEALLOC) { 1480 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock; 1481 1482 for (;;) { 1483 while ((lock = *lockp) & 1) 1484 continue; 1485 1486 if (dtrace_casptr((volatile void *)lockp, 1487 (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock) 1488 break; 1489 } 1490 1491 dtrace_membar_producer(); 1492 } 1493 1494top: 1495 prev = NULL; 1496 lock = hash[bucket].dtdh_lock; 1497 1498 dtrace_membar_consumer(); 1499 1500 start = hash[bucket].dtdh_chain; 1501 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK || 1502 start->dtdv_hashval != DTRACE_DYNHASH_FREE || 1503 op != DTRACE_DYNVAR_DEALLOC)); 1504 1505 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) { 1506 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple; 1507 dtrace_key_t *dkey = &dtuple->dtt_key[0]; 1508 1509 if (dvar->dtdv_hashval != hashval) { 1510 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) { 1511 /* 1512 * We've reached the sink, and therefore the 1513 * end of the hash chain; we can kick out of 1514 * the loop knowing that we have seen a valid 1515 * snapshot of state. 1516 */ 1517 ASSERT(dvar->dtdv_next == NULL); 1518 ASSERT(dvar == &dtrace_dynhash_sink); 1519 break; 1520 } 1521 1522 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) { 1523 /* 1524 * We've gone off the rails: somewhere along 1525 * the line, one of the members of this hash 1526 * chain was deleted. Note that we could also 1527 * detect this by simply letting this loop run 1528 * to completion, as we would eventually hit 1529 * the end of the dirty list. However, we 1530 * want to avoid running the length of the 1531 * dirty list unnecessarily (it might be quite 1532 * long), so we catch this as early as 1533 * possible by detecting the hash marker. In 1534 * this case, we simply set dvar to NULL and 1535 * break; the conditional after the loop will 1536 * send us back to top. 
1537 */ 1538 dvar = NULL; 1539 break; 1540 } 1541 1542 goto next; 1543 } 1544 1545 if (dtuple->dtt_nkeys != nkeys) 1546 goto next; 1547 1548 for (i = 0; i < nkeys; i++, dkey++) { 1549 if (dkey->dttk_size != key[i].dttk_size) 1550 goto next; /* size or type mismatch */ 1551 1552 if (dkey->dttk_size != 0) { 1553 if (dtrace_bcmp( 1554 (void *)(uintptr_t)key[i].dttk_value, 1555 (void *)(uintptr_t)dkey->dttk_value, 1556 dkey->dttk_size)) 1557 goto next; 1558 } else { 1559 if (dkey->dttk_value != key[i].dttk_value) 1560 goto next; 1561 } 1562 } 1563 1564 if (op != DTRACE_DYNVAR_DEALLOC) 1565 return (dvar); 1566 1567 ASSERT(dvar->dtdv_next == NULL || 1568 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE); 1569 1570 if (prev != NULL) { 1571 ASSERT(hash[bucket].dtdh_chain != dvar); 1572 ASSERT(start != dvar); 1573 ASSERT(prev->dtdv_next == dvar); 1574 prev->dtdv_next = dvar->dtdv_next; 1575 } else { 1576 if (dtrace_casptr(&hash[bucket].dtdh_chain, 1577 start, dvar->dtdv_next) != start) { 1578 /* 1579 * We have failed to atomically swing the 1580 * hash table head pointer, presumably because 1581 * of a conflicting allocation on another CPU. 1582 * We need to reread the hash chain and try 1583 * again. 1584 */ 1585 goto top; 1586 } 1587 } 1588 1589 dtrace_membar_producer(); 1590 1591 /* 1592 * Now set the hash value to indicate that it's free. 1593 */ 1594 ASSERT(hash[bucket].dtdh_chain != dvar); 1595 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1596 1597 dtrace_membar_producer(); 1598 1599 /* 1600 * Set the next pointer to point at the dirty list, and 1601 * atomically swing the dirty pointer to the newly freed dvar. 1602 */ 1603 do { 1604 next = dcpu->dtdsc_dirty; 1605 dvar->dtdv_next = next; 1606 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); 1607 1608 /* 1609 * Finally, unlock this hash bucket. 1610 */ 1611 ASSERT(hash[bucket].dtdh_lock == lock); 1612 ASSERT(lock & 1); 1613 hash[bucket].dtdh_lock++; 1614 1615 return (NULL); 1616next: 1617 prev = dvar; 1618 continue; 1619 } 1620 1621 if (dvar == NULL) { 1622 /* 1623 * If dvar is NULL, it is because we went off the rails: 1624 * one of the elements that we traversed in the hash chain 1625 * was deleted while we were traversing it. In this case, 1626 * we assert that we aren't doing a dealloc (deallocs lock 1627 * the hash bucket to prevent themselves from racing with 1628 * one another), and retry the hash chain traversal. 1629 */ 1630 ASSERT(op != DTRACE_DYNVAR_DEALLOC); 1631 goto top; 1632 } 1633 1634 if (op != DTRACE_DYNVAR_ALLOC) { 1635 /* 1636 * If we are not to allocate a new variable, we want to 1637 * return NULL now. Before we return, check that the value 1638 * of the lock word hasn't changed. If it has, we may have 1639 * seen an inconsistent snapshot. 1640 */ 1641 if (op == DTRACE_DYNVAR_NOALLOC) { 1642 if (hash[bucket].dtdh_lock != lock) 1643 goto top; 1644 } else { 1645 ASSERT(op == DTRACE_DYNVAR_DEALLOC); 1646 ASSERT(hash[bucket].dtdh_lock == lock); 1647 ASSERT(lock & 1); 1648 hash[bucket].dtdh_lock++; 1649 } 1650 1651 return (NULL); 1652 } 1653 1654 /* 1655 * We need to allocate a new dynamic variable. The size we need is the 1656 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the 1657 * size of any auxiliary key data (rounded up to 8-byte alignment) plus 1658 * the size of any referred-to data (dsize). We then round the final 1659 * size up to the chunksize for allocation. 
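 * (For example, a two-key tuple whose first key is by-value and whose
 * second is a 13-byte by-reference key, with 16 bytes of stored data,
 * requires sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) +
 * P2ROUNDUP(13, sizeof (uint64_t)) + 16 bytes; if that exceeds the
 * chunksize, the allocation is refused below and counted as a drop.)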
1660 */ 1661 for (ksize = 0, i = 0; i < nkeys; i++) 1662 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 1663 1664 /* 1665 * This should be pretty much impossible, but could happen if, say, 1666 * strange DIF specified the tuple. Ideally, this should be an 1667 * assertion and not an error condition -- but that requires that the 1668 * chunksize calculation in dtrace_difo_chunksize() be absolutely 1669 * bullet-proof. (That is, it must not be able to be fooled by 1670 * malicious DIF.) Given the lack of backwards branches in DIF, 1671 * solving this would presumably not amount to solving the Halting 1672 * Problem -- but it still seems awfully hard. 1673 */ 1674 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + 1675 ksize + dsize > chunksize) { 1676 dcpu->dtdsc_drops++; 1677 return (NULL); 1678 } 1679 1680 nstate = DTRACE_DSTATE_EMPTY; 1681 1682 do { 1683retry: 1684 free = dcpu->dtdsc_free; 1685 1686 if (free == NULL) { 1687 dtrace_dynvar_t *clean = dcpu->dtdsc_clean; 1688 void *rval; 1689 1690 if (clean == NULL) { 1691 /* 1692 * We're out of dynamic variable space on 1693 * this CPU. Unless we have tried all CPUs, 1694 * we'll try to allocate from a different 1695 * CPU. 1696 */ 1697 switch (dstate->dtds_state) { 1698 case DTRACE_DSTATE_CLEAN: { 1699 void *sp = &dstate->dtds_state; 1700 1701 if (++cpu >= NCPU) 1702 cpu = 0; 1703 1704 if (dcpu->dtdsc_dirty != NULL && 1705 nstate == DTRACE_DSTATE_EMPTY) 1706 nstate = DTRACE_DSTATE_DIRTY; 1707 1708 if (dcpu->dtdsc_rinsing != NULL) 1709 nstate = DTRACE_DSTATE_RINSING; 1710 1711 dcpu = &dstate->dtds_percpu[cpu]; 1712 1713 if (cpu != me) 1714 goto retry; 1715 1716 (void) dtrace_cas32(sp, 1717 DTRACE_DSTATE_CLEAN, nstate); 1718 1719 /* 1720 * To increment the correct bean 1721 * counter, take another lap. 1722 */ 1723 goto retry; 1724 } 1725 1726 case DTRACE_DSTATE_DIRTY: 1727 dcpu->dtdsc_dirty_drops++; 1728 break; 1729 1730 case DTRACE_DSTATE_RINSING: 1731 dcpu->dtdsc_rinsing_drops++; 1732 break; 1733 1734 case DTRACE_DSTATE_EMPTY: 1735 dcpu->dtdsc_drops++; 1736 break; 1737 } 1738 1739 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); 1740 return (NULL); 1741 } 1742 1743 /* 1744 * The clean list appears to be non-empty. We want to 1745 * move the clean list to the free list; we start by 1746 * moving the clean pointer aside. 1747 */ 1748 if (dtrace_casptr(&dcpu->dtdsc_clean, 1749 clean, NULL) != clean) { 1750 /* 1751 * We are in one of two situations: 1752 * 1753 * (a) The clean list was switched to the 1754 * free list by another CPU. 1755 * 1756 * (b) The clean list was added to by the 1757 * cleansing cyclic. 1758 * 1759 * In either of these situations, we can 1760 * just reattempt the free list allocation. 1761 */ 1762 goto retry; 1763 } 1764 1765 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE); 1766 1767 /* 1768 * Now we'll move the clean list to the free list. 1769 * It's impossible for this to fail: the only way 1770 * the free list can be updated is through this 1771 * code path, and only one CPU can own the clean list. 1772 * Thus, it would only be possible for this to fail if 1773 * this code were racing with dtrace_dynvar_clean(). 1774 * (That is, if dtrace_dynvar_clean() updated the clean 1775 * list, and we ended up racing to update the free 1776 * list.) This race is prevented by the dtrace_sync() 1777 * in dtrace_dynvar_clean() -- which flushes the 1778 * owners of the clean lists out before resetting 1779 * the clean lists. 
1780 */ 1781 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 1782 ASSERT(rval == NULL); 1783 goto retry; 1784 } 1785 1786 dvar = free; 1787 new_free = dvar->dtdv_next; 1788 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 1789 1790 /* 1791 * We have now allocated a new chunk. We copy the tuple keys into the 1792 * tuple array and copy any referenced key data into the data space 1793 * following the tuple array. As we do this, we relocate dttk_value 1794 * in the final tuple to point to the key data address in the chunk. 1795 */ 1796 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 1797 dvar->dtdv_data = (void *)(kdata + ksize); 1798 dvar->dtdv_tuple.dtt_nkeys = nkeys; 1799 1800 for (i = 0; i < nkeys; i++) { 1801 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 1802 size_t kesize = key[i].dttk_size; 1803 1804 if (kesize != 0) { 1805 dtrace_bcopy( 1806 (const void *)(uintptr_t)key[i].dttk_value, 1807 (void *)kdata, kesize); 1808 dkey->dttk_value = kdata; 1809 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 1810 } else { 1811 dkey->dttk_value = key[i].dttk_value; 1812 } 1813 1814 dkey->dttk_size = kesize; 1815 } 1816 1817 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 1818 dvar->dtdv_hashval = hashval; 1819 dvar->dtdv_next = start; 1820 1821 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 1822 return (dvar); 1823 1824 /* 1825 * The cas has failed. Either another CPU is adding an element to 1826 * this hash chain, or another CPU is deleting an element from this 1827 * hash chain. The simplest way to deal with both of these cases 1828 * (though not necessarily the most efficient) is to free our 1829 * allocated block and tail-call ourselves. Note that the free is 1830 * to the dirty list and _not_ to the free list. This is to prevent 1831 * races with allocators, above. 
1832 */ 1833 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1834 1835 dtrace_membar_producer(); 1836 1837 do { 1838 free = dcpu->dtdsc_dirty; 1839 dvar->dtdv_next = free; 1840 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1841 1842 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 1843} 1844 1845/*ARGSUSED*/ 1846static void 1847dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 1848{ 1849 if ((int64_t)nval < (int64_t)*oval) 1850 *oval = nval; 1851} 1852 1853/*ARGSUSED*/ 1854static void 1855dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 1856{ 1857 if ((int64_t)nval > (int64_t)*oval) 1858 *oval = nval; 1859} 1860 1861static void 1862dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 1863{ 1864 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 1865 int64_t val = (int64_t)nval; 1866 1867 if (val < 0) { 1868 for (i = 0; i < zero; i++) { 1869 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 1870 quanta[i] += incr; 1871 return; 1872 } 1873 } 1874 } else { 1875 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 1876 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 1877 quanta[i - 1] += incr; 1878 return; 1879 } 1880 } 1881 1882 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 1883 return; 1884 } 1885 1886 ASSERT(0); 1887} 1888 1889static void 1890dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 1891{ 1892 uint64_t arg = *lquanta++; 1893 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1894 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1895 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1896 int32_t val = (int32_t)nval, level; 1897 1898 ASSERT(step != 0); 1899 ASSERT(levels != 0); 1900 1901 if (val < base) { 1902 /* 1903 * This is an underflow. 1904 */ 1905 lquanta[0] += incr; 1906 return; 1907 } 1908 1909 level = (val - base) / step; 1910 1911 if (level < levels) { 1912 lquanta[level + 1] += incr; 1913 return; 1914 } 1915 1916 /* 1917 * This is an overflow. 1918 */ 1919 lquanta[levels + 1] += incr; 1920} 1921 1922static int 1923dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, 1924 uint16_t high, uint16_t nsteps, int64_t value) 1925{ 1926 int64_t this = 1, last, next; 1927 int base = 1, order; 1928 1929 ASSERT(factor <= nsteps); 1930 ASSERT(nsteps % factor == 0); 1931 1932 for (order = 0; order < low; order++) 1933 this *= factor; 1934 1935 /* 1936 * If our value is less than our factor taken to the power of the 1937 * low order of magnitude, it goes into the zeroth bucket. 1938 */ 1939 if (value < (last = this)) 1940 return (0); 1941 1942 for (this *= factor; order <= high; order++) { 1943 int nbuckets = this > nsteps ? nsteps : this; 1944 1945 if ((next = this * factor) < this) { 1946 /* 1947 * We should not generally get log/linear quantizations 1948 * with a high magnitude that allows 64-bits to 1949 * overflow, but we nonetheless protect against this 1950 * by explicitly checking for overflow, and clamping 1951 * our value accordingly. 1952 */ 1953 value = this - 1; 1954 } 1955 1956 if (value < this) { 1957 /* 1958 * If our value lies within this order of magnitude, 1959 * determine its position by taking the offset within 1960 * the order of magnitude, dividing by the bucket 1961 * width, and adding to our (accumulated) base. 
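 * (Worked example: with factor 10, low 0, high 2 and nsteps 10, the
 * value 42 falls in the [10, 100) order of magnitude; by the time that
 * order is reached, base is 10 and the bucket width is 100 / 10 = 10,
 * so the function returns 10 + (42 - 10) / 10, i.e. bucket 13.)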
1962 */ 1963 return (base + (value - last) / (this / nbuckets)); 1964 } 1965 1966 base += nbuckets - (nbuckets / factor); 1967 last = this; 1968 this = next; 1969 } 1970 1971 /* 1972 * Our value is greater than or equal to our factor taken to the 1973 * power of one plus the high magnitude -- return the top bucket. 1974 */ 1975 return (base); 1976} 1977 1978static void 1979dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr) 1980{ 1981 uint64_t arg = *llquanta++; 1982 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 1983 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 1984 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 1985 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 1986 1987 llquanta[dtrace_aggregate_llquantize_bucket(factor, 1988 low, high, nsteps, nval)] += incr; 1989} 1990 1991/*ARGSUSED*/ 1992static void 1993dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 1994{ 1995 data[0]++; 1996 data[1] += nval; 1997} 1998 1999/*ARGSUSED*/ 2000static void 2001dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 2002{ 2003 int64_t snval = (int64_t)nval; 2004 uint64_t tmp[2]; 2005 2006 data[0]++; 2007 data[1] += nval; 2008 2009 /* 2010 * What we want to say here is: 2011 * 2012 * data[2] += nval * nval; 2013 * 2014 * But given that nval is 64-bit, we could easily overflow, so 2015 * we do this as 128-bit arithmetic. 2016 */ 2017 if (snval < 0) 2018 snval = -snval; 2019 2020 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 2021 dtrace_add_128(data + 2, tmp, data + 2); 2022} 2023 2024/*ARGSUSED*/ 2025static void 2026dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 2027{ 2028 *oval = *oval + 1; 2029} 2030 2031/*ARGSUSED*/ 2032static void 2033dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 2034{ 2035 *oval += nval; 2036} 2037 2038/* 2039 * Aggregate given the tuple in the principal data buffer, and the aggregating 2040 * action denoted by the specified dtrace_aggregation_t. The aggregation 2041 * buffer is specified as the buf parameter. This routine does not return 2042 * failure; if there is no space in the aggregation buffer, the data will be 2043 * dropped, and a corresponding counter incremented. 2044 */ 2045static void 2046dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 2047 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 2048{ 2049 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 2050 uint32_t i, ndx, size, fsize; 2051 uint32_t align = sizeof (uint64_t) - 1; 2052 dtrace_aggbuffer_t *agb; 2053 dtrace_aggkey_t *key; 2054 uint32_t hashval = 0, limit, isstr; 2055 caddr_t tomax, data, kdata; 2056 dtrace_actkind_t action; 2057 dtrace_action_t *act; 2058 uintptr_t offs; 2059 2060 if (buf == NULL) 2061 return; 2062 2063 if (!agg->dtag_hasarg) { 2064 /* 2065 * Currently, only quantize() and lquantize() take additional 2066 * arguments, and they have the same semantics: an increment 2067 * value that defaults to 1 when not present. If additional 2068 * aggregating actions take arguments, the setting of the 2069 * default argument value will presumably have to become more 2070 * sophisticated... 
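 * (For example, the D statement @a = quantize(x, 5) arrives here with an
 * increment argument of 5, while plain @a = quantize(x) has no argument
 * and is given the default of 1.)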
2071 */ 2072 arg = 1; 2073 } 2074 2075 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2076 size = rec->dtrd_offset - agg->dtag_base; 2077 fsize = size + rec->dtrd_size; 2078 2079 ASSERT(dbuf->dtb_tomax != NULL); 2080 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2081 2082 if ((tomax = buf->dtb_tomax) == NULL) { 2083 dtrace_buffer_drop(buf); 2084 return; 2085 } 2086 2087 /* 2088 * The metastructure is always at the bottom of the buffer. 2089 */ 2090 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2091 sizeof (dtrace_aggbuffer_t)); 2092 2093 if (buf->dtb_offset == 0) { 2094 /* 2095 * We just kludge up approximately 1/8th of the size to be 2096 * buckets. If this guess ends up being routinely 2097 * off-the-mark, we may need to dynamically readjust this 2098 * based on past performance. 2099 */ 2100 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2101 2102 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2103 (uintptr_t)tomax || hashsize == 0) { 2104 /* 2105 * We've been given a ludicrously small buffer; 2106 * increment our drop count and leave. 2107 */ 2108 dtrace_buffer_drop(buf); 2109 return; 2110 } 2111 2112 /* 2113 * And now, a pathetic attempt to try to get an odd (or 2114 * perchance, a prime) hash size for better hash distribution. 2115 */ 2116 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2117 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2118 2119 agb->dtagb_hashsize = hashsize; 2120 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2121 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2122 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2123 2124 for (i = 0; i < agb->dtagb_hashsize; i++) 2125 agb->dtagb_hash[i] = NULL; 2126 } 2127 2128 ASSERT(agg->dtag_first != NULL); 2129 ASSERT(agg->dtag_first->dta_intuple); 2130 2131 /* 2132 * Calculate the hash value based on the key. Note that we _don't_ 2133 * include the aggid in the hashing (but we will store it as part of 2134 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2135 * algorithm: a simple, quick algorithm that has no known funnels, and 2136 * gets good distribution in practice. The efficacy of the hashing 2137 * algorithm (and a comparison with other algorithms) may be found by 2138 * running the ::dtrace_aggstat MDB dcmd. 2139 */ 2140 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2141 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2142 limit = i + act->dta_rec.dtrd_size; 2143 ASSERT(limit <= size); 2144 isstr = DTRACEACT_ISSTRING(act); 2145 2146 for (; i < limit; i++) { 2147 hashval += data[i]; 2148 hashval += (hashval << 10); 2149 hashval ^= (hashval >> 6); 2150 2151 if (isstr && data[i] == '\0') 2152 break; 2153 } 2154 } 2155 2156 hashval += (hashval << 3); 2157 hashval ^= (hashval >> 11); 2158 hashval += (hashval << 15); 2159 2160 /* 2161 * Yes, the divide here is expensive -- but it's generally the least 2162 * of the performance issues given the amount of data that we iterate 2163 * over to compute hash values, compare data, etc.
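	 * (A power-of-two table would allow a mask instead of a divide,
	 * but that would defeat the deliberately odd hash size chosen
	 * above for the sake of distribution.)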
2164 */ 2165 ndx = hashval % agb->dtagb_hashsize; 2166 2167 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2168 ASSERT((caddr_t)key >= tomax); 2169 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2170 2171 if (hashval != key->dtak_hashval || key->dtak_size != size) 2172 continue; 2173 2174 kdata = key->dtak_data; 2175 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2176 2177 for (act = agg->dtag_first; act->dta_intuple; 2178 act = act->dta_next) { 2179 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2180 limit = i + act->dta_rec.dtrd_size; 2181 ASSERT(limit <= size); 2182 isstr = DTRACEACT_ISSTRING(act); 2183 2184 for (; i < limit; i++) { 2185 if (kdata[i] != data[i]) 2186 goto next; 2187 2188 if (isstr && data[i] == '\0') 2189 break; 2190 } 2191 } 2192 2193 if (action != key->dtak_action) { 2194 /* 2195 * We are aggregating on the same value in the same 2196 * aggregation with two different aggregating actions. 2197 * (This should have been picked up in the compiler, 2198 * so we may be dealing with errant or devious DIF.) 2199 * This is an error condition; we indicate as much, 2200 * and return. 2201 */ 2202 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2203 return; 2204 } 2205 2206 /* 2207 * This is a hit: we need to apply the aggregator to 2208 * the value at this key. 2209 */ 2210 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2211 return; 2212next: 2213 continue; 2214 } 2215 2216 /* 2217 * We didn't find it. We need to allocate some zero-filled space, 2218 * link it into the hash table appropriately, and apply the aggregator 2219 * to the (zero-filled) value. 2220 */ 2221 offs = buf->dtb_offset; 2222 while (offs & (align - 1)) 2223 offs += sizeof (uint32_t); 2224 2225 /* 2226 * If we don't have enough room to both allocate a new key _and_ 2227 * its associated data, increment the drop count and return. 2228 */ 2229 if ((uintptr_t)tomax + offs + fsize > 2230 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2231 dtrace_buffer_drop(buf); 2232 return; 2233 } 2234 2235 /*CONSTCOND*/ 2236 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2237 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2238 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2239 2240 key->dtak_data = kdata = tomax + offs; 2241 buf->dtb_offset = offs + fsize; 2242 2243 /* 2244 * Now copy the data across. 2245 */ 2246 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2247 2248 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2249 kdata[i] = data[i]; 2250 2251 /* 2252 * Because strings are not zeroed out by default, we need to iterate 2253 * looking for actions that store strings, and we need to explicitly 2254 * pad these strings out with zeroes. 2255 */ 2256 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2257 int nul; 2258 2259 if (!DTRACEACT_ISSTRING(act)) 2260 continue; 2261 2262 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2263 limit = i + act->dta_rec.dtrd_size; 2264 ASSERT(limit <= size); 2265 2266 for (nul = 0; i < limit; i++) { 2267 if (nul) { 2268 kdata[i] = '\0'; 2269 continue; 2270 } 2271 2272 if (data[i] != '\0') 2273 continue; 2274 2275 nul = 1; 2276 } 2277 } 2278 2279 for (i = size; i < fsize; i++) 2280 kdata[i] = 0; 2281 2282 key->dtak_hashval = hashval; 2283 key->dtak_size = size; 2284 key->dtak_action = action; 2285 key->dtak_next = agb->dtagb_hash[ndx]; 2286 agb->dtagb_hash[ndx] = key; 2287 2288 /* 2289 * Finally, apply the aggregator. 
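	 * The value slot sits immediately after the key data, at
	 * dtak_data + size.  It is seeded from dtag_initial, which was
	 * established when the aggregation was created (presumably
	 * INT64_MAX for min(), for example, so that the first signed
	 * comparison always stores the incoming value), before the
	 * aggregating function folds in the new datum.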
2290 */ 2291 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2292 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2293} 2294 2295/* 2296 * Given consumer state, this routine finds a speculation in the INACTIVE 2297 * state and transitions it into the ACTIVE state. If there is no speculation 2298 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2299 * incremented -- it is up to the caller to take appropriate action. 2300 */ 2301static int 2302dtrace_speculation(dtrace_state_t *state) 2303{ 2304 int i = 0; 2305 dtrace_speculation_state_t current; 2306 uint32_t *stat = &state->dts_speculations_unavail, count; 2307 2308 while (i < state->dts_nspeculations) { 2309 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2310 2311 current = spec->dtsp_state; 2312 2313 if (current != DTRACESPEC_INACTIVE) { 2314 if (current == DTRACESPEC_COMMITTINGMANY || 2315 current == DTRACESPEC_COMMITTING || 2316 current == DTRACESPEC_DISCARDING) 2317 stat = &state->dts_speculations_busy; 2318 i++; 2319 continue; 2320 } 2321 2322 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2323 current, DTRACESPEC_ACTIVE) == current) 2324 return (i + 1); 2325 } 2326 2327 /* 2328 * We couldn't find a speculation. If we found as much as a single 2329 * busy speculation buffer, we'll attribute this failure as "busy" 2330 * instead of "unavail". 2331 */ 2332 do { 2333 count = *stat; 2334 } while (dtrace_cas32(stat, count, count + 1) != count); 2335 2336 return (0); 2337} 2338 2339/* 2340 * This routine commits an active speculation. If the specified speculation 2341 * is not in a valid state to perform a commit(), this routine will silently do 2342 * nothing. The state of the specified speculation is transitioned according 2343 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2344 */ 2345static void 2346dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2347 dtrace_specid_t which) 2348{ 2349 dtrace_speculation_t *spec; 2350 dtrace_buffer_t *src, *dest; 2351 uintptr_t daddr, saddr, dlimit, slimit; 2352 dtrace_speculation_state_t current, new = 0; 2353 intptr_t offs; 2354 uint64_t timestamp; 2355 2356 if (which == 0) 2357 return; 2358 2359 if (which > state->dts_nspeculations) { 2360 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2361 return; 2362 } 2363 2364 spec = &state->dts_speculations[which - 1]; 2365 src = &spec->dtsp_buffer[cpu]; 2366 dest = &state->dts_buffer[cpu]; 2367 2368 do { 2369 current = spec->dtsp_state; 2370 2371 if (current == DTRACESPEC_COMMITTINGMANY) 2372 break; 2373 2374 switch (current) { 2375 case DTRACESPEC_INACTIVE: 2376 case DTRACESPEC_DISCARDING: 2377 return; 2378 2379 case DTRACESPEC_COMMITTING: 2380 /* 2381 * This is only possible if we are (a) commit()'ing 2382 * without having done a prior speculate() on this CPU 2383 * and (b) racing with another commit() on a different 2384 * CPU. There's nothing to do -- we just assert that 2385 * our offset is 0. 2386 */ 2387 ASSERT(src->dtb_offset == 0); 2388 return; 2389 2390 case DTRACESPEC_ACTIVE: 2391 new = DTRACESPEC_COMMITTING; 2392 break; 2393 2394 case DTRACESPEC_ACTIVEONE: 2395 /* 2396 * This speculation is active on one CPU. If our 2397 * buffer offset is non-zero, we know that the one CPU 2398 * must be us. Otherwise, we are committing on a 2399 * different CPU from the speculate(), and we must 2400 * rely on being asynchronously cleaned. 
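			 * In the latter case we deliberately fall through
			 * to the ACTIVEMANY handling and transition to
			 * COMMITTINGMANY, leaving it to the asynchronous
			 * cleaner to finish the commit on the CPU that
			 * actually holds the speculative data.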
2401 */ 2402 if (src->dtb_offset != 0) { 2403 new = DTRACESPEC_COMMITTING; 2404 break; 2405 } 2406 /*FALLTHROUGH*/ 2407 2408 case DTRACESPEC_ACTIVEMANY: 2409 new = DTRACESPEC_COMMITTINGMANY; 2410 break; 2411 2412 default: 2413 ASSERT(0); 2414 } 2415 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2416 current, new) != current); 2417 2418 /* 2419 * We have set the state to indicate that we are committing this 2420 * speculation. Now reserve the necessary space in the destination 2421 * buffer. 2422 */ 2423 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2424 sizeof (uint64_t), state, NULL)) < 0) { 2425 dtrace_buffer_drop(dest); 2426 goto out; 2427 } 2428 2429 /* 2430 * We have sufficient space to copy the speculative buffer into the 2431 * primary buffer. First, modify the speculative buffer, filling 2432 * in the timestamp of all entries with the current time. The data 2433 * must have the commit() time rather than the time it was traced, 2434 * so that all entries in the primary buffer are in timestamp order. 2435 */ 2436 timestamp = dtrace_gethrtime(); 2437 saddr = (uintptr_t)src->dtb_tomax; 2438 slimit = saddr + src->dtb_offset; 2439 while (saddr < slimit) { 2440 size_t size; 2441 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr; 2442 2443 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) { 2444 saddr += sizeof (dtrace_epid_t); 2445 continue; 2446 } 2447 ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs); 2448 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size; 2449 2450 ASSERT3U(saddr + size, <=, slimit); 2451 ASSERT3U(size, >=, sizeof (dtrace_rechdr_t)); 2452 ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX); 2453 2454 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp); 2455 2456 saddr += size; 2457 } 2458 2459 /* 2460 * Copy the buffer across. (Note that this is a 2461 * highly suboptimal bcopy(); in the unlikely event that this becomes 2462 * a serious performance issue, a high-performance DTrace-specific 2463 * bcopy() should obviously be invented.) 2464 */ 2465 daddr = (uintptr_t)dest->dtb_tomax + offs; 2466 dlimit = daddr + src->dtb_offset; 2467 saddr = (uintptr_t)src->dtb_tomax; 2468 2469 /* 2470 * First, the aligned portion. 2471 */ 2472 while (dlimit - daddr >= sizeof (uint64_t)) { 2473 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2474 2475 daddr += sizeof (uint64_t); 2476 saddr += sizeof (uint64_t); 2477 } 2478 2479 /* 2480 * Now any left-over bit... 2481 */ 2482 while (dlimit - daddr) 2483 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2484 2485 /* 2486 * Finally, commit the reserved space in the destination buffer. 2487 */ 2488 dest->dtb_offset = offs + src->dtb_offset; 2489 2490out: 2491 /* 2492 * If we're lucky enough to be the only active CPU on this speculation 2493 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 2494 */ 2495 if (current == DTRACESPEC_ACTIVE || 2496 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2497 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2498 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2499 2500 ASSERT(rval == DTRACESPEC_COMMITTING); 2501 } 2502 2503 src->dtb_offset = 0; 2504 src->dtb_xamot_drops += src->dtb_drops; 2505 src->dtb_drops = 0; 2506} 2507 2508/* 2509 * This routine discards an active speculation. If the specified speculation 2510 * is not in a valid state to perform a discard(), this routine will silently 2511 * do nothing.
The state of the specified speculation is transitioned 2512 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2513 */ 2514static void 2515dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2516 dtrace_specid_t which) 2517{ 2518 dtrace_speculation_t *spec; 2519 dtrace_speculation_state_t current, new = 0; 2520 dtrace_buffer_t *buf; 2521 2522 if (which == 0) 2523 return; 2524 2525 if (which > state->dts_nspeculations) { 2526 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2527 return; 2528 } 2529 2530 spec = &state->dts_speculations[which - 1]; 2531 buf = &spec->dtsp_buffer[cpu]; 2532 2533 do { 2534 current = spec->dtsp_state; 2535 2536 switch (current) { 2537 case DTRACESPEC_INACTIVE: 2538 case DTRACESPEC_COMMITTINGMANY: 2539 case DTRACESPEC_COMMITTING: 2540 case DTRACESPEC_DISCARDING: 2541 return; 2542 2543 case DTRACESPEC_ACTIVE: 2544 case DTRACESPEC_ACTIVEMANY: 2545 new = DTRACESPEC_DISCARDING; 2546 break; 2547 2548 case DTRACESPEC_ACTIVEONE: 2549 if (buf->dtb_offset != 0) { 2550 new = DTRACESPEC_INACTIVE; 2551 } else { 2552 new = DTRACESPEC_DISCARDING; 2553 } 2554 break; 2555 2556 default: 2557 ASSERT(0); 2558 } 2559 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2560 current, new) != current); 2561 2562 buf->dtb_offset = 0; 2563 buf->dtb_drops = 0; 2564} 2565 2566/* 2567 * Note: not called from probe context. This function is called 2568 * asynchronously from cross call context to clean any speculations that are 2569 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2570 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2571 * speculation. 2572 */ 2573static void 2574dtrace_speculation_clean_here(dtrace_state_t *state) 2575{ 2576 dtrace_icookie_t cookie; 2577 processorid_t cpu = curcpu; 2578 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2579 dtrace_specid_t i; 2580 2581 cookie = dtrace_interrupt_disable(); 2582 2583 if (dest->dtb_tomax == NULL) { 2584 dtrace_interrupt_enable(cookie); 2585 return; 2586 } 2587 2588 for (i = 0; i < state->dts_nspeculations; i++) { 2589 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2590 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2591 2592 if (src->dtb_tomax == NULL) 2593 continue; 2594 2595 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2596 src->dtb_offset = 0; 2597 continue; 2598 } 2599 2600 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2601 continue; 2602 2603 if (src->dtb_offset == 0) 2604 continue; 2605 2606 dtrace_speculation_commit(state, cpu, i + 1); 2607 } 2608 2609 dtrace_interrupt_enable(cookie); 2610} 2611 2612/* 2613 * Note: not called from probe context. This function is called 2614 * asynchronously (and at a regular interval) to clean any speculations that 2615 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 2616 * is work to be done, it cross calls all CPUs to perform that work; 2617 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the 2618 * INACTIVE state until they have been cleaned by all CPUs.
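 *
 * Roughly, the flow implemented below (a sketch, not verbatim):
 *
 *	mark each DISCARDING/COMMITTINGMANY speculation as cleaning;
 *	dtrace_xcall(DTRACE_CPUALL, dtrace_speculation_clean_here);
 *	CAS each marked speculation to DTRACESPEC_INACTIVE.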
2619 */ 2620static void 2621dtrace_speculation_clean(dtrace_state_t *state) 2622{ 2623 int work = 0, rv; 2624 dtrace_specid_t i; 2625 2626 for (i = 0; i < state->dts_nspeculations; i++) { 2627 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2628 2629 ASSERT(!spec->dtsp_cleaning); 2630 2631 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2632 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2633 continue; 2634 2635 work++; 2636 spec->dtsp_cleaning = 1; 2637 } 2638 2639 if (!work) 2640 return; 2641 2642 dtrace_xcall(DTRACE_CPUALL, 2643 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2644 2645 /* 2646 * We now know that all CPUs have committed or discarded their 2647 * speculation buffers, as appropriate. We can now set the state 2648 * to inactive. 2649 */ 2650 for (i = 0; i < state->dts_nspeculations; i++) { 2651 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2652 dtrace_speculation_state_t current, new; 2653 2654 if (!spec->dtsp_cleaning) 2655 continue; 2656 2657 current = spec->dtsp_state; 2658 ASSERT(current == DTRACESPEC_DISCARDING || 2659 current == DTRACESPEC_COMMITTINGMANY); 2660 2661 new = DTRACESPEC_INACTIVE; 2662 2663 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2664 ASSERT(rv == current); 2665 spec->dtsp_cleaning = 0; 2666 } 2667} 2668 2669/* 2670 * Called as part of a speculate() to get the speculative buffer associated 2671 * with a given speculation. Returns NULL if the specified speculation is not 2672 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2673 * the active CPU is not the specified CPU -- the speculation will be 2674 * atomically transitioned into the ACTIVEMANY state. 2675 */ 2676static dtrace_buffer_t * 2677dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2678 dtrace_specid_t which) 2679{ 2680 dtrace_speculation_t *spec; 2681 dtrace_speculation_state_t current, new = 0; 2682 dtrace_buffer_t *buf; 2683 2684 if (which == 0) 2685 return (NULL); 2686 2687 if (which > state->dts_nspeculations) { 2688 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2689 return (NULL); 2690 } 2691 2692 spec = &state->dts_speculations[which - 1]; 2693 buf = &spec->dtsp_buffer[cpuid]; 2694 2695 do { 2696 current = spec->dtsp_state; 2697 2698 switch (current) { 2699 case DTRACESPEC_INACTIVE: 2700 case DTRACESPEC_COMMITTINGMANY: 2701 case DTRACESPEC_DISCARDING: 2702 return (NULL); 2703 2704 case DTRACESPEC_COMMITTING: 2705 ASSERT(buf->dtb_offset == 0); 2706 return (NULL); 2707 2708 case DTRACESPEC_ACTIVEONE: 2709 /* 2710 * This speculation is currently active on one CPU. 2711 * Check the offset in the buffer; if it's non-zero, 2712 * that CPU must be us (and we leave the state alone). 2713 * If it's zero, assume that we're starting on a new 2714 * CPU -- and change the state to indicate that the 2715 * speculation is active on more than one CPU. 2716 */ 2717 if (buf->dtb_offset != 0) 2718 return (buf); 2719 2720 new = DTRACESPEC_ACTIVEMANY; 2721 break; 2722 2723 case DTRACESPEC_ACTIVEMANY: 2724 return (buf); 2725 2726 case DTRACESPEC_ACTIVE: 2727 new = DTRACESPEC_ACTIVEONE; 2728 break; 2729 2730 default: 2731 ASSERT(0); 2732 } 2733 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2734 current, new) != current); 2735 2736 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2737 return (buf); 2738} 2739 2740/* 2741 * Return a string. 
In the event that the user lacks the privilege to access 2742 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2743 * don't fail access checking. 2744 * 2745 * dtrace_dif_variable() uses this routine as a helper for various 2746 * builtin values such as 'execname' and 'probefunc.' 2747 */ 2748uintptr_t 2749dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 2750 dtrace_mstate_t *mstate) 2751{ 2752 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2753 uintptr_t ret; 2754 size_t strsz; 2755 2756 /* 2757 * The easy case: this probe is allowed to read all of memory, so 2758 * we can just return this as a vanilla pointer. 2759 */ 2760 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 2761 return (addr); 2762 2763 /* 2764 * This is the tougher case: we copy the string in question from 2765 * kernel memory into scratch memory and return it that way: this 2766 * ensures that we won't trip up when access checking tests the 2767 * BYREF return value. 2768 */ 2769 strsz = dtrace_strlen((char *)addr, size) + 1; 2770 2771 if (mstate->dtms_scratch_ptr + strsz > 2772 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2773 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2774 return (0); 2775 } 2776 2777 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2778 strsz); 2779 ret = mstate->dtms_scratch_ptr; 2780 mstate->dtms_scratch_ptr += strsz; 2781 return (ret); 2782} 2783 2784/* 2785 * Return a string from a memory address which is known to have one or 2786 * more concatenated, individually zero-terminated, sub-strings. 2787 * In the event that the user lacks the privilege to access 2788 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2789 * don't fail access checking. 2790 * 2791 * dtrace_dif_variable() uses this routine as a helper for various 2792 * builtin values such as 'execargs'. 2793 */ 2794static uintptr_t 2795dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state, 2796 dtrace_mstate_t *mstate) 2797{ 2798 char *p; 2799 size_t i; 2800 uintptr_t ret; 2801 2802 if (mstate->dtms_scratch_ptr + strsz > 2803 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2804 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2805 return (0); 2806 } 2807 2808 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2809 strsz); 2810 2811 /* Replace sub-string termination characters with a space. */ 2812 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1; 2813 p++, i++) 2814 if (*p == '\0') 2815 *p = ' '; 2816 2817 ret = mstate->dtms_scratch_ptr; 2818 mstate->dtms_scratch_ptr += strsz; 2819 return (ret); 2820} 2821 2822/* 2823 * This function implements the DIF emulator's variable lookups. The emulator 2824 * passes a reserved variable identifier and optional built-in array index. 2825 */ 2826static uint64_t 2827dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2828 uint64_t ndx) 2829{ 2830 /* 2831 * If we're accessing one of the uncached arguments, we'll turn this 2832 * into a reference in the args array.
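	 * For example, a reference to arg3 arrives as v == DIF_VAR_ARG3
	 * and is rewritten below into v == DIF_VAR_ARGS with ndx == 3 --
	 * that is, args[3].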
2833 */ 2834 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2835 ndx = v - DIF_VAR_ARG0; 2836 v = DIF_VAR_ARGS; 2837 } 2838 2839 switch (v) { 2840 case DIF_VAR_ARGS: 2841 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2842 if (ndx >= sizeof (mstate->dtms_arg) / 2843 sizeof (mstate->dtms_arg[0])) { 2844 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2845 dtrace_provider_t *pv; 2846 uint64_t val; 2847 2848 pv = mstate->dtms_probe->dtpr_provider; 2849 if (pv->dtpv_pops.dtps_getargval != NULL) 2850 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2851 mstate->dtms_probe->dtpr_id, 2852 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2853 else 2854 val = dtrace_getarg(ndx, aframes); 2855 2856 /* 2857 * This is regrettably required to keep the compiler 2858 * from tail-optimizing the call to dtrace_getarg(). 2859 * The condition always evaluates to true, but the 2860 * compiler has no way of figuring that out a priori. 2861 * (None of this would be necessary if the compiler 2862 * could be relied upon to _always_ tail-optimize 2863 * the call to dtrace_getarg() -- but it can't.) 2864 */ 2865 if (mstate->dtms_probe != NULL) 2866 return (val); 2867 2868 ASSERT(0); 2869 } 2870 2871 return (mstate->dtms_arg[ndx]); 2872 2873#if defined(sun) 2874 case DIF_VAR_UREGS: { 2875 klwp_t *lwp; 2876 2877 if (!dtrace_priv_proc(state)) 2878 return (0); 2879 2880 if ((lwp = curthread->t_lwp) == NULL) { 2881 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2882 cpu_core[curcpu].cpuc_dtrace_illval = NULL; 2883 return (0); 2884 } 2885 2886 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2887 2888 } 2889#else 2890 case DIF_VAR_UREGS: { 2891 struct trapframe *tframe; 2892 2893 if (!dtrace_priv_proc(state)) 2894 return (0); 2895 2896 if ((tframe = curthread->td_frame) == NULL) { 2897 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2898 cpu_core[curcpu].cpuc_dtrace_illval = 0; 2899 return (0); 2900 } 2901 2902 return (dtrace_getreg(tframe, ndx)); 2903 } 2904#endif 2905 2906 case DIF_VAR_CURTHREAD: 2907 if (!dtrace_priv_kernel(state)) 2908 return (0); 2909 return ((uint64_t)(uintptr_t)curthread); 2910 2911 case DIF_VAR_TIMESTAMP: 2912 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2913 mstate->dtms_timestamp = dtrace_gethrtime(); 2914 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2915 } 2916 return (mstate->dtms_timestamp); 2917 2918 case DIF_VAR_VTIMESTAMP: 2919 ASSERT(dtrace_vtime_references != 0); 2920 return (curthread->t_dtrace_vtime); 2921 2922 case DIF_VAR_WALLTIMESTAMP: 2923 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2924 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2925 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2926 } 2927 return (mstate->dtms_walltimestamp); 2928 2929#if defined(sun) 2930 case DIF_VAR_IPL: 2931 if (!dtrace_priv_kernel(state)) 2932 return (0); 2933 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2934 mstate->dtms_ipl = dtrace_getipl(); 2935 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2936 } 2937 return (mstate->dtms_ipl); 2938#endif 2939 2940 case DIF_VAR_EPID: 2941 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2942 return (mstate->dtms_epid); 2943 2944 case DIF_VAR_ID: 2945 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2946 return (mstate->dtms_probe->dtpr_id); 2947 2948 case DIF_VAR_STACKDEPTH: 2949 if (!dtrace_priv_kernel(state)) 2950 return (0); 2951 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2952 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2953 2954 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 2955
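			/*
			 * Cache the computed depth; any later stackdepth
			 * reference in this firing reuses it via the
			 * DTRACE_MSTATE_STACKDEPTH bit set below.
			 */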
mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2956 } 2957 return (mstate->dtms_stackdepth); 2958 2959 case DIF_VAR_USTACKDEPTH: 2960 if (!dtrace_priv_proc(state)) 2961 return (0); 2962 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2963 /* 2964 * See comment in DIF_VAR_PID. 2965 */ 2966 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2967 CPU_ON_INTR(CPU)) { 2968 mstate->dtms_ustackdepth = 0; 2969 } else { 2970 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2971 mstate->dtms_ustackdepth = 2972 dtrace_getustackdepth(); 2973 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2974 } 2975 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2976 } 2977 return (mstate->dtms_ustackdepth); 2978 2979 case DIF_VAR_CALLER: 2980 if (!dtrace_priv_kernel(state)) 2981 return (0); 2982 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2983 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2984 2985 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2986 /* 2987 * If this is an unanchored probe, we are 2988 * required to go through the slow path: 2989 * dtrace_caller() only guarantees correct 2990 * results for anchored probes. 2991 */ 2992 pc_t caller[2] = {0, 0}; 2993 2994 dtrace_getpcstack(caller, 2, aframes, 2995 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2996 mstate->dtms_caller = caller[1]; 2997 } else if ((mstate->dtms_caller = 2998 dtrace_caller(aframes)) == -1) { 2999 /* 3000 * We have failed to do this the quick way; 3001 * we must resort to the slower approach of 3002 * calling dtrace_getpcstack(). 3003 */ 3004 pc_t caller = 0; 3005 3006 dtrace_getpcstack(&caller, 1, aframes, NULL); 3007 mstate->dtms_caller = caller; 3008 } 3009 3010 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 3011 } 3012 return (mstate->dtms_caller); 3013 3014 case DIF_VAR_UCALLER: 3015 if (!dtrace_priv_proc(state)) 3016 return (0); 3017 3018 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 3019 uint64_t ustack[3]; 3020 3021 /* 3022 * dtrace_getupcstack() fills in the first uint64_t 3023 * with the current PID. The second uint64_t will 3024 * be the program counter at user-level. The third 3025 * uint64_t will contain the caller, which is what 3026 * we're after. 3027 */ 3028 ustack[2] = 0; 3029 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3030 dtrace_getupcstack(ustack, 3); 3031 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3032 mstate->dtms_ucaller = ustack[2]; 3033 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 3034 } 3035 3036 return (mstate->dtms_ucaller); 3037 3038 case DIF_VAR_PROBEPROV: 3039 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3040 return (dtrace_dif_varstr( 3041 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 3042 state, mstate)); 3043 3044 case DIF_VAR_PROBEMOD: 3045 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3046 return (dtrace_dif_varstr( 3047 (uintptr_t)mstate->dtms_probe->dtpr_mod, 3048 state, mstate)); 3049 3050 case DIF_VAR_PROBEFUNC: 3051 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3052 return (dtrace_dif_varstr( 3053 (uintptr_t)mstate->dtms_probe->dtpr_func, 3054 state, mstate)); 3055 3056 case DIF_VAR_PROBENAME: 3057 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3058 return (dtrace_dif_varstr( 3059 (uintptr_t)mstate->dtms_probe->dtpr_name, 3060 state, mstate)); 3061 3062 case DIF_VAR_PID: 3063 if (!dtrace_priv_proc(state)) 3064 return (0); 3065 3066#if defined(sun) 3067 /* 3068 * Note that we are assuming that an unanchored probe is 3069 * always due to a high-level interrupt. (And we're assuming 3070 * that there is only a single high level interrupt.) 
3071 */ 3072 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3073 return (pid0.pid_id); 3074 3075 /* 3076 * It is always safe to dereference one's own t_procp pointer: 3077 * it always points to a valid, allocated proc structure. 3078 * Further, it is always safe to dereference the p_pidp member 3079 * of one's own proc structure. (These are truisms because 3080 * threads and processes don't clean up their own state -- 3081 * they leave that task to whomever reaps them.) 3082 */ 3083 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 3084#else 3085 return ((uint64_t)curproc->p_pid); 3086#endif 3087 3088 case DIF_VAR_PPID: 3089 if (!dtrace_priv_proc(state)) 3090 return (0); 3091 3092#if defined(sun) 3093 /* 3094 * See comment in DIF_VAR_PID. 3095 */ 3096 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3097 return (pid0.pid_id); 3098 3099 /* 3100 * It is always safe to dereference one's own t_procp pointer: 3101 * it always points to a valid, allocated proc structure. 3102 * (This is true because threads don't clean up their own 3103 * state -- they leave that task to whomever reaps them.) 3104 */ 3105 return ((uint64_t)curthread->t_procp->p_ppid); 3106#else 3107 return ((uint64_t)curproc->p_pptr->p_pid); 3108#endif 3109 3110 case DIF_VAR_TID: 3111#if defined(sun) 3112 /* 3113 * See comment in DIF_VAR_PID. 3114 */ 3115 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3116 return (0); 3117#endif 3118 3119 return ((uint64_t)curthread->t_tid); 3120 3121 case DIF_VAR_EXECARGS: { 3122 struct pargs *p_args = curthread->td_proc->p_args; 3123 3124 if (p_args == NULL) 3125 return (0); 3126 3127 return (dtrace_dif_varstrz( 3128 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate)); 3129 } 3130 3131 case DIF_VAR_EXECNAME: 3132#if defined(sun) 3133 if (!dtrace_priv_proc(state)) 3134 return (0); 3135 3136 /* 3137 * See comment in DIF_VAR_PID. 3138 */ 3139 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3140 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 3141 3142 /* 3143 * It is always safe to dereference one's own t_procp pointer: 3144 * it always points to a valid, allocated proc structure. 3145 * (This is true because threads don't clean up their own 3146 * state -- they leave that task to whomever reaps them.) 3147 */ 3148 return (dtrace_dif_varstr( 3149 (uintptr_t)curthread->t_procp->p_user.u_comm, 3150 state, mstate)); 3151#else 3152 return (dtrace_dif_varstr( 3153 (uintptr_t) curthread->td_proc->p_comm, state, mstate)); 3154#endif 3155 3156 case DIF_VAR_ZONENAME: 3157#if defined(sun) 3158 if (!dtrace_priv_proc(state)) 3159 return (0); 3160 3161 /* 3162 * See comment in DIF_VAR_PID. 3163 */ 3164 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3165 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 3166 3167 /* 3168 * It is always safe to dereference one's own t_procp pointer: 3169 * it always points to a valid, allocated proc structure. 3170 * (This is true because threads don't clean up their own 3171 * state -- they leave that task to whomever reaps them.) 3172 */ 3173 return (dtrace_dif_varstr( 3174 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3175 state, mstate)); 3176#else 3177 return (0); 3178#endif 3179 3180 case DIF_VAR_UID: 3181 if (!dtrace_priv_proc(state)) 3182 return (0); 3183 3184#if defined(sun) 3185 /* 3186 * See comment in DIF_VAR_PID.
3187 */ 3188 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3189 return ((uint64_t)p0.p_cred->cr_uid); 3190#endif 3191 3192 /* 3193 * It is always safe to dereference one's own t_procp pointer: 3194 * it always points to a valid, allocated proc structure. 3195 * (This is true because threads don't clean up their own 3196 * state -- they leave that task to whomever reaps them.) 3197 * 3198 * Additionally, it is safe to dereference one's own process 3199 * credential, since this is never NULL after process birth. 3200 */ 3201 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3202 3203 case DIF_VAR_GID: 3204 if (!dtrace_priv_proc(state)) 3205 return (0); 3206 3207#if defined(sun) 3208 /* 3209 * See comment in DIF_VAR_PID. 3210 */ 3211 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3212 return ((uint64_t)p0.p_cred->cr_gid); 3213#endif 3214 3215 /* 3216 * It is always safe to dereference one's own t_procp pointer: 3217 * it always points to a valid, allocated proc structure. 3218 * (This is true because threads don't clean up their own 3219 * state -- they leave that task to whomever reaps them.) 3220 * 3221 * Additionally, it is safe to dereference one's own process 3222 * credential, since this is never NULL after process birth. 3223 */ 3224 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3225 3226 case DIF_VAR_ERRNO: { 3227#if defined(sun) 3228 klwp_t *lwp; 3229 if (!dtrace_priv_proc(state)) 3230 return (0); 3231 3232 /* 3233 * See comment in DIF_VAR_PID. 3234 */ 3235 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3236 return (0); 3237 3238 /* 3239 * It is always safe to dereference one's own t_lwp pointer in 3240 * the event that this pointer is non-NULL. (This is true 3241 * because threads and lwps don't clean up their own state -- 3242 * they leave that task to whomever reaps them.) 3243 */ 3244 if ((lwp = curthread->t_lwp) == NULL) 3245 return (0); 3246 3247 return ((uint64_t)lwp->lwp_errno); 3248#else 3249 return (curthread->td_errno); 3250#endif 3251 } 3252#if !defined(sun) 3253 case DIF_VAR_CPU: { 3254 return curcpu; 3255 } 3256#endif 3257 default: 3258 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3259 return (0); 3260 } 3261} 3262 3263/* 3264 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 3265 * Notice that we don't bother validating the proper number of arguments or 3266 * their types in the tuple stack. This isn't needed because all argument 3267 * interpretation is safe because of our load safety -- the worst that can 3268 * happen is that a bogus program can obtain bogus results. 
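 * For example, handing a garbage pointer to the strlen() subroutine below
 * yields, at worst, a garbage length: the dtrace_load*() routines absorb
 * bad loads (recording a fault in the CPU flags) rather than panicking.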
3269 */ 3270static void 3271dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 3272 dtrace_key_t *tupregs, int nargs, 3273 dtrace_mstate_t *mstate, dtrace_state_t *state) 3274{ 3275 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 3276 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 3277 dtrace_vstate_t *vstate = &state->dts_vstate; 3278 3279#if defined(sun) 3280 union { 3281 mutex_impl_t mi; 3282 uint64_t mx; 3283 } m; 3284 3285 union { 3286 krwlock_t ri; 3287 uintptr_t rw; 3288 } r; 3289#else 3290 struct thread *lowner; 3291 union { 3292 struct lock_object *li; 3293 uintptr_t lx; 3294 } l; 3295#endif 3296 3297 switch (subr) { 3298 case DIF_SUBR_RAND: 3299 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 3300 break; 3301 3302#if defined(sun) 3303 case DIF_SUBR_MUTEX_OWNED: 3304 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3305 mstate, vstate)) { 3306 regs[rd] = 0; 3307 break; 3308 } 3309 3310 m.mx = dtrace_load64(tupregs[0].dttk_value); 3311 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3312 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3313 else 3314 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 3315 break; 3316 3317 case DIF_SUBR_MUTEX_OWNER: 3318 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3319 mstate, vstate)) { 3320 regs[rd] = 0; 3321 break; 3322 } 3323 3324 m.mx = dtrace_load64(tupregs[0].dttk_value); 3325 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3326 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3327 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3328 else 3329 regs[rd] = 0; 3330 break; 3331 3332 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3333 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3334 mstate, vstate)) { 3335 regs[rd] = 0; 3336 break; 3337 } 3338 3339 m.mx = dtrace_load64(tupregs[0].dttk_value); 3340 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3341 break; 3342 3343 case DIF_SUBR_MUTEX_TYPE_SPIN: 3344 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3345 mstate, vstate)) { 3346 regs[rd] = 0; 3347 break; 3348 } 3349 3350 m.mx = dtrace_load64(tupregs[0].dttk_value); 3351 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3352 break; 3353 3354 case DIF_SUBR_RW_READ_HELD: { 3355 uintptr_t tmp; 3356 3357 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3358 mstate, vstate)) { 3359 regs[rd] = 0; 3360 break; 3361 } 3362 3363 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3364 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 3365 break; 3366 } 3367 3368 case DIF_SUBR_RW_WRITE_HELD: 3369 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3370 mstate, vstate)) { 3371 regs[rd] = 0; 3372 break; 3373 } 3374 3375 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3376 regs[rd] = _RW_WRITE_HELD(&r.ri); 3377 break; 3378 3379 case DIF_SUBR_RW_ISWRITER: 3380 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3381 mstate, vstate)) { 3382 regs[rd] = 0; 3383 break; 3384 } 3385 3386 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3387 regs[rd] = _RW_ISWRITER(&r.ri); 3388 break; 3389 3390#else 3391 case DIF_SUBR_MUTEX_OWNED: 3392 if (!dtrace_canload(tupregs[0].dttk_value, 3393 sizeof (struct lock_object), mstate, vstate)) { 3394 regs[rd] = 0; 3395 break; 3396 } 3397 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3398 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3399 break; 3400 3401 case DIF_SUBR_MUTEX_OWNER: 3402 if (!dtrace_canload(tupregs[0].dttk_value, 3403 sizeof (struct lock_object), mstate, vstate)) { 3404 regs[rd] = 0; 3405 break; 3406 } 3407 l.lx = 
dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3408 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3409 regs[rd] = (uintptr_t)lowner; 3410 break; 3411 3412 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3413 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3414 mstate, vstate)) { 3415 regs[rd] = 0; 3416 break; 3417 } 3418 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3419 /* XXX - should be only LC_SLEEPABLE? */ 3420 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & 3421 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0; 3422 break; 3423 3424 case DIF_SUBR_MUTEX_TYPE_SPIN: 3425 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3426 mstate, vstate)) { 3427 regs[rd] = 0; 3428 break; 3429 } 3430 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3431 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; 3432 break; 3433 3434 case DIF_SUBR_RW_READ_HELD: 3435 case DIF_SUBR_SX_SHARED_HELD: 3436 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3437 mstate, vstate)) { 3438 regs[rd] = 0; 3439 break; 3440 } 3441 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3442 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3443 lowner == NULL; 3444 break; 3445 3446 case DIF_SUBR_RW_WRITE_HELD: 3447 case DIF_SUBR_SX_EXCLUSIVE_HELD: 3448 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3449 mstate, vstate)) { 3450 regs[rd] = 0; 3451 break; 3452 } 3453 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3454 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3455 regs[rd] = (lowner == curthread); 3456 break; 3457 3458 case DIF_SUBR_RW_ISWRITER: 3459 case DIF_SUBR_SX_ISEXCLUSIVE: 3460 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3461 mstate, vstate)) { 3462 regs[rd] = 0; 3463 break; 3464 } 3465 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3466 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3467 lowner != NULL; 3468 break; 3469#endif /* ! defined(sun) */ 3470 3471 case DIF_SUBR_BCOPY: { 3472 /* 3473 * We need to be sure that the destination is in the scratch 3474 * region -- no other region is allowed. 3475 */ 3476 uintptr_t src = tupregs[0].dttk_value; 3477 uintptr_t dest = tupregs[1].dttk_value; 3478 size_t size = tupregs[2].dttk_value; 3479 3480 if (!dtrace_inscratch(dest, size, mstate)) { 3481 *flags |= CPU_DTRACE_BADADDR; 3482 *illval = regs[rd]; 3483 break; 3484 } 3485 3486 if (!dtrace_canload(src, size, mstate, vstate)) { 3487 regs[rd] = 0; 3488 break; 3489 } 3490 3491 dtrace_bcopy((void *)src, (void *)dest, size); 3492 break; 3493 } 3494 3495 case DIF_SUBR_ALLOCA: 3496 case DIF_SUBR_COPYIN: { 3497 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 3498 uint64_t size = 3499 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 3500 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 3501 3502 /* 3503 * This action doesn't require any credential checks since 3504 * probes will not activate in user contexts to which the 3505 * enabling user does not have permissions. 3506 */ 3507 3508 /* 3509 * Rounding up the user allocation size could have overflowed 3510 * a large, bogus allocation (like -1ULL) to 0. 
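		 * Concretely: with size == -1ULL, scratch_size wraps and
		 * ends up smaller than size itself, which is exactly what
		 * the first test below catches.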
3511 */ 3512 if (scratch_size < size || 3513 !DTRACE_INSCRATCH(mstate, scratch_size)) { 3514 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3515 regs[rd] = 0; 3516 break; 3517 } 3518 3519 if (subr == DIF_SUBR_COPYIN) { 3520 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3521 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3522 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3523 } 3524 3525 mstate->dtms_scratch_ptr += scratch_size; 3526 regs[rd] = dest; 3527 break; 3528 } 3529 3530 case DIF_SUBR_COPYINTO: { 3531 uint64_t size = tupregs[1].dttk_value; 3532 uintptr_t dest = tupregs[2].dttk_value; 3533 3534 /* 3535 * This action doesn't require any credential checks since 3536 * probes will not activate in user contexts to which the 3537 * enabling user does not have permissions. 3538 */ 3539 if (!dtrace_inscratch(dest, size, mstate)) { 3540 *flags |= CPU_DTRACE_BADADDR; 3541 *illval = regs[rd]; 3542 break; 3543 } 3544 3545 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3546 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3547 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3548 break; 3549 } 3550 3551 case DIF_SUBR_COPYINSTR: { 3552 uintptr_t dest = mstate->dtms_scratch_ptr; 3553 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3554 3555 if (nargs > 1 && tupregs[1].dttk_value < size) 3556 size = tupregs[1].dttk_value + 1; 3557 3558 /* 3559 * This action doesn't require any credential checks since 3560 * probes will not activate in user contexts to which the 3561 * enabling user does not have permissions. 3562 */ 3563 if (!DTRACE_INSCRATCH(mstate, size)) { 3564 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3565 regs[rd] = 0; 3566 break; 3567 } 3568 3569 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3570 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 3571 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3572 3573 ((char *)dest)[size - 1] = '\0'; 3574 mstate->dtms_scratch_ptr += size; 3575 regs[rd] = dest; 3576 break; 3577 } 3578 3579#if defined(sun) 3580 case DIF_SUBR_MSGSIZE: 3581 case DIF_SUBR_MSGDSIZE: { 3582 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3583 uintptr_t wptr, rptr; 3584 size_t count = 0; 3585 int cont = 0; 3586 3587 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { 3588 3589 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3590 vstate)) { 3591 regs[rd] = 0; 3592 break; 3593 } 3594 3595 wptr = dtrace_loadptr(baddr + 3596 offsetof(mblk_t, b_wptr)); 3597 3598 rptr = dtrace_loadptr(baddr + 3599 offsetof(mblk_t, b_rptr)); 3600 3601 if (wptr < rptr) { 3602 *flags |= CPU_DTRACE_BADADDR; 3603 *illval = tupregs[0].dttk_value; 3604 break; 3605 } 3606 3607 daddr = dtrace_loadptr(baddr + 3608 offsetof(mblk_t, b_datap)); 3609 3610 baddr = dtrace_loadptr(baddr + 3611 offsetof(mblk_t, b_cont)); 3612 3613 /* 3614 * We want to protect against denial-of-service here, 3615 * so we're only going to search the list for 3616 * dtrace_msgdsize_max mblks.
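			 * (Without this bound, a corrupt or maliciously
			 * crafted b_cont chain that loops back on itself
			 * would keep probe context walking the list
			 * forever.)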
3617 */ 3618 if (cont++ > dtrace_msgdsize_max) { 3619 *flags |= CPU_DTRACE_ILLOP; 3620 break; 3621 } 3622 3623 if (subr == DIF_SUBR_MSGDSIZE) { 3624 if (dtrace_load8(daddr + 3625 offsetof(dblk_t, db_type)) != M_DATA) 3626 continue; 3627 } 3628 3629 count += wptr - rptr; 3630 } 3631 3632 if (!(*flags & CPU_DTRACE_FAULT)) 3633 regs[rd] = count; 3634 3635 break; 3636 } 3637#endif 3638 3639 case DIF_SUBR_PROGENYOF: { 3640 pid_t pid = tupregs[0].dttk_value; 3641 proc_t *p; 3642 int rval = 0; 3643 3644 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3645 3646 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3647#if defined(sun) 3648 if (p->p_pidp->pid_id == pid) { 3649#else 3650 if (p->p_pid == pid) { 3651#endif 3652 rval = 1; 3653 break; 3654 } 3655 } 3656 3657 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3658 3659 regs[rd] = rval; 3660 break; 3661 } 3662 3663 case DIF_SUBR_SPECULATION: 3664 regs[rd] = dtrace_speculation(state); 3665 break; 3666 3667 case DIF_SUBR_COPYOUT: { 3668 uintptr_t kaddr = tupregs[0].dttk_value; 3669 uintptr_t uaddr = tupregs[1].dttk_value; 3670 uint64_t size = tupregs[2].dttk_value; 3671 3672 if (!dtrace_destructive_disallow && 3673 dtrace_priv_proc_control(state) && 3674 !dtrace_istoxic(kaddr, size)) { 3675 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3676 dtrace_copyout(kaddr, uaddr, size, flags); 3677 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3678 } 3679 break; 3680 } 3681 3682 case DIF_SUBR_COPYOUTSTR: { 3683 uintptr_t kaddr = tupregs[0].dttk_value; 3684 uintptr_t uaddr = tupregs[1].dttk_value; 3685 uint64_t size = tupregs[2].dttk_value; 3686 3687 if (!dtrace_destructive_disallow && 3688 dtrace_priv_proc_control(state) && 3689 !dtrace_istoxic(kaddr, size)) { 3690 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3691 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3692 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3693 } 3694 break; 3695 } 3696 3697 case DIF_SUBR_STRLEN: { 3698 size_t sz; 3699 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3700 sz = dtrace_strlen((char *)addr, 3701 state->dts_options[DTRACEOPT_STRSIZE]); 3702 3703 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3704 regs[rd] = 0; 3705 break; 3706 } 3707 3708 regs[rd] = sz; 3709 3710 break; 3711 } 3712 3713 case DIF_SUBR_STRCHR: 3714 case DIF_SUBR_STRRCHR: { 3715 /* 3716 * We're going to iterate over the string looking for the 3717 * specified character. We will iterate until we have reached 3718 * the string length or we have found the character. If this 3719 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3720 * of the specified character instead of the first. 3721 */ 3722 uintptr_t saddr = tupregs[0].dttk_value; 3723 uintptr_t addr = tupregs[0].dttk_value; 3724 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3725 char c, target = (char)tupregs[1].dttk_value; 3726 3727 for (regs[rd] = 0; addr < limit; addr++) { 3728 if ((c = dtrace_load8(addr)) == target) { 3729 regs[rd] = addr; 3730 3731 if (subr == DIF_SUBR_STRCHR) 3732 break; 3733 } 3734 3735 if (c == '\0') 3736 break; 3737 } 3738 3739 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3740 regs[rd] = 0; 3741 break; 3742 } 3743 3744 break; 3745 } 3746 3747 case DIF_SUBR_STRSTR: 3748 case DIF_SUBR_INDEX: 3749 case DIF_SUBR_RINDEX: { 3750 /* 3751 * We're going to iterate over the string looking for the 3752 * specified string. We will iterate until we have reached 3753 * the string length or we have found the string. 
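		 * The scan below costs O(len * sublen) comparisons in the
		 * worst case.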
(Yes, this 3754 * is done in the most naive way possible -- but considering 3755 * that the string we're searching for is likely to be 3756 * relatively short, the complexity of Rabin-Karp or similar 3757 * hardly seems merited.) 3758 */ 3759 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3760 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3761 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3762 size_t len = dtrace_strlen(addr, size); 3763 size_t sublen = dtrace_strlen(substr, size); 3764 char *limit = addr + len, *orig = addr; 3765 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3766 int inc = 1; 3767 3768 regs[rd] = notfound; 3769 3770 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3771 regs[rd] = 0; 3772 break; 3773 } 3774 3775 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3776 vstate)) { 3777 regs[rd] = 0; 3778 break; 3779 } 3780 3781 /* 3782 * strstr() and index()/rindex() have similar semantics if 3783 * both strings are the empty string: strstr() returns a 3784 * pointer to the (empty) string, and index() and rindex() 3785 * both return index 0 (regardless of any position argument). 3786 */ 3787 if (sublen == 0 && len == 0) { 3788 if (subr == DIF_SUBR_STRSTR) 3789 regs[rd] = (uintptr_t)addr; 3790 else 3791 regs[rd] = 0; 3792 break; 3793 } 3794 3795 if (subr != DIF_SUBR_STRSTR) { 3796 if (subr == DIF_SUBR_RINDEX) { 3797 limit = orig - 1; 3798 addr += len; 3799 inc = -1; 3800 } 3801 3802 /* 3803 * Both index() and rindex() take an optional position 3804 * argument that denotes the starting position. 3805 */ 3806 if (nargs == 3) { 3807 int64_t pos = (int64_t)tupregs[2].dttk_value; 3808 3809 /* 3810 * If the position argument to index() is 3811 * negative, Perl implicitly clamps it at 3812 * zero. This semantic is a little surprising 3813 * given the special meaning of negative 3814 * positions to similar Perl functions like 3815 * substr(), but it appears to reflect a 3816 * notion that index() can start from a 3817 * negative index and increment its way up to 3818 * the string. Given this notion, Perl's 3819 * rindex() is at least self-consistent in 3820 * that it implicitly clamps positions greater 3821 * than the string length to be the string 3822 * length. Where Perl completely loses 3823 * coherence, however, is when the specified 3824 * substring is the empty string (""). In 3825 * this case, even if the position is 3826 * negative, rindex() returns 0 -- and even if 3827 * the position is greater than the length, 3828 * index() returns the string length. These 3829 * semantics violate the notion that index() 3830 * should never return a value less than the 3831 * specified position and that rindex() should 3832 * never return a value greater than the 3833 * specified position. (One assumes that 3834 * these semantics are artifacts of Perl's 3835 * implementation and not the results of 3836 * deliberate design -- it beggars belief that 3837 * even Larry Wall could desire such oddness.) 3838 * While in the abstract one would wish for 3839 * consistent position semantics across 3840 * substr(), index() and rindex() -- or at the 3841 * very least self-consistent position 3842 * semantics for index() and rindex() -- we 3843 * instead opt to keep with the extant Perl 3844 * semantics, in all their broken glory. (Do 3845 * we have more desire to maintain Perl's 3846 * semantics than Perl does? Probably.) 
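				 *
				 * Concretely (illustrations of the cases
				 * handled below): rindex("foo", "", -5)
				 * returns 0 despite the negative position,
				 * and index("foo", "", 42) returns 3 (the
				 * string length) despite the position lying
				 * past the end.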
3847 */ 3848 if (subr == DIF_SUBR_RINDEX) { 3849 if (pos < 0) { 3850 if (sublen == 0) 3851 regs[rd] = 0; 3852 break; 3853 } 3854 3855 if (pos > len) 3856 pos = len; 3857 } else { 3858 if (pos < 0) 3859 pos = 0; 3860 3861 if (pos >= len) { 3862 if (sublen == 0) 3863 regs[rd] = len; 3864 break; 3865 } 3866 } 3867 3868 addr = orig + pos; 3869 } 3870 } 3871 3872 for (regs[rd] = notfound; addr != limit; addr += inc) { 3873 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3874 if (subr != DIF_SUBR_STRSTR) { 3875 /* 3876 * As D index() and rindex() are 3877 * modeled on Perl (and not on awk), 3878 * we return a zero-based (and not a 3879 * one-based) index. (For you Perl 3880 * weenies: no, we're not going to add 3881 * $[ -- and shouldn't you be at a con 3882 * or something?) 3883 */ 3884 regs[rd] = (uintptr_t)(addr - orig); 3885 break; 3886 } 3887 3888 ASSERT(subr == DIF_SUBR_STRSTR); 3889 regs[rd] = (uintptr_t)addr; 3890 break; 3891 } 3892 } 3893 3894 break; 3895 } 3896 3897 case DIF_SUBR_STRTOK: { 3898 uintptr_t addr = tupregs[0].dttk_value; 3899 uintptr_t tokaddr = tupregs[1].dttk_value; 3900 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3901 uintptr_t limit, toklimit = tokaddr + size; 3902 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 3903 char *dest = (char *)mstate->dtms_scratch_ptr; 3904 int i; 3905 3906 /* 3907 * Check both the token buffer and (later) the input buffer, 3908 * since both could be non-scratch addresses. 3909 */ 3910 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3911 regs[rd] = 0; 3912 break; 3913 } 3914 3915 if (!DTRACE_INSCRATCH(mstate, size)) { 3916 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3917 regs[rd] = 0; 3918 break; 3919 } 3920 3921 if (addr == 0) { 3922 /* 3923 * If the address specified is NULL, we use our saved 3924 * strtok pointer from the mstate. Note that this 3925 * means that the saved strtok pointer is _only_ 3926 * valid within multiple enablings of the same probe -- 3927 * it behaves like an implicit clause-local variable. 3928 */ 3929 addr = mstate->dtms_strtok; 3930 } else { 3931 /* 3932 * If the user-specified address is non-NULL we must 3933 * access check it. This is the only time we have 3934 * a chance to do so, since this address may reside 3935 * in the string table of this clause -- future calls 3936 * (when we fetch addr from mstate->dtms_strtok) 3937 * would fail this access check. 3938 */ 3939 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3940 regs[rd] = 0; 3941 break; 3942 } 3943 } 3944 3945 /* 3946 * First, zero the token map, and then process the token 3947 * string -- setting a bit in the map for every character 3948 * found in the token string. 3949 */ 3950 for (i = 0; i < sizeof (tokmap); i++) 3951 tokmap[i] = 0; 3952 3953 for (; tokaddr < toklimit; tokaddr++) { 3954 if ((c = dtrace_load8(tokaddr)) == '\0') 3955 break; 3956 3957 ASSERT((c >> 3) < sizeof (tokmap)); 3958 tokmap[c >> 3] |= (1 << (c & 0x7)); 3959 } 3960 3961 for (limit = addr + size; addr < limit; addr++) { 3962 /* 3963 * We're looking for a character that is _not_ contained 3964 * in the token string. 3965 */ 3966 if ((c = dtrace_load8(addr)) == '\0') 3967 break; 3968 3969 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3970 break; 3971 } 3972 3973 if (c == '\0') { 3974 /* 3975 * We reached the end of the string without finding 3976 * any character that was not in the token string. 3977 * We return NULL in this case, and we set the saved 3978 * address to NULL as well.
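			 *
			 * (Throughout, delimiter membership is an O(1)
			 * bitmap test: a character c is in the token set
			 * iff tokmap[c >> 3] has bit (c & 0x7) set, exactly
			 * as the map was built above.)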
3979 */ 3980 regs[rd] = 0; 3981 mstate->dtms_strtok = 0; 3982 break; 3983 } 3984 3985 /* 3986 * From here on, we're copying into the destination string. 3987 */ 3988 for (i = 0; addr < limit && i < size - 1; addr++) { 3989 if ((c = dtrace_load8(addr)) == '\0') 3990 break; 3991 3992 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3993 break; 3994 3995 ASSERT(i < size); 3996 dest[i++] = c; 3997 } 3998 3999 ASSERT(i < size); 4000 dest[i] = '\0'; 4001 regs[rd] = (uintptr_t)dest; 4002 mstate->dtms_scratch_ptr += size; 4003 mstate->dtms_strtok = addr; 4004 break; 4005 } 4006 4007 case DIF_SUBR_SUBSTR: { 4008 uintptr_t s = tupregs[0].dttk_value; 4009 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4010 char *d = (char *)mstate->dtms_scratch_ptr; 4011 int64_t index = (int64_t)tupregs[1].dttk_value; 4012 int64_t remaining = (int64_t)tupregs[2].dttk_value; 4013 size_t len = dtrace_strlen((char *)s, size); 4014 int64_t i = 0; 4015 4016 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4017 regs[rd] = 0; 4018 break; 4019 } 4020 4021 if (!DTRACE_INSCRATCH(mstate, size)) { 4022 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4023 regs[rd] = 0; 4024 break; 4025 } 4026 4027 if (nargs <= 2) 4028 remaining = (int64_t)size; 4029 4030 if (index < 0) { 4031 index += len; 4032 4033 if (index < 0 && index + remaining > 0) { 4034 remaining += index; 4035 index = 0; 4036 } 4037 } 4038 4039 if (index >= len || index < 0) { 4040 remaining = 0; 4041 } else if (remaining < 0) { 4042 remaining += len - index; 4043 } else if (index + remaining > size) { 4044 remaining = size - index; 4045 } 4046 4047 for (i = 0; i < remaining; i++) { 4048 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 4049 break; 4050 } 4051 4052 d[i] = '\0'; 4053 4054 mstate->dtms_scratch_ptr += size; 4055 regs[rd] = (uintptr_t)d; 4056 break; 4057 } 4058 4059 case DIF_SUBR_TOUPPER: 4060 case DIF_SUBR_TOLOWER: { 4061 uintptr_t s = tupregs[0].dttk_value; 4062 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4063 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4064 size_t len = dtrace_strlen((char *)s, size); 4065 char lower, upper, convert; 4066 int64_t i; 4067 4068 if (subr == DIF_SUBR_TOUPPER) { 4069 lower = 'a'; 4070 upper = 'z'; 4071 convert = 'A'; 4072 } else { 4073 lower = 'A'; 4074 upper = 'Z'; 4075 convert = 'a'; 4076 } 4077 4078 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4079 regs[rd] = 0; 4080 break; 4081 } 4082 4083 if (!DTRACE_INSCRATCH(mstate, size)) { 4084 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4085 regs[rd] = 0; 4086 break; 4087 } 4088 4089 for (i = 0; i < size - 1; i++) { 4090 if ((c = dtrace_load8(s + i)) == '\0') 4091 break; 4092 4093 if (c >= lower && c <= upper) 4094 c = convert + (c - lower); 4095 4096 dest[i] = c; 4097 } 4098 4099 ASSERT(i < size); 4100 dest[i] = '\0'; 4101 regs[rd] = (uintptr_t)dest; 4102 mstate->dtms_scratch_ptr += size; 4103 break; 4104 } 4105 4106#if defined(sun) 4107 case DIF_SUBR_GETMAJOR: 4108#ifdef _LP64 4109 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 4110#else 4111 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 4112#endif 4113 break; 4114 4115 case DIF_SUBR_GETMINOR: 4116#ifdef _LP64 4117 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 4118#else 4119 regs[rd] = tupregs[0].dttk_value & MAXMIN; 4120#endif 4121 break; 4122 4123 case DIF_SUBR_DDI_PATHNAME: { 4124 /* 4125 * This one is a galactic mess. 
We are going to roughly 4126 * emulate ddi_pathname(), but it's made more complicated 4127 * by the fact that we (a) want to include the minor name and 4128 * (b) must proceed iteratively instead of recursively. 4129 */ 4130 uintptr_t dest = mstate->dtms_scratch_ptr; 4131 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4132 char *start = (char *)dest, *end = start + size - 1; 4133 uintptr_t daddr = tupregs[0].dttk_value; 4134 int64_t minor = (int64_t)tupregs[1].dttk_value; 4135 char *s; 4136 int i, len, depth = 0; 4137 4138 /* 4139 * Due to all the pointer jumping we do and context we must 4140 * rely upon, we just mandate that the user must have kernel 4141 * read privileges to use this routine. 4142 */ 4143 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 4144 *flags |= CPU_DTRACE_KPRIV; 4145 *illval = daddr; 4146 regs[rd] = 0; 4147 } 4148 4149 if (!DTRACE_INSCRATCH(mstate, size)) { 4150 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4151 regs[rd] = 0; 4152 break; 4153 } 4154 4155 *end = '\0'; 4156 4157 /* 4158 * We want to have a name for the minor. In order to do this, 4159 * we need to walk the minor list from the devinfo. We want 4160 * to be sure that we don't infinitely walk a circular list, 4161 * so we check for circularity by sending a scout pointer 4162 * ahead two elements for every element that we iterate over; 4163 * if the list is circular, these will ultimately point to the 4164 * same element. You may recognize this little trick as the 4165 * answer to a stupid interview question -- one that always 4166 * seems to be asked by those who had to have it laboriously 4167 * explained to them, and who can't even concisely describe 4168 * the conditions under which one would be forced to resort to 4169 * this technique. Needless to say, those conditions are 4170 * found here -- and probably only here. Is this the only use 4171 * of this infamous trick in shipping, production code? If it 4172 * isn't, it probably should be... 4173 */ 4174 if (minor != -1) { 4175 uintptr_t maddr = dtrace_loadptr(daddr + 4176 offsetof(struct dev_info, devi_minor)); 4177 4178 uintptr_t next = offsetof(struct ddi_minor_data, next); 4179 uintptr_t name = offsetof(struct ddi_minor_data, 4180 d_minor) + offsetof(struct ddi_minor, name); 4181 uintptr_t dev = offsetof(struct ddi_minor_data, 4182 d_minor) + offsetof(struct ddi_minor, dev); 4183 uintptr_t scout; 4184 4185 if (maddr != NULL) 4186 scout = dtrace_loadptr(maddr + next); 4187 4188 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4189 uint64_t m; 4190#ifdef _LP64 4191 m = dtrace_load64(maddr + dev) & MAXMIN64; 4192#else 4193 m = dtrace_load32(maddr + dev) & MAXMIN; 4194#endif 4195 if (m != minor) { 4196 maddr = dtrace_loadptr(maddr + next); 4197 4198 if (scout == NULL) 4199 continue; 4200 4201 scout = dtrace_loadptr(scout + next); 4202 4203 if (scout == NULL) 4204 continue; 4205 4206 scout = dtrace_loadptr(scout + next); 4207 4208 if (scout == NULL) 4209 continue; 4210 4211 if (scout == maddr) { 4212 *flags |= CPU_DTRACE_ILLOP; 4213 break; 4214 } 4215 4216 continue; 4217 } 4218 4219 /* 4220 * We have the minor data. Now we need to 4221 * copy the minor's name into the end of the 4222 * pathname. 
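 */

/*
 * An illustrative sketch of the "scout pointer" circularity check
 * described above: advance a scout two links for every one link the
 * walker takes; on a circular list the scout must eventually land on
 * the walker, while on a terminating list one of the two hits NULL.
 * (Userland sketch with a hypothetical node type.)
 */
#include <stddef.h>

struct node {
    struct node *next;
};

static int
list_is_circular(struct node *head)
{
    struct node *walk = head;
    struct node *scout = (head != NULL) ? head->next : NULL;

    while (walk != NULL && scout != NULL) {
        if (scout == walk)
            return (1);    /* the scout lapped the walker */

        walk = walk->next;
        scout = scout->next;
        if (scout != NULL)
            scout = scout->next;
    }

    return (0);    /* we hit a NULL: the list terminates */
}

/*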
4223 */ 4224 s = (char *)dtrace_loadptr(maddr + name); 4225 len = dtrace_strlen(s, size); 4226 4227 if (*flags & CPU_DTRACE_FAULT) 4228 break; 4229 4230 if (len != 0) { 4231 if ((end -= (len + 1)) < start) 4232 break; 4233 4234 *end = ':'; 4235 } 4236 4237 for (i = 1; i <= len; i++) 4238 end[i] = dtrace_load8((uintptr_t)s++); 4239 break; 4240 } 4241 } 4242 4243 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4244 ddi_node_state_t devi_state; 4245 4246 devi_state = dtrace_load32(daddr + 4247 offsetof(struct dev_info, devi_node_state)); 4248 4249 if (*flags & CPU_DTRACE_FAULT) 4250 break; 4251 4252 if (devi_state >= DS_INITIALIZED) { 4253 s = (char *)dtrace_loadptr(daddr + 4254 offsetof(struct dev_info, devi_addr)); 4255 len = dtrace_strlen(s, size); 4256 4257 if (*flags & CPU_DTRACE_FAULT) 4258 break; 4259 4260 if (len != 0) { 4261 if ((end -= (len + 1)) < start) 4262 break; 4263 4264 *end = '@'; 4265 } 4266 4267 for (i = 1; i <= len; i++) 4268 end[i] = dtrace_load8((uintptr_t)s++); 4269 } 4270 4271 /* 4272 * Now for the node name... 4273 */ 4274 s = (char *)dtrace_loadptr(daddr + 4275 offsetof(struct dev_info, devi_node_name)); 4276 4277 daddr = dtrace_loadptr(daddr + 4278 offsetof(struct dev_info, devi_parent)); 4279 4280 /* 4281 * If our parent is NULL (that is, if we're the root 4282 * node), we're going to use the special path 4283 * "devices". 4284 */ 4285 if (daddr == 0) 4286 s = "devices"; 4287 4288 len = dtrace_strlen(s, size); 4289 if (*flags & CPU_DTRACE_FAULT) 4290 break; 4291 4292 if ((end -= (len + 1)) < start) 4293 break; 4294 4295 for (i = 1; i <= len; i++) 4296 end[i] = dtrace_load8((uintptr_t)s++); 4297 *end = '/'; 4298 4299 if (depth++ > dtrace_devdepth_max) { 4300 *flags |= CPU_DTRACE_ILLOP; 4301 break; 4302 } 4303 } 4304 4305 if (end < start) 4306 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4307 4308 if (daddr == 0) { 4309 regs[rd] = (uintptr_t)end; 4310 mstate->dtms_scratch_ptr += size; 4311 } 4312 4313 break; 4314 } 4315#endif 4316 4317 case DIF_SUBR_STRJOIN: { 4318 char *d = (char *)mstate->dtms_scratch_ptr; 4319 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4320 uintptr_t s1 = tupregs[0].dttk_value; 4321 uintptr_t s2 = tupregs[1].dttk_value; 4322 int i = 0; 4323 4324 if (!dtrace_strcanload(s1, size, mstate, vstate) || 4325 !dtrace_strcanload(s2, size, mstate, vstate)) { 4326 regs[rd] = 0; 4327 break; 4328 } 4329 4330 if (!DTRACE_INSCRATCH(mstate, size)) { 4331 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4332 regs[rd] = 0; 4333 break; 4334 } 4335 4336 for (;;) { 4337 if (i >= size) { 4338 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4339 regs[rd] = 0; 4340 break; 4341 } 4342 4343 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 4344 i--; 4345 break; 4346 } 4347 } 4348 4349 for (;;) { 4350 if (i >= size) { 4351 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4352 regs[rd] = 0; 4353 break; 4354 } 4355 4356 if ((d[i++] = dtrace_load8(s2++)) == '\0') 4357 break; 4358 } 4359 4360 if (i < size) { 4361 mstate->dtms_scratch_ptr += i; 4362 regs[rd] = (uintptr_t)d; 4363 } 4364 4365 break; 4366 } 4367 4368 case DIF_SUBR_LLTOSTR: { 4369 int64_t i = (int64_t)tupregs[0].dttk_value; 4370 uint64_t val, digit; 4371 uint64_t size = 65; /* enough room for 2^64 in binary */ 4372 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 4373 int base = 10; 4374 4375 if (nargs > 1) { 4376 if ((base = tupregs[1].dttk_value) <= 1 || 4377 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 4378 *flags |= CPU_DTRACE_ILLOP; 4379 break; 4380 } 4381 } 4382 4383 val = (base == 10 && i < 0) ? 
i * -1 : i; 4384 4385 if (!DTRACE_INSCRATCH(mstate, size)) { 4386 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4387 regs[rd] = 0; 4388 break; 4389 } 4390 4391 for (*end-- = '\0'; val; val /= base) { 4392 if ((digit = val % base) <= '9' - '0') { 4393 *end-- = '0' + digit; 4394 } else { 4395 *end-- = 'a' + (digit - ('9' - '0') - 1); 4396 } 4397 } 4398 4399 if (i == 0 && base == 16) 4400 *end-- = '0'; 4401 4402 if (base == 16) 4403 *end-- = 'x'; 4404 4405 if (i == 0 || base == 8 || base == 16) 4406 *end-- = '0'; 4407 4408 if (i < 0 && base == 10) 4409 *end-- = '-'; 4410 4411 regs[rd] = (uintptr_t)end + 1; 4412 mstate->dtms_scratch_ptr += size; 4413 break; 4414 } 4415 4416 case DIF_SUBR_HTONS: 4417 case DIF_SUBR_NTOHS: 4418#if BYTE_ORDER == BIG_ENDIAN 4419 regs[rd] = (uint16_t)tupregs[0].dttk_value; 4420#else 4421 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 4422#endif 4423 break; 4424 4425 4426 case DIF_SUBR_HTONL: 4427 case DIF_SUBR_NTOHL: 4428#if BYTE_ORDER == BIG_ENDIAN 4429 regs[rd] = (uint32_t)tupregs[0].dttk_value; 4430#else 4431 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 4432#endif 4433 break; 4434 4435 4436 case DIF_SUBR_HTONLL: 4437 case DIF_SUBR_NTOHLL: 4438#if BYTE_ORDER == BIG_ENDIAN 4439 regs[rd] = (uint64_t)tupregs[0].dttk_value; 4440#else 4441 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 4442#endif 4443 break; 4444 4445 4446 case DIF_SUBR_DIRNAME: 4447 case DIF_SUBR_BASENAME: { 4448 char *dest = (char *)mstate->dtms_scratch_ptr; 4449 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4450 uintptr_t src = tupregs[0].dttk_value; 4451 int i, j, len = dtrace_strlen((char *)src, size); 4452 int lastbase = -1, firstbase = -1, lastdir = -1; 4453 int start, end; 4454 4455 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 4456 regs[rd] = 0; 4457 break; 4458 } 4459 4460 if (!DTRACE_INSCRATCH(mstate, size)) { 4461 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4462 regs[rd] = 0; 4463 break; 4464 } 4465 4466 /* 4467 * The basename and dirname for a zero-length string is 4468 * defined to be "." 4469 */ 4470 if (len == 0) { 4471 len = 1; 4472 src = (uintptr_t)"."; 4473 } 4474 4475 /* 4476 * Start from the back of the string, moving back toward the 4477 * front until we see a character that isn't a slash. That 4478 * character is the last character in the basename. 4479 */ 4480 for (i = len - 1; i >= 0; i--) { 4481 if (dtrace_load8(src + i) != '/') 4482 break; 4483 } 4484 4485 if (i >= 0) 4486 lastbase = i; 4487 4488 /* 4489 * Starting from the last character in the basename, move 4490 * towards the front until we find a slash. The character 4491 * that we processed immediately before that is the first 4492 * character in the basename. 4493 */ 4494 for (; i >= 0; i--) { 4495 if (dtrace_load8(src + i) == '/') 4496 break; 4497 } 4498 4499 if (i >= 0) 4500 firstbase = i + 1; 4501 4502 /* 4503 * Now keep going until we find a non-slash character. That 4504 * character is the last character in the dirname. 4505 */ 4506 for (; i >= 0; i--) { 4507 if (dtrace_load8(src + i) != '/') 4508 break; 4509 } 4510 4511 if (i >= 0) 4512 lastdir = i; 4513 4514 ASSERT(!(lastbase == -1 && firstbase != -1)); 4515 ASSERT(!(firstbase == -1 && lastdir != -1)); 4516 4517 if (lastbase == -1) { 4518 /* 4519 * We didn't find a non-slash character. We know that 4520 * the length is non-zero, so the whole string must be 4521 * slashes. In either the dirname or the basename 4522 * case, we return '/'. 
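 */

/*
 * An illustrative userland sketch of the lltostr() digit loop above:
 * digits come out least-significant first, so they are written
 * backwards from the end of the buffer, and the finished string
 * begins wherever the loop stopped.  (Hypothetical name; assumes
 * 2 <= base <= 36 as the bounds check above enforces, and omits the
 * sign, "0x" and octal-prefix handling.)
 */
#include <stdint.h>

static char *
u64tostr(uint64_t val, unsigned base, char *buf, int buflen)
{
    char *end = buf + buflen - 1;

    *end = '\0';

    if (val == 0)
        *--end = '0';

    for (; val != 0; val /= base) {
        uint64_t digit = val % base;

        *--end = (digit <= 9) ? '0' + digit : 'a' + (digit - 10);
    }

    return (end);    /* e.g. base 16, val 255 yields "ff" */
}

/*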
4523 */ 4524 ASSERT(firstbase == -1); 4525 firstbase = lastbase = lastdir = 0; 4526 } 4527 4528 if (firstbase == -1) { 4529 /* 4530 * The entire string consists only of a basename 4531 * component. If we're looking for dirname, we need 4532 * to change our string to be just "."; if we're 4533 * looking for a basename, we'll just set the first 4534 * character of the basename to be 0. 4535 */ 4536 if (subr == DIF_SUBR_DIRNAME) { 4537 ASSERT(lastdir == -1); 4538 src = (uintptr_t)"."; 4539 lastdir = 0; 4540 } else { 4541 firstbase = 0; 4542 } 4543 } 4544 4545 if (subr == DIF_SUBR_DIRNAME) { 4546 if (lastdir == -1) { 4547 /* 4548 * We know that we have a slash in the name -- 4549 * or lastdir would be set to 0, above. And 4550 * because lastdir is -1, we know that this 4551 * slash must be the first character. (That 4552 * is, the full string must be of the form 4553 * "/basename".) In this case, the last 4554 * character of the directory name is 0. 4555 */ 4556 lastdir = 0; 4557 } 4558 4559 start = 0; 4560 end = lastdir; 4561 } else { 4562 ASSERT(subr == DIF_SUBR_BASENAME); 4563 ASSERT(firstbase != -1 && lastbase != -1); 4564 start = firstbase; 4565 end = lastbase; 4566 } 4567 4568 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 4569 dest[j] = dtrace_load8(src + i); 4570 4571 dest[j] = '\0'; 4572 regs[rd] = (uintptr_t)dest; 4573 mstate->dtms_scratch_ptr += size; 4574 break; 4575 } 4576 4577 case DIF_SUBR_CLEANPATH: { 4578 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4579 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4580 uintptr_t src = tupregs[0].dttk_value; 4581 int i = 0, j = 0; 4582 4583 if (!dtrace_strcanload(src, size, mstate, vstate)) { 4584 regs[rd] = 0; 4585 break; 4586 } 4587 4588 if (!DTRACE_INSCRATCH(mstate, size)) { 4589 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4590 regs[rd] = 0; 4591 break; 4592 } 4593 4594 /* 4595 * Move forward, loading each character. 4596 */ 4597 do { 4598 c = dtrace_load8(src + i++); 4599next: 4600 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 4601 break; 4602 4603 if (c != '/') { 4604 dest[j++] = c; 4605 continue; 4606 } 4607 4608 c = dtrace_load8(src + i++); 4609 4610 if (c == '/') { 4611 /* 4612 * We have two slashes -- we can just advance 4613 * to the next character. 4614 */ 4615 goto next; 4616 } 4617 4618 if (c != '.') { 4619 /* 4620 * This is not "." and it's not ".." -- we can 4621 * just store the "/" and this character and 4622 * drive on. 4623 */ 4624 dest[j++] = '/'; 4625 dest[j++] = c; 4626 continue; 4627 } 4628 4629 c = dtrace_load8(src + i++); 4630 4631 if (c == '/') { 4632 /* 4633 * This is a "/./" component. We're not going 4634 * to store anything in the destination buffer; 4635 * we're just going to go to the next component. 4636 */ 4637 goto next; 4638 } 4639 4640 if (c != '.') { 4641 /* 4642 * This is not ".." -- we can just store the 4643 * "/." and this character and continue 4644 * processing. 4645 */ 4646 dest[j++] = '/'; 4647 dest[j++] = '.'; 4648 dest[j++] = c; 4649 continue; 4650 } 4651 4652 c = dtrace_load8(src + i++); 4653 4654 if (c != '/' && c != '\0') { 4655 /* 4656 * This is not ".." -- it's "..[mumble]". 4657 * We'll store the "/.." and this character 4658 * and continue processing. 4659 */ 4660 dest[j++] = '/'; 4661 dest[j++] = '.'; 4662 dest[j++] = '.'; 4663 dest[j++] = c; 4664 continue; 4665 } 4666 4667 /* 4668 * This is "/../" or "/..\0". We need to back up 4669 * our destination pointer until we find a "/". 
4670 */ 4671 i--; 4672 while (j != 0 && dest[--j] != '/') 4673 continue; 4674 4675 if (c == '\0') 4676 dest[++j] = '/'; 4677 } while (c != '\0'); 4678 4679 dest[j] = '\0'; 4680 regs[rd] = (uintptr_t)dest; 4681 mstate->dtms_scratch_ptr += size; 4682 break; 4683 } 4684 4685 case DIF_SUBR_INET_NTOA: 4686 case DIF_SUBR_INET_NTOA6: 4687 case DIF_SUBR_INET_NTOP: { 4688 size_t size; 4689 int af, argi, i; 4690 char *base, *end; 4691 4692 if (subr == DIF_SUBR_INET_NTOP) { 4693 af = (int)tupregs[0].dttk_value; 4694 argi = 1; 4695 } else { 4696 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4697 argi = 0; 4698 } 4699 4700 if (af == AF_INET) { 4701 ipaddr_t ip4; 4702 uint8_t *ptr8, val; 4703 4704 /* 4705 * Safely load the IPv4 address. 4706 */ 4707 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4708 4709 /* 4710 * Check an IPv4 string will fit in scratch. 4711 */ 4712 size = INET_ADDRSTRLEN; 4713 if (!DTRACE_INSCRATCH(mstate, size)) { 4714 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4715 regs[rd] = 0; 4716 break; 4717 } 4718 base = (char *)mstate->dtms_scratch_ptr; 4719 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4720 4721 /* 4722 * Stringify as a dotted decimal quad. 4723 */ 4724 *end-- = '\0'; 4725 ptr8 = (uint8_t *)&ip4; 4726 for (i = 3; i >= 0; i--) { 4727 val = ptr8[i]; 4728 4729 if (val == 0) { 4730 *end-- = '0'; 4731 } else { 4732 for (; val; val /= 10) { 4733 *end-- = '0' + (val % 10); 4734 } 4735 } 4736 4737 if (i > 0) 4738 *end-- = '.'; 4739 } 4740 ASSERT(end + 1 >= base); 4741 4742 } else if (af == AF_INET6) { 4743 struct in6_addr ip6; 4744 int firstzero, tryzero, numzero, v6end; 4745 uint16_t val; 4746 const char digits[] = "0123456789abcdef"; 4747 4748 /* 4749 * Stringify using RFC 1884 convention 2 - 16 bit 4750 * hexadecimal values with a zero-run compression. 4751 * Lower case hexadecimal digits are used. 4752 * eg, fe80::214:4fff:fe0b:76c8. 4753 * The IPv4 embedded form is returned for inet_ntop, 4754 * just the IPv4 string is returned for inet_ntoa6. 4755 */ 4756 4757 /* 4758 * Safely load the IPv6 address. 4759 */ 4760 dtrace_bcopy( 4761 (void *)(uintptr_t)tupregs[argi].dttk_value, 4762 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4763 4764 /* 4765 * Check an IPv6 string will fit in scratch. 4766 */ 4767 size = INET6_ADDRSTRLEN; 4768 if (!DTRACE_INSCRATCH(mstate, size)) { 4769 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4770 regs[rd] = 0; 4771 break; 4772 } 4773 base = (char *)mstate->dtms_scratch_ptr; 4774 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4775 *end-- = '\0'; 4776 4777 /* 4778 * Find the longest run of 16 bit zero values 4779 * for the single allowed zero compression - "::". 
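 */

/*
 * An illustrative userland sketch of the cleanpath() state machine
 * above: copy the path while collapsing "//" and "/./", and resolve
 * "/../" by backing the output up to the previous '/'.  (Slightly
 * simplified: no output-size guard, unlike the j + 5 check above,
 * and a path that climbs past its first component yields "/".)
 */
#include <stddef.h>

static void
cleanpath_sketch(char *dst, const char *src)
{
    size_t i = 0, j = 0;
    char c;

    do {
        c = src[i++];
next:
        if (c != '/') {
            dst[j++] = c;
            continue;
        }

        c = src[i++];
        if (c == '/')                   /* "//": skip one slash */
            goto next;

        if (c != '.') {
            dst[j++] = '/';
            dst[j++] = c;
            continue;
        }

        c = src[i++];
        if (c == '/')                   /* "/./": drop the component */
            goto next;

        if (c != '.') {                 /* "/.x": copy literally */
            dst[j++] = '/';
            dst[j++] = '.';
            dst[j++] = c;
            continue;
        }

        c = src[i++];
        if (c != '/' && c != '\0') {    /* "/..x": copy literally */
            dst[j++] = '/';
            dst[j++] = '.';
            dst[j++] = '.';
            dst[j++] = c;
            continue;
        }

        /* "/../" or "/..\0": back up to the previous '/'. */
        i--;
        while (j != 0 && dst[--j] != '/')
            continue;

        if (c == '\0')
            dst[j++] = '/';
    } while (c != '\0');

    dst[j] = '\0';
}

/*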
 */
			firstzero = -1;
			tryzero = -1;
			numzero = 1;
			for (i = 0; i < sizeof (struct in6_addr); i++) {
#if defined(sun)
				if (ip6._S6_un._S6_u8[i] == 0 &&
#else
				if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
#endif
				    tryzero == -1 && i % 2 == 0) {
					tryzero = i;
					continue;
				}

				if (tryzero != -1 &&
#if defined(sun)
				    (ip6._S6_un._S6_u8[i] != 0 ||
#else
				    (ip6.__u6_addr.__u6_addr8[i] != 0 ||
#endif
				    i == sizeof (struct in6_addr) - 1)) {

					if (i - tryzero <= numzero) {
						tryzero = -1;
						continue;
					}

					firstzero = tryzero;
					numzero = i - i % 2 - tryzero;
					tryzero = -1;

#if defined(sun)
					if (ip6._S6_un._S6_u8[i] == 0 &&
#else
					if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
#endif
					    i == sizeof (struct in6_addr) - 1)
						numzero += 2;
				}
			}
			ASSERT(firstzero + numzero <= sizeof (struct in6_addr));

			/*
			 * Check for an IPv4 embedded address.
			 */
			v6end = sizeof (struct in6_addr) - 2;
			if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
			    IN6_IS_ADDR_V4COMPAT(&ip6)) {
				for (i = sizeof (struct in6_addr) - 1;
				    i >= DTRACE_V4MAPPED_OFFSET; i--) {
					ASSERT(end >= base);

#if defined(sun)
					val = ip6._S6_un._S6_u8[i];
#else
					val = ip6.__u6_addr.__u6_addr8[i];
#endif

					if (val == 0) {
						*end-- = '0';
					} else {
						for (; val; val /= 10) {
							*end-- = '0' + val % 10;
						}
					}

					if (i > DTRACE_V4MAPPED_OFFSET)
						*end-- = '.';
				}

				if (subr == DIF_SUBR_INET_NTOA6)
					goto inetout;

				/*
				 * Set v6end to skip the IPv4 address that
				 * we have already stringified.
				 */
				v6end = 10;
			}

			/*
			 * Build the IPv6 string by working through the
			 * address in reverse.
			 */
			for (i = v6end; i >= 0; i -= 2) {
				ASSERT(end >= base);

				if (i == firstzero + numzero - 2) {
					*end-- = ':';
					*end-- = ':';
					i -= numzero - 2;
					continue;
				}

				if (i < 14 && i != firstzero - 2)
					*end-- = ':';

#if defined(sun)
				val = (ip6._S6_un._S6_u8[i] << 8) +
				    ip6._S6_un._S6_u8[i + 1];
#else
				val = (ip6.__u6_addr.__u6_addr8[i] << 8) +
				    ip6.__u6_addr.__u6_addr8[i + 1];
#endif

				if (val == 0) {
					*end-- = '0';
				} else {
					for (; val; val /= 16) {
						*end-- = digits[val % 16];
					}
				}
			}
			ASSERT(end + 1 >= base);

		} else {
			/*
			 * The user didn't use AF_INET or AF_INET6.
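 */

/*
 * An illustrative sketch of the "::" selection above, recast over the
 * eight 16-bit groups of an IPv6 address: find the longest run of
 * zero groups (RFC 1884 permits compressing only one such run, and a
 * run of a single group is conventionally not compressed).  The
 * byte-wise loop above does the same thing with i % 2 alignment.
 */
#include <stdint.h>

static void
find_zero_run(const uint16_t groups[8], int *start, int *len)
{
    int i, runstart = -1, runlen = 0;

    *start = -1;
    *len = 0;

    for (i = 0; i <= 8; i++) {
        if (i < 8 && groups[i] == 0) {
            if (runstart == -1)
                runstart = i;
            runlen++;
        } else {
            /* Flush the current run; keep the first longest one. */
            if (runlen > 1 && runlen > *len) {
                *start = runstart;
                *len = runlen;
            }
            runstart = -1;
            runlen = 0;
        }
    }
}

/*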
4899 */ 4900 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4901 regs[rd] = 0; 4902 break; 4903 } 4904 4905inetout: regs[rd] = (uintptr_t)end + 1; 4906 mstate->dtms_scratch_ptr += size; 4907 break; 4908 } 4909 4910 case DIF_SUBR_MEMREF: { 4911 uintptr_t size = 2 * sizeof(uintptr_t); 4912 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4913 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 4914 4915 /* address and length */ 4916 memref[0] = tupregs[0].dttk_value; 4917 memref[1] = tupregs[1].dttk_value; 4918 4919 regs[rd] = (uintptr_t) memref; 4920 mstate->dtms_scratch_ptr += scratch_size; 4921 break; 4922 } 4923 4924 case DIF_SUBR_TYPEREF: { 4925 uintptr_t size = 4 * sizeof(uintptr_t); 4926 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4927 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size; 4928 4929 /* address, num_elements, type_str, type_len */ 4930 typeref[0] = tupregs[0].dttk_value; 4931 typeref[1] = tupregs[1].dttk_value; 4932 typeref[2] = tupregs[2].dttk_value; 4933 typeref[3] = tupregs[3].dttk_value; 4934 4935 regs[rd] = (uintptr_t) typeref; 4936 mstate->dtms_scratch_ptr += scratch_size; 4937 break; 4938 } 4939 } 4940} 4941 4942/* 4943 * Emulate the execution of DTrace IR instructions specified by the given 4944 * DIF object. This function is deliberately void of assertions as all of 4945 * the necessary checks are handled by a call to dtrace_difo_validate(). 4946 */ 4947static uint64_t 4948dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4949 dtrace_vstate_t *vstate, dtrace_state_t *state) 4950{ 4951 const dif_instr_t *text = difo->dtdo_buf; 4952 const uint_t textlen = difo->dtdo_len; 4953 const char *strtab = difo->dtdo_strtab; 4954 const uint64_t *inttab = difo->dtdo_inttab; 4955 4956 uint64_t rval = 0; 4957 dtrace_statvar_t *svar; 4958 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4959 dtrace_difv_t *v; 4960 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4961 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4962 4963 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4964 uint64_t regs[DIF_DIR_NREGS]; 4965 uint64_t *tmp; 4966 4967 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4968 int64_t cc_r; 4969 uint_t pc = 0, id, opc = 0; 4970 uint8_t ttop = 0; 4971 dif_instr_t instr; 4972 uint_t r1, r2, rd; 4973 4974 /* 4975 * We stash the current DIF object into the machine state: we need it 4976 * for subsequent access checking. 
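 */

/*
 * An illustrative sketch of the scratch-space packing that memref()
 * and typeref() use above: round the scratch pointer up to the next
 * pointer-aligned boundary (P2ROUNDUP) and charge both the alignment
 * padding and the payload against the scratch region.  For a
 * power-of-two alignment 'a', P2ROUNDUP(x, a) is (x + a - 1) & ~(a - 1).
 * (Userland sketch; the bounds check against the scratch limit is
 * omitted here.)
 */
#include <stdint.h>
#include <stddef.h>

#define P2ROUNDUP_SKETCH(x, align) \
    (((x) + ((align) - 1)) & ~((uintptr_t)(align) - 1))

static uintptr_t
scratch_alloc_aligned(uintptr_t *scratch_ptr, size_t nbytes)
{
    uintptr_t aligned = P2ROUNDUP_SKETCH(*scratch_ptr,
        sizeof (uintptr_t));
    size_t charged = (aligned - *scratch_ptr) + nbytes;

    *scratch_ptr += charged;    /* padding + payload */
    return (aligned);
}

/*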
4977 */ 4978 mstate->dtms_difo = difo; 4979 4980 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4981 4982 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4983 opc = pc; 4984 4985 instr = text[pc++]; 4986 r1 = DIF_INSTR_R1(instr); 4987 r2 = DIF_INSTR_R2(instr); 4988 rd = DIF_INSTR_RD(instr); 4989 4990 switch (DIF_INSTR_OP(instr)) { 4991 case DIF_OP_OR: 4992 regs[rd] = regs[r1] | regs[r2]; 4993 break; 4994 case DIF_OP_XOR: 4995 regs[rd] = regs[r1] ^ regs[r2]; 4996 break; 4997 case DIF_OP_AND: 4998 regs[rd] = regs[r1] & regs[r2]; 4999 break; 5000 case DIF_OP_SLL: 5001 regs[rd] = regs[r1] << regs[r2]; 5002 break; 5003 case DIF_OP_SRL: 5004 regs[rd] = regs[r1] >> regs[r2]; 5005 break; 5006 case DIF_OP_SUB: 5007 regs[rd] = regs[r1] - regs[r2]; 5008 break; 5009 case DIF_OP_ADD: 5010 regs[rd] = regs[r1] + regs[r2]; 5011 break; 5012 case DIF_OP_MUL: 5013 regs[rd] = regs[r1] * regs[r2]; 5014 break; 5015 case DIF_OP_SDIV: 5016 if (regs[r2] == 0) { 5017 regs[rd] = 0; 5018 *flags |= CPU_DTRACE_DIVZERO; 5019 } else { 5020 regs[rd] = (int64_t)regs[r1] / 5021 (int64_t)regs[r2]; 5022 } 5023 break; 5024 5025 case DIF_OP_UDIV: 5026 if (regs[r2] == 0) { 5027 regs[rd] = 0; 5028 *flags |= CPU_DTRACE_DIVZERO; 5029 } else { 5030 regs[rd] = regs[r1] / regs[r2]; 5031 } 5032 break; 5033 5034 case DIF_OP_SREM: 5035 if (regs[r2] == 0) { 5036 regs[rd] = 0; 5037 *flags |= CPU_DTRACE_DIVZERO; 5038 } else { 5039 regs[rd] = (int64_t)regs[r1] % 5040 (int64_t)regs[r2]; 5041 } 5042 break; 5043 5044 case DIF_OP_UREM: 5045 if (regs[r2] == 0) { 5046 regs[rd] = 0; 5047 *flags |= CPU_DTRACE_DIVZERO; 5048 } else { 5049 regs[rd] = regs[r1] % regs[r2]; 5050 } 5051 break; 5052 5053 case DIF_OP_NOT: 5054 regs[rd] = ~regs[r1]; 5055 break; 5056 case DIF_OP_MOV: 5057 regs[rd] = regs[r1]; 5058 break; 5059 case DIF_OP_CMP: 5060 cc_r = regs[r1] - regs[r2]; 5061 cc_n = cc_r < 0; 5062 cc_z = cc_r == 0; 5063 cc_v = 0; 5064 cc_c = regs[r1] < regs[r2]; 5065 break; 5066 case DIF_OP_TST: 5067 cc_n = cc_v = cc_c = 0; 5068 cc_z = regs[r1] == 0; 5069 break; 5070 case DIF_OP_BA: 5071 pc = DIF_INSTR_LABEL(instr); 5072 break; 5073 case DIF_OP_BE: 5074 if (cc_z) 5075 pc = DIF_INSTR_LABEL(instr); 5076 break; 5077 case DIF_OP_BNE: 5078 if (cc_z == 0) 5079 pc = DIF_INSTR_LABEL(instr); 5080 break; 5081 case DIF_OP_BG: 5082 if ((cc_z | (cc_n ^ cc_v)) == 0) 5083 pc = DIF_INSTR_LABEL(instr); 5084 break; 5085 case DIF_OP_BGU: 5086 if ((cc_c | cc_z) == 0) 5087 pc = DIF_INSTR_LABEL(instr); 5088 break; 5089 case DIF_OP_BGE: 5090 if ((cc_n ^ cc_v) == 0) 5091 pc = DIF_INSTR_LABEL(instr); 5092 break; 5093 case DIF_OP_BGEU: 5094 if (cc_c == 0) 5095 pc = DIF_INSTR_LABEL(instr); 5096 break; 5097 case DIF_OP_BL: 5098 if (cc_n ^ cc_v) 5099 pc = DIF_INSTR_LABEL(instr); 5100 break; 5101 case DIF_OP_BLU: 5102 if (cc_c) 5103 pc = DIF_INSTR_LABEL(instr); 5104 break; 5105 case DIF_OP_BLE: 5106 if (cc_z | (cc_n ^ cc_v)) 5107 pc = DIF_INSTR_LABEL(instr); 5108 break; 5109 case DIF_OP_BLEU: 5110 if (cc_c | cc_z) 5111 pc = DIF_INSTR_LABEL(instr); 5112 break; 5113 case DIF_OP_RLDSB: 5114 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5115 *flags |= CPU_DTRACE_KPRIV; 5116 *illval = regs[r1]; 5117 break; 5118 } 5119 /*FALLTHROUGH*/ 5120 case DIF_OP_LDSB: 5121 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 5122 break; 5123 case DIF_OP_RLDSH: 5124 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5125 *flags |= CPU_DTRACE_KPRIV; 5126 *illval = regs[r1]; 5127 break; 5128 } 5129 /*FALLTHROUGH*/ 5130 case DIF_OP_LDSH: 5131 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 5132 break; 
5133 case DIF_OP_RLDSW: 5134 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5135 *flags |= CPU_DTRACE_KPRIV; 5136 *illval = regs[r1]; 5137 break; 5138 } 5139 /*FALLTHROUGH*/ 5140 case DIF_OP_LDSW: 5141 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 5142 break; 5143 case DIF_OP_RLDUB: 5144 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5145 *flags |= CPU_DTRACE_KPRIV; 5146 *illval = regs[r1]; 5147 break; 5148 } 5149 /*FALLTHROUGH*/ 5150 case DIF_OP_LDUB: 5151 regs[rd] = dtrace_load8(regs[r1]); 5152 break; 5153 case DIF_OP_RLDUH: 5154 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5155 *flags |= CPU_DTRACE_KPRIV; 5156 *illval = regs[r1]; 5157 break; 5158 } 5159 /*FALLTHROUGH*/ 5160 case DIF_OP_LDUH: 5161 regs[rd] = dtrace_load16(regs[r1]); 5162 break; 5163 case DIF_OP_RLDUW: 5164 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5165 *flags |= CPU_DTRACE_KPRIV; 5166 *illval = regs[r1]; 5167 break; 5168 } 5169 /*FALLTHROUGH*/ 5170 case DIF_OP_LDUW: 5171 regs[rd] = dtrace_load32(regs[r1]); 5172 break; 5173 case DIF_OP_RLDX: 5174 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 5175 *flags |= CPU_DTRACE_KPRIV; 5176 *illval = regs[r1]; 5177 break; 5178 } 5179 /*FALLTHROUGH*/ 5180 case DIF_OP_LDX: 5181 regs[rd] = dtrace_load64(regs[r1]); 5182 break; 5183 case DIF_OP_ULDSB: 5184 regs[rd] = (int8_t) 5185 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5186 break; 5187 case DIF_OP_ULDSH: 5188 regs[rd] = (int16_t) 5189 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5190 break; 5191 case DIF_OP_ULDSW: 5192 regs[rd] = (int32_t) 5193 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5194 break; 5195 case DIF_OP_ULDUB: 5196 regs[rd] = 5197 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5198 break; 5199 case DIF_OP_ULDUH: 5200 regs[rd] = 5201 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5202 break; 5203 case DIF_OP_ULDUW: 5204 regs[rd] = 5205 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5206 break; 5207 case DIF_OP_ULDX: 5208 regs[rd] = 5209 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 5210 break; 5211 case DIF_OP_RET: 5212 rval = regs[rd]; 5213 pc = textlen; 5214 break; 5215 case DIF_OP_NOP: 5216 break; 5217 case DIF_OP_SETX: 5218 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 5219 break; 5220 case DIF_OP_SETS: 5221 regs[rd] = (uint64_t)(uintptr_t) 5222 (strtab + DIF_INSTR_STRING(instr)); 5223 break; 5224 case DIF_OP_SCMP: { 5225 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 5226 uintptr_t s1 = regs[r1]; 5227 uintptr_t s2 = regs[r2]; 5228 5229 if (s1 != 0 && 5230 !dtrace_strcanload(s1, sz, mstate, vstate)) 5231 break; 5232 if (s2 != 0 && 5233 !dtrace_strcanload(s2, sz, mstate, vstate)) 5234 break; 5235 5236 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 5237 5238 cc_n = cc_r < 0; 5239 cc_z = cc_r == 0; 5240 cc_v = cc_c = 0; 5241 break; 5242 } 5243 case DIF_OP_LDGA: 5244 regs[rd] = dtrace_dif_variable(mstate, state, 5245 r1, regs[r2]); 5246 break; 5247 case DIF_OP_LDGS: 5248 id = DIF_INSTR_VAR(instr); 5249 5250 if (id >= DIF_VAR_OTHER_UBASE) { 5251 uintptr_t a; 5252 5253 id -= DIF_VAR_OTHER_UBASE; 5254 svar = vstate->dtvs_globals[id]; 5255 ASSERT(svar != NULL); 5256 v = &svar->dtsv_var; 5257 5258 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 5259 regs[rd] = svar->dtsv_data; 5260 break; 5261 } 5262 5263 a = (uintptr_t)svar->dtsv_data; 5264 5265 if (*(uint8_t *)a == UINT8_MAX) { 5266 /* 5267 * If the 0th byte is set to UINT8_MAX 5268 * then this is to be treated as a 5269 * reference to a NULL variable. 
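 */

/*
 * An illustrative sketch of the condition-code emulation used by the
 * CMP, TST and branch opcodes above: CMP derives negative, zero,
 * overflow and carry bits from a subtraction, and each conditional
 * branch is a boolean over those bits -- signed "greater" is
 * !(Z | (N ^ V)), unsigned "greater" is !(C | Z).  (Userland sketch
 * with hypothetical names.)
 */
#include <stdint.h>

struct cc_sketch {
    uint8_t n, z, v, c;
};

static struct cc_sketch
emu_cmp(uint64_t r1, uint64_t r2)
{
    int64_t r = (int64_t)(r1 - r2);
    struct cc_sketch cc;

    cc.n = r < 0;
    cc.z = r == 0;
    cc.v = 0;           /* as in the interpreter above */
    cc.c = r1 < r2;     /* unsigned borrow */
    return (cc);
}

static int
branch_taken_bg(struct cc_sketch cc)    /* signed > */
{
    return ((cc.z | (cc.n ^ cc.v)) == 0);
}

static int
branch_taken_bgu(struct cc_sketch cc)   /* unsigned > */
{
    return ((cc.c | cc.z) == 0);
}

/*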
5270 */ 5271 regs[rd] = 0; 5272 } else { 5273 regs[rd] = a + sizeof (uint64_t); 5274 } 5275 5276 break; 5277 } 5278 5279 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 5280 break; 5281 5282 case DIF_OP_STGS: 5283 id = DIF_INSTR_VAR(instr); 5284 5285 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5286 id -= DIF_VAR_OTHER_UBASE; 5287 5288 svar = vstate->dtvs_globals[id]; 5289 ASSERT(svar != NULL); 5290 v = &svar->dtsv_var; 5291 5292 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5293 uintptr_t a = (uintptr_t)svar->dtsv_data; 5294 5295 ASSERT(a != 0); 5296 ASSERT(svar->dtsv_size != 0); 5297 5298 if (regs[rd] == 0) { 5299 *(uint8_t *)a = UINT8_MAX; 5300 break; 5301 } else { 5302 *(uint8_t *)a = 0; 5303 a += sizeof (uint64_t); 5304 } 5305 if (!dtrace_vcanload( 5306 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5307 mstate, vstate)) 5308 break; 5309 5310 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5311 (void *)a, &v->dtdv_type); 5312 break; 5313 } 5314 5315 svar->dtsv_data = regs[rd]; 5316 break; 5317 5318 case DIF_OP_LDTA: 5319 /* 5320 * There are no DTrace built-in thread-local arrays at 5321 * present. This opcode is saved for future work. 5322 */ 5323 *flags |= CPU_DTRACE_ILLOP; 5324 regs[rd] = 0; 5325 break; 5326 5327 case DIF_OP_LDLS: 5328 id = DIF_INSTR_VAR(instr); 5329 5330 if (id < DIF_VAR_OTHER_UBASE) { 5331 /* 5332 * For now, this has no meaning. 5333 */ 5334 regs[rd] = 0; 5335 break; 5336 } 5337 5338 id -= DIF_VAR_OTHER_UBASE; 5339 5340 ASSERT(id < vstate->dtvs_nlocals); 5341 ASSERT(vstate->dtvs_locals != NULL); 5342 5343 svar = vstate->dtvs_locals[id]; 5344 ASSERT(svar != NULL); 5345 v = &svar->dtsv_var; 5346 5347 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5348 uintptr_t a = (uintptr_t)svar->dtsv_data; 5349 size_t sz = v->dtdv_type.dtdt_size; 5350 5351 sz += sizeof (uint64_t); 5352 ASSERT(svar->dtsv_size == NCPU * sz); 5353 a += curcpu * sz; 5354 5355 if (*(uint8_t *)a == UINT8_MAX) { 5356 /* 5357 * If the 0th byte is set to UINT8_MAX 5358 * then this is to be treated as a 5359 * reference to a NULL variable. 
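 */

/*
 * An illustrative sketch of the by-reference NULL convention above:
 * a by-ref variable lives in a buffer whose first uint64_t-sized
 * word is a header; setting byte 0 of that header to UINT8_MAX marks
 * the variable as NULL, and any other value means the payload begins
 * immediately after the header.  (Userland sketch; names are
 * hypothetical.)
 */
#include <stdint.h>

static uintptr_t
byref_load(uintptr_t buf)
{
    if (*(uint8_t *)buf == UINT8_MAX)
        return (0);                     /* a NULL variable */

    return (buf + sizeof (uint64_t));   /* payload follows the header */
}

static void
byref_mark(uintptr_t buf, int isnull)
{
    *(uint8_t *)buf = isnull ? UINT8_MAX : 0;
}

/*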
5360 */ 5361 regs[rd] = 0; 5362 } else { 5363 regs[rd] = a + sizeof (uint64_t); 5364 } 5365 5366 break; 5367 } 5368 5369 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5370 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5371 regs[rd] = tmp[curcpu]; 5372 break; 5373 5374 case DIF_OP_STLS: 5375 id = DIF_INSTR_VAR(instr); 5376 5377 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5378 id -= DIF_VAR_OTHER_UBASE; 5379 ASSERT(id < vstate->dtvs_nlocals); 5380 5381 ASSERT(vstate->dtvs_locals != NULL); 5382 svar = vstate->dtvs_locals[id]; 5383 ASSERT(svar != NULL); 5384 v = &svar->dtsv_var; 5385 5386 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5387 uintptr_t a = (uintptr_t)svar->dtsv_data; 5388 size_t sz = v->dtdv_type.dtdt_size; 5389 5390 sz += sizeof (uint64_t); 5391 ASSERT(svar->dtsv_size == NCPU * sz); 5392 a += curcpu * sz; 5393 5394 if (regs[rd] == 0) { 5395 *(uint8_t *)a = UINT8_MAX; 5396 break; 5397 } else { 5398 *(uint8_t *)a = 0; 5399 a += sizeof (uint64_t); 5400 } 5401 5402 if (!dtrace_vcanload( 5403 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5404 mstate, vstate)) 5405 break; 5406 5407 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5408 (void *)a, &v->dtdv_type); 5409 break; 5410 } 5411 5412 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5413 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5414 tmp[curcpu] = regs[rd]; 5415 break; 5416 5417 case DIF_OP_LDTS: { 5418 dtrace_dynvar_t *dvar; 5419 dtrace_key_t *key; 5420 5421 id = DIF_INSTR_VAR(instr); 5422 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5423 id -= DIF_VAR_OTHER_UBASE; 5424 v = &vstate->dtvs_tlocals[id]; 5425 5426 key = &tupregs[DIF_DTR_NREGS]; 5427 key[0].dttk_value = (uint64_t)id; 5428 key[0].dttk_size = 0; 5429 DTRACE_TLS_THRKEY(key[1].dttk_value); 5430 key[1].dttk_size = 0; 5431 5432 dvar = dtrace_dynvar(dstate, 2, key, 5433 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 5434 mstate, vstate); 5435 5436 if (dvar == NULL) { 5437 regs[rd] = 0; 5438 break; 5439 } 5440 5441 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5442 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5443 } else { 5444 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5445 } 5446 5447 break; 5448 } 5449 5450 case DIF_OP_STTS: { 5451 dtrace_dynvar_t *dvar; 5452 dtrace_key_t *key; 5453 5454 id = DIF_INSTR_VAR(instr); 5455 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5456 id -= DIF_VAR_OTHER_UBASE; 5457 5458 key = &tupregs[DIF_DTR_NREGS]; 5459 key[0].dttk_value = (uint64_t)id; 5460 key[0].dttk_size = 0; 5461 DTRACE_TLS_THRKEY(key[1].dttk_value); 5462 key[1].dttk_size = 0; 5463 v = &vstate->dtvs_tlocals[id]; 5464 5465 dvar = dtrace_dynvar(dstate, 2, key, 5466 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5467 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5468 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5469 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5470 5471 /* 5472 * Given that we're storing to thread-local data, 5473 * we need to flush our predicate cache. 
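 */

/*
 * An illustrative sketch of the clause-local variable layout above:
 * by-ref locals are allocated as one slot per CPU, each slot being
 * the declared type size plus a uint64_t header, so a CPU finds its
 * slot with a single multiply.  (Userland sketch; NCPU and the CPU
 * id become parameters here.)
 */
#include <stdint.h>
#include <stddef.h>

static uintptr_t
percpu_slot(uintptr_t base, size_t type_size, int cpuid)
{
    size_t slot_size = type_size + sizeof (uint64_t);

    return (base + (size_t)cpuid * slot_size);
}

/*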
5474 */ 5475 curthread->t_predcache = 0; 5476 5477 if (dvar == NULL) 5478 break; 5479 5480 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5481 if (!dtrace_vcanload( 5482 (void *)(uintptr_t)regs[rd], 5483 &v->dtdv_type, mstate, vstate)) 5484 break; 5485 5486 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5487 dvar->dtdv_data, &v->dtdv_type); 5488 } else { 5489 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5490 } 5491 5492 break; 5493 } 5494 5495 case DIF_OP_SRA: 5496 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 5497 break; 5498 5499 case DIF_OP_CALL: 5500 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 5501 regs, tupregs, ttop, mstate, state); 5502 break; 5503 5504 case DIF_OP_PUSHTR: 5505 if (ttop == DIF_DTR_NREGS) { 5506 *flags |= CPU_DTRACE_TUPOFLOW; 5507 break; 5508 } 5509 5510 if (r1 == DIF_TYPE_STRING) { 5511 /* 5512 * If this is a string type and the size is 0, 5513 * we'll use the system-wide default string 5514 * size. Note that we are _not_ looking at 5515 * the value of the DTRACEOPT_STRSIZE option; 5516 * had this been set, we would expect to have 5517 * a non-zero size value in the "pushtr". 5518 */ 5519 tupregs[ttop].dttk_size = 5520 dtrace_strlen((char *)(uintptr_t)regs[rd], 5521 regs[r2] ? regs[r2] : 5522 dtrace_strsize_default) + 1; 5523 } else { 5524 tupregs[ttop].dttk_size = regs[r2]; 5525 } 5526 5527 tupregs[ttop++].dttk_value = regs[rd]; 5528 break; 5529 5530 case DIF_OP_PUSHTV: 5531 if (ttop == DIF_DTR_NREGS) { 5532 *flags |= CPU_DTRACE_TUPOFLOW; 5533 break; 5534 } 5535 5536 tupregs[ttop].dttk_value = regs[rd]; 5537 tupregs[ttop++].dttk_size = 0; 5538 break; 5539 5540 case DIF_OP_POPTS: 5541 if (ttop != 0) 5542 ttop--; 5543 break; 5544 5545 case DIF_OP_FLUSHTS: 5546 ttop = 0; 5547 break; 5548 5549 case DIF_OP_LDGAA: 5550 case DIF_OP_LDTAA: { 5551 dtrace_dynvar_t *dvar; 5552 dtrace_key_t *key = tupregs; 5553 uint_t nkeys = ttop; 5554 5555 id = DIF_INSTR_VAR(instr); 5556 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5557 id -= DIF_VAR_OTHER_UBASE; 5558 5559 key[nkeys].dttk_value = (uint64_t)id; 5560 key[nkeys++].dttk_size = 0; 5561 5562 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 5563 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5564 key[nkeys++].dttk_size = 0; 5565 v = &vstate->dtvs_tlocals[id]; 5566 } else { 5567 v = &vstate->dtvs_globals[id]->dtsv_var; 5568 } 5569 5570 dvar = dtrace_dynvar(dstate, nkeys, key, 5571 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5572 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5573 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 5574 5575 if (dvar == NULL) { 5576 regs[rd] = 0; 5577 break; 5578 } 5579 5580 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5581 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5582 } else { 5583 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5584 } 5585 5586 break; 5587 } 5588 5589 case DIF_OP_STGAA: 5590 case DIF_OP_STTAA: { 5591 dtrace_dynvar_t *dvar; 5592 dtrace_key_t *key = tupregs; 5593 uint_t nkeys = ttop; 5594 5595 id = DIF_INSTR_VAR(instr); 5596 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5597 id -= DIF_VAR_OTHER_UBASE; 5598 5599 key[nkeys].dttk_value = (uint64_t)id; 5600 key[nkeys++].dttk_size = 0; 5601 5602 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 5603 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5604 key[nkeys++].dttk_size = 0; 5605 v = &vstate->dtvs_tlocals[id]; 5606 } else { 5607 v = &vstate->dtvs_globals[id]->dtsv_var; 5608 } 5609 5610 dvar = dtrace_dynvar(dstate, nkeys, key, 5611 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5612 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5613 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 5614 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5615 5616 if (dvar == NULL) 5617 break; 5618 5619 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5620 if (!dtrace_vcanload( 5621 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5622 mstate, vstate)) 5623 break; 5624 5625 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5626 dvar->dtdv_data, &v->dtdv_type); 5627 } else { 5628 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5629 } 5630 5631 break; 5632 } 5633 5634 case DIF_OP_ALLOCS: { 5635 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5636 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 5637 5638 /* 5639 * Rounding up the user allocation size could have 5640 * overflowed large, bogus allocations (like -1ULL) to 5641 * 0. 5642 */ 5643 if (size < regs[r1] || 5644 !DTRACE_INSCRATCH(mstate, size)) { 5645 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5646 regs[rd] = 0; 5647 break; 5648 } 5649 5650 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 5651 mstate->dtms_scratch_ptr += size; 5652 regs[rd] = ptr; 5653 break; 5654 } 5655 5656 case DIF_OP_COPYS: 5657 if (!dtrace_canstore(regs[rd], regs[r2], 5658 mstate, vstate)) { 5659 *flags |= CPU_DTRACE_BADADDR; 5660 *illval = regs[rd]; 5661 break; 5662 } 5663 5664 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 5665 break; 5666 5667 dtrace_bcopy((void *)(uintptr_t)regs[r1], 5668 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 5669 break; 5670 5671 case DIF_OP_STB: 5672 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 5673 *flags |= CPU_DTRACE_BADADDR; 5674 *illval = regs[rd]; 5675 break; 5676 } 5677 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 5678 break; 5679 5680 case DIF_OP_STH: 5681 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 5682 *flags |= CPU_DTRACE_BADADDR; 5683 *illval = regs[rd]; 5684 break; 5685 } 5686 if (regs[rd] & 1) { 5687 *flags |= CPU_DTRACE_BADALIGN; 5688 *illval = regs[rd]; 5689 break; 5690 } 5691 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5692 break; 5693 5694 case DIF_OP_STW: 5695 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5696 *flags |= CPU_DTRACE_BADADDR; 5697 *illval = regs[rd]; 5698 break; 5699 } 5700 if (regs[rd] & 3) { 5701 *flags |= CPU_DTRACE_BADALIGN; 5702 *illval = regs[rd]; 5703 break; 5704 } 5705 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5706 break; 5707 5708 case DIF_OP_STX: 5709 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5710 *flags |= CPU_DTRACE_BADADDR; 5711 *illval = regs[rd]; 5712 break; 5713 } 5714 if (regs[rd] & 7) { 5715 *flags |= CPU_DTRACE_BADALIGN; 5716 *illval = regs[rd]; 5717 break; 5718 } 5719 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5720 break; 5721 } 5722 } 5723 5724 if (!(*flags & CPU_DTRACE_FAULT)) 5725 return (rval); 5726 5727 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5728 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5729 5730 return (0); 5731} 5732 5733static void 5734dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5735{ 5736 dtrace_probe_t *probe = ecb->dte_probe; 5737 dtrace_provider_t *prov = probe->dtpr_provider; 5738 char c[DTRACE_FULLNAMELEN + 80], *str; 5739 char *msg = "dtrace: breakpoint action at probe "; 5740 char *ecbmsg = " (ecb "; 5741 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5742 uintptr_t val = (uintptr_t)ecb; 5743 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5744 5745 if (dtrace_destructive_disallow) 5746 return; 5747 5748 /* 5749 * It's impossible to be taking action on the NULL probe. 
5750 */ 5751 ASSERT(probe != NULL); 5752 5753 /* 5754 * This is a poor man's (destitute man's?) sprintf(): we want to 5755 * print the provider name, module name, function name and name of 5756 * the probe, along with the hex address of the ECB with the breakpoint 5757 * action -- all of which we must place in the character buffer by 5758 * hand. 5759 */ 5760 while (*msg != '\0') 5761 c[i++] = *msg++; 5762 5763 for (str = prov->dtpv_name; *str != '\0'; str++) 5764 c[i++] = *str; 5765 c[i++] = ':'; 5766 5767 for (str = probe->dtpr_mod; *str != '\0'; str++) 5768 c[i++] = *str; 5769 c[i++] = ':'; 5770 5771 for (str = probe->dtpr_func; *str != '\0'; str++) 5772 c[i++] = *str; 5773 c[i++] = ':'; 5774 5775 for (str = probe->dtpr_name; *str != '\0'; str++) 5776 c[i++] = *str; 5777 5778 while (*ecbmsg != '\0') 5779 c[i++] = *ecbmsg++; 5780 5781 while (shift >= 0) { 5782 mask = (uintptr_t)0xf << shift; 5783 5784 if (val >= ((uintptr_t)1 << shift)) 5785 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5786 shift -= 4; 5787 } 5788 5789 c[i++] = ')'; 5790 c[i] = '\0'; 5791 5792#if defined(sun) 5793 debug_enter(c); 5794#else 5795 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 5796#endif 5797} 5798 5799static void 5800dtrace_action_panic(dtrace_ecb_t *ecb) 5801{ 5802 dtrace_probe_t *probe = ecb->dte_probe; 5803 5804 /* 5805 * It's impossible to be taking action on the NULL probe. 5806 */ 5807 ASSERT(probe != NULL); 5808 5809 if (dtrace_destructive_disallow) 5810 return; 5811 5812 if (dtrace_panicked != NULL) 5813 return; 5814 5815 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5816 return; 5817 5818 /* 5819 * We won the right to panic. (We want to be sure that only one 5820 * thread calls panic() from dtrace_probe(), and that panic() is 5821 * called exactly once.) 5822 */ 5823 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5824 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5825 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5826} 5827 5828static void 5829dtrace_action_raise(uint64_t sig) 5830{ 5831 if (dtrace_destructive_disallow) 5832 return; 5833 5834 if (sig >= NSIG) { 5835 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5836 return; 5837 } 5838 5839#if defined(sun) 5840 /* 5841 * raise() has a queue depth of 1 -- we ignore all subsequent 5842 * invocations of the raise() action. 
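 */

/*
 * An illustrative sketch of the hand-rolled pointer formatting in
 * the breakpoint action above: walk the value four bits at a time
 * from the most significant nibble down, index a digit table, and
 * suppress leading zeroes.  (Userland sketch; unlike the loop above,
 * this emits a digit even for a value of zero.)
 */
#include <stdint.h>
#include <limits.h>

static int
fmt_hex(uintptr_t val, char *buf)
{
    int shift = sizeof (uintptr_t) * CHAR_BIT - 4;
    int i = 0;

    for (; shift >= 0; shift -= 4) {
        uintptr_t mask = (uintptr_t)0xf << shift;

        if (val >= ((uintptr_t)1 << shift) || shift == 0)
            buf[i++] = "0123456789abcdef"[(val & mask) >> shift];
    }
    buf[i] = '\0';
    return (i);
}

/*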
5843 */ 5844 if (curthread->t_dtrace_sig == 0) 5845 curthread->t_dtrace_sig = (uint8_t)sig; 5846 5847 curthread->t_sig_check = 1; 5848 aston(curthread); 5849#else 5850 struct proc *p = curproc; 5851 PROC_LOCK(p); 5852 kern_psignal(p, sig); 5853 PROC_UNLOCK(p); 5854#endif 5855} 5856 5857static void 5858dtrace_action_stop(void) 5859{ 5860 if (dtrace_destructive_disallow) 5861 return; 5862 5863#if defined(sun) 5864 if (!curthread->t_dtrace_stop) { 5865 curthread->t_dtrace_stop = 1; 5866 curthread->t_sig_check = 1; 5867 aston(curthread); 5868 } 5869#else 5870 struct proc *p = curproc; 5871 PROC_LOCK(p); 5872 kern_psignal(p, SIGSTOP); 5873 PROC_UNLOCK(p); 5874#endif 5875} 5876 5877static void 5878dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5879{ 5880 hrtime_t now; 5881 volatile uint16_t *flags; 5882#if defined(sun) 5883 cpu_t *cpu = CPU; 5884#else 5885 cpu_t *cpu = &solaris_cpu[curcpu]; 5886#endif 5887 5888 if (dtrace_destructive_disallow) 5889 return; 5890 5891 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5892 5893 now = dtrace_gethrtime(); 5894 5895 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5896 /* 5897 * We need to advance the mark to the current time. 5898 */ 5899 cpu->cpu_dtrace_chillmark = now; 5900 cpu->cpu_dtrace_chilled = 0; 5901 } 5902 5903 /* 5904 * Now check to see if the requested chill time would take us over 5905 * the maximum amount of time allowed in the chill interval. (Or 5906 * worse, if the calculation itself induces overflow.) 5907 */ 5908 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5909 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5910 *flags |= CPU_DTRACE_ILLOP; 5911 return; 5912 } 5913 5914 while (dtrace_gethrtime() - now < val) 5915 continue; 5916 5917 /* 5918 * Normally, we assure that the value of the variable "timestamp" does 5919 * not change within an ECB. The presence of chill() represents an 5920 * exception to this rule, however. 5921 */ 5922 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5923 cpu->cpu_dtrace_chilled += val; 5924} 5925 5926static void 5927dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5928 uint64_t *buf, uint64_t arg) 5929{ 5930 int nframes = DTRACE_USTACK_NFRAMES(arg); 5931 int strsize = DTRACE_USTACK_STRSIZE(arg); 5932 uint64_t *pcs = &buf[1], *fps; 5933 char *str = (char *)&pcs[nframes]; 5934 int size, offs = 0, i, j; 5935 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5936 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 5937 char *sym; 5938 5939 /* 5940 * Should be taking a faster path if string space has not been 5941 * allocated. 5942 */ 5943 ASSERT(strsize != 0); 5944 5945 /* 5946 * We will first allocate some temporary space for the frame pointers. 5947 */ 5948 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5949 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5950 (nframes * sizeof (uint64_t)); 5951 5952 if (!DTRACE_INSCRATCH(mstate, size)) { 5953 /* 5954 * Not enough room for our frame pointers -- need to indicate 5955 * that we ran out of scratch space. 5956 */ 5957 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5958 return; 5959 } 5960 5961 mstate->dtms_scratch_ptr += size; 5962 saved = mstate->dtms_scratch_ptr; 5963 5964 /* 5965 * Now get a stack with both program counters and frame pointers. 5966 */ 5967 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5968 dtrace_getufpstack(buf, fps, nframes + 1); 5969 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5970 5971 /* 5972 * If that faulted, we're cooked. 
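 */

/*
 * An illustrative sketch of the chill() budget logic above: within
 * each interval the chilled time accumulates, and a request is
 * rejected both when it would exceed the per-interval maximum and
 * when the addition itself wraps (the sum coming out below the
 * accumulator).  The kernel does this on hrtime_t; the sketch uses
 * unsigned nanoseconds so the wrap check is well-defined C.
 */
#include <stdint.h>

static int
chill_allowed(uint64_t chilled_ns, uint64_t val_ns, uint64_t max_ns)
{
    if (chilled_ns + val_ns > max_ns ||
        chilled_ns + val_ns < chilled_ns)
        return (0);    /* over budget, or the sum wrapped */

    return (1);
}

/*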
5973 */ 5974 if (*flags & CPU_DTRACE_FAULT) 5975 goto out; 5976 5977 /* 5978 * Now we want to walk up the stack, calling the USTACK helper. For 5979 * each iteration, we restore the scratch pointer. 5980 */ 5981 for (i = 0; i < nframes; i++) { 5982 mstate->dtms_scratch_ptr = saved; 5983 5984 if (offs >= strsize) 5985 break; 5986 5987 sym = (char *)(uintptr_t)dtrace_helper( 5988 DTRACE_HELPER_ACTION_USTACK, 5989 mstate, state, pcs[i], fps[i]); 5990 5991 /* 5992 * If we faulted while running the helper, we're going to 5993 * clear the fault and null out the corresponding string. 5994 */ 5995 if (*flags & CPU_DTRACE_FAULT) { 5996 *flags &= ~CPU_DTRACE_FAULT; 5997 str[offs++] = '\0'; 5998 continue; 5999 } 6000 6001 if (sym == NULL) { 6002 str[offs++] = '\0'; 6003 continue; 6004 } 6005 6006 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6007 6008 /* 6009 * Now copy in the string that the helper returned to us. 6010 */ 6011 for (j = 0; offs + j < strsize; j++) { 6012 if ((str[offs + j] = sym[j]) == '\0') 6013 break; 6014 } 6015 6016 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6017 6018 offs += j + 1; 6019 } 6020 6021 if (offs >= strsize) { 6022 /* 6023 * If we didn't have room for all of the strings, we don't 6024 * abort processing -- this needn't be a fatal error -- but we 6025 * still want to increment a counter (dts_stkstroverflows) to 6026 * allow this condition to be warned about. (If this is from 6027 * a jstack() action, it is easily tuned via jstackstrsize.) 6028 */ 6029 dtrace_error(&state->dts_stkstroverflows); 6030 } 6031 6032 while (offs < strsize) 6033 str[offs++] = '\0'; 6034 6035out: 6036 mstate->dtms_scratch_ptr = old; 6037} 6038 6039/* 6040 * If you're looking for the epicenter of DTrace, you just found it. This 6041 * is the function called by the provider to fire a probe -- from which all 6042 * subsequent probe-context DTrace activity emanates. 6043 */ 6044void 6045dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 6046 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 6047{ 6048 processorid_t cpuid; 6049 dtrace_icookie_t cookie; 6050 dtrace_probe_t *probe; 6051 dtrace_mstate_t mstate; 6052 dtrace_ecb_t *ecb; 6053 dtrace_action_t *act; 6054 intptr_t offs; 6055 size_t size; 6056 int vtime, onintr; 6057 volatile uint16_t *flags; 6058 hrtime_t now; 6059 6060 if (panicstr != NULL) 6061 return; 6062 6063#if defined(sun) 6064 /* 6065 * Kick out immediately if this CPU is still being born (in which case 6066 * curthread will be set to -1) or the current thread can't allow 6067 * probes in its current context. 6068 */ 6069 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 6070 return; 6071#endif 6072 6073 cookie = dtrace_interrupt_disable(); 6074 probe = dtrace_probes[id - 1]; 6075 cpuid = curcpu; 6076 onintr = CPU_ON_INTR(CPU); 6077 6078 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 6079 probe->dtpr_predcache == curthread->t_predcache) { 6080 /* 6081 * We have hit in the predicate cache; we know that 6082 * this predicate would evaluate to be false. 6083 */ 6084 dtrace_interrupt_enable(cookie); 6085 return; 6086 } 6087 6088#if defined(sun) 6089 if (panic_quiesce) { 6090#else 6091 if (panicstr != NULL) { 6092#endif 6093 /* 6094 * We don't trace anything if we're panicking. 
6095 */ 6096 dtrace_interrupt_enable(cookie); 6097 return; 6098 } 6099 6100 now = dtrace_gethrtime(); 6101 vtime = dtrace_vtime_references != 0; 6102 6103 if (vtime && curthread->t_dtrace_start) 6104 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 6105 6106 mstate.dtms_difo = NULL; 6107 mstate.dtms_probe = probe; 6108 mstate.dtms_strtok = 0; 6109 mstate.dtms_arg[0] = arg0; 6110 mstate.dtms_arg[1] = arg1; 6111 mstate.dtms_arg[2] = arg2; 6112 mstate.dtms_arg[3] = arg3; 6113 mstate.dtms_arg[4] = arg4; 6114 6115 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 6116 6117 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 6118 dtrace_predicate_t *pred = ecb->dte_predicate; 6119 dtrace_state_t *state = ecb->dte_state; 6120 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 6121 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 6122 dtrace_vstate_t *vstate = &state->dts_vstate; 6123 dtrace_provider_t *prov = probe->dtpr_provider; 6124 uint64_t tracememsize = 0; 6125 int committed = 0; 6126 caddr_t tomax; 6127 6128 /* 6129 * A little subtlety with the following (seemingly innocuous) 6130 * declaration of the automatic 'val': by looking at the 6131 * code, you might think that it could be declared in the 6132 * action processing loop, below. (That is, it's only used in 6133 * the action processing loop.) However, it must be declared 6134 * out of that scope because in the case of DIF expression 6135 * arguments to aggregating actions, one iteration of the 6136 * action loop will use the last iteration's value. 6137 */ 6138 uint64_t val = 0; 6139 6140 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 6141 *flags &= ~CPU_DTRACE_ERROR; 6142 6143 if (prov == dtrace_provider) { 6144 /* 6145 * If dtrace itself is the provider of this probe, 6146 * we're only going to continue processing the ECB if 6147 * arg0 (the dtrace_state_t) is equal to the ECB's 6148 * creating state. (This prevents disjoint consumers 6149 * from seeing one another's metaprobes.) 6150 */ 6151 if (arg0 != (uint64_t)(uintptr_t)state) 6152 continue; 6153 } 6154 6155 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 6156 /* 6157 * We're not currently active. If our provider isn't 6158 * the dtrace pseudo provider, we're not interested. 6159 */ 6160 if (prov != dtrace_provider) 6161 continue; 6162 6163 /* 6164 * Now we must further check if we are in the BEGIN 6165 * probe. If we are, we will only continue processing 6166 * if we're still in WARMUP -- if one BEGIN enabling 6167 * has invoked the exit() action, we don't want to 6168 * evaluate subsequent BEGIN enablings. 6169 */ 6170 if (probe->dtpr_id == dtrace_probeid_begin && 6171 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 6172 ASSERT(state->dts_activity == 6173 DTRACE_ACTIVITY_DRAINING); 6174 continue; 6175 } 6176 } 6177 6178 if (ecb->dte_cond) { 6179 /* 6180 * If the dte_cond bits indicate that this 6181 * consumer is only allowed to see user-mode firings 6182 * of this probe, call the provider's dtps_usermode() 6183 * entry point to check that the probe was fired 6184 * while in a user context. Skip this ECB if that's 6185 * not the case. 6186 */ 6187 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 6188 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 6189 probe->dtpr_id, probe->dtpr_arg) == 0) 6190 continue; 6191 6192#if defined(sun) 6193 /* 6194 * This is more subtle than it looks. 
We have to be
			 * absolutely certain that CRED() isn't going to
			 * change out from under us, so it's only legitimate
			 * to examine that structure if we're in constrained
			 * situations.  Currently, the only time we'll do this
			 * check is if a non-super-user has enabled the
			 * profile or syscall providers -- providers that
			 * allow visibility of all processes.  For the
			 * profile case, the check above will ensure that
			 * we're examining a user context.
			 */
			if (ecb->dte_cond & DTRACE_COND_OWNER) {
				cred_t *cr;
				cred_t *s_cr =
				    ecb->dte_state->dts_cred.dcr_cred;
				proc_t *proc;

				ASSERT(s_cr != NULL);

				if ((cr = CRED()) == NULL ||
				    s_cr->cr_uid != cr->cr_uid ||
				    s_cr->cr_uid != cr->cr_ruid ||
				    s_cr->cr_uid != cr->cr_suid ||
				    s_cr->cr_gid != cr->cr_gid ||
				    s_cr->cr_gid != cr->cr_rgid ||
				    s_cr->cr_gid != cr->cr_sgid ||
				    (proc = ttoproc(curthread)) == NULL ||
				    (proc->p_flag & SNOCD))
					continue;
			}

			if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
				cred_t *cr;
				cred_t *s_cr =
				    ecb->dte_state->dts_cred.dcr_cred;

				ASSERT(s_cr != NULL);

				if ((cr = CRED()) == NULL ||
				    s_cr->cr_zone->zone_id !=
				    cr->cr_zone->zone_id)
					continue;
			}
#endif
		}

		if (now - state->dts_alive > dtrace_deadman_timeout) {
			/*
			 * We seem to be dead.  Unless we (a) have kernel
			 * destructive permissions, (b) have explicitly
			 * enabled destructive actions and (c) destructive
			 * actions have not been disabled, we're going to
			 * transition into the KILLED state, from which no
			 * further processing on this state will be
			 * performed.
			 */
			if (!dtrace_priv_kernel_destructive(state) ||
			    !state->dts_cred.dcr_destructive ||
			    dtrace_destructive_disallow) {
				void *activity = &state->dts_activity;
				dtrace_activity_t current;

				do {
					current = state->dts_activity;
				} while (dtrace_cas32(activity, current,
				    DTRACE_ACTIVITY_KILLED) != current);

				continue;
			}
		}

		if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
		    ecb->dte_alignment, state, &mstate)) < 0)
			continue;

		tomax = buf->dtb_tomax;
		ASSERT(tomax != NULL);

		if (ecb->dte_size != 0) {
			dtrace_rechdr_t dtrh;
			if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
				mstate.dtms_timestamp = dtrace_gethrtime();
				mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
			}
			ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t));
			dtrh.dtrh_epid = ecb->dte_epid;
			DTRACE_RECORD_STORE_TIMESTAMP(&dtrh,
			    mstate.dtms_timestamp);
			*((dtrace_rechdr_t *)(tomax + offs)) = dtrh;
		}

		mstate.dtms_epid = ecb->dte_epid;
		mstate.dtms_present |= DTRACE_MSTATE_EPID;

		if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
			mstate.dtms_access = DTRACE_ACCESS_KERNEL;
		else
			mstate.dtms_access = 0;

		if (pred != NULL) {
			dtrace_difo_t *dp = pred->dtp_difo;
			int rval;

			rval = dtrace_dif_emulate(dp, &mstate, vstate, state);

			if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
				dtrace_cacheid_t cid = probe->dtpr_predcache;

				if (cid != DTRACE_CACHEIDNONE && !onintr) {
					/*
					 * Update the predicate cache...
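 */

/*
 * An illustrative sketch of the predicate-cache fast path above: when
 * a thread-keyed predicate evaluates to false, its cache id is stored
 * on the thread; a later firing of a probe whose enabling carries the
 * same id can then be skipped without evaluating the predicate at
 * all.  (Userland sketch; types and names are hypothetical.)
 */
#include <stdint.h>

#define CACHEID_NONE_SKETCH 0

struct thread_sketch {
    uint32_t t_predcache;
};

static int
probe_can_skip(uint32_t probe_cache, const struct thread_sketch *t)
{
    return (probe_cache != CACHEID_NONE_SKETCH &&
        probe_cache == t->t_predcache);
}

static void
predicate_missed(uint32_t pred_cache, struct thread_sketch *t)
{
    /* The predicate evaluated false: remember that for this thread. */
    if (pred_cache != CACHEID_NONE_SKETCH)
        t->t_predcache = pred_cache;
}

/*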
6304 */ 6305 ASSERT(cid == pred->dtp_cacheid); 6306 curthread->t_predcache = cid; 6307 } 6308 6309 continue; 6310 } 6311 } 6312 6313 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 6314 act != NULL; act = act->dta_next) { 6315 size_t valoffs; 6316 dtrace_difo_t *dp; 6317 dtrace_recdesc_t *rec = &act->dta_rec; 6318 6319 size = rec->dtrd_size; 6320 valoffs = offs + rec->dtrd_offset; 6321 6322 if (DTRACEACT_ISAGG(act->dta_kind)) { 6323 uint64_t v = 0xbad; 6324 dtrace_aggregation_t *agg; 6325 6326 agg = (dtrace_aggregation_t *)act; 6327 6328 if ((dp = act->dta_difo) != NULL) 6329 v = dtrace_dif_emulate(dp, 6330 &mstate, vstate, state); 6331 6332 if (*flags & CPU_DTRACE_ERROR) 6333 continue; 6334 6335 /* 6336 * Note that we always pass the expression 6337 * value from the previous iteration of the 6338 * action loop. This value will only be used 6339 * if there is an expression argument to the 6340 * aggregating action, denoted by the 6341 * dtag_hasarg field. 6342 */ 6343 dtrace_aggregate(agg, buf, 6344 offs, aggbuf, v, val); 6345 continue; 6346 } 6347 6348 switch (act->dta_kind) { 6349 case DTRACEACT_STOP: 6350 if (dtrace_priv_proc_destructive(state)) 6351 dtrace_action_stop(); 6352 continue; 6353 6354 case DTRACEACT_BREAKPOINT: 6355 if (dtrace_priv_kernel_destructive(state)) 6356 dtrace_action_breakpoint(ecb); 6357 continue; 6358 6359 case DTRACEACT_PANIC: 6360 if (dtrace_priv_kernel_destructive(state)) 6361 dtrace_action_panic(ecb); 6362 continue; 6363 6364 case DTRACEACT_STACK: 6365 if (!dtrace_priv_kernel(state)) 6366 continue; 6367 6368 dtrace_getpcstack((pc_t *)(tomax + valoffs), 6369 size / sizeof (pc_t), probe->dtpr_aframes, 6370 DTRACE_ANCHORED(probe) ? NULL : 6371 (uint32_t *)arg0); 6372 continue; 6373 6374 case DTRACEACT_JSTACK: 6375 case DTRACEACT_USTACK: 6376 if (!dtrace_priv_proc(state)) 6377 continue; 6378 6379 /* 6380 * See comment in DIF_VAR_PID. 6381 */ 6382 if (DTRACE_ANCHORED(mstate.dtms_probe) && 6383 CPU_ON_INTR(CPU)) { 6384 int depth = DTRACE_USTACK_NFRAMES( 6385 rec->dtrd_arg) + 1; 6386 6387 dtrace_bzero((void *)(tomax + valoffs), 6388 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 6389 + depth * sizeof (uint64_t)); 6390 6391 continue; 6392 } 6393 6394 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 6395 curproc->p_dtrace_helpers != NULL) { 6396 /* 6397 * This is the slow path -- we have 6398 * allocated string space, and we're 6399 * getting the stack of a process that 6400 * has helpers. Call into a separate 6401 * routine to perform this processing. 
6402 */ 6403 dtrace_action_ustack(&mstate, state, 6404 (uint64_t *)(tomax + valoffs), 6405 rec->dtrd_arg); 6406 continue; 6407 } 6408 6409 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6410 dtrace_getupcstack((uint64_t *) 6411 (tomax + valoffs), 6412 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 6413 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6414 continue; 6415 6416 default: 6417 break; 6418 } 6419 6420 dp = act->dta_difo; 6421 ASSERT(dp != NULL); 6422 6423 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 6424 6425 if (*flags & CPU_DTRACE_ERROR) 6426 continue; 6427 6428 switch (act->dta_kind) { 6429 case DTRACEACT_SPECULATE: { 6430 dtrace_rechdr_t *dtrh; 6431 6432 ASSERT(buf == &state->dts_buffer[cpuid]); 6433 buf = dtrace_speculation_buffer(state, 6434 cpuid, val); 6435 6436 if (buf == NULL) { 6437 *flags |= CPU_DTRACE_DROP; 6438 continue; 6439 } 6440 6441 offs = dtrace_buffer_reserve(buf, 6442 ecb->dte_needed, ecb->dte_alignment, 6443 state, NULL); 6444 6445 if (offs < 0) { 6446 *flags |= CPU_DTRACE_DROP; 6447 continue; 6448 } 6449 6450 tomax = buf->dtb_tomax; 6451 ASSERT(tomax != NULL); 6452 6453 if (ecb->dte_size == 0) 6454 continue; 6455 6456 ASSERT3U(ecb->dte_size, >=, 6457 sizeof (dtrace_rechdr_t)); 6458 dtrh = ((void *)(tomax + offs)); 6459 dtrh->dtrh_epid = ecb->dte_epid; 6460 /* 6461 * When the speculation is committed, all of 6462 * the records in the speculative buffer will 6463 * have their timestamps set to the commit 6464 * time. Until then, it is set to a sentinel 6465 * value, for debugability. 6466 */ 6467 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX); 6468 continue; 6469 } 6470 6471 case DTRACEACT_PRINTM: { 6472 /* The DIF returns a 'memref'. */ 6473 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 6474 6475 /* Get the size from the memref. */ 6476 size = memref[1]; 6477 6478 /* 6479 * Check if the size exceeds the allocated 6480 * buffer size. 6481 */ 6482 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6483 /* Flag a drop! */ 6484 *flags |= CPU_DTRACE_DROP; 6485 continue; 6486 } 6487 6488 /* Store the size in the buffer first. */ 6489 DTRACE_STORE(uintptr_t, tomax, 6490 valoffs, size); 6491 6492 /* 6493 * Offset the buffer address to the start 6494 * of the data. 6495 */ 6496 valoffs += sizeof(uintptr_t); 6497 6498 /* 6499 * Reset to the memory address rather than 6500 * the memref array, then let the BYREF 6501 * code below do the work to store the 6502 * memory data in the buffer. 6503 */ 6504 val = memref[0]; 6505 break; 6506 } 6507 6508 case DTRACEACT_PRINTT: { 6509 /* The DIF returns a 'typeref'. */ 6510 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val; 6511 char c = '\0' + 1; 6512 size_t s; 6513 6514 /* 6515 * Get the type string length and round it 6516 * up so that the data that follows is 6517 * aligned for easy access. 6518 */ 6519 size_t typs = strlen((char *) typeref[2]) + 1; 6520 typs = roundup(typs, sizeof(uintptr_t)); 6521 6522 /* 6523 *Get the size from the typeref using the 6524 * number of elements and the type size. 6525 */ 6526 size = typeref[1] * typeref[3]; 6527 6528 /* 6529 * Check if the size exceeds the allocated 6530 * buffer size. 6531 */ 6532 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6533 /* Flag a drop! */ 6534 *flags |= CPU_DTRACE_DROP; 6535 6536 } 6537 6538 /* Store the size in the buffer first. */ 6539 DTRACE_STORE(uintptr_t, tomax, 6540 valoffs, size); 6541 valoffs += sizeof(uintptr_t); 6542 6543 /* Store the type size in the buffer. 
*/ 6544 DTRACE_STORE(uintptr_t, tomax, 6545 valoffs, typeref[3]); 6546 valoffs += sizeof(uintptr_t); 6547 6548 val = typeref[2]; 6549 6550 for (s = 0; s < typs; s++) { 6551 if (c != '\0') 6552 c = dtrace_load8(val++); 6553 6554 DTRACE_STORE(uint8_t, tomax, 6555 valoffs++, c); 6556 } 6557 6558 /* 6559 * Reset to the memory address rather than 6560 * the typeref array, then let the BYREF 6561 * code below do the work to store the 6562 * memory data in the buffer. 6563 */ 6564 val = typeref[0]; 6565 break; 6566 } 6567 6568 case DTRACEACT_CHILL: 6569 if (dtrace_priv_kernel_destructive(state)) 6570 dtrace_action_chill(&mstate, val); 6571 continue; 6572 6573 case DTRACEACT_RAISE: 6574 if (dtrace_priv_proc_destructive(state)) 6575 dtrace_action_raise(val); 6576 continue; 6577 6578 case DTRACEACT_COMMIT: 6579 ASSERT(!committed); 6580 6581 /* 6582 * We need to commit our buffer state. 6583 */ 6584 if (ecb->dte_size) 6585 buf->dtb_offset = offs + ecb->dte_size; 6586 buf = &state->dts_buffer[cpuid]; 6587 dtrace_speculation_commit(state, cpuid, val); 6588 committed = 1; 6589 continue; 6590 6591 case DTRACEACT_DISCARD: 6592 dtrace_speculation_discard(state, cpuid, val); 6593 continue; 6594 6595 case DTRACEACT_DIFEXPR: 6596 case DTRACEACT_LIBACT: 6597 case DTRACEACT_PRINTF: 6598 case DTRACEACT_PRINTA: 6599 case DTRACEACT_SYSTEM: 6600 case DTRACEACT_FREOPEN: 6601 case DTRACEACT_TRACEMEM: 6602 break; 6603 6604 case DTRACEACT_TRACEMEM_DYNSIZE: 6605 tracememsize = val; 6606 break; 6607 6608 case DTRACEACT_SYM: 6609 case DTRACEACT_MOD: 6610 if (!dtrace_priv_kernel(state)) 6611 continue; 6612 break; 6613 6614 case DTRACEACT_USYM: 6615 case DTRACEACT_UMOD: 6616 case DTRACEACT_UADDR: { 6617#if defined(sun) 6618 struct pid *pid = curthread->t_procp->p_pidp; 6619#endif 6620 6621 if (!dtrace_priv_proc(state)) 6622 continue; 6623 6624 DTRACE_STORE(uint64_t, tomax, 6625#if defined(sun) 6626 valoffs, (uint64_t)pid->pid_id); 6627#else 6628 valoffs, (uint64_t) curproc->p_pid); 6629#endif 6630 DTRACE_STORE(uint64_t, tomax, 6631 valoffs + sizeof (uint64_t), val); 6632 6633 continue; 6634 } 6635 6636 case DTRACEACT_EXIT: { 6637 /* 6638 * For the exit action, we are going to attempt 6639 * to atomically set our activity to be 6640 * draining. If this fails (either because 6641 * another CPU has beat us to the exit action, 6642 * or because our current activity is something 6643 * other than ACTIVE or WARMUP), we will 6644 * continue. This assures that the exit action 6645 * can be successfully recorded at most once 6646 * when we're in the ACTIVE state. If we're 6647 * encountering the exit() action while in 6648 * COOLDOWN, however, we want to honor the new 6649 * status code. (We know that we're the only 6650 * thread in COOLDOWN, so there is no race.) 
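 *
 * The transitions attempted below, in CAS terms:
 *
 *	ACTIVE   -> DRAINING	exit() recorded; tracing winds down
 *	WARMUP   -> DRAINING	exit() recorded during warmup
 *	COOLDOWN (no CAS)	only the status code is rewritten
 *	otherwise		the CAS fails and the record is dropped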
6651 */ 6652 void *activity = &state->dts_activity; 6653 dtrace_activity_t current = state->dts_activity; 6654 6655 if (current == DTRACE_ACTIVITY_COOLDOWN) 6656 break; 6657 6658 if (current != DTRACE_ACTIVITY_WARMUP) 6659 current = DTRACE_ACTIVITY_ACTIVE; 6660 6661 if (dtrace_cas32(activity, current, 6662 DTRACE_ACTIVITY_DRAINING) != current) { 6663 *flags |= CPU_DTRACE_DROP; 6664 continue; 6665 } 6666 6667 break; 6668 } 6669 6670 default: 6671 ASSERT(0); 6672 } 6673 6674 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 6675 uintptr_t end = valoffs + size; 6676 6677 if (tracememsize != 0 && 6678 valoffs + tracememsize < end) { 6679 end = valoffs + tracememsize; 6680 tracememsize = 0; 6681 } 6682 6683 if (!dtrace_vcanload((void *)(uintptr_t)val, 6684 &dp->dtdo_rtype, &mstate, vstate)) 6685 continue; 6686 6687 /* 6688 * If this is a string, we're going to only 6689 * load until we find the zero byte -- after 6690 * which we'll store zero bytes. 6691 */ 6692 if (dp->dtdo_rtype.dtdt_kind == 6693 DIF_TYPE_STRING) { 6694 char c = '\0' + 1; 6695 int intuple = act->dta_intuple; 6696 size_t s; 6697 6698 for (s = 0; s < size; s++) { 6699 if (c != '\0') 6700 c = dtrace_load8(val++); 6701 6702 DTRACE_STORE(uint8_t, tomax, 6703 valoffs++, c); 6704 6705 if (c == '\0' && intuple) 6706 break; 6707 } 6708 6709 continue; 6710 } 6711 6712 while (valoffs < end) { 6713 DTRACE_STORE(uint8_t, tomax, valoffs++, 6714 dtrace_load8(val++)); 6715 } 6716 6717 continue; 6718 } 6719 6720 switch (size) { 6721 case 0: 6722 break; 6723 6724 case sizeof (uint8_t): 6725 DTRACE_STORE(uint8_t, tomax, valoffs, val); 6726 break; 6727 case sizeof (uint16_t): 6728 DTRACE_STORE(uint16_t, tomax, valoffs, val); 6729 break; 6730 case sizeof (uint32_t): 6731 DTRACE_STORE(uint32_t, tomax, valoffs, val); 6732 break; 6733 case sizeof (uint64_t): 6734 DTRACE_STORE(uint64_t, tomax, valoffs, val); 6735 break; 6736 default: 6737 /* 6738 * Any other size should have been returned by 6739 * reference, not by value. 6740 */ 6741 ASSERT(0); 6742 break; 6743 } 6744 } 6745 6746 if (*flags & CPU_DTRACE_DROP) 6747 continue; 6748 6749 if (*flags & CPU_DTRACE_FAULT) { 6750 int ndx; 6751 dtrace_action_t *err; 6752 6753 buf->dtb_errors++; 6754 6755 if (probe->dtpr_id == dtrace_probeid_error) { 6756 /* 6757 * There's nothing we can do -- we had an 6758 * error on the error probe. We bump an 6759 * error counter to at least indicate that 6760 * this condition happened. 6761 */ 6762 dtrace_error(&state->dts_dblerrors); 6763 continue; 6764 } 6765 6766 if (vtime) { 6767 /* 6768 * Before recursing on dtrace_probe(), we 6769 * need to explicitly clear out our start 6770 * time to prevent it from being accumulated 6771 * into t_dtrace_vtime. 6772 */ 6773 curthread->t_dtrace_start = 0; 6774 } 6775 6776 /* 6777 * Iterate over the actions to figure out which action 6778 * we were processing when we experienced the error. 6779 * Note that act points _past_ the faulting action; if 6780 * act is ecb->dte_action, the fault was in the 6781 * predicate, if it's ecb->dte_action->dta_next it's 6782 * in action #1, and so on. 6783 */ 6784 for (err = ecb->dte_action, ndx = 0; 6785 err != act; err = err->dta_next, ndx++) 6786 continue; 6787 6788 dtrace_probe_error(state, ecb->dte_epid, ndx, 6789 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 
6790 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 6791 cpu_core[cpuid].cpuc_dtrace_illval); 6792 6793 continue; 6794 } 6795 6796 if (!committed) 6797 buf->dtb_offset = offs + ecb->dte_size; 6798 } 6799 6800 if (vtime) 6801 curthread->t_dtrace_start = dtrace_gethrtime(); 6802 6803 dtrace_interrupt_enable(cookie); 6804} 6805 6806/* 6807 * DTrace Probe Hashing Functions 6808 * 6809 * The functions in this section (and indeed, the functions in remaining 6810 * sections) are not _called_ from probe context. (Any exceptions to this are 6811 * marked with a "Note:".) Rather, they are called from elsewhere in the 6812 * DTrace framework to look-up probes in, add probes to and remove probes from 6813 * the DTrace probe hashes. (Each probe is hashed by each element of the 6814 * probe tuple -- allowing for fast lookups, regardless of what was 6815 * specified.) 6816 */ 6817static uint_t 6818dtrace_hash_str(const char *p) 6819{ 6820 unsigned int g; 6821 uint_t hval = 0; 6822 6823 while (*p) { 6824 hval = (hval << 4) + *p++; 6825 if ((g = (hval & 0xf0000000)) != 0) 6826 hval ^= g >> 24; 6827 hval &= ~g; 6828 } 6829 return (hval); 6830} 6831 6832static dtrace_hash_t * 6833dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 6834{ 6835 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 6836 6837 hash->dth_stroffs = stroffs; 6838 hash->dth_nextoffs = nextoffs; 6839 hash->dth_prevoffs = prevoffs; 6840 6841 hash->dth_size = 1; 6842 hash->dth_mask = hash->dth_size - 1; 6843 6844 hash->dth_tab = kmem_zalloc(hash->dth_size * 6845 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 6846 6847 return (hash); 6848} 6849 6850static void 6851dtrace_hash_destroy(dtrace_hash_t *hash) 6852{ 6853#ifdef DEBUG 6854 int i; 6855 6856 for (i = 0; i < hash->dth_size; i++) 6857 ASSERT(hash->dth_tab[i] == NULL); 6858#endif 6859 6860 kmem_free(hash->dth_tab, 6861 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 6862 kmem_free(hash, sizeof (dtrace_hash_t)); 6863} 6864 6865static void 6866dtrace_hash_resize(dtrace_hash_t *hash) 6867{ 6868 int size = hash->dth_size, i, ndx; 6869 int new_size = hash->dth_size << 1; 6870 int new_mask = new_size - 1; 6871 dtrace_hashbucket_t **new_tab, *bucket, *next; 6872 6873 ASSERT((new_size & new_mask) == 0); 6874 6875 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 6876 6877 for (i = 0; i < size; i++) { 6878 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 6879 dtrace_probe_t *probe = bucket->dthb_chain; 6880 6881 ASSERT(probe != NULL); 6882 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 6883 6884 next = bucket->dthb_next; 6885 bucket->dthb_next = new_tab[ndx]; 6886 new_tab[ndx] = bucket; 6887 } 6888 } 6889 6890 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 6891 hash->dth_tab = new_tab; 6892 hash->dth_size = new_size; 6893 hash->dth_mask = new_mask; 6894} 6895 6896static void 6897dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 6898{ 6899 int hashval = DTRACE_HASHSTR(hash, new); 6900 int ndx = hashval & hash->dth_mask; 6901 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6902 dtrace_probe_t **nextp, **prevp; 6903 6904 for (; bucket != NULL; bucket = bucket->dthb_next) { 6905 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 6906 goto add; 6907 } 6908 6909 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 6910 dtrace_hash_resize(hash); 6911 dtrace_hash_add(hash, new); 6912 return; 6913 } 6914 6915 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 6916 bucket->dthb_next = hash->dth_tab[ndx]; 
6917 hash->dth_tab[ndx] = bucket; 6918 hash->dth_nbuckets++; 6919 6920add: 6921 nextp = DTRACE_HASHNEXT(hash, new); 6922 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6923 *nextp = bucket->dthb_chain; 6924 6925 if (bucket->dthb_chain != NULL) { 6926 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6927 ASSERT(*prevp == NULL); 6928 *prevp = new; 6929 } 6930 6931 bucket->dthb_chain = new; 6932 bucket->dthb_len++; 6933} 6934 6935static dtrace_probe_t * 6936dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6937{ 6938 int hashval = DTRACE_HASHSTR(hash, template); 6939 int ndx = hashval & hash->dth_mask; 6940 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6941 6942 for (; bucket != NULL; bucket = bucket->dthb_next) { 6943 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6944 return (bucket->dthb_chain); 6945 } 6946 6947 return (NULL); 6948} 6949 6950static int 6951dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6952{ 6953 int hashval = DTRACE_HASHSTR(hash, template); 6954 int ndx = hashval & hash->dth_mask; 6955 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6956 6957 for (; bucket != NULL; bucket = bucket->dthb_next) { 6958 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6959 return (bucket->dthb_len); 6960 } 6961 6962 return (0); 6963} 6964 6965static void 6966dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 6967{ 6968 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 6969 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6970 6971 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 6972 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 6973 6974 /* 6975 * Find the bucket that we're removing this probe from. 6976 */ 6977 for (; bucket != NULL; bucket = bucket->dthb_next) { 6978 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 6979 break; 6980 } 6981 6982 ASSERT(bucket != NULL); 6983 6984 if (*prevp == NULL) { 6985 if (*nextp == NULL) { 6986 /* 6987 * The removed probe was the only probe on this 6988 * bucket; we need to remove the bucket. 6989 */ 6990 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 6991 6992 ASSERT(bucket->dthb_chain == probe); 6993 ASSERT(b != NULL); 6994 6995 if (b == bucket) { 6996 hash->dth_tab[ndx] = bucket->dthb_next; 6997 } else { 6998 while (b->dthb_next != bucket) 6999 b = b->dthb_next; 7000 b->dthb_next = bucket->dthb_next; 7001 } 7002 7003 ASSERT(hash->dth_nbuckets > 0); 7004 hash->dth_nbuckets--; 7005 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 7006 return; 7007 } 7008 7009 bucket->dthb_chain = *nextp; 7010 } else { 7011 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 7012 } 7013 7014 if (*nextp != NULL) 7015 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 7016} 7017 7018/* 7019 * DTrace Utility Functions 7020 * 7021 * These are random utility functions that are _not_ called from probe context. 7022 */ 7023static int 7024dtrace_badattr(const dtrace_attribute_t *a) 7025{ 7026 return (a->dtat_name > DTRACE_STABILITY_MAX || 7027 a->dtat_data > DTRACE_STABILITY_MAX || 7028 a->dtat_class > DTRACE_CLASS_MAX); 7029} 7030 7031/* 7032 * Return a duplicate copy of a string. If the specified string is NULL, 7033 * this function returns a zero-length string. 7034 */ 7035static char * 7036dtrace_strdup(const char *str) 7037{ 7038 char *new = kmem_zalloc((str != NULL ? 
strlen(str) : 0) + 1, KM_SLEEP); 7039 7040 if (str != NULL) 7041 (void) strcpy(new, str); 7042 7043 return (new); 7044} 7045 7046#define DTRACE_ISALPHA(c) \ 7047 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 7048 7049static int 7050dtrace_badname(const char *s) 7051{ 7052 char c; 7053 7054 if (s == NULL || (c = *s++) == '\0') 7055 return (0); 7056 7057 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 7058 return (1); 7059 7060 while ((c = *s++) != '\0') { 7061 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 7062 c != '-' && c != '_' && c != '.' && c != '`') 7063 return (1); 7064 } 7065 7066 return (0); 7067} 7068 7069static void 7070dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 7071{ 7072 uint32_t priv; 7073 7074#if defined(sun) 7075 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 7076 /* 7077 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 7078 */ 7079 priv = DTRACE_PRIV_ALL; 7080 } else { 7081 *uidp = crgetuid(cr); 7082 *zoneidp = crgetzoneid(cr); 7083 7084 priv = 0; 7085 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 7086 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 7087 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 7088 priv |= DTRACE_PRIV_USER; 7089 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 7090 priv |= DTRACE_PRIV_PROC; 7091 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 7092 priv |= DTRACE_PRIV_OWNER; 7093 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 7094 priv |= DTRACE_PRIV_ZONEOWNER; 7095 } 7096#else 7097 priv = DTRACE_PRIV_ALL; 7098#endif 7099 7100 *privp = priv; 7101} 7102 7103#ifdef DTRACE_ERRDEBUG 7104static void 7105dtrace_errdebug(const char *str) 7106{ 7107 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 7108 int occupied = 0; 7109 7110 mutex_enter(&dtrace_errlock); 7111 dtrace_errlast = str; 7112 dtrace_errthread = curthread; 7113 7114 while (occupied++ < DTRACE_ERRHASHSZ) { 7115 if (dtrace_errhash[hval].dter_msg == str) { 7116 dtrace_errhash[hval].dter_count++; 7117 goto out; 7118 } 7119 7120 if (dtrace_errhash[hval].dter_msg != NULL) { 7121 hval = (hval + 1) % DTRACE_ERRHASHSZ; 7122 continue; 7123 } 7124 7125 dtrace_errhash[hval].dter_msg = str; 7126 dtrace_errhash[hval].dter_count = 1; 7127 goto out; 7128 } 7129 7130 panic("dtrace: undersized error hash"); 7131out: 7132 mutex_exit(&dtrace_errlock); 7133} 7134#endif 7135 7136/* 7137 * DTrace Matching Functions 7138 * 7139 * These functions are used to match groups of probes, given some elements of 7140 * a probe tuple, or some globbed expressions for elements of a probe tuple. 7141 */ 7142static int 7143dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 7144 zoneid_t zoneid) 7145{ 7146 if (priv != DTRACE_PRIV_ALL) { 7147 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 7148 uint32_t match = priv & ppriv; 7149 7150 /* 7151 * No PRIV_DTRACE_* privileges... 7152 */ 7153 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 7154 DTRACE_PRIV_KERNEL)) == 0) 7155 return (0); 7156 7157 /* 7158 * No matching bits, but there were bits to match... 7159 */ 7160 if (match == 0 && ppriv != 0) 7161 return (0); 7162 7163 /* 7164 * Need to have permissions to the process, but don't... 7165 */ 7166 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 7167 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 7168 return (0); 7169 } 7170 7171 /* 7172 * Need to be in the same zone unless we possess the 7173 * privilege to examine all zones. 
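 *
 * A worked example of the bit arithmetic above: suppose the
 * consumer holds priv = DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER
 * while the provider demands ppriv = DTRACE_PRIV_PROC |
 * DTRACE_PRIV_ZONEOWNER.  Then match = DTRACE_PRIV_PROC, and
 * (ppriv & ~match) leaves DTRACE_PRIV_ZONEOWNER as the
 * unsatisfied demand -- so visibility hinges on the zone
 * comparison below.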
7174 */ 7175 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 7176 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 7177 return (0); 7178 } 7179 } 7180 7181 return (1); 7182} 7183 7184/* 7185 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 7186 * consists of input pattern strings and an ops-vector to evaluate them. 7187 * This function returns >0 for match, 0 for no match, and <0 for error. 7188 */ 7189static int 7190dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 7191 uint32_t priv, uid_t uid, zoneid_t zoneid) 7192{ 7193 dtrace_provider_t *pvp = prp->dtpr_provider; 7194 int rv; 7195 7196 if (pvp->dtpv_defunct) 7197 return (0); 7198 7199 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 7200 return (rv); 7201 7202 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 7203 return (rv); 7204 7205 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 7206 return (rv); 7207 7208 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 7209 return (rv); 7210 7211 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 7212 return (0); 7213 7214 return (rv); 7215} 7216 7217/* 7218 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 7219 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 7220 * libc's version, the kernel version only applies to 8-bit ASCII strings. 7221 * In addition, all of the recursion cases except for '*' matching have been 7222 * unwound. For '*', we still implement recursive evaluation, but a depth 7223 * counter is maintained and matching is aborted if we recurse too deep. 7224 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 7225 */ 7226static int 7227dtrace_match_glob(const char *s, const char *p, int depth) 7228{ 7229 const char *olds; 7230 char s1, c; 7231 int gs; 7232 7233 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 7234 return (-1); 7235 7236 if (s == NULL) 7237 s = ""; /* treat NULL as empty string */ 7238 7239top: 7240 olds = s; 7241 s1 = *s++; 7242 7243 if (p == NULL) 7244 return (0); 7245 7246 if ((c = *p++) == '\0') 7247 return (s1 == '\0'); 7248 7249 switch (c) { 7250 case '[': { 7251 int ok = 0, notflag = 0; 7252 char lc = '\0'; 7253 7254 if (s1 == '\0') 7255 return (0); 7256 7257 if (*p == '!') { 7258 notflag = 1; 7259 p++; 7260 } 7261 7262 if ((c = *p++) == '\0') 7263 return (0); 7264 7265 do { 7266 if (c == '-' && lc != '\0' && *p != ']') { 7267 if ((c = *p++) == '\0') 7268 return (0); 7269 if (c == '\\' && (c = *p++) == '\0') 7270 return (0); 7271 7272 if (notflag) { 7273 if (s1 < lc || s1 > c) 7274 ok++; 7275 else 7276 return (0); 7277 } else if (lc <= s1 && s1 <= c) 7278 ok++; 7279 7280 } else if (c == '\\' && (c = *p++) == '\0') 7281 return (0); 7282 7283 lc = c; /* save left-hand 'c' for next iteration */ 7284 7285 if (notflag) { 7286 if (s1 != c) 7287 ok++; 7288 else 7289 return (0); 7290 } else if (s1 == c) 7291 ok++; 7292 7293 if ((c = *p++) == '\0') 7294 return (0); 7295 7296 } while (c != ']'); 7297 7298 if (ok) 7299 goto top; 7300 7301 return (0); 7302 } 7303 7304 case '\\': 7305 if ((c = *p++) == '\0') 7306 return (0); 7307 /*FALLTHRU*/ 7308 7309 default: 7310 if (c != s1) 7311 return (0); 7312 /*FALLTHRU*/ 7313 7314 case '?': 7315 if (s1 != '\0') 7316 goto top; 7317 return (0); 7318 7319 case '*': 7320 while (*p == '*') 7321 p++; /* consecutive *'s are identical to a single one */ 7322 7323 if (*p == '\0') 7324 return (1); 7325 7326 for (s = olds; *s != 
'\0'; s++) { 7327 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 7328 return (gs); 7329 } 7330 7331 return (0); 7332 } 7333} 7334 7335/*ARGSUSED*/ 7336static int 7337dtrace_match_string(const char *s, const char *p, int depth) 7338{ 7339 return (s != NULL && strcmp(s, p) == 0); 7340} 7341 7342/*ARGSUSED*/ 7343static int 7344dtrace_match_nul(const char *s, const char *p, int depth) 7345{ 7346 return (1); /* always match the empty pattern */ 7347} 7348 7349/*ARGSUSED*/ 7350static int 7351dtrace_match_nonzero(const char *s, const char *p, int depth) 7352{ 7353 return (s != NULL && s[0] != '\0'); 7354} 7355 7356static int 7357dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 7358 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 7359{ 7360 dtrace_probe_t template, *probe; 7361 dtrace_hash_t *hash = NULL; 7362 int len, best = INT_MAX, nmatched = 0; 7363 dtrace_id_t i; 7364 7365 ASSERT(MUTEX_HELD(&dtrace_lock)); 7366 7367 /* 7368 * If the probe ID is specified in the key, just lookup by ID and 7369 * invoke the match callback once if a matching probe is found. 7370 */ 7371 if (pkp->dtpk_id != DTRACE_IDNONE) { 7372 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 7373 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 7374 (void) (*matched)(probe, arg); 7375 nmatched++; 7376 } 7377 return (nmatched); 7378 } 7379 7380 template.dtpr_mod = (char *)pkp->dtpk_mod; 7381 template.dtpr_func = (char *)pkp->dtpk_func; 7382 template.dtpr_name = (char *)pkp->dtpk_name; 7383 7384 /* 7385 * We want to find the most distinct of the module name, function 7386 * name, and name. So for each one that is not a glob pattern or 7387 * empty string, we perform a lookup in the corresponding hash and 7388 * use the hash table with the fewest collisions to do our search. 7389 */ 7390 if (pkp->dtpk_mmatch == &dtrace_match_string && 7391 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 7392 best = len; 7393 hash = dtrace_bymod; 7394 } 7395 7396 if (pkp->dtpk_fmatch == &dtrace_match_string && 7397 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 7398 best = len; 7399 hash = dtrace_byfunc; 7400 } 7401 7402 if (pkp->dtpk_nmatch == &dtrace_match_string && 7403 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 7404 best = len; 7405 hash = dtrace_byname; 7406 } 7407 7408 /* 7409 * If we did not select a hash table, iterate over every probe and 7410 * invoke our callback for each one that matches our input probe key. 7411 */ 7412 if (hash == NULL) { 7413 for (i = 0; i < dtrace_nprobes; i++) { 7414 if ((probe = dtrace_probes[i]) == NULL || 7415 dtrace_match_probe(probe, pkp, priv, uid, 7416 zoneid) <= 0) 7417 continue; 7418 7419 nmatched++; 7420 7421 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7422 break; 7423 } 7424 7425 return (nmatched); 7426 } 7427 7428 /* 7429 * If we selected a hash table, iterate over each probe of the same key 7430 * name and invoke the callback for every probe that matches the other 7431 * attributes of our input probe key. 
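 *
 * For a description such as syscall::read:entry, "read" is
 * typically far more selective than "entry": dtrace_byfunc
 * will report fewer collisions than dtrace_byname, so the
 * loop below walks only the probes whose function is "read"
 * and lets dtrace_match_probe() reject the rest.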
7432 */ 7433 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 7434 probe = *(DTRACE_HASHNEXT(hash, probe))) { 7435 7436 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 7437 continue; 7438 7439 nmatched++; 7440 7441 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7442 break; 7443 } 7444 7445 return (nmatched); 7446} 7447 7448/* 7449 * Return the function pointer that dtrace_match_probe() should use to compare 7450 * the specified pattern with a string. For NULL or empty patterns, we select 7451 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 7452 * For non-empty non-glob strings, we use dtrace_match_string(). 7453 */ 7454static dtrace_probekey_f * 7455dtrace_probekey_func(const char *p) 7456{ 7457 char c; 7458 7459 if (p == NULL || *p == '\0') 7460 return (&dtrace_match_nul); 7461 7462 while ((c = *p++) != '\0') { 7463 if (c == '[' || c == '?' || c == '*' || c == '\\') 7464 return (&dtrace_match_glob); 7465 } 7466 7467 return (&dtrace_match_string); 7468} 7469 7470/* 7471 * Build a probe comparison key for use with dtrace_match_probe() from the 7472 * given probe description. By convention, a null key only matches anchored 7473 * probes: if each field is the empty string, reset dtpk_fmatch to 7474 * dtrace_match_nonzero(). 7475 */ 7476static void 7477dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 7478{ 7479 pkp->dtpk_prov = pdp->dtpd_provider; 7480 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 7481 7482 pkp->dtpk_mod = pdp->dtpd_mod; 7483 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 7484 7485 pkp->dtpk_func = pdp->dtpd_func; 7486 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 7487 7488 pkp->dtpk_name = pdp->dtpd_name; 7489 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 7490 7491 pkp->dtpk_id = pdp->dtpd_id; 7492 7493 if (pkp->dtpk_id == DTRACE_IDNONE && 7494 pkp->dtpk_pmatch == &dtrace_match_nul && 7495 pkp->dtpk_mmatch == &dtrace_match_nul && 7496 pkp->dtpk_fmatch == &dtrace_match_nul && 7497 pkp->dtpk_nmatch == &dtrace_match_nul) 7498 pkp->dtpk_fmatch = &dtrace_match_nonzero; 7499} 7500 7501/* 7502 * DTrace Provider-to-Framework API Functions 7503 * 7504 * These functions implement much of the Provider-to-Framework API, as 7505 * described in <sys/dtrace.h>. The parts of the API not in this section are 7506 * the functions in the API for probe management (found below), and 7507 * dtrace_probe() itself (found above). 7508 */ 7509 7510/* 7511 * Register the calling provider with the DTrace framework. This should 7512 * generally be called by DTrace providers in their attach(9E) entry point. 7513 */ 7514int 7515dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 7516 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 7517{ 7518 dtrace_provider_t *provider; 7519 7520 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 7521 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7522 "arguments", name ?
name : "<NULL>"); 7523 return (EINVAL); 7524 } 7525 7526 if (name[0] == '\0' || dtrace_badname(name)) { 7527 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7528 "provider name", name); 7529 return (EINVAL); 7530 } 7531 7532 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 7533 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 7534 pops->dtps_destroy == NULL || 7535 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 7536 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7537 "provider ops", name); 7538 return (EINVAL); 7539 } 7540 7541 if (dtrace_badattr(&pap->dtpa_provider) || 7542 dtrace_badattr(&pap->dtpa_mod) || 7543 dtrace_badattr(&pap->dtpa_func) || 7544 dtrace_badattr(&pap->dtpa_name) || 7545 dtrace_badattr(&pap->dtpa_args)) { 7546 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7547 "provider attributes", name); 7548 return (EINVAL); 7549 } 7550 7551 if (priv & ~DTRACE_PRIV_ALL) { 7552 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7553 "privilege attributes", name); 7554 return (EINVAL); 7555 } 7556 7557 if ((priv & DTRACE_PRIV_KERNEL) && 7558 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 7559 pops->dtps_usermode == NULL) { 7560 cmn_err(CE_WARN, "failed to register provider '%s': need " 7561 "dtps_usermode() op for given privilege attributes", name); 7562 return (EINVAL); 7563 } 7564 7565 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 7566 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7567 (void) strcpy(provider->dtpv_name, name); 7568 7569 provider->dtpv_attr = *pap; 7570 provider->dtpv_priv.dtpp_flags = priv; 7571 if (cr != NULL) { 7572 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 7573 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 7574 } 7575 provider->dtpv_pops = *pops; 7576 7577 if (pops->dtps_provide == NULL) { 7578 ASSERT(pops->dtps_provide_module != NULL); 7579 provider->dtpv_pops.dtps_provide = 7580 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 7581 } 7582 7583 if (pops->dtps_provide_module == NULL) { 7584 ASSERT(pops->dtps_provide != NULL); 7585 provider->dtpv_pops.dtps_provide_module = 7586 (void (*)(void *, modctl_t *))dtrace_nullop; 7587 } 7588 7589 if (pops->dtps_suspend == NULL) { 7590 ASSERT(pops->dtps_resume == NULL); 7591 provider->dtpv_pops.dtps_suspend = 7592 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7593 provider->dtpv_pops.dtps_resume = 7594 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7595 } 7596 7597 provider->dtpv_arg = arg; 7598 *idp = (dtrace_provider_id_t)provider; 7599 7600 if (pops == &dtrace_provider_ops) { 7601 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7602 ASSERT(MUTEX_HELD(&dtrace_lock)); 7603 ASSERT(dtrace_anon.dta_enabling == NULL); 7604 7605 /* 7606 * We make sure that the DTrace provider is at the head of 7607 * the provider chain. 7608 */ 7609 provider->dtpv_next = dtrace_provider; 7610 dtrace_provider = provider; 7611 return (0); 7612 } 7613 7614 mutex_enter(&dtrace_provider_lock); 7615 mutex_enter(&dtrace_lock); 7616 7617 /* 7618 * If there is at least one provider registered, we'll add this 7619 * provider after the first provider. 
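 *
 * The head of the chain is reserved for the DTrace provider
 * itself (see the pops == &dtrace_provider_ops case above),
 * so external providers are always threaded in at the second
 * position:
 *
 *	dtrace -> new provider -> previously registered providers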
7620 */ 7621 if (dtrace_provider != NULL) { 7622 provider->dtpv_next = dtrace_provider->dtpv_next; 7623 dtrace_provider->dtpv_next = provider; 7624 } else { 7625 dtrace_provider = provider; 7626 } 7627 7628 if (dtrace_retained != NULL) { 7629 dtrace_enabling_provide(provider); 7630 7631 /* 7632 * Now we need to call dtrace_enabling_matchall() -- which 7633 * will acquire cpu_lock and dtrace_lock. We therefore need 7634 * to drop all of our locks before calling into it... 7635 */ 7636 mutex_exit(&dtrace_lock); 7637 mutex_exit(&dtrace_provider_lock); 7638 dtrace_enabling_matchall(); 7639 7640 return (0); 7641 } 7642 7643 mutex_exit(&dtrace_lock); 7644 mutex_exit(&dtrace_provider_lock); 7645 7646 return (0); 7647} 7648 7649/* 7650 * Unregister the specified provider from the DTrace framework. This should 7651 * generally be called by DTrace providers in their detach(9E) entry point. 7652 */ 7653int 7654dtrace_unregister(dtrace_provider_id_t id) 7655{ 7656 dtrace_provider_t *old = (dtrace_provider_t *)id; 7657 dtrace_provider_t *prev = NULL; 7658 int i, self = 0, noreap = 0; 7659 dtrace_probe_t *probe, *first = NULL; 7660 7661 if (old->dtpv_pops.dtps_enable == 7662 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 7663 /* 7664 * If DTrace itself is the provider, we're called with locks 7665 * already held. 7666 */ 7667 ASSERT(old == dtrace_provider); 7668#if defined(sun) 7669 ASSERT(dtrace_devi != NULL); 7670#endif 7671 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7672 ASSERT(MUTEX_HELD(&dtrace_lock)); 7673 self = 1; 7674 7675 if (dtrace_provider->dtpv_next != NULL) { 7676 /* 7677 * There's another provider here; return failure. 7678 */ 7679 return (EBUSY); 7680 } 7681 } else { 7682 mutex_enter(&dtrace_provider_lock); 7683#if defined(sun) 7684 mutex_enter(&mod_lock); 7685#endif 7686 mutex_enter(&dtrace_lock); 7687 } 7688 7689 /* 7690 * If anyone has /dev/dtrace open, or if there are anonymous enabled 7691 * probes, we refuse to let providers slither away, unless this 7692 * provider has already been explicitly invalidated. 7693 */ 7694 if (!old->dtpv_defunct && 7695 (dtrace_opens || (dtrace_anon.dta_state != NULL && 7696 dtrace_anon.dta_state->dts_necbs > 0))) { 7697 if (!self) { 7698 mutex_exit(&dtrace_lock); 7699#if defined(sun) 7700 mutex_exit(&mod_lock); 7701#endif 7702 mutex_exit(&dtrace_provider_lock); 7703 } 7704 return (EBUSY); 7705 } 7706 7707 /* 7708 * Attempt to destroy the probes associated with this provider. 7709 */ 7710 for (i = 0; i < dtrace_nprobes; i++) { 7711 if ((probe = dtrace_probes[i]) == NULL) 7712 continue; 7713 7714 if (probe->dtpr_provider != old) 7715 continue; 7716 7717 if (probe->dtpr_ecb == NULL) 7718 continue; 7719 7720 /* 7721 * If we are trying to unregister a defunct provider, and the 7722 * provider was made defunct within the interval dictated by 7723 * dtrace_unregister_defunct_reap, we'll (asynchronously) 7724 * attempt to reap our enablings. To denote that the provider 7725 * should reattempt to unregister itself at some point in the 7726 * future, we will return a differentiable error code (EAGAIN 7727 * instead of EBUSY) in this case. 
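 *
 * A sketch of how a provider's detach routine might react to
 * the two codes (hypothetical caller, not part of this file):
 *
 *	switch (dtrace_unregister(myprov_id)) {
 *	case 0:
 *		break;			(fully unregistered)
 *	case EAGAIN:
 *		return (retry later);	(a reap was dispatched)
 *	default:
 *		return (fail detach);	(genuinely busy)
 *	}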
7728 */ 7729 if (dtrace_gethrtime() - old->dtpv_defunct > 7730 dtrace_unregister_defunct_reap) 7731 noreap = 1; 7732 7733 if (!self) { 7734 mutex_exit(&dtrace_lock); 7735#if defined(sun) 7736 mutex_exit(&mod_lock); 7737#endif 7738 mutex_exit(&dtrace_provider_lock); 7739 } 7740 7741 if (noreap) 7742 return (EBUSY); 7743 7744 (void) taskq_dispatch(dtrace_taskq, 7745 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP); 7746 7747 return (EAGAIN); 7748 } 7749 7750 /* 7751 * All of the probes for this provider are disabled; we can safely 7752 * remove all of them from their hash chains and from the probe array. 7753 */ 7754 for (i = 0; i < dtrace_nprobes; i++) { 7755 if ((probe = dtrace_probes[i]) == NULL) 7756 continue; 7757 7758 if (probe->dtpr_provider != old) 7759 continue; 7760 7761 dtrace_probes[i] = NULL; 7762 7763 dtrace_hash_remove(dtrace_bymod, probe); 7764 dtrace_hash_remove(dtrace_byfunc, probe); 7765 dtrace_hash_remove(dtrace_byname, probe); 7766 7767 if (first == NULL) { 7768 first = probe; 7769 probe->dtpr_nextmod = NULL; 7770 } else { 7771 probe->dtpr_nextmod = first; 7772 first = probe; 7773 } 7774 } 7775 7776 /* 7777 * The provider's probes have been removed from the hash chains and 7778 * from the probe array. Now issue a dtrace_sync() to be sure that 7779 * everyone has cleared out from any probe array processing. 7780 */ 7781 dtrace_sync(); 7782 7783 for (probe = first; probe != NULL; probe = first) { 7784 first = probe->dtpr_nextmod; 7785 7786 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 7787 probe->dtpr_arg); 7788 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7789 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7790 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7791#if defined(sun) 7792 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 7793#else 7794 free_unr(dtrace_arena, probe->dtpr_id); 7795#endif 7796 kmem_free(probe, sizeof (dtrace_probe_t)); 7797 } 7798 7799 if ((prev = dtrace_provider) == old) { 7800#if defined(sun) 7801 ASSERT(self || dtrace_devi == NULL); 7802 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 7803#endif 7804 dtrace_provider = old->dtpv_next; 7805 } else { 7806 while (prev != NULL && prev->dtpv_next != old) 7807 prev = prev->dtpv_next; 7808 7809 if (prev == NULL) { 7810 panic("attempt to unregister non-existent " 7811 "dtrace provider %p\n", (void *)id); 7812 } 7813 7814 prev->dtpv_next = old->dtpv_next; 7815 } 7816 7817 if (!self) { 7818 mutex_exit(&dtrace_lock); 7819#if defined(sun) 7820 mutex_exit(&mod_lock); 7821#endif 7822 mutex_exit(&dtrace_provider_lock); 7823 } 7824 7825 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 7826 kmem_free(old, sizeof (dtrace_provider_t)); 7827 7828 return (0); 7829} 7830 7831/* 7832 * Invalidate the specified provider. All subsequent probe lookups for the 7833 * specified provider will fail, but its probes will not be removed. 7834 */ 7835void 7836dtrace_invalidate(dtrace_provider_id_t id) 7837{ 7838 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 7839 7840 ASSERT(pvp->dtpv_pops.dtps_enable != 7841 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7842 7843 mutex_enter(&dtrace_provider_lock); 7844 mutex_enter(&dtrace_lock); 7845 7846 pvp->dtpv_defunct = dtrace_gethrtime(); 7847 7848 mutex_exit(&dtrace_lock); 7849 mutex_exit(&dtrace_provider_lock); 7850} 7851 7852/* 7853 * Indicate whether or not DTrace has attached. 
7854 */ 7855int 7856dtrace_attached(void) 7857{ 7858 /* 7859 * dtrace_provider will be non-NULL iff the DTrace driver has 7860 * attached. (It's non-NULL because DTrace is always itself a 7861 * provider.) 7862 */ 7863 return (dtrace_provider != NULL); 7864} 7865 7866/* 7867 * Remove all the unenabled probes for the given provider. This function is 7868 * not unlike dtrace_unregister(), except that it doesn't remove the provider 7869 * -- just as many of its associated probes as it can. 7870 */ 7871int 7872dtrace_condense(dtrace_provider_id_t id) 7873{ 7874 dtrace_provider_t *prov = (dtrace_provider_t *)id; 7875 int i; 7876 dtrace_probe_t *probe; 7877 7878 /* 7879 * Make sure this isn't the dtrace provider itself. 7880 */ 7881 ASSERT(prov->dtpv_pops.dtps_enable != 7882 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7883 7884 mutex_enter(&dtrace_provider_lock); 7885 mutex_enter(&dtrace_lock); 7886 7887 /* 7888 * Attempt to destroy the probes associated with this provider. 7889 */ 7890 for (i = 0; i < dtrace_nprobes; i++) { 7891 if ((probe = dtrace_probes[i]) == NULL) 7892 continue; 7893 7894 if (probe->dtpr_provider != prov) 7895 continue; 7896 7897 if (probe->dtpr_ecb != NULL) 7898 continue; 7899 7900 dtrace_probes[i] = NULL; 7901 7902 dtrace_hash_remove(dtrace_bymod, probe); 7903 dtrace_hash_remove(dtrace_byfunc, probe); 7904 dtrace_hash_remove(dtrace_byname, probe); 7905 7906 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7907 probe->dtpr_arg); 7908 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7909 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7910 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7911 kmem_free(probe, sizeof (dtrace_probe_t)); 7912#if defined(sun) 7913 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7914#else 7915 free_unr(dtrace_arena, i + 1); 7916#endif 7917 } 7918 7919 mutex_exit(&dtrace_lock); 7920 mutex_exit(&dtrace_provider_lock); 7921 7922 return (0); 7923} 7924 7925/* 7926 * DTrace Probe Management Functions 7927 * 7928 * The functions in this section perform the DTrace probe management, 7929 * including functions to create probes, look-up probes, and call into the 7930 * providers to request that probes be provided. Some of these functions are 7931 * in the Provider-to-Framework API; these functions can be identified by the 7932 * fact that they are not declared "static". 7933 */ 7934 7935/* 7936 * Create a probe with the specified module name, function name, and name. 
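 *
 * A provider typically calls this from its dtps_provide()
 * entry point; e.g., with hypothetical names:
 *
 *	id = dtrace_probe_create(myprov_id, "mymodule",
 *	    "my_function", "entry", 0, my_probe_state);
 *
 * The returned dtrace_id_t is the identifier later handed back
 * to the provider in dtps_enable(), dtps_disable() and
 * dtps_destroy().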
7937 */ 7938dtrace_id_t 7939dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7940 const char *func, const char *name, int aframes, void *arg) 7941{ 7942 dtrace_probe_t *probe, **probes; 7943 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7944 dtrace_id_t id; 7945 7946 if (provider == dtrace_provider) { 7947 ASSERT(MUTEX_HELD(&dtrace_lock)); 7948 } else { 7949 mutex_enter(&dtrace_lock); 7950 } 7951 7952#if defined(sun) 7953 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7954 VM_BESTFIT | VM_SLEEP); 7955#else 7956 id = alloc_unr(dtrace_arena); 7957#endif 7958 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 7959 7960 probe->dtpr_id = id; 7961 probe->dtpr_gen = dtrace_probegen++; 7962 probe->dtpr_mod = dtrace_strdup(mod); 7963 probe->dtpr_func = dtrace_strdup(func); 7964 probe->dtpr_name = dtrace_strdup(name); 7965 probe->dtpr_arg = arg; 7966 probe->dtpr_aframes = aframes; 7967 probe->dtpr_provider = provider; 7968 7969 dtrace_hash_add(dtrace_bymod, probe); 7970 dtrace_hash_add(dtrace_byfunc, probe); 7971 dtrace_hash_add(dtrace_byname, probe); 7972 7973 if (id - 1 >= dtrace_nprobes) { 7974 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 7975 size_t nsize = osize << 1; 7976 7977 if (nsize == 0) { 7978 ASSERT(osize == 0); 7979 ASSERT(dtrace_probes == NULL); 7980 nsize = sizeof (dtrace_probe_t *); 7981 } 7982 7983 probes = kmem_zalloc(nsize, KM_SLEEP); 7984 7985 if (dtrace_probes == NULL) { 7986 ASSERT(osize == 0); 7987 dtrace_probes = probes; 7988 dtrace_nprobes = 1; 7989 } else { 7990 dtrace_probe_t **oprobes = dtrace_probes; 7991 7992 bcopy(oprobes, probes, osize); 7993 dtrace_membar_producer(); 7994 dtrace_probes = probes; 7995 7996 dtrace_sync(); 7997 7998 /* 7999 * All CPUs are now seeing the new probes array; we can 8000 * safely free the old array. 8001 */ 8002 kmem_free(oprobes, osize); 8003 dtrace_nprobes <<= 1; 8004 } 8005 8006 ASSERT(id - 1 < dtrace_nprobes); 8007 } 8008 8009 ASSERT(dtrace_probes[id - 1] == NULL); 8010 dtrace_probes[id - 1] = probe; 8011 8012 if (provider != dtrace_provider) 8013 mutex_exit(&dtrace_lock); 8014 8015 return (id); 8016} 8017 8018static dtrace_probe_t * 8019dtrace_probe_lookup_id(dtrace_id_t id) 8020{ 8021 ASSERT(MUTEX_HELD(&dtrace_lock)); 8022 8023 if (id == 0 || id > dtrace_nprobes) 8024 return (NULL); 8025 8026 return (dtrace_probes[id - 1]); 8027} 8028 8029static int 8030dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 8031{ 8032 *((dtrace_id_t *)arg) = probe->dtpr_id; 8033 8034 return (DTRACE_MATCH_DONE); 8035} 8036 8037/* 8038 * Look up a probe based on provider and one or more of module name, function 8039 * name and probe name. 8040 */ 8041dtrace_id_t 8042dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 8043 char *func, char *name) 8044{ 8045 dtrace_probekey_t pkey; 8046 dtrace_id_t id; 8047 int match; 8048 8049 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 8050 pkey.dtpk_pmatch = &dtrace_match_string; 8051 pkey.dtpk_mod = mod; 8052 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 8053 pkey.dtpk_func = func; 8054 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 8055 pkey.dtpk_name = name; 8056 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 8057 pkey.dtpk_id = DTRACE_IDNONE; 8058 8059 mutex_enter(&dtrace_lock); 8060 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 8061 dtrace_probe_lookup_match, &id); 8062 mutex_exit(&dtrace_lock); 8063 8064 ASSERT(match == 1 || match == 0); 8065 return (match ? 
id : 0); 8066} 8067 8068/* 8069 * Returns the probe argument associated with the specified probe. 8070 */ 8071void * 8072dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 8073{ 8074 dtrace_probe_t *probe; 8075 void *rval = NULL; 8076 8077 mutex_enter(&dtrace_lock); 8078 8079 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 8080 probe->dtpr_provider == (dtrace_provider_t *)id) 8081 rval = probe->dtpr_arg; 8082 8083 mutex_exit(&dtrace_lock); 8084 8085 return (rval); 8086} 8087 8088/* 8089 * Copy a probe into a probe description. 8090 */ 8091static void 8092dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 8093{ 8094 bzero(pdp, sizeof (dtrace_probedesc_t)); 8095 pdp->dtpd_id = prp->dtpr_id; 8096 8097 (void) strncpy(pdp->dtpd_provider, 8098 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 8099 8100 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 8101 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 8102 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 8103} 8104 8105/* 8106 * Called to indicate that a probe -- or probes -- should be provided by a 8107 * specified provider. If the specified description is NULL, the provider will 8108 * be told to provide all of its probes. (This is done whenever a new 8109 * consumer comes along, or whenever a retained enabling is to be matched.) If 8110 * the specified description is non-NULL, the provider is given the 8111 * opportunity to dynamically provide the specified probe, allowing providers 8112 * to support the creation of probes on-the-fly. (So-called _autocreated_ 8113 * probes.) If the provider is NULL, the operations will be applied to all 8114 * providers; if the provider is non-NULL the operations will only be applied 8115 * to the specified provider. The dtrace_provider_lock must be held, and the 8116 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 8117 * will need to grab the dtrace_lock when it reenters the framework through 8118 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 8119 */ 8120static void 8121dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 8122{ 8123#if defined(sun) 8124 modctl_t *ctl; 8125#endif 8126 int all = 0; 8127 8128 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8129 8130 if (prv == NULL) { 8131 all = 1; 8132 prv = dtrace_provider; 8133 } 8134 8135 do { 8136 /* 8137 * First, call the blanket provide operation. 8138 */ 8139 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 8140 8141#if defined(sun) 8142 /* 8143 * Now call the per-module provide operation. We will grab 8144 * mod_lock to prevent the list from being modified. Note 8145 * that this also prevents the mod_busy bits from changing. 8146 * (mod_busy can only be changed with mod_lock held.) 8147 */ 8148 mutex_enter(&mod_lock); 8149 8150 ctl = &modules; 8151 do { 8152 if (ctl->mod_busy || ctl->mod_mp == NULL) 8153 continue; 8154 8155 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 8156 8157 } while ((ctl = ctl->mod_next) != &modules); 8158 8159 mutex_exit(&mod_lock); 8160#endif 8161 } while (all && (prv = prv->dtpv_next) != NULL); 8162} 8163 8164#if defined(sun) 8165/* 8166 * Iterate over each probe, and call the Framework-to-Provider API function 8167 * denoted by offs.
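 *
 * offs is a byte offset into dtrace_pops_t, so callers select
 * the operation with offsetof(); suspending every enabled
 * probe would look like
 *
 *	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));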
8168 */ 8169static void 8170dtrace_probe_foreach(uintptr_t offs) 8171{ 8172 dtrace_provider_t *prov; 8173 void (*func)(void *, dtrace_id_t, void *); 8174 dtrace_probe_t *probe; 8175 dtrace_icookie_t cookie; 8176 int i; 8177 8178 /* 8179 * We disable interrupts to walk through the probe array. This is 8180 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 8181 * won't see stale data. 8182 */ 8183 cookie = dtrace_interrupt_disable(); 8184 8185 for (i = 0; i < dtrace_nprobes; i++) { 8186 if ((probe = dtrace_probes[i]) == NULL) 8187 continue; 8188 8189 if (probe->dtpr_ecb == NULL) { 8190 /* 8191 * This probe isn't enabled -- don't call the function. 8192 */ 8193 continue; 8194 } 8195 8196 prov = probe->dtpr_provider; 8197 func = *((void(**)(void *, dtrace_id_t, void *)) 8198 ((uintptr_t)&prov->dtpv_pops + offs)); 8199 8200 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 8201 } 8202 8203 dtrace_interrupt_enable(cookie); 8204} 8205#endif 8206 8207static int 8208dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 8209{ 8210 dtrace_probekey_t pkey; 8211 uint32_t priv; 8212 uid_t uid; 8213 zoneid_t zoneid; 8214 8215 ASSERT(MUTEX_HELD(&dtrace_lock)); 8216 dtrace_ecb_create_cache = NULL; 8217 8218 if (desc == NULL) { 8219 /* 8220 * If we're passed a NULL description, we're being asked to 8221 * create an ECB with a NULL probe. 8222 */ 8223 (void) dtrace_ecb_create_enable(NULL, enab); 8224 return (0); 8225 } 8226 8227 dtrace_probekey(desc, &pkey); 8228 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 8229 &priv, &uid, &zoneid); 8230 8231 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 8232 enab)); 8233} 8234 8235/* 8236 * DTrace Helper Provider Functions 8237 */ 8238static void 8239dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 8240{ 8241 attr->dtat_name = DOF_ATTR_NAME(dofattr); 8242 attr->dtat_data = DOF_ATTR_DATA(dofattr); 8243 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 8244} 8245 8246static void 8247dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 8248 const dof_provider_t *dofprov, char *strtab) 8249{ 8250 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 8251 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 8252 dofprov->dofpv_provattr); 8253 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 8254 dofprov->dofpv_modattr); 8255 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 8256 dofprov->dofpv_funcattr); 8257 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 8258 dofprov->dofpv_nameattr); 8259 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 8260 dofprov->dofpv_argsattr); 8261} 8262 8263static void 8264dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8265{ 8266 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8267 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8268 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 8269 dof_provider_t *provider; 8270 dof_probe_t *probe; 8271 uint32_t *off, *enoff; 8272 uint8_t *arg; 8273 char *strtab; 8274 uint_t i, nprobes; 8275 dtrace_helper_provdesc_t dhpv; 8276 dtrace_helper_probedesc_t dhpb; 8277 dtrace_meta_t *meta = dtrace_meta_pid; 8278 dtrace_mops_t *mops = &meta->dtm_mops; 8279 void *parg; 8280 8281 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8282 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8283 provider->dofpv_strtab * dof->dofh_secsize); 8284 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8285 provider->dofpv_probes * dof->dofh_secsize); 8286 arg_sec = 
(dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8287 provider->dofpv_prargs * dof->dofh_secsize); 8288 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8289 provider->dofpv_proffs * dof->dofh_secsize); 8290 8291 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8292 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 8293 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 8294 enoff = NULL; 8295 8296 /* 8297 * See dtrace_helper_provider_validate(). 8298 */ 8299 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 8300 provider->dofpv_prenoffs != DOF_SECT_NONE) { 8301 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8302 provider->dofpv_prenoffs * dof->dofh_secsize); 8303 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 8304 } 8305 8306 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 8307 8308 /* 8309 * Create the provider. 8310 */ 8311 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8312 8313 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 8314 return; 8315 8316 meta->dtm_count++; 8317 8318 /* 8319 * Create the probes. 8320 */ 8321 for (i = 0; i < nprobes; i++) { 8322 probe = (dof_probe_t *)(uintptr_t)(daddr + 8323 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 8324 8325 dhpb.dthpb_mod = dhp->dofhp_mod; 8326 dhpb.dthpb_func = strtab + probe->dofpr_func; 8327 dhpb.dthpb_name = strtab + probe->dofpr_name; 8328 dhpb.dthpb_base = probe->dofpr_addr; 8329 dhpb.dthpb_offs = off + probe->dofpr_offidx; 8330 dhpb.dthpb_noffs = probe->dofpr_noffs; 8331 if (enoff != NULL) { 8332 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 8333 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 8334 } else { 8335 dhpb.dthpb_enoffs = NULL; 8336 dhpb.dthpb_nenoffs = 0; 8337 } 8338 dhpb.dthpb_args = arg + probe->dofpr_argidx; 8339 dhpb.dthpb_nargc = probe->dofpr_nargc; 8340 dhpb.dthpb_xargc = probe->dofpr_xargc; 8341 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 8342 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 8343 8344 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 8345 } 8346} 8347 8348static void 8349dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 8350{ 8351 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8352 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8353 int i; 8354 8355 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8356 8357 for (i = 0; i < dof->dofh_secnum; i++) { 8358 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8359 dof->dofh_secoff + i * dof->dofh_secsize); 8360 8361 if (sec->dofs_type != DOF_SECT_PROVIDER) 8362 continue; 8363 8364 dtrace_helper_provide_one(dhp, sec, pid); 8365 } 8366 8367 /* 8368 * We may have just created probes, so we must now rematch against 8369 * any retained enablings. Note that this call will acquire both 8370 * cpu_lock and dtrace_lock; the fact that we are holding 8371 * dtrace_meta_lock now is what defines the ordering with respect to 8372 * these three locks. 
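 *
 * Stated as an ordering rule: any path that takes more than
 * one of these locks must acquire them as
 *
 *	dtrace_meta_lock, then cpu_lock, then dtrace_lock
 *
 * or risk deadlocking against this one.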
*/ 8374 dtrace_enabling_matchall(); 8375} 8376 8377static void 8378dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8379{ 8380 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8381 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8382 dof_sec_t *str_sec; 8383 dof_provider_t *provider; 8384 char *strtab; 8385 dtrace_helper_provdesc_t dhpv; 8386 dtrace_meta_t *meta = dtrace_meta_pid; 8387 dtrace_mops_t *mops = &meta->dtm_mops; 8388 8389 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8390 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8391 provider->dofpv_strtab * dof->dofh_secsize); 8392 8393 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8394 8395 /* 8396 * Create the provider. 8397 */ 8398 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8399 8400 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 8401 8402 meta->dtm_count--; 8403} 8404 8405static void 8406dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 8407{ 8408 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8409 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8410 int i; 8411 8412 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8413 8414 for (i = 0; i < dof->dofh_secnum; i++) { 8415 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8416 dof->dofh_secoff + i * dof->dofh_secsize); 8417 8418 if (sec->dofs_type != DOF_SECT_PROVIDER) 8419 continue; 8420 8421 dtrace_helper_provider_remove_one(dhp, sec, pid); 8422 } 8423} 8424 8425/* 8426 * DTrace Meta Provider-to-Framework API Functions 8427 * 8428 * These functions implement the Meta Provider-to-Framework API, as described 8429 * in <sys/dtrace.h>. 8430 */ 8431int 8432dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 8433 dtrace_meta_provider_id_t *idp) 8434{ 8435 dtrace_meta_t *meta; 8436 dtrace_helpers_t *help, *next; 8437 int i; 8438 8439 *idp = DTRACE_METAPROVNONE; 8440 8441 /* 8442 * We strictly don't need the name, but we hold onto it for 8443 * debuggability. All hail error queues! 8444 */ 8445 if (name == NULL) { 8446 cmn_err(CE_WARN, "failed to register meta-provider: " 8447 "invalid name"); 8448 return (EINVAL); 8449 } 8450 8451 if (mops == NULL || 8452 mops->dtms_create_probe == NULL || 8453 mops->dtms_provide_pid == NULL || 8454 mops->dtms_remove_pid == NULL) { 8455 cmn_err(CE_WARN, "failed to register meta-provider %s: " 8456 "invalid ops", name); 8457 return (EINVAL); 8458 } 8459 8460 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 8461 meta->dtm_mops = *mops; 8462 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 8463 (void) strcpy(meta->dtm_name, name); 8464 meta->dtm_arg = arg; 8465 8466 mutex_enter(&dtrace_meta_lock); 8467 mutex_enter(&dtrace_lock); 8468 8469 if (dtrace_meta_pid != NULL) { 8470 mutex_exit(&dtrace_lock); 8471 mutex_exit(&dtrace_meta_lock); 8472 cmn_err(CE_WARN, "failed to register meta-provider %s: " 8473 "user-land meta-provider exists", name); 8474 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 8475 kmem_free(meta, sizeof (dtrace_meta_t)); 8476 return (EINVAL); 8477 } 8478 8479 dtrace_meta_pid = meta; 8480 *idp = (dtrace_meta_provider_id_t)meta; 8481 8482 /* 8483 * If there are providers and probes ready to go, pass them 8484 * off to the new meta provider now.
8485 */ 8486 8487 help = dtrace_deferred_pid; 8488 dtrace_deferred_pid = NULL; 8489 8490 mutex_exit(&dtrace_lock); 8491 8492 while (help != NULL) { 8493 for (i = 0; i < help->dthps_nprovs; i++) { 8494 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 8495 help->dthps_pid); 8496 } 8497 8498 next = help->dthps_next; 8499 help->dthps_next = NULL; 8500 help->dthps_prev = NULL; 8501 help->dthps_deferred = 0; 8502 help = next; 8503 } 8504 8505 mutex_exit(&dtrace_meta_lock); 8506 8507 return (0); 8508} 8509 8510int 8511dtrace_meta_unregister(dtrace_meta_provider_id_t id) 8512{ 8513 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 8514 8515 mutex_enter(&dtrace_meta_lock); 8516 mutex_enter(&dtrace_lock); 8517 8518 if (old == dtrace_meta_pid) { 8519 pp = &dtrace_meta_pid; 8520 } else { 8521 panic("attempt to unregister non-existent " 8522 "dtrace meta-provider %p\n", (void *)old); 8523 } 8524 8525 if (old->dtm_count != 0) { 8526 mutex_exit(&dtrace_lock); 8527 mutex_exit(&dtrace_meta_lock); 8528 return (EBUSY); 8529 } 8530 8531 *pp = NULL; 8532 8533 mutex_exit(&dtrace_lock); 8534 mutex_exit(&dtrace_meta_lock); 8535 8536 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 8537 kmem_free(old, sizeof (dtrace_meta_t)); 8538 8539 return (0); 8540} 8541 8542 8543/* 8544 * DTrace DIF Object Functions 8545 */ 8546static int 8547dtrace_difo_err(uint_t pc, const char *format, ...) 8548{ 8549 if (dtrace_err_verbose) { 8550 va_list alist; 8551 8552 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 8553 va_start(alist, format); 8554 (void) vuprintf(format, alist); 8555 va_end(alist); 8556 } 8557 8558#ifdef DTRACE_ERRDEBUG 8559 dtrace_errdebug(format); 8560#endif 8561 return (1); 8562} 8563 8564/* 8565 * Validate a DTrace DIF object by checking the IR instructions. The following 8566 * rules are currently enforced by dtrace_difo_validate(): 8567 * 8568 * 1. Each instruction must have a valid opcode 8569 * 2. Each register, string, variable, or subroutine reference must be valid 8570 * 3. No instruction can modify register %r0 (must be zero) 8571 * 4. All instruction reserved bits must be set to zero 8572 * 5. The last instruction must be a "ret" instruction 8573 * 6. All branch targets must reference a valid instruction _after_ the branch 8574 */ 8575static int 8576dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 8577 cred_t *cr) 8578{ 8579 int err = 0, i; 8580 int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 8581 int kcheckload; 8582 uint_t pc; 8583 8584 kcheckload = cr == NULL || 8585 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 8586 8587 dp->dtdo_destructive = 0; 8588 8589 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 8590 dif_instr_t instr = dp->dtdo_buf[pc]; 8591 8592 uint_t r1 = DIF_INSTR_R1(instr); 8593 uint_t r2 = DIF_INSTR_R2(instr); 8594 uint_t rd = DIF_INSTR_RD(instr); 8595 uint_t rs = DIF_INSTR_RS(instr); 8596 uint_t label = DIF_INSTR_LABEL(instr); 8597 uint_t v = DIF_INSTR_VAR(instr); 8598 uint_t subr = DIF_INSTR_SUBR(instr); 8599 uint_t type = DIF_INSTR_TYPE(instr); 8600 uint_t op = DIF_INSTR_OP(instr); 8601 8602 switch (op) { 8603 case DIF_OP_OR: 8604 case DIF_OP_XOR: 8605 case DIF_OP_AND: 8606 case DIF_OP_SLL: 8607 case DIF_OP_SRL: 8608 case DIF_OP_SRA: 8609 case DIF_OP_SUB: 8610 case DIF_OP_ADD: 8611 case DIF_OP_MUL: 8612 case DIF_OP_SDIV: 8613 case DIF_OP_UDIV: 8614 case DIF_OP_SREM: 8615 case DIF_OP_UREM: 8616 case DIF_OP_COPYS: 8617 if (r1 >= nregs) 8618 err += efunc(pc, "invalid register %u\n", r1); 8619 if (r2 >= nregs) 8620 err += efunc(pc, "invalid register %u\n", r2); 8621 if (rd >= nregs) 8622 err += efunc(pc, "invalid register %u\n", rd); 8623 if (rd == 0) 8624 err += efunc(pc, "cannot write to %r0\n"); 8625 break; 8626 case DIF_OP_NOT: 8627 case DIF_OP_MOV: 8628 case DIF_OP_ALLOCS: 8629 if (r1 >= nregs) 8630 err += efunc(pc, "invalid register %u\n", r1); 8631 if (r2 != 0) 8632 err += efunc(pc, "non-zero reserved bits\n"); 8633 if (rd >= nregs) 8634 err += efunc(pc, "invalid register %u\n", rd); 8635 if (rd == 0) 8636 err += efunc(pc, "cannot write to %r0\n"); 8637 break; 8638 case DIF_OP_LDSB: 8639 case DIF_OP_LDSH: 8640 case DIF_OP_LDSW: 8641 case DIF_OP_LDUB: 8642 case DIF_OP_LDUH: 8643 case DIF_OP_LDUW: 8644 case DIF_OP_LDX: 8645 if (r1 >= nregs) 8646 err += efunc(pc, "invalid register %u\n", r1); 8647 if (r2 != 0) 8648 err += efunc(pc, "non-zero reserved bits\n"); 8649 if (rd >= nregs) 8650 err += efunc(pc, "invalid register %u\n", rd); 8651 if (rd == 0) 8652 err += efunc(pc, "cannot write to %r0\n"); 8653 if (kcheckload) 8654 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 8655 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 8656 break; 8657 case DIF_OP_RLDSB: 8658 case DIF_OP_RLDSH: 8659 case DIF_OP_RLDSW: 8660 case DIF_OP_RLDUB: 8661 case DIF_OP_RLDUH: 8662 case DIF_OP_RLDUW: 8663 case DIF_OP_RLDX: 8664 if (r1 >= nregs) 8665 err += efunc(pc, "invalid register %u\n", r1); 8666 if (r2 != 0) 8667 err += efunc(pc, "non-zero reserved bits\n"); 8668 if (rd >= nregs) 8669 err += efunc(pc, "invalid register %u\n", rd); 8670 if (rd == 0) 8671 err += efunc(pc, "cannot write to %r0\n"); 8672 break; 8673 case DIF_OP_ULDSB: 8674 case DIF_OP_ULDSH: 8675 case DIF_OP_ULDSW: 8676 case DIF_OP_ULDUB: 8677 case DIF_OP_ULDUH: 8678 case DIF_OP_ULDUW: 8679 case DIF_OP_ULDX: 8680 if (r1 >= nregs) 8681 err += efunc(pc, "invalid register %u\n", r1); 8682 if (r2 != 0) 8683 err += efunc(pc, "non-zero reserved bits\n"); 8684 if (rd >= nregs) 8685 err += efunc(pc, "invalid register %u\n", rd); 8686 if (rd == 0) 8687 err += efunc(pc, "cannot write to %r0\n"); 8688 break; 8689 case DIF_OP_STB: 8690 case DIF_OP_STH: 8691 case DIF_OP_STW: 8692 case DIF_OP_STX: 8693 if (r1 >= nregs) 8694 err += efunc(pc, "invalid register %u\n", r1); 8695 if (r2 != 0) 8696 err += efunc(pc, "non-zero reserved bits\n"); 8697 if (rd >= nregs) 8698 err += efunc(pc, "invalid register %u\n", rd); 8699 if (rd == 0) 8700 err += efunc(pc, "cannot write to 0 address\n"); 8701 break; 8702 case 
DIF_OP_CMP: 8703 case DIF_OP_SCMP: 8704 if (r1 >= nregs) 8705 err += efunc(pc, "invalid register %u\n", r1); 8706 if (r2 >= nregs) 8707 err += efunc(pc, "invalid register %u\n", r2); 8708 if (rd != 0) 8709 err += efunc(pc, "non-zero reserved bits\n"); 8710 break; 8711 case DIF_OP_TST: 8712 if (r1 >= nregs) 8713 err += efunc(pc, "invalid register %u\n", r1); 8714 if (r2 != 0 || rd != 0) 8715 err += efunc(pc, "non-zero reserved bits\n"); 8716 break; 8717 case DIF_OP_BA: 8718 case DIF_OP_BE: 8719 case DIF_OP_BNE: 8720 case DIF_OP_BG: 8721 case DIF_OP_BGU: 8722 case DIF_OP_BGE: 8723 case DIF_OP_BGEU: 8724 case DIF_OP_BL: 8725 case DIF_OP_BLU: 8726 case DIF_OP_BLE: 8727 case DIF_OP_BLEU: 8728 if (label >= dp->dtdo_len) { 8729 err += efunc(pc, "invalid branch target %u\n", 8730 label); 8731 } 8732 if (label <= pc) { 8733 err += efunc(pc, "backward branch to %u\n", 8734 label); 8735 } 8736 break; 8737 case DIF_OP_RET: 8738 if (r1 != 0 || r2 != 0) 8739 err += efunc(pc, "non-zero reserved bits\n"); 8740 if (rd >= nregs) 8741 err += efunc(pc, "invalid register %u\n", rd); 8742 break; 8743 case DIF_OP_NOP: 8744 case DIF_OP_POPTS: 8745 case DIF_OP_FLUSHTS: 8746 if (r1 != 0 || r2 != 0 || rd != 0) 8747 err += efunc(pc, "non-zero reserved bits\n"); 8748 break; 8749 case DIF_OP_SETX: 8750 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 8751 err += efunc(pc, "invalid integer ref %u\n", 8752 DIF_INSTR_INTEGER(instr)); 8753 } 8754 if (rd >= nregs) 8755 err += efunc(pc, "invalid register %u\n", rd); 8756 if (rd == 0) 8757 err += efunc(pc, "cannot write to %r0\n"); 8758 break; 8759 case DIF_OP_SETS: 8760 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 8761 err += efunc(pc, "invalid string ref %u\n", 8762 DIF_INSTR_STRING(instr)); 8763 } 8764 if (rd >= nregs) 8765 err += efunc(pc, "invalid register %u\n", rd); 8766 if (rd == 0) 8767 err += efunc(pc, "cannot write to %r0\n"); 8768 break; 8769 case DIF_OP_LDGA: 8770 case DIF_OP_LDTA: 8771 if (r1 > DIF_VAR_ARRAY_MAX) 8772 err += efunc(pc, "invalid array %u\n", r1); 8773 if (r2 >= nregs) 8774 err += efunc(pc, "invalid register %u\n", r2); 8775 if (rd >= nregs) 8776 err += efunc(pc, "invalid register %u\n", rd); 8777 if (rd == 0) 8778 err += efunc(pc, "cannot write to %r0\n"); 8779 break; 8780 case DIF_OP_LDGS: 8781 case DIF_OP_LDTS: 8782 case DIF_OP_LDLS: 8783 case DIF_OP_LDGAA: 8784 case DIF_OP_LDTAA: 8785 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 8786 err += efunc(pc, "invalid variable %u\n", v); 8787 if (rd >= nregs) 8788 err += efunc(pc, "invalid register %u\n", rd); 8789 if (rd == 0) 8790 err += efunc(pc, "cannot write to %r0\n"); 8791 break; 8792 case DIF_OP_STGS: 8793 case DIF_OP_STTS: 8794 case DIF_OP_STLS: 8795 case DIF_OP_STGAA: 8796 case DIF_OP_STTAA: 8797 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 8798 err += efunc(pc, "invalid variable %u\n", v); 8799 if (rs >= nregs) 8800 err += efunc(pc, "invalid register %u\n", rs); 8801 break; 8802 case DIF_OP_CALL: 8803 if (subr > DIF_SUBR_MAX) 8804 err += efunc(pc, "invalid subr %u\n", subr); 8805 if (rd >= nregs) 8806 err += efunc(pc, "invalid register %u\n", rd); 8807 if (rd == 0) 8808 err += efunc(pc, "cannot write to %r0\n"); 8809 8810 if (subr == DIF_SUBR_COPYOUT || 8811 subr == DIF_SUBR_COPYOUTSTR) { 8812 dp->dtdo_destructive = 1; 8813 } 8814 break; 8815 case DIF_OP_PUSHTR: 8816 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 8817 err += efunc(pc, "invalid ref type %u\n", type); 8818 if (r2 >= nregs) 8819 err += efunc(pc, "invalid register %u\n", r2); 8820 if (rs >= nregs)
8821 err += efunc(pc, "invalid register %u\n", rs); 8822 break; 8823 case DIF_OP_PUSHTV: 8824 if (type != DIF_TYPE_CTF) 8825 err += efunc(pc, "invalid val type %u\n", type); 8826 if (r2 >= nregs) 8827 err += efunc(pc, "invalid register %u\n", r2); 8828 if (rs >= nregs) 8829 err += efunc(pc, "invalid register %u\n", rs); 8830 break; 8831 default: 8832 err += efunc(pc, "invalid opcode %u\n", 8833 DIF_INSTR_OP(instr)); 8834 } 8835 } 8836 8837 if (dp->dtdo_len != 0 && 8838 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 8839 err += efunc(dp->dtdo_len - 1, 8840 "expected 'ret' as last DIF instruction\n"); 8841 } 8842 8843 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 8844 /* 8845 * If we're not returning by reference, the size must be either 8846 * 0 or the size of one of the base types. 8847 */ 8848 switch (dp->dtdo_rtype.dtdt_size) { 8849 case 0: 8850 case sizeof (uint8_t): 8851 case sizeof (uint16_t): 8852 case sizeof (uint32_t): 8853 case sizeof (uint64_t): 8854 break; 8855 8856 default: 8857 err += efunc(dp->dtdo_len - 1, "bad return size"); 8858 } 8859 } 8860 8861 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 8862 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 8863 dtrace_diftype_t *vt, *et; 8864 uint_t id, ndx; 8865 8866 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 8867 v->dtdv_scope != DIFV_SCOPE_THREAD && 8868 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 8869 err += efunc(i, "unrecognized variable scope %d\n", 8870 v->dtdv_scope); 8871 break; 8872 } 8873 8874 if (v->dtdv_kind != DIFV_KIND_ARRAY && 8875 v->dtdv_kind != DIFV_KIND_SCALAR) { 8876 err += efunc(i, "unrecognized variable type %d\n", 8877 v->dtdv_kind); 8878 break; 8879 } 8880 8881 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 8882 err += efunc(i, "%d exceeds variable id limit\n", id); 8883 break; 8884 } 8885 8886 if (id < DIF_VAR_OTHER_UBASE) 8887 continue; 8888 8889 /* 8890 * For user-defined variables, we need to check that this 8891 * definition is identical to any previous definition that we 8892 * encountered. 
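 * For example, two DIFOs in one enabling that declare a global "x" first
 * as a scalar integer and then as a string would disagree in type, and
 * the second would be rejected below with a "changed variable" error.
 * (dtrace(1M) itself rejects such programs at compile time; this check
 * guards against hand-constructed DIF.)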
8893 */ 8894 ndx = id - DIF_VAR_OTHER_UBASE; 8895 8896 switch (v->dtdv_scope) { 8897 case DIFV_SCOPE_GLOBAL: 8898 if (ndx < vstate->dtvs_nglobals) { 8899 dtrace_statvar_t *svar; 8900 8901 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 8902 existing = &svar->dtsv_var; 8903 } 8904 8905 break; 8906 8907 case DIFV_SCOPE_THREAD: 8908 if (ndx < vstate->dtvs_ntlocals) 8909 existing = &vstate->dtvs_tlocals[ndx]; 8910 break; 8911 8912 case DIFV_SCOPE_LOCAL: 8913 if (ndx < vstate->dtvs_nlocals) { 8914 dtrace_statvar_t *svar; 8915 8916 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 8917 existing = &svar->dtsv_var; 8918 } 8919 8920 break; 8921 } 8922 8923 vt = &v->dtdv_type; 8924 8925 if (vt->dtdt_flags & DIF_TF_BYREF) { 8926 if (vt->dtdt_size == 0) { 8927 err += efunc(i, "zero-sized variable\n"); 8928 break; 8929 } 8930 8931 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 8932 vt->dtdt_size > dtrace_global_maxsize) { 8933 err += efunc(i, "oversized by-ref global\n"); 8934 break; 8935 } 8936 } 8937 8938 if (existing == NULL || existing->dtdv_id == 0) 8939 continue; 8940 8941 ASSERT(existing->dtdv_id == v->dtdv_id); 8942 ASSERT(existing->dtdv_scope == v->dtdv_scope); 8943 8944 if (existing->dtdv_kind != v->dtdv_kind) 8945 err += efunc(i, "%d changed variable kind\n", id); 8946 8947 et = &existing->dtdv_type; 8948 8949 if (vt->dtdt_flags != et->dtdt_flags) { 8950 err += efunc(i, "%d changed variable type flags\n", id); 8951 break; 8952 } 8953 8954 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 8955 err += efunc(i, "%d changed variable type size\n", id); 8956 break; 8957 } 8958 } 8959 8960 return (err); 8961} 8962 8963/* 8964 * Validate a DTrace DIF object that is to be used as a helper. Helpers 8965 * are much more constrained than normal DIFOs. Specifically, they may 8966 * not: 8967 * 8968 * 1. Make calls to subroutines other than copyin(), copyinstr() or 8969 * miscellaneous string routines 8970 * 2. Access DTrace variables other than the args[] array, and the 8971 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 8972 * 3. Have thread-local variables. 8973 * 4. Have dynamic variables. 8974 */ 8975static int 8976dtrace_difo_validate_helper(dtrace_difo_t *dp) 8977{ 8978 int (*efunc)(uint_t pc, const char *, ...)
= dtrace_difo_err; 8979 int err = 0; 8980 uint_t pc; 8981 8982 for (pc = 0; pc < dp->dtdo_len; pc++) { 8983 dif_instr_t instr = dp->dtdo_buf[pc]; 8984 8985 uint_t v = DIF_INSTR_VAR(instr); 8986 uint_t subr = DIF_INSTR_SUBR(instr); 8987 uint_t op = DIF_INSTR_OP(instr); 8988 8989 switch (op) { 8990 case DIF_OP_OR: 8991 case DIF_OP_XOR: 8992 case DIF_OP_AND: 8993 case DIF_OP_SLL: 8994 case DIF_OP_SRL: 8995 case DIF_OP_SRA: 8996 case DIF_OP_SUB: 8997 case DIF_OP_ADD: 8998 case DIF_OP_MUL: 8999 case DIF_OP_SDIV: 9000 case DIF_OP_UDIV: 9001 case DIF_OP_SREM: 9002 case DIF_OP_UREM: 9003 case DIF_OP_COPYS: 9004 case DIF_OP_NOT: 9005 case DIF_OP_MOV: 9006 case DIF_OP_RLDSB: 9007 case DIF_OP_RLDSH: 9008 case DIF_OP_RLDSW: 9009 case DIF_OP_RLDUB: 9010 case DIF_OP_RLDUH: 9011 case DIF_OP_RLDUW: 9012 case DIF_OP_RLDX: 9013 case DIF_OP_ULDSB: 9014 case DIF_OP_ULDSH: 9015 case DIF_OP_ULDSW: 9016 case DIF_OP_ULDUB: 9017 case DIF_OP_ULDUH: 9018 case DIF_OP_ULDUW: 9019 case DIF_OP_ULDX: 9020 case DIF_OP_STB: 9021 case DIF_OP_STH: 9022 case DIF_OP_STW: 9023 case DIF_OP_STX: 9024 case DIF_OP_ALLOCS: 9025 case DIF_OP_CMP: 9026 case DIF_OP_SCMP: 9027 case DIF_OP_TST: 9028 case DIF_OP_BA: 9029 case DIF_OP_BE: 9030 case DIF_OP_BNE: 9031 case DIF_OP_BG: 9032 case DIF_OP_BGU: 9033 case DIF_OP_BGE: 9034 case DIF_OP_BGEU: 9035 case DIF_OP_BL: 9036 case DIF_OP_BLU: 9037 case DIF_OP_BLE: 9038 case DIF_OP_BLEU: 9039 case DIF_OP_RET: 9040 case DIF_OP_NOP: 9041 case DIF_OP_POPTS: 9042 case DIF_OP_FLUSHTS: 9043 case DIF_OP_SETX: 9044 case DIF_OP_SETS: 9045 case DIF_OP_LDGA: 9046 case DIF_OP_LDLS: 9047 case DIF_OP_STGS: 9048 case DIF_OP_STLS: 9049 case DIF_OP_PUSHTR: 9050 case DIF_OP_PUSHTV: 9051 break; 9052 9053 case DIF_OP_LDGS: 9054 if (v >= DIF_VAR_OTHER_UBASE) 9055 break; 9056 9057 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 9058 break; 9059 9060 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 9061 v == DIF_VAR_PPID || v == DIF_VAR_TID || 9062 v == DIF_VAR_EXECARGS || 9063 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 9064 v == DIF_VAR_UID || v == DIF_VAR_GID) 9065 break; 9066 9067 err += efunc(pc, "illegal variable %u\n", v); 9068 break; 9069 9070 case DIF_OP_LDTA: 9071 case DIF_OP_LDTS: 9072 case DIF_OP_LDGAA: 9073 case DIF_OP_LDTAA: 9074 err += efunc(pc, "illegal dynamic variable load\n"); 9075 break; 9076 9077 case DIF_OP_STTS: 9078 case DIF_OP_STGAA: 9079 case DIF_OP_STTAA: 9080 err += efunc(pc, "illegal dynamic variable store\n"); 9081 break; 9082 9083 case DIF_OP_CALL: 9084 if (subr == DIF_SUBR_ALLOCA || 9085 subr == DIF_SUBR_BCOPY || 9086 subr == DIF_SUBR_COPYIN || 9087 subr == DIF_SUBR_COPYINTO || 9088 subr == DIF_SUBR_COPYINSTR || 9089 subr == DIF_SUBR_INDEX || 9090 subr == DIF_SUBR_INET_NTOA || 9091 subr == DIF_SUBR_INET_NTOA6 || 9092 subr == DIF_SUBR_INET_NTOP || 9093 subr == DIF_SUBR_LLTOSTR || 9094 subr == DIF_SUBR_RINDEX || 9095 subr == DIF_SUBR_STRCHR || 9096 subr == DIF_SUBR_STRJOIN || 9097 subr == DIF_SUBR_STRRCHR || 9098 subr == DIF_SUBR_STRSTR || 9099 subr == DIF_SUBR_HTONS || 9100 subr == DIF_SUBR_HTONL || 9101 subr == DIF_SUBR_HTONLL || 9102 subr == DIF_SUBR_NTOHS || 9103 subr == DIF_SUBR_NTOHL || 9104 subr == DIF_SUBR_NTOHLL || 9105 subr == DIF_SUBR_MEMREF || 9106 subr == DIF_SUBR_TYPEREF) 9107 break; 9108 9109 err += efunc(pc, "invalid subr %u\n", subr); 9110 break; 9111 9112 default: 9113 err += efunc(pc, "invalid opcode %u\n", 9114 DIF_INSTR_OP(instr)); 9115 } 9116 } 9117 9118 return (err); 9119} 9120 9121/* 9122 * Returns 1 if the expression in the DIF object can be cached on a 
per-thread 9123 * basis; 0 if not. 9124 */ 9125static int 9126dtrace_difo_cacheable(dtrace_difo_t *dp) 9127{ 9128 int i; 9129 9130 if (dp == NULL) 9131 return (0); 9132 9133 for (i = 0; i < dp->dtdo_varlen; i++) { 9134 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9135 9136 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 9137 continue; 9138 9139 switch (v->dtdv_id) { 9140 case DIF_VAR_CURTHREAD: 9141 case DIF_VAR_PID: 9142 case DIF_VAR_TID: 9143 case DIF_VAR_EXECARGS: 9144 case DIF_VAR_EXECNAME: 9145 case DIF_VAR_ZONENAME: 9146 break; 9147 9148 default: 9149 return (0); 9150 } 9151 } 9152 9153 /* 9154 * This DIF object may be cacheable. Now we need to look for any 9155 * array loading instructions, any memory loading instructions, or 9156 * any stores to thread-local variables. 9157 */ 9158 for (i = 0; i < dp->dtdo_len; i++) { 9159 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 9160 9161 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 9162 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 9163 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 9164 op == DIF_OP_LDGA || op == DIF_OP_STTS) 9165 return (0); 9166 } 9167 9168 return (1); 9169} 9170 9171static void 9172dtrace_difo_hold(dtrace_difo_t *dp) 9173{ 9174 int i; 9175 9176 ASSERT(MUTEX_HELD(&dtrace_lock)); 9177 9178 dp->dtdo_refcnt++; 9179 ASSERT(dp->dtdo_refcnt != 0); 9180 9181 /* 9182 * We need to check this DIF object for references to the variable 9183 * DIF_VAR_VTIMESTAMP. 9184 */ 9185 for (i = 0; i < dp->dtdo_varlen; i++) { 9186 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9187 9188 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9189 continue; 9190 9191 if (dtrace_vtime_references++ == 0) 9192 dtrace_vtime_enable(); 9193 } 9194} 9195 9196/* 9197 * This routine calculates the dynamic variable chunksize for a given DIF 9198 * object. The calculation is not fool-proof, and can probably be tricked by 9199 * malicious DIF -- but it works for all compiler-generated DIF. Because this 9200 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 9201 * if a dynamic variable size exceeds the chunksize. 
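 *
 * For example, a thread-local assignment such as
 *
 *	self->name = execname;
 *
 * compiles to a DIF_OP_STTS whose chunk must hold the dtrace_dynvar_t
 * header, a two-key tuple (the variable identifier and the thread key)
 * and a string of dtrace_strsize_default bytes; the walk below derives
 * exactly that from the instruction stream and the variable table.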
9202 */ 9203static void 9204dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9205{ 9206 uint64_t sval = 0; 9207 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 9208 const dif_instr_t *text = dp->dtdo_buf; 9209 uint_t pc, srd = 0; 9210 uint_t ttop = 0; 9211 size_t size, ksize; 9212 uint_t id, i; 9213 9214 for (pc = 0; pc < dp->dtdo_len; pc++) { 9215 dif_instr_t instr = text[pc]; 9216 uint_t op = DIF_INSTR_OP(instr); 9217 uint_t rd = DIF_INSTR_RD(instr); 9218 uint_t r1 = DIF_INSTR_R1(instr); 9219 uint_t nkeys = 0; 9220 uchar_t scope = 0; 9221 9222 dtrace_key_t *key = tupregs; 9223 9224 switch (op) { 9225 case DIF_OP_SETX: 9226 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 9227 srd = rd; 9228 continue; 9229 9230 case DIF_OP_STTS: 9231 key = &tupregs[DIF_DTR_NREGS]; 9232 key[0].dttk_size = 0; 9233 key[1].dttk_size = 0; 9234 nkeys = 2; 9235 scope = DIFV_SCOPE_THREAD; 9236 break; 9237 9238 case DIF_OP_STGAA: 9239 case DIF_OP_STTAA: 9240 nkeys = ttop; 9241 9242 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 9243 key[nkeys++].dttk_size = 0; 9244 9245 key[nkeys++].dttk_size = 0; 9246 9247 if (op == DIF_OP_STTAA) { 9248 scope = DIFV_SCOPE_THREAD; 9249 } else { 9250 scope = DIFV_SCOPE_GLOBAL; 9251 } 9252 9253 break; 9254 9255 case DIF_OP_PUSHTR: 9256 if (ttop == DIF_DTR_NREGS) 9257 return; 9258 9259 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 9260 /* 9261 * If the register for the size of the "pushtr" 9262 * is %r0 (or the value is 0) and the type is 9263 * a string, we'll use the system-wide default 9264 * string size. 9265 */ 9266 tupregs[ttop++].dttk_size = 9267 dtrace_strsize_default; 9268 } else { 9269 if (srd == 0) 9270 return; 9271 9272 tupregs[ttop++].dttk_size = sval; 9273 } 9274 9275 break; 9276 9277 case DIF_OP_PUSHTV: 9278 if (ttop == DIF_DTR_NREGS) 9279 return; 9280 9281 tupregs[ttop++].dttk_size = 0; 9282 break; 9283 9284 case DIF_OP_FLUSHTS: 9285 ttop = 0; 9286 break; 9287 9288 case DIF_OP_POPTS: 9289 if (ttop != 0) 9290 ttop--; 9291 break; 9292 } 9293 9294 sval = 0; 9295 srd = 0; 9296 9297 if (nkeys == 0) 9298 continue; 9299 9300 /* 9301 * We have a dynamic variable allocation; calculate its size. 9302 */ 9303 for (ksize = 0, i = 0; i < nkeys; i++) 9304 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 9305 9306 size = sizeof (dtrace_dynvar_t); 9307 size += sizeof (dtrace_key_t) * (nkeys - 1); 9308 size += ksize; 9309 9310 /* 9311 * Now we need to determine the size of the stored data. 9312 */ 9313 id = DIF_INSTR_VAR(instr); 9314 9315 for (i = 0; i < dp->dtdo_varlen; i++) { 9316 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9317 9318 if (v->dtdv_id == id && v->dtdv_scope == scope) { 9319 size += v->dtdv_type.dtdt_size; 9320 break; 9321 } 9322 } 9323 9324 if (i == dp->dtdo_varlen) 9325 return; 9326 9327 /* 9328 * We have the size. If this is larger than the chunk size 9329 * for our dynamic variable state, reset the chunk size. 
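 * (E.g. with 256-byte default strings, an associative array keyed on
 * two strings -- a[execname, probefunc] = ... -- needs the
 * dtrace_dynvar_t header, the additional dtrace_key_t entries, two
 * rounded-up 256-byte key blocks and the value size, all rounded up to
 * a multiple of sizeof (uint64_t).)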
9330 */ 9331 size = P2ROUNDUP(size, sizeof (uint64_t)); 9332 9333 if (size > vstate->dtvs_dynvars.dtds_chunksize) 9334 vstate->dtvs_dynvars.dtds_chunksize = size; 9335 } 9336} 9337 9338static void 9339dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9340{ 9341 int i, oldsvars, osz, nsz, otlocals, ntlocals; 9342 uint_t id; 9343 9344 ASSERT(MUTEX_HELD(&dtrace_lock)); 9345 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 9346 9347 for (i = 0; i < dp->dtdo_varlen; i++) { 9348 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9349 dtrace_statvar_t *svar, ***svarp = NULL; 9350 size_t dsize = 0; 9351 uint8_t scope = v->dtdv_scope; 9352 int *np = NULL; 9353 9354 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9355 continue; 9356 9357 id -= DIF_VAR_OTHER_UBASE; 9358 9359 switch (scope) { 9360 case DIFV_SCOPE_THREAD: 9361 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 9362 dtrace_difv_t *tlocals; 9363 9364 if ((ntlocals = (otlocals << 1)) == 0) 9365 ntlocals = 1; 9366 9367 osz = otlocals * sizeof (dtrace_difv_t); 9368 nsz = ntlocals * sizeof (dtrace_difv_t); 9369 9370 tlocals = kmem_zalloc(nsz, KM_SLEEP); 9371 9372 if (osz != 0) { 9373 bcopy(vstate->dtvs_tlocals, 9374 tlocals, osz); 9375 kmem_free(vstate->dtvs_tlocals, osz); 9376 } 9377 9378 vstate->dtvs_tlocals = tlocals; 9379 vstate->dtvs_ntlocals = ntlocals; 9380 } 9381 9382 vstate->dtvs_tlocals[id] = *v; 9383 continue; 9384 9385 case DIFV_SCOPE_LOCAL: 9386 np = &vstate->dtvs_nlocals; 9387 svarp = &vstate->dtvs_locals; 9388 9389 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9390 dsize = NCPU * (v->dtdv_type.dtdt_size + 9391 sizeof (uint64_t)); 9392 else 9393 dsize = NCPU * sizeof (uint64_t); 9394 9395 break; 9396 9397 case DIFV_SCOPE_GLOBAL: 9398 np = &vstate->dtvs_nglobals; 9399 svarp = &vstate->dtvs_globals; 9400 9401 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9402 dsize = v->dtdv_type.dtdt_size + 9403 sizeof (uint64_t); 9404 9405 break; 9406 9407 default: 9408 ASSERT(0); 9409 } 9410 9411 while (id >= (oldsvars = *np)) { 9412 dtrace_statvar_t **statics; 9413 int newsvars, oldsize, newsize; 9414 9415 if ((newsvars = (oldsvars << 1)) == 0) 9416 newsvars = 1; 9417 9418 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 9419 newsize = newsvars * sizeof (dtrace_statvar_t *); 9420 9421 statics = kmem_zalloc(newsize, KM_SLEEP); 9422 9423 if (oldsize != 0) { 9424 bcopy(*svarp, statics, oldsize); 9425 kmem_free(*svarp, oldsize); 9426 } 9427 9428 *svarp = statics; 9429 *np = newsvars; 9430 } 9431 9432 if ((svar = (*svarp)[id]) == NULL) { 9433 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 9434 svar->dtsv_var = *v; 9435 9436 if ((svar->dtsv_size = dsize) != 0) { 9437 svar->dtsv_data = (uint64_t)(uintptr_t) 9438 kmem_zalloc(dsize, KM_SLEEP); 9439 } 9440 9441 (*svarp)[id] = svar; 9442 } 9443 9444 svar->dtsv_refcnt++; 9445 } 9446 9447 dtrace_difo_chunksize(dp, vstate); 9448 dtrace_difo_hold(dp); 9449} 9450 9451static dtrace_difo_t * 9452dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9453{ 9454 dtrace_difo_t *new; 9455 size_t sz; 9456 9457 ASSERT(dp->dtdo_buf != NULL); 9458 ASSERT(dp->dtdo_refcnt != 0); 9459 9460 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 9461 9462 ASSERT(dp->dtdo_buf != NULL); 9463 sz = dp->dtdo_len * sizeof (dif_instr_t); 9464 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 9465 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 9466 new->dtdo_len = dp->dtdo_len; 9467 9468 if (dp->dtdo_strtab != NULL) { 9469 ASSERT(dp->dtdo_strlen != 0); 9470 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 9471 
bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 9472 new->dtdo_strlen = dp->dtdo_strlen; 9473 } 9474 9475 if (dp->dtdo_inttab != NULL) { 9476 ASSERT(dp->dtdo_intlen != 0); 9477 sz = dp->dtdo_intlen * sizeof (uint64_t); 9478 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 9479 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 9480 new->dtdo_intlen = dp->dtdo_intlen; 9481 } 9482 9483 if (dp->dtdo_vartab != NULL) { 9484 ASSERT(dp->dtdo_varlen != 0); 9485 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 9486 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 9487 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 9488 new->dtdo_varlen = dp->dtdo_varlen; 9489 } 9490 9491 dtrace_difo_init(new, vstate); 9492 return (new); 9493} 9494 9495static void 9496dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9497{ 9498 int i; 9499 9500 ASSERT(dp->dtdo_refcnt == 0); 9501 9502 for (i = 0; i < dp->dtdo_varlen; i++) { 9503 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9504 dtrace_statvar_t *svar, **svarp = NULL; 9505 uint_t id; 9506 uint8_t scope = v->dtdv_scope; 9507 int *np = NULL; 9508 9509 switch (scope) { 9510 case DIFV_SCOPE_THREAD: 9511 continue; 9512 9513 case DIFV_SCOPE_LOCAL: 9514 np = &vstate->dtvs_nlocals; 9515 svarp = vstate->dtvs_locals; 9516 break; 9517 9518 case DIFV_SCOPE_GLOBAL: 9519 np = &vstate->dtvs_nglobals; 9520 svarp = vstate->dtvs_globals; 9521 break; 9522 9523 default: 9524 ASSERT(0); 9525 } 9526 9527 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9528 continue; 9529 9530 id -= DIF_VAR_OTHER_UBASE; 9531 ASSERT(id < *np); 9532 9533 svar = svarp[id]; 9534 ASSERT(svar != NULL); 9535 ASSERT(svar->dtsv_refcnt > 0); 9536 9537 if (--svar->dtsv_refcnt > 0) 9538 continue; 9539 9540 if (svar->dtsv_size != 0) { 9541 ASSERT(svar->dtsv_data != 0); 9542 kmem_free((void *)(uintptr_t)svar->dtsv_data, 9543 svar->dtsv_size); 9544 } 9545 9546 kmem_free(svar, sizeof (dtrace_statvar_t)); 9547 svarp[id] = NULL; 9548 } 9549 9550 if (dp->dtdo_buf != NULL) 9551 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 9552 if (dp->dtdo_inttab != NULL) 9553 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 9554 if (dp->dtdo_strtab != NULL) 9555 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 9556 if (dp->dtdo_vartab != NULL) 9557 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 9558 9559 kmem_free(dp, sizeof (dtrace_difo_t)); 9560} 9561 9562static void 9563dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9564{ 9565 int i; 9566 9567 ASSERT(MUTEX_HELD(&dtrace_lock)); 9568 ASSERT(dp->dtdo_refcnt != 0); 9569 9570 for (i = 0; i < dp->dtdo_varlen; i++) { 9571 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9572 9573 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9574 continue; 9575 9576 ASSERT(dtrace_vtime_references > 0); 9577 if (--dtrace_vtime_references == 0) 9578 dtrace_vtime_disable(); 9579 } 9580 9581 if (--dp->dtdo_refcnt == 0) 9582 dtrace_difo_destroy(dp, vstate); 9583} 9584 9585/* 9586 * DTrace Format Functions 9587 */ 9588static uint16_t 9589dtrace_format_add(dtrace_state_t *state, char *str) 9590{ 9591 char *fmt, **new; 9592 uint16_t ndx, len = strlen(str) + 1; 9593 9594 fmt = kmem_zalloc(len, KM_SLEEP); 9595 bcopy(str, fmt, len); 9596 9597 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 9598 if (state->dts_formats[ndx] == NULL) { 9599 state->dts_formats[ndx] = fmt; 9600 return (ndx + 1); 9601 } 9602 } 9603 9604 if (state->dts_nformats == USHRT_MAX) { 9605 /* 9606 * This is only likely if a denial-of-service attack is being 9607 * attempted. 
As such, it's okay to fail silently here. 9608 */ 9609 kmem_free(fmt, len); 9610 return (0); 9611 } 9612 9613 /* 9614 * For simplicity, we always resize the formats array to be exactly the 9615 * number of formats. 9616 */ 9617 ndx = state->dts_nformats++; 9618 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 9619 9620 if (state->dts_formats != NULL) { 9621 ASSERT(ndx != 0); 9622 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 9623 kmem_free(state->dts_formats, ndx * sizeof (char *)); 9624 } 9625 9626 state->dts_formats = new; 9627 state->dts_formats[ndx] = fmt; 9628 9629 return (ndx + 1); 9630} 9631 9632static void 9633dtrace_format_remove(dtrace_state_t *state, uint16_t format) 9634{ 9635 char *fmt; 9636 9637 ASSERT(state->dts_formats != NULL); 9638 ASSERT(format <= state->dts_nformats); 9639 ASSERT(state->dts_formats[format - 1] != NULL); 9640 9641 fmt = state->dts_formats[format - 1]; 9642 kmem_free(fmt, strlen(fmt) + 1); 9643 state->dts_formats[format - 1] = NULL; 9644} 9645 9646static void 9647dtrace_format_destroy(dtrace_state_t *state) 9648{ 9649 int i; 9650 9651 if (state->dts_nformats == 0) { 9652 ASSERT(state->dts_formats == NULL); 9653 return; 9654 } 9655 9656 ASSERT(state->dts_formats != NULL); 9657 9658 for (i = 0; i < state->dts_nformats; i++) { 9659 char *fmt = state->dts_formats[i]; 9660 9661 if (fmt == NULL) 9662 continue; 9663 9664 kmem_free(fmt, strlen(fmt) + 1); 9665 } 9666 9667 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 9668 state->dts_nformats = 0; 9669 state->dts_formats = NULL; 9670} 9671 9672/* 9673 * DTrace Predicate Functions 9674 */ 9675static dtrace_predicate_t * 9676dtrace_predicate_create(dtrace_difo_t *dp) 9677{ 9678 dtrace_predicate_t *pred; 9679 9680 ASSERT(MUTEX_HELD(&dtrace_lock)); 9681 ASSERT(dp->dtdo_refcnt != 0); 9682 9683 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 9684 pred->dtp_difo = dp; 9685 pred->dtp_refcnt = 1; 9686 9687 if (!dtrace_difo_cacheable(dp)) 9688 return (pred); 9689 9690 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 9691 /* 9692 * This is only theoretically possible -- we have had 2^32 9693 * cacheable predicates on this machine. We cannot allow any 9694 * more predicates to become cacheable: as unlikely as it is, 9695 * there may be a thread caching a (now stale) predicate cache 9696 * ID. 
(N.B.: the temptation is being successfully resisted to 9697 * have this cmn_err() "Holy shit -- we executed this code!") 9698 */ 9699 return (pred); 9700 } 9701 9702 pred->dtp_cacheid = dtrace_predcache_id++; 9703 9704 return (pred); 9705} 9706 9707static void 9708dtrace_predicate_hold(dtrace_predicate_t *pred) 9709{ 9710 ASSERT(MUTEX_HELD(&dtrace_lock)); 9711 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 9712 ASSERT(pred->dtp_refcnt > 0); 9713 9714 pred->dtp_refcnt++; 9715} 9716 9717static void 9718dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 9719{ 9720 dtrace_difo_t *dp = pred->dtp_difo; 9721 9722 ASSERT(MUTEX_HELD(&dtrace_lock)); 9723 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 9724 ASSERT(pred->dtp_refcnt > 0); 9725 9726 if (--pred->dtp_refcnt == 0) { 9727 dtrace_difo_release(pred->dtp_difo, vstate); 9728 kmem_free(pred, sizeof (dtrace_predicate_t)); 9729 } 9730} 9731 9732/* 9733 * DTrace Action Description Functions 9734 */ 9735static dtrace_actdesc_t * 9736dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 9737 uint64_t uarg, uint64_t arg) 9738{ 9739 dtrace_actdesc_t *act; 9740 9741#if defined(sun) 9742 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 9743 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 9744#endif 9745 9746 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 9747 act->dtad_kind = kind; 9748 act->dtad_ntuple = ntuple; 9749 act->dtad_uarg = uarg; 9750 act->dtad_arg = arg; 9751 act->dtad_refcnt = 1; 9752 9753 return (act); 9754} 9755 9756static void 9757dtrace_actdesc_hold(dtrace_actdesc_t *act) 9758{ 9759 ASSERT(act->dtad_refcnt >= 1); 9760 act->dtad_refcnt++; 9761} 9762 9763static void 9764dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 9765{ 9766 dtrace_actkind_t kind = act->dtad_kind; 9767 dtrace_difo_t *dp; 9768 9769 ASSERT(act->dtad_refcnt >= 1); 9770 9771 if (--act->dtad_refcnt != 0) 9772 return; 9773 9774 if ((dp = act->dtad_difo) != NULL) 9775 dtrace_difo_release(dp, vstate); 9776 9777 if (DTRACEACT_ISPRINTFLIKE(kind)) { 9778 char *str = (char *)(uintptr_t)act->dtad_arg; 9779 9780#if defined(sun) 9781 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 9782 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 9783#endif 9784 9785 if (str != NULL) 9786 kmem_free(str, strlen(str) + 1); 9787 } 9788 9789 kmem_free(act, sizeof (dtrace_actdesc_t)); 9790} 9791 9792/* 9793 * DTrace ECB Functions 9794 */ 9795static dtrace_ecb_t * 9796dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 9797{ 9798 dtrace_ecb_t *ecb; 9799 dtrace_epid_t epid; 9800 9801 ASSERT(MUTEX_HELD(&dtrace_lock)); 9802 9803 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 9804 ecb->dte_predicate = NULL; 9805 ecb->dte_probe = probe; 9806 9807 /* 9808 * The default size is the size of the default action: recording 9809 * the header. 
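 * (That is, even an action-less ECB consumes a dtrace_rechdr_t -- the
 * enabled probe ID plus a timestamp -- per record; dtrace_ecb_resize()
 * may later shrink dte_size to zero if no action actually stores data.)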
9810 */ 9811 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t); 9812 ecb->dte_alignment = sizeof (dtrace_epid_t); 9813 9814 epid = state->dts_epid++; 9815 9816 if (epid - 1 >= state->dts_necbs) { 9817 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 9818 int necbs = state->dts_necbs << 1; 9819 9820 ASSERT(epid == state->dts_necbs + 1); 9821 9822 if (necbs == 0) { 9823 ASSERT(oecbs == NULL); 9824 necbs = 1; 9825 } 9826 9827 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 9828 9829 if (oecbs != NULL) 9830 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 9831 9832 dtrace_membar_producer(); 9833 state->dts_ecbs = ecbs; 9834 9835 if (oecbs != NULL) { 9836 /* 9837 * If this state is active, we must dtrace_sync() 9838 * before we can free the old dts_ecbs array: we're 9839 * coming in hot, and there may be active ring 9840 * buffer processing (which indexes into the dts_ecbs 9841 * array) on another CPU. 9842 */ 9843 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 9844 dtrace_sync(); 9845 9846 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 9847 } 9848 9849 dtrace_membar_producer(); 9850 state->dts_necbs = necbs; 9851 } 9852 9853 ecb->dte_state = state; 9854 9855 ASSERT(state->dts_ecbs[epid - 1] == NULL); 9856 dtrace_membar_producer(); 9857 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 9858 9859 return (ecb); 9860} 9861 9862static void 9863dtrace_ecb_enable(dtrace_ecb_t *ecb) 9864{ 9865 dtrace_probe_t *probe = ecb->dte_probe; 9866 9867 ASSERT(MUTEX_HELD(&cpu_lock)); 9868 ASSERT(MUTEX_HELD(&dtrace_lock)); 9869 ASSERT(ecb->dte_next == NULL); 9870 9871 if (probe == NULL) { 9872 /* 9873 * This is the NULL probe -- there's nothing to do. 9874 */ 9875 return; 9876 } 9877 9878 if (probe->dtpr_ecb == NULL) { 9879 dtrace_provider_t *prov = probe->dtpr_provider; 9880 9881 /* 9882 * We're the first ECB on this probe. 9883 */ 9884 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 9885 9886 if (ecb->dte_predicate != NULL) 9887 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 9888 9889 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 9890 probe->dtpr_id, probe->dtpr_arg); 9891 } else { 9892 /* 9893 * This probe is already active. Swing the last pointer to 9894 * point to the new ECB, and issue a dtrace_sync() to assure 9895 * that all CPUs have seen the change. 9896 */ 9897 ASSERT(probe->dtpr_ecb_last != NULL); 9898 probe->dtpr_ecb_last->dte_next = ecb; 9899 probe->dtpr_ecb_last = ecb; 9900 probe->dtpr_predcache = 0; 9901 9902 dtrace_sync(); 9903 } 9904} 9905 9906static void 9907dtrace_ecb_resize(dtrace_ecb_t *ecb) 9908{ 9909 dtrace_action_t *act; 9910 uint32_t curneeded = UINT32_MAX; 9911 uint32_t aggbase = UINT32_MAX; 9912 9913 /* 9914 * If we record anything, we always record the dtrace_rechdr_t. (And 9915 * we always record it first.) 
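 *
 * A sketch of the layout this function computes for, e.g., an ECB with
 * a printf() argument and a stack() action (offsets illustrative):
 *
 *	offset 0:				dtrace_rechdr_t
 *	offset sizeof (dtrace_rechdr_t):	DIFEXPR record (printf arg)
 *	next suitably-aligned offset:		stack() record
 *
 * Each record lands at the next offset satisfying its dtrd_alignment,
 * and dte_size/dte_needed grow accordingly.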
9916 */ 9917 ecb->dte_size = sizeof (dtrace_rechdr_t); 9918 ecb->dte_alignment = sizeof (dtrace_epid_t); 9919 9920 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9921 dtrace_recdesc_t *rec = &act->dta_rec; 9922 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1); 9923 9924 ecb->dte_alignment = MAX(ecb->dte_alignment, 9925 rec->dtrd_alignment); 9926 9927 if (DTRACEACT_ISAGG(act->dta_kind)) { 9928 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9929 9930 ASSERT(rec->dtrd_size != 0); 9931 ASSERT(agg->dtag_first != NULL); 9932 ASSERT(act->dta_prev->dta_intuple); 9933 ASSERT(aggbase != UINT32_MAX); 9934 ASSERT(curneeded != UINT32_MAX); 9935 9936 agg->dtag_base = aggbase; 9937 9938 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 9939 rec->dtrd_offset = curneeded; 9940 curneeded += rec->dtrd_size; 9941 ecb->dte_needed = MAX(ecb->dte_needed, curneeded); 9942 9943 aggbase = UINT32_MAX; 9944 curneeded = UINT32_MAX; 9945 } else if (act->dta_intuple) { 9946 if (curneeded == UINT32_MAX) { 9947 /* 9948 * This is the first record in a tuple. Align 9949 * curneeded to be at offset 4 in an 8-byte 9950 * aligned block. 9951 */ 9952 ASSERT(act->dta_prev == NULL || 9953 !act->dta_prev->dta_intuple); 9954 ASSERT3U(aggbase, ==, UINT32_MAX); 9955 curneeded = P2PHASEUP(ecb->dte_size, 9956 sizeof (uint64_t), sizeof (dtrace_aggid_t)); 9957 9958 aggbase = curneeded - sizeof (dtrace_aggid_t); 9959 ASSERT(IS_P2ALIGNED(aggbase, 9960 sizeof (uint64_t))); 9961 } 9962 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 9963 rec->dtrd_offset = curneeded; 9964 curneeded += rec->dtrd_size; 9965 } else { 9966 /* tuples must be followed by an aggregation */ 9967 ASSERT(act->dta_prev == NULL || 9968 !act->dta_prev->dta_intuple); 9969 9970 ecb->dte_size = P2ROUNDUP(ecb->dte_size, 9971 rec->dtrd_alignment); 9972 rec->dtrd_offset = ecb->dte_size; 9973 ecb->dte_size += rec->dtrd_size; 9974 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size); 9975 } 9976 } 9977 9978 if ((act = ecb->dte_action) != NULL && 9979 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 9980 ecb->dte_size == sizeof (dtrace_rechdr_t)) { 9981 /* 9982 * If the size is still sizeof (dtrace_rechdr_t), then all 9983 * actions store no data; set the size to 0. 
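 * (This is the case for enablings consisting solely of non-recording
 * actions -- e.g. a lone stop() or chill().)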
9984 */ 9985 ecb->dte_size = 0; 9986 } 9987 9988 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t)); 9989 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t))); 9990 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed, 9991 ecb->dte_needed); 9992} 9993 9994static dtrace_action_t * 9995dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9996{ 9997 dtrace_aggregation_t *agg; 9998 size_t size = sizeof (uint64_t); 9999 int ntuple = desc->dtad_ntuple; 10000 dtrace_action_t *act; 10001 dtrace_recdesc_t *frec; 10002 dtrace_aggid_t aggid; 10003 dtrace_state_t *state = ecb->dte_state; 10004 10005 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 10006 agg->dtag_ecb = ecb; 10007 10008 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 10009 10010 switch (desc->dtad_kind) { 10011 case DTRACEAGG_MIN: 10012 agg->dtag_initial = INT64_MAX; 10013 agg->dtag_aggregate = dtrace_aggregate_min; 10014 break; 10015 10016 case DTRACEAGG_MAX: 10017 agg->dtag_initial = INT64_MIN; 10018 agg->dtag_aggregate = dtrace_aggregate_max; 10019 break; 10020 10021 case DTRACEAGG_COUNT: 10022 agg->dtag_aggregate = dtrace_aggregate_count; 10023 break; 10024 10025 case DTRACEAGG_QUANTIZE: 10026 agg->dtag_aggregate = dtrace_aggregate_quantize; 10027 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 10028 sizeof (uint64_t); 10029 break; 10030 10031 case DTRACEAGG_LQUANTIZE: { 10032 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 10033 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 10034 10035 agg->dtag_initial = desc->dtad_arg; 10036 agg->dtag_aggregate = dtrace_aggregate_lquantize; 10037 10038 if (step == 0 || levels == 0) 10039 goto err; 10040 10041 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 10042 break; 10043 } 10044 10045 case DTRACEAGG_LLQUANTIZE: { 10046 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 10047 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 10048 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 10049 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 10050 int64_t v; 10051 10052 agg->dtag_initial = desc->dtad_arg; 10053 agg->dtag_aggregate = dtrace_aggregate_llquantize; 10054 10055 if (factor < 2 || low >= high || nsteps < factor) 10056 goto err; 10057 10058 /* 10059 * Now check that the number of steps evenly divides a power 10060 * of the factor. (This assures both integer bucket size and 10061 * linearity within each magnitude.) 10062 */ 10063 for (v = factor; v < nsteps; v *= factor) 10064 continue; 10065 10066 if ((v % nsteps) || (nsteps % factor)) 10067 goto err; 10068 10069 size = (dtrace_aggregate_llquantize_bucket(factor, 10070 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 10071 break; 10072 } 10073 10074 case DTRACEAGG_AVG: 10075 agg->dtag_aggregate = dtrace_aggregate_avg; 10076 size = sizeof (uint64_t) * 2; 10077 break; 10078 10079 case DTRACEAGG_STDDEV: 10080 agg->dtag_aggregate = dtrace_aggregate_stddev; 10081 size = sizeof (uint64_t) * 4; 10082 break; 10083 10084 case DTRACEAGG_SUM: 10085 agg->dtag_aggregate = dtrace_aggregate_sum; 10086 break; 10087 10088 default: 10089 goto err; 10090 } 10091 10092 agg->dtag_action.dta_rec.dtrd_size = size; 10093 10094 if (ntuple == 0) 10095 goto err; 10096 10097 /* 10098 * We must make sure that we have enough actions for the n-tuple. 
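 *
 * For example, for
 *
 *	@[pid, execname] = count();
 *
 * dtad_ntuple is 2, and the walk below must find the two preceding
 * DIFEXPR actions (those that computed pid and execname) before hitting
 * another aggregation; they become the tuple, with dtag_first pointing
 * at the first of them.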
10099 */ 10100 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 10101 if (DTRACEACT_ISAGG(act->dta_kind)) 10102 break; 10103 10104 if (--ntuple == 0) { 10105 /* 10106 * This is the action with which our n-tuple begins. 10107 */ 10108 agg->dtag_first = act; 10109 goto success; 10110 } 10111 } 10112 10113 /* 10114 * This n-tuple is short by ntuple elements. Return failure. 10115 */ 10116 ASSERT(ntuple != 0); 10117err: 10118 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10119 return (NULL); 10120 10121success: 10122 /* 10123 * If the last action in the tuple has a size of zero, it's actually 10124 * an expression argument for the aggregating action. 10125 */ 10126 ASSERT(ecb->dte_action_last != NULL); 10127 act = ecb->dte_action_last; 10128 10129 if (act->dta_kind == DTRACEACT_DIFEXPR) { 10130 ASSERT(act->dta_difo != NULL); 10131 10132 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 10133 agg->dtag_hasarg = 1; 10134 } 10135 10136 /* 10137 * We need to allocate an id for this aggregation. 10138 */ 10139#if defined(sun) 10140 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 10141 VM_BESTFIT | VM_SLEEP); 10142#else 10143 aggid = alloc_unr(state->dts_aggid_arena); 10144#endif 10145 10146 if (aggid - 1 >= state->dts_naggregations) { 10147 dtrace_aggregation_t **oaggs = state->dts_aggregations; 10148 dtrace_aggregation_t **aggs; 10149 int naggs = state->dts_naggregations << 1; 10150 int onaggs = state->dts_naggregations; 10151 10152 ASSERT(aggid == state->dts_naggregations + 1); 10153 10154 if (naggs == 0) { 10155 ASSERT(oaggs == NULL); 10156 naggs = 1; 10157 } 10158 10159 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 10160 10161 if (oaggs != NULL) { 10162 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 10163 kmem_free(oaggs, onaggs * sizeof (*aggs)); 10164 } 10165 10166 state->dts_aggregations = aggs; 10167 state->dts_naggregations = naggs; 10168 } 10169 10170 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 10171 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 10172 10173 frec = &agg->dtag_first->dta_rec; 10174 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 10175 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 10176 10177 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 10178 ASSERT(!act->dta_intuple); 10179 act->dta_intuple = 1; 10180 } 10181 10182 return (&agg->dtag_action); 10183} 10184 10185static void 10186dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 10187{ 10188 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 10189 dtrace_state_t *state = ecb->dte_state; 10190 dtrace_aggid_t aggid = agg->dtag_id; 10191 10192 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 10193#if defined(sun) 10194 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 10195#else 10196 free_unr(state->dts_aggid_arena, aggid); 10197#endif 10198 10199 ASSERT(state->dts_aggregations[aggid - 1] == agg); 10200 state->dts_aggregations[aggid - 1] = NULL; 10201 10202 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10203} 10204 10205static int 10206dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 10207{ 10208 dtrace_action_t *action, *last; 10209 dtrace_difo_t *dp = desc->dtad_difo; 10210 uint32_t size = 0, align = sizeof (uint8_t), mask; 10211 uint16_t format = 0; 10212 dtrace_recdesc_t *rec; 10213 dtrace_state_t *state = ecb->dte_state; 10214 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 10215 uint64_t arg = desc->dtad_arg; 10216 10217 ASSERT(MUTEX_HELD(&dtrace_lock)); 10218 
ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 10219 10220 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 10221 /* 10222 * If this is an aggregating action, there must be neither 10223 * a speculate nor a commit on the action chain. 10224 */ 10225 dtrace_action_t *act; 10226 10227 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 10228 if (act->dta_kind == DTRACEACT_COMMIT) 10229 return (EINVAL); 10230 10231 if (act->dta_kind == DTRACEACT_SPECULATE) 10232 return (EINVAL); 10233 } 10234 10235 action = dtrace_ecb_aggregation_create(ecb, desc); 10236 10237 if (action == NULL) 10238 return (EINVAL); 10239 } else { 10240 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 10241 (desc->dtad_kind == DTRACEACT_DIFEXPR && 10242 dp != NULL && dp->dtdo_destructive)) { 10243 state->dts_destructive = 1; 10244 } 10245 10246 switch (desc->dtad_kind) { 10247 case DTRACEACT_PRINTF: 10248 case DTRACEACT_PRINTA: 10249 case DTRACEACT_SYSTEM: 10250 case DTRACEACT_FREOPEN: 10251 case DTRACEACT_DIFEXPR: 10252 /* 10253 * We know that our arg is a string -- turn it into a 10254 * format. 10255 */ 10256 if (arg == 0) { 10257 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA || 10258 desc->dtad_kind == DTRACEACT_DIFEXPR); 10259 format = 0; 10260 } else { 10261 ASSERT(arg != 0); 10262#if defined(sun) 10263 ASSERT(arg > KERNELBASE); 10264#endif 10265 format = dtrace_format_add(state, 10266 (char *)(uintptr_t)arg); 10267 } 10268 10269 /*FALLTHROUGH*/ 10270 case DTRACEACT_LIBACT: 10271 case DTRACEACT_TRACEMEM: 10272 case DTRACEACT_TRACEMEM_DYNSIZE: 10273 if (dp == NULL) 10274 return (EINVAL); 10275 10276 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 10277 break; 10278 10279 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 10280 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10281 return (EINVAL); 10282 10283 size = opt[DTRACEOPT_STRSIZE]; 10284 } 10285 10286 break; 10287 10288 case DTRACEACT_STACK: 10289 if ((nframes = arg) == 0) { 10290 nframes = opt[DTRACEOPT_STACKFRAMES]; 10291 ASSERT(nframes > 0); 10292 arg = nframes; 10293 } 10294 10295 size = nframes * sizeof (pc_t); 10296 break; 10297 10298 case DTRACEACT_JSTACK: 10299 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 10300 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 10301 10302 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 10303 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 10304 10305 arg = DTRACE_USTACK_ARG(nframes, strsize); 10306 10307 /*FALLTHROUGH*/ 10308 case DTRACEACT_USTACK: 10309 if (desc->dtad_kind != DTRACEACT_JSTACK && 10310 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 10311 strsize = DTRACE_USTACK_STRSIZE(arg); 10312 nframes = opt[DTRACEOPT_USTACKFRAMES]; 10313 ASSERT(nframes > 0); 10314 arg = DTRACE_USTACK_ARG(nframes, strsize); 10315 } 10316 10317 /* 10318 * Save a slot for the pid. 10319 */ 10320 size = (nframes + 1) * sizeof (uint64_t); 10321 size += DTRACE_USTACK_STRSIZE(arg); 10322 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 10323 10324 break; 10325 10326 case DTRACEACT_SYM: 10327 case DTRACEACT_MOD: 10328 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 10329 sizeof (uint64_t)) || 10330 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10331 return (EINVAL); 10332 break; 10333 10334 case DTRACEACT_USYM: 10335 case DTRACEACT_UMOD: 10336 case DTRACEACT_UADDR: 10337 if (dp == NULL || 10338 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 10339 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10340 return (EINVAL); 10341 10342 /* 10343 * We have a slot for the pid, plus a slot for the 10344 * argument. 
To keep things simple (aligned with 10345 * bitness-neutral sizing), we store each as a 64-bit 10346 * quantity. 10347 */ 10348 size = 2 * sizeof (uint64_t); 10349 break; 10350 10351 case DTRACEACT_STOP: 10352 case DTRACEACT_BREAKPOINT: 10353 case DTRACEACT_PANIC: 10354 break; 10355 10356 case DTRACEACT_CHILL: 10357 case DTRACEACT_DISCARD: 10358 case DTRACEACT_RAISE: 10359 if (dp == NULL) 10360 return (EINVAL); 10361 break; 10362 10363 case DTRACEACT_EXIT: 10364 if (dp == NULL || 10365 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 10366 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10367 return (EINVAL); 10368 break; 10369 10370 case DTRACEACT_SPECULATE: 10371 if (ecb->dte_size > sizeof (dtrace_rechdr_t)) 10372 return (EINVAL); 10373 10374 if (dp == NULL) 10375 return (EINVAL); 10376 10377 state->dts_speculates = 1; 10378 break; 10379 10380 case DTRACEACT_PRINTM: 10381 size = dp->dtdo_rtype.dtdt_size; 10382 break; 10383 10384 case DTRACEACT_PRINTT: 10385 size = dp->dtdo_rtype.dtdt_size; 10386 break; 10387 10388 case DTRACEACT_COMMIT: { 10389 dtrace_action_t *act = ecb->dte_action; 10390 10391 for (; act != NULL; act = act->dta_next) { 10392 if (act->dta_kind == DTRACEACT_COMMIT) 10393 return (EINVAL); 10394 } 10395 10396 if (dp == NULL) 10397 return (EINVAL); 10398 break; 10399 } 10400 10401 default: 10402 return (EINVAL); 10403 } 10404 10405 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 10406 /* 10407 * If this is a data-storing action or a speculate, 10408 * we must be sure that there isn't a commit on the 10409 * action chain. 10410 */ 10411 dtrace_action_t *act = ecb->dte_action; 10412 10413 for (; act != NULL; act = act->dta_next) { 10414 if (act->dta_kind == DTRACEACT_COMMIT) 10415 return (EINVAL); 10416 } 10417 } 10418 10419 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 10420 action->dta_rec.dtrd_size = size; 10421 } 10422 10423 action->dta_refcnt = 1; 10424 rec = &action->dta_rec; 10425 size = rec->dtrd_size; 10426 10427 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 10428 if (!(size & mask)) { 10429 align = mask + 1; 10430 break; 10431 } 10432 } 10433 10434 action->dta_kind = desc->dtad_kind; 10435 10436 if ((action->dta_difo = dp) != NULL) 10437 dtrace_difo_hold(dp); 10438 10439 rec->dtrd_action = action->dta_kind; 10440 rec->dtrd_arg = arg; 10441 rec->dtrd_uarg = desc->dtad_uarg; 10442 rec->dtrd_alignment = (uint16_t)align; 10443 rec->dtrd_format = format; 10444 10445 if ((last = ecb->dte_action_last) != NULL) { 10446 ASSERT(ecb->dte_action != NULL); 10447 action->dta_prev = last; 10448 last->dta_next = action; 10449 } else { 10450 ASSERT(ecb->dte_action == NULL); 10451 ecb->dte_action = action; 10452 } 10453 10454 ecb->dte_action_last = action; 10455 10456 return (0); 10457} 10458 10459static void 10460dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 10461{ 10462 dtrace_action_t *act = ecb->dte_action, *next; 10463 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 10464 dtrace_difo_t *dp; 10465 uint16_t format; 10466 10467 if (act != NULL && act->dta_refcnt > 1) { 10468 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 10469 act->dta_refcnt--; 10470 } else { 10471 for (; act != NULL; act = next) { 10472 next = act->dta_next; 10473 ASSERT(next != NULL || act == ecb->dte_action_last); 10474 ASSERT(act->dta_refcnt == 1); 10475 10476 if ((format = act->dta_rec.dtrd_format) != 0) 10477 dtrace_format_remove(ecb->dte_state, format); 10478 10479 if ((dp = act->dta_difo) != NULL) 10480 dtrace_difo_release(dp, vstate); 
10481 10482 if (DTRACEACT_ISAGG(act->dta_kind)) { 10483 dtrace_ecb_aggregation_destroy(ecb, act); 10484 } else { 10485 kmem_free(act, sizeof (dtrace_action_t)); 10486 } 10487 } 10488 } 10489 10490 ecb->dte_action = NULL; 10491 ecb->dte_action_last = NULL; 10492 ecb->dte_size = 0; 10493} 10494 10495static void 10496dtrace_ecb_disable(dtrace_ecb_t *ecb) 10497{ 10498 /* 10499 * We disable the ECB by removing it from its probe. 10500 */ 10501 dtrace_ecb_t *pecb, *prev = NULL; 10502 dtrace_probe_t *probe = ecb->dte_probe; 10503 10504 ASSERT(MUTEX_HELD(&dtrace_lock)); 10505 10506 if (probe == NULL) { 10507 /* 10508 * This is the NULL probe; there is nothing to disable. 10509 */ 10510 return; 10511 } 10512 10513 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 10514 if (pecb == ecb) 10515 break; 10516 prev = pecb; 10517 } 10518 10519 ASSERT(pecb != NULL); 10520 10521 if (prev == NULL) { 10522 probe->dtpr_ecb = ecb->dte_next; 10523 } else { 10524 prev->dte_next = ecb->dte_next; 10525 } 10526 10527 if (ecb == probe->dtpr_ecb_last) { 10528 ASSERT(ecb->dte_next == NULL); 10529 probe->dtpr_ecb_last = prev; 10530 } 10531 10532 /* 10533 * The ECB has been disconnected from the probe; now sync to assure 10534 * that all CPUs have seen the change before returning. 10535 */ 10536 dtrace_sync(); 10537 10538 if (probe->dtpr_ecb == NULL) { 10539 /* 10540 * That was the last ECB on the probe; clear the predicate 10541 * cache ID for the probe, disable it and sync one more time 10542 * to assure that we'll never hit it again. 10543 */ 10544 dtrace_provider_t *prov = probe->dtpr_provider; 10545 10546 ASSERT(ecb->dte_next == NULL); 10547 ASSERT(probe->dtpr_ecb_last == NULL); 10548 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 10549 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 10550 probe->dtpr_id, probe->dtpr_arg); 10551 dtrace_sync(); 10552 } else { 10553 /* 10554 * There is at least one ECB remaining on the probe. If there 10555 * is _exactly_ one, set the probe's predicate cache ID to be 10556 * the predicate cache ID of the remaining ECB. 
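 * (dtrace_probe() uses this cache: if the firing thread's t_predcache
 * matches dtpr_predcache, the predicate is known to have recently
 * evaluated to false for that thread, and the probe can be skipped
 * without evaluating any DIF.)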
10557 */ 10558 ASSERT(probe->dtpr_ecb_last != NULL); 10559 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 10560 10561 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 10562 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 10563 10564 ASSERT(probe->dtpr_ecb->dte_next == NULL); 10565 10566 if (p != NULL) 10567 probe->dtpr_predcache = p->dtp_cacheid; 10568 } 10569 10570 ecb->dte_next = NULL; 10571 } 10572} 10573 10574static void 10575dtrace_ecb_destroy(dtrace_ecb_t *ecb) 10576{ 10577 dtrace_state_t *state = ecb->dte_state; 10578 dtrace_vstate_t *vstate = &state->dts_vstate; 10579 dtrace_predicate_t *pred; 10580 dtrace_epid_t epid = ecb->dte_epid; 10581 10582 ASSERT(MUTEX_HELD(&dtrace_lock)); 10583 ASSERT(ecb->dte_next == NULL); 10584 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 10585 10586 if ((pred = ecb->dte_predicate) != NULL) 10587 dtrace_predicate_release(pred, vstate); 10588 10589 dtrace_ecb_action_remove(ecb); 10590 10591 ASSERT(state->dts_ecbs[epid - 1] == ecb); 10592 state->dts_ecbs[epid - 1] = NULL; 10593 10594 kmem_free(ecb, sizeof (dtrace_ecb_t)); 10595} 10596 10597static dtrace_ecb_t * 10598dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 10599 dtrace_enabling_t *enab) 10600{ 10601 dtrace_ecb_t *ecb; 10602 dtrace_predicate_t *pred; 10603 dtrace_actdesc_t *act; 10604 dtrace_provider_t *prov; 10605 dtrace_ecbdesc_t *desc = enab->dten_current; 10606 10607 ASSERT(MUTEX_HELD(&dtrace_lock)); 10608 ASSERT(state != NULL); 10609 10610 ecb = dtrace_ecb_add(state, probe); 10611 ecb->dte_uarg = desc->dted_uarg; 10612 10613 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 10614 dtrace_predicate_hold(pred); 10615 ecb->dte_predicate = pred; 10616 } 10617 10618 if (probe != NULL) { 10619 /* 10620 * If the provider shows more leg than the consumer is old 10621 * enough to see, we need to enable the appropriate implicit 10622 * predicate bits to prevent the ecb from activating at 10623 * revealing times. 10624 * 10625 * Providers specifying DTRACE_PRIV_USER at register time 10626 * are stating that they need the /proc-style privilege 10627 * model to be enforced, and this is what DTRACE_COND_OWNER 10628 * and DTRACE_COND_ZONEOWNER will then do at probe time. 10629 */ 10630 prov = probe->dtpr_provider; 10631 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 10632 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10633 ecb->dte_cond |= DTRACE_COND_OWNER; 10634 10635 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 10636 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10637 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 10638 10639 /* 10640 * If the provider shows us kernel innards and the user 10641 * is lacking sufficient privilege, enable the 10642 * DTRACE_COND_USERMODE implicit predicate. 10643 */ 10644 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 10645 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 10646 ecb->dte_cond |= DTRACE_COND_USERMODE; 10647 } 10648 10649 if (dtrace_ecb_create_cache != NULL) { 10650 /* 10651 * If we have a cached ecb, we'll use its action list instead 10652 * of creating our own (saving both time and space). 
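 * This matters when one enabling matches many probes: e.g. enabling
 * fbt:::entry creates an ECB per matched probe, but all of those ECBs
 * share a single reference-counted action list built for the first.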
10653 */ 10654 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 10655 dtrace_action_t *act = cached->dte_action; 10656 10657 if (act != NULL) { 10658 ASSERT(act->dta_refcnt > 0); 10659 act->dta_refcnt++; 10660 ecb->dte_action = act; 10661 ecb->dte_action_last = cached->dte_action_last; 10662 ecb->dte_needed = cached->dte_needed; 10663 ecb->dte_size = cached->dte_size; 10664 ecb->dte_alignment = cached->dte_alignment; 10665 } 10666 10667 return (ecb); 10668 } 10669 10670 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 10671 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 10672 dtrace_ecb_destroy(ecb); 10673 return (NULL); 10674 } 10675 } 10676 10677 dtrace_ecb_resize(ecb); 10678 10679 return (dtrace_ecb_create_cache = ecb); 10680} 10681 10682static int 10683dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 10684{ 10685 dtrace_ecb_t *ecb; 10686 dtrace_enabling_t *enab = arg; 10687 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 10688 10689 ASSERT(state != NULL); 10690 10691 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 10692 /* 10693 * This probe was created in a generation for which this 10694 * enabling has previously created ECBs; we don't want to 10695 * enable it again, so just kick out. 10696 */ 10697 return (DTRACE_MATCH_NEXT); 10698 } 10699 10700 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 10701 return (DTRACE_MATCH_DONE); 10702 10703 dtrace_ecb_enable(ecb); 10704 return (DTRACE_MATCH_NEXT); 10705} 10706 10707static dtrace_ecb_t * 10708dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 10709{ 10710 dtrace_ecb_t *ecb; 10711 10712 ASSERT(MUTEX_HELD(&dtrace_lock)); 10713 10714 if (id == 0 || id > state->dts_necbs) 10715 return (NULL); 10716 10717 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 10718 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 10719 10720 return (state->dts_ecbs[id - 1]); 10721} 10722 10723static dtrace_aggregation_t * 10724dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 10725{ 10726 dtrace_aggregation_t *agg; 10727 10728 ASSERT(MUTEX_HELD(&dtrace_lock)); 10729 10730 if (id == 0 || id > state->dts_naggregations) 10731 return (NULL); 10732 10733 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 10734 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 10735 agg->dtag_id == id); 10736 10737 return (state->dts_aggregations[id - 1]); 10738} 10739 10740/* 10741 * DTrace Buffer Functions 10742 * 10743 * The following functions manipulate DTrace buffers. Most of these functions 10744 * are called in the context of establishing or processing consumer state; 10745 * exceptions are explicitly noted. 10746 */ 10747 10748/* 10749 * Note: called from cross call context. This function switches the two 10750 * buffers on a given CPU. The atomicity of this operation is assured by 10751 * disabling interrupts while the actual switch takes place; the disabling of 10752 * interrupts serializes the execution with any execution of dtrace_probe() on 10753 * the same CPU. 
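 *
 * A sketch of the intended use (illustrative only; the actual call
 * sites live in the consumer-facing code):
 *
 *	dtrace_xcall(cpu, (dtrace_xcall_t)dtrace_buffer_switch, buf);
 *
 * where cpu owns buf.  On return, dtb_xamot and dtb_xamot_offset
 * describe a quiesced snapshot that can be copied out without racing
 * dtrace_probe().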
10754 */ 10755static void 10756dtrace_buffer_switch(dtrace_buffer_t *buf) 10757{ 10758 caddr_t tomax = buf->dtb_tomax; 10759 caddr_t xamot = buf->dtb_xamot; 10760 dtrace_icookie_t cookie; 10761 hrtime_t now; 10762 10763 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10764 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 10765 10766 cookie = dtrace_interrupt_disable(); 10767 now = dtrace_gethrtime(); 10768 buf->dtb_tomax = xamot; 10769 buf->dtb_xamot = tomax; 10770 buf->dtb_xamot_drops = buf->dtb_drops; 10771 buf->dtb_xamot_offset = buf->dtb_offset; 10772 buf->dtb_xamot_errors = buf->dtb_errors; 10773 buf->dtb_xamot_flags = buf->dtb_flags; 10774 buf->dtb_offset = 0; 10775 buf->dtb_drops = 0; 10776 buf->dtb_errors = 0; 10777 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 10778 buf->dtb_interval = now - buf->dtb_switched; 10779 buf->dtb_switched = now; 10780 dtrace_interrupt_enable(cookie); 10781} 10782 10783/* 10784 * Note: called from cross call context. This function activates a buffer 10785 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 10786 * is guaranteed by the disabling of interrupts. 10787 */ 10788static void 10789dtrace_buffer_activate(dtrace_state_t *state) 10790{ 10791 dtrace_buffer_t *buf; 10792 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 10793 10794 buf = &state->dts_buffer[curcpu]; 10795 10796 if (buf->dtb_tomax != NULL) { 10797 /* 10798 * We might like to assert that the buffer is marked inactive, 10799 * but this isn't necessarily true: the buffer for the CPU 10800 * that processes the BEGIN probe has its buffer activated 10801 * manually. In this case, we take the (harmless) action of 10802 * re-clearing the INACTIVE bit. 10803 */ 10804 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 10805 } 10806 10807 dtrace_interrupt_enable(cookie); 10808} 10809 10810static int 10811dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 10812 processorid_t cpu) 10813{ 10814#if defined(sun) 10815 cpu_t *cp; 10816#endif 10817 dtrace_buffer_t *buf; 10818 10819#if defined(sun) 10820 ASSERT(MUTEX_HELD(&cpu_lock)); 10821 ASSERT(MUTEX_HELD(&dtrace_lock)); 10822 10823 if (size > dtrace_nonroot_maxsize && 10824 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 10825 return (EFBIG); 10826 10827 cp = cpu_list; 10828 10829 do { 10830 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10831 continue; 10832 10833 buf = &bufs[cp->cpu_id]; 10834 10835 /* 10836 * If there is already a buffer allocated for this CPU, it 10837 * is only possible that this is a DR event. 
In this case, the buffer size must match our specified size. 10838 */ 10839 if (buf->dtb_tomax != NULL) { 10840 ASSERT(buf->dtb_size == size); 10841 continue; 10842 } 10843 10844 ASSERT(buf->dtb_xamot == NULL); 10845 10846 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10847 goto err; 10848 10849 buf->dtb_size = size; 10850 buf->dtb_flags = flags; 10851 buf->dtb_offset = 0; 10852 buf->dtb_drops = 0; 10853 10854 if (flags & DTRACEBUF_NOSWITCH) 10855 continue; 10856 10857 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10858 goto err; 10859 } while ((cp = cp->cpu_next) != cpu_list); 10860 10861 return (0); 10862 10863err: 10864 cp = cpu_list; 10865 10866 do { 10867 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10868 continue; 10869 10870 buf = &bufs[cp->cpu_id]; 10871 10872 if (buf->dtb_xamot != NULL) { 10873 ASSERT(buf->dtb_tomax != NULL); 10874 ASSERT(buf->dtb_size == size); 10875 kmem_free(buf->dtb_xamot, size); 10876 } 10877 10878 if (buf->dtb_tomax != NULL) { 10879 ASSERT(buf->dtb_size == size); 10880 kmem_free(buf->dtb_tomax, size); 10881 } 10882 10883 buf->dtb_tomax = NULL; 10884 buf->dtb_xamot = NULL; 10885 buf->dtb_size = 0; 10886 } while ((cp = cp->cpu_next) != cpu_list); 10887 10888 return (ENOMEM); 10889#else 10890 int i; 10891 10892#if defined(__amd64__) || defined(__mips__) || defined(__powerpc__) 10893 /* 10894 * FreeBSD isn't good at limiting the amount of memory we 10895 * ask to malloc, so let's place a limit here before trying 10896 * to do something that might well end in tears at bedtime. 10897 */ 10898 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 10899 return (ENOMEM); 10900#endif 10901 10902 ASSERT(MUTEX_HELD(&dtrace_lock)); 10903 CPU_FOREACH(i) { 10904 if (cpu != DTRACE_CPUALL && cpu != i) 10905 continue; 10906 10907 buf = &bufs[i]; 10908 10909 /* 10910 * If there is already a buffer allocated for this CPU, it 10911 * is only possible that this is a DR event. In this case, 10912 * the buffer size must match our specified size. 10913 */ 10914 if (buf->dtb_tomax != NULL) { 10915 ASSERT(buf->dtb_size == size); 10916 continue; 10917 } 10918 10919 ASSERT(buf->dtb_xamot == NULL); 10920 10921 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10922 goto err; 10923 10924 buf->dtb_size = size; 10925 buf->dtb_flags = flags; 10926 buf->dtb_offset = 0; 10927 buf->dtb_drops = 0; 10928 10929 if (flags & DTRACEBUF_NOSWITCH) 10930 continue; 10931 10932 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10933 goto err; 10934 } 10935 10936 return (0); 10937 10938err: 10939 /* 10940 * Error allocating memory, so free the buffers that were 10941 * allocated before the failed allocation. 10942 */ 10943 CPU_FOREACH(i) { 10944 if (cpu != DTRACE_CPUALL && cpu != i) 10945 continue; 10946 10947 buf = &bufs[i]; 10948 10949 if (buf->dtb_xamot != NULL) { 10950 ASSERT(buf->dtb_tomax != NULL); 10951 ASSERT(buf->dtb_size == size); 10952 kmem_free(buf->dtb_xamot, size); 10953 } 10954 10955 if (buf->dtb_tomax != NULL) { 10956 ASSERT(buf->dtb_size == size); 10957 kmem_free(buf->dtb_tomax, size); 10958 } 10959 10960 buf->dtb_tomax = NULL; 10961 buf->dtb_xamot = NULL; 10962 buf->dtb_size = 0; 10963 10964 } 10965 10966 return (ENOMEM); 10967#endif 10968} 10969 10970/* 10971 * Note: called from probe context. This function just increments the drop 10972 * count on a buffer. It has been made a function to allow for the 10973 * possibility of understanding the source of mysterious drop counts. 
(A 10974 * problem for which one may be particularly disappointed that DTrace cannot 10975 * be used to understand DTrace.) 10976 */ 10977static void 10978dtrace_buffer_drop(dtrace_buffer_t *buf) 10979{ 10980 buf->dtb_drops++; 10981} 10982 10983/* 10984 * Note: called from probe context. This function is called to reserve space 10985 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 10986 * mstate. Returns the new offset in the buffer, or a negative value if an 10987 * error has occurred. 10988 */ 10989static intptr_t 10990dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 10991 dtrace_state_t *state, dtrace_mstate_t *mstate) 10992{ 10993 intptr_t offs = buf->dtb_offset, soffs; 10994 intptr_t woffs; 10995 caddr_t tomax; 10996 size_t total; 10997 10998 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 10999 return (-1); 11000 11001 if ((tomax = buf->dtb_tomax) == NULL) { 11002 dtrace_buffer_drop(buf); 11003 return (-1); 11004 } 11005 11006 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 11007 while (offs & (align - 1)) { 11008 /* 11009 * Assert that our alignment is off by a number which 11010 * is itself sizeof (uint32_t) aligned. 11011 */ 11012 ASSERT(!((align - (offs & (align - 1))) & 11013 (sizeof (uint32_t) - 1))); 11014 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 11015 offs += sizeof (uint32_t); 11016 } 11017 11018 if ((soffs = offs + needed) > buf->dtb_size) { 11019 dtrace_buffer_drop(buf); 11020 return (-1); 11021 } 11022 11023 if (mstate == NULL) 11024 return (offs); 11025 11026 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 11027 mstate->dtms_scratch_size = buf->dtb_size - soffs; 11028 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 11029 11030 return (offs); 11031 } 11032 11033 if (buf->dtb_flags & DTRACEBUF_FILL) { 11034 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 11035 (buf->dtb_flags & DTRACEBUF_FULL)) 11036 return (-1); 11037 goto out; 11038 } 11039 11040 total = needed + (offs & (align - 1)); 11041 11042 /* 11043 * For a ring buffer, life is quite a bit more complicated. Before 11044 * we can store any padding, we need to adjust our wrapping offset. 11045 * (If we've never before wrapped or we're not about to, no adjustment 11046 * is required.) 11047 */ 11048 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 11049 offs + total > buf->dtb_size) { 11050 woffs = buf->dtb_xamot_offset; 11051 11052 if (offs + total > buf->dtb_size) { 11053 /* 11054 * We can't fit in the end of the buffer. First, a 11055 * sanity check that we can fit in the buffer at all. 11056 */ 11057 if (total > buf->dtb_size) { 11058 dtrace_buffer_drop(buf); 11059 return (-1); 11060 } 11061 11062 /* 11063 * We're going to be storing at the top of the buffer, 11064 * so now we need to deal with the wrapped offset. We 11065 * only reset our wrapped offset to 0 if it is 11066 * currently greater than the current offset. If it 11067 * is less than the current offset, it is because a 11068 * previous allocation induced a wrap -- but the 11069 * allocation didn't subsequently take the space due 11070 * to an error or false predicate evaluation. In this 11071 * case, we'll just leave the wrapped offset alone: if 11072 * the wrapped offset hasn't been advanced far enough 11073 * for this allocation, it will be adjusted in the 11074 * lower loop. 
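 *
 * A worked example with illustrative numbers: suppose dtb_size = 512,
 * offs = 480 and total = 64.  The reservation cannot fit at the end,
 * so the tail of the buffer is zeroed, offs becomes 0, total is reset
 * to the needed size and DTRACEBUF_WRAPPED is set; the loop below then
 * retires old records at the top of the buffer, advancing woffs one
 * record at a time until woffs >= offs + total.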
11075 */ 11076 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 11077 if (woffs >= offs) 11078 woffs = 0; 11079 } else { 11080 woffs = 0; 11081 } 11082 11083 /* 11084 * Now we know that we're going to be storing to the 11085 * top of the buffer and that there is room for us 11086 * there. We need to clear the buffer from the current 11087 * offset to the end (there may be old gunk there). 11088 */ 11089 while (offs < buf->dtb_size) 11090 tomax[offs++] = 0; 11091 11092 /* 11093 * We need to set our offset to zero. And because we 11094 * are wrapping, we need to set the bit indicating as 11095 * much. We can also adjust our needed space back 11096 * down to the space required by the ECB -- we know 11097 * that the top of the buffer is aligned. 11098 */ 11099 offs = 0; 11100 total = needed; 11101 buf->dtb_flags |= DTRACEBUF_WRAPPED; 11102 } else { 11103 /* 11104 * There is room for us in the buffer, so we simply 11105 * need to check the wrapped offset. 11106 */ 11107 if (woffs < offs) { 11108 /* 11109 * The wrapped offset is less than the offset. 11110 * This can happen if we allocated buffer space 11111 * that induced a wrap, but then we didn't 11112 * subsequently take the space due to an error 11113 * or false predicate evaluation. This is 11114 * okay; we know that _this_ allocation isn't 11115 * going to induce a wrap. We still can't 11116 * reset the wrapped offset to be zero, 11117 * however: the space may have been trashed in 11118 * the previous failed probe attempt. But at 11119 * least the wrapped offset doesn't need to 11120 * be adjusted at all... 11121 */ 11122 goto out; 11123 } 11124 } 11125 11126 while (offs + total > woffs) { 11127 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 11128 size_t size; 11129 11130 if (epid == DTRACE_EPIDNONE) { 11131 size = sizeof (uint32_t); 11132 } else { 11133 ASSERT3U(epid, <=, state->dts_necbs); 11134 ASSERT(state->dts_ecbs[epid - 1] != NULL); 11135 11136 size = state->dts_ecbs[epid - 1]->dte_size; 11137 } 11138 11139 ASSERT(woffs + size <= buf->dtb_size); 11140 ASSERT(size != 0); 11141 11142 if (woffs + size == buf->dtb_size) { 11143 /* 11144 * We've reached the end of the buffer; we want 11145 * to set the wrapped offset to 0 and break 11146 * out. However, if the offs is 0, then we're 11147 * in a strange edge-condition: the amount of 11148 * space that we want to reserve plus the size 11149 * of the record that we're overwriting is 11150 * greater than the size of the buffer. This 11151 * is problematic because if we reserve the 11152 * space but subsequently don't consume it (due 11153 * to a failed predicate or error) the wrapped 11154 * offset will be 0 -- yet the EPID at offset 0 11155 * will not be committed. This situation is 11156 * relatively easy to deal with: if we're in 11157 * this case, the buffer is indistinguishable 11158 * from one that hasn't wrapped; we need only 11159 * finish the job by clearing the wrapped bit, 11160 * explicitly setting the offset to be 0, and 11161 * zero'ing out the old data in the buffer. 11162 */ 11163 if (offs == 0) { 11164 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 11165 buf->dtb_offset = 0; 11166 woffs = total; 11167 11168 while (woffs < buf->dtb_size) 11169 tomax[woffs++] = 0; 11170 } 11171 11172 woffs = 0; 11173 break; 11174 } 11175 11176 woffs += size; 11177 } 11178 11179 /* 11180 * We have a wrapped offset. It may be that the wrapped offset 11181 * has become zero -- that's okay. 
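 *
 * (Recall that ring buffers are never switched -- dtrace_buffer_switch()
 * asserts that DTRACEBUF_RING is not set -- so dtb_xamot_offset is free
 * to serve as the persistent home of the wrapped offset.)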
11182 */ 11183 buf->dtb_xamot_offset = woffs; 11184 } 11185 11186out: 11187 /* 11188 * Now we can plow the buffer with any necessary padding. 11189 */ 11190 while (offs & (align - 1)) { 11191 /* 11192 * Assert that our alignment is off by a number which 11193 * is itself sizeof (uint32_t) aligned. 11194 */ 11195 ASSERT(!((align - (offs & (align - 1))) & 11196 (sizeof (uint32_t) - 1))); 11197 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 11198 offs += sizeof (uint32_t); 11199 } 11200 11201 if (buf->dtb_flags & DTRACEBUF_FILL) { 11202 if (offs + needed > buf->dtb_size - state->dts_reserve) { 11203 buf->dtb_flags |= DTRACEBUF_FULL; 11204 return (-1); 11205 } 11206 } 11207 11208 if (mstate == NULL) 11209 return (offs); 11210 11211 /* 11212 * For ring buffers and fill buffers, the scratch space is always 11213 * the inactive buffer. 11214 */ 11215 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 11216 mstate->dtms_scratch_size = buf->dtb_size; 11217 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 11218 11219 return (offs); 11220} 11221 11222static void 11223dtrace_buffer_polish(dtrace_buffer_t *buf) 11224{ 11225 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 11226 ASSERT(MUTEX_HELD(&dtrace_lock)); 11227 11228 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 11229 return; 11230 11231 /* 11232 * We need to polish the ring buffer. There are three cases: 11233 * 11234 * - The first (and presumably most common) is that there is no gap 11235 * between the buffer offset and the wrapped offset. In this case, 11236 * there is nothing in the buffer that isn't valid data; we can 11237 * mark the buffer as polished and return. 11238 * 11239 * - The second (less common than the first but still more common 11240 * than the third) is that there is a gap between the buffer offset 11241 * and the wrapped offset, and the wrapped offset is larger than the 11242 * buffer offset. This can happen because of an alignment issue, or 11243 * can happen because of a call to dtrace_buffer_reserve() that 11244 * didn't subsequently consume the buffer space. In this case, 11245 * we need to zero the data from the buffer offset to the wrapped 11246 * offset. 11247 * 11248 * - The third (and least common) is that there is a gap between the 11249 * buffer offset and the wrapped offset, but the wrapped offset is 11250 * _less_ than the buffer offset. This can only happen because a 11251 * call to dtrace_buffer_reserve() induced a wrap, but the space 11252 * was not subsequently consumed. In this case, we need to zero the 11253 * space from the offset to the end of the buffer _and_ from the 11254 * top of the buffer to the wrapped offset. 11255 */ 11256 if (buf->dtb_offset < buf->dtb_xamot_offset) { 11257 bzero(buf->dtb_tomax + buf->dtb_offset, 11258 buf->dtb_xamot_offset - buf->dtb_offset); 11259 } 11260 11261 if (buf->dtb_offset > buf->dtb_xamot_offset) { 11262 bzero(buf->dtb_tomax + buf->dtb_offset, 11263 buf->dtb_size - buf->dtb_offset); 11264 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 11265 } 11266} 11267 11268/* 11269 * This routine determines if data generated at the specified time has likely 11270 * been entirely consumed at user-level. This routine is called to determine 11271 * if an ECB on a defunct probe (but for an active enabling) can be safely 11272 * disabled and destroyed. 
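 *
 * A sketch of the reasoning behind the checks below: ring buffers are
 * never deemed consumed (their contents are not consumed
 * incrementally); an unswitched, non-empty buffer still holds its
 * data; and the test
 *
 *	buf->dtb_switched - buf->dtb_interval >= when
 *
 * asks whether the interval preceding the most recent switch began
 * after the data was generated -- that is, whether the buffer has
 * been switched (and therefore copied out) twice since "when".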
11273 */ 11274static int 11275dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when) 11276{ 11277 int i; 11278 11279 for (i = 0; i < NCPU; i++) { 11280 dtrace_buffer_t *buf = &bufs[i]; 11281 11282 if (buf->dtb_size == 0) 11283 continue; 11284 11285 if (buf->dtb_flags & DTRACEBUF_RING) 11286 return (0); 11287 11288 if (!buf->dtb_switched && buf->dtb_offset != 0) 11289 return (0); 11290 11291 if (buf->dtb_switched - buf->dtb_interval < when) 11292 return (0); 11293 } 11294 11295 return (1); 11296} 11297 11298static void 11299dtrace_buffer_free(dtrace_buffer_t *bufs) 11300{ 11301 int i; 11302 11303 for (i = 0; i < NCPU; i++) { 11304 dtrace_buffer_t *buf = &bufs[i]; 11305 11306 if (buf->dtb_tomax == NULL) { 11307 ASSERT(buf->dtb_xamot == NULL); 11308 ASSERT(buf->dtb_size == 0); 11309 continue; 11310 } 11311 11312 if (buf->dtb_xamot != NULL) { 11313 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11314 kmem_free(buf->dtb_xamot, buf->dtb_size); 11315 } 11316 11317 kmem_free(buf->dtb_tomax, buf->dtb_size); 11318 buf->dtb_size = 0; 11319 buf->dtb_tomax = NULL; 11320 buf->dtb_xamot = NULL; 11321 } 11322} 11323 11324/* 11325 * DTrace Enabling Functions 11326 */ 11327static dtrace_enabling_t * 11328dtrace_enabling_create(dtrace_vstate_t *vstate) 11329{ 11330 dtrace_enabling_t *enab; 11331 11332 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 11333 enab->dten_vstate = vstate; 11334 11335 return (enab); 11336} 11337 11338static void 11339dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 11340{ 11341 dtrace_ecbdesc_t **ndesc; 11342 size_t osize, nsize; 11343 11344 /* 11345 * We can't add to enablings after we've enabled them, or after we've 11346 * retained them. 11347 */ 11348 ASSERT(enab->dten_probegen == 0); 11349 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11350 11351 if (enab->dten_ndesc < enab->dten_maxdesc) { 11352 enab->dten_desc[enab->dten_ndesc++] = ecb; 11353 return; 11354 } 11355 11356 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11357 11358 if (enab->dten_maxdesc == 0) { 11359 enab->dten_maxdesc = 1; 11360 } else { 11361 enab->dten_maxdesc <<= 1; 11362 } 11363 11364 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 11365 11366 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11367 ndesc = kmem_zalloc(nsize, KM_SLEEP); 11368 bcopy(enab->dten_desc, ndesc, osize); 11369 if (enab->dten_desc != NULL) 11370 kmem_free(enab->dten_desc, osize); 11371 11372 enab->dten_desc = ndesc; 11373 enab->dten_desc[enab->dten_ndesc++] = ecb; 11374} 11375 11376static void 11377dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 11378 dtrace_probedesc_t *pd) 11379{ 11380 dtrace_ecbdesc_t *new; 11381 dtrace_predicate_t *pred; 11382 dtrace_actdesc_t *act; 11383 11384 /* 11385 * We're going to create a new ECB description that matches the 11386 * specified ECB in every way, but has the specified probe description. 
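 *
 * (Note, beyond the original comment: the predicate and the action
 * list are shared by reference -- held below, not copied -- so the
 * clone is cheap; dtrace_enabling_replicate() leans on this when it
 * duplicates every matching description under a new probe description.)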
11387 */ 11388 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11389 11390 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 11391 dtrace_predicate_hold(pred); 11392 11393 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 11394 dtrace_actdesc_hold(act); 11395 11396 new->dted_action = ecb->dted_action; 11397 new->dted_pred = ecb->dted_pred; 11398 new->dted_probe = *pd; 11399 new->dted_uarg = ecb->dted_uarg; 11400 11401 dtrace_enabling_add(enab, new); 11402} 11403 11404static void 11405dtrace_enabling_dump(dtrace_enabling_t *enab) 11406{ 11407 int i; 11408 11409 for (i = 0; i < enab->dten_ndesc; i++) { 11410 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 11411 11412 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 11413 desc->dtpd_provider, desc->dtpd_mod, 11414 desc->dtpd_func, desc->dtpd_name); 11415 } 11416} 11417 11418static void 11419dtrace_enabling_destroy(dtrace_enabling_t *enab) 11420{ 11421 int i; 11422 dtrace_ecbdesc_t *ep; 11423 dtrace_vstate_t *vstate = enab->dten_vstate; 11424 11425 ASSERT(MUTEX_HELD(&dtrace_lock)); 11426 11427 for (i = 0; i < enab->dten_ndesc; i++) { 11428 dtrace_actdesc_t *act, *next; 11429 dtrace_predicate_t *pred; 11430 11431 ep = enab->dten_desc[i]; 11432 11433 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 11434 dtrace_predicate_release(pred, vstate); 11435 11436 for (act = ep->dted_action; act != NULL; act = next) { 11437 next = act->dtad_next; 11438 dtrace_actdesc_release(act, vstate); 11439 } 11440 11441 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11442 } 11443 11444 if (enab->dten_desc != NULL) 11445 kmem_free(enab->dten_desc, 11446 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 11447 11448 /* 11449 * If this was a retained enabling, decrement the dts_nretained count 11450 * and take it off of the dtrace_retained list. 11451 */ 11452 if (enab->dten_prev != NULL || enab->dten_next != NULL || 11453 dtrace_retained == enab) { 11454 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11455 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 11456 enab->dten_vstate->dtvs_state->dts_nretained--; 11457 } 11458 11459 if (enab->dten_prev == NULL) { 11460 if (dtrace_retained == enab) { 11461 dtrace_retained = enab->dten_next; 11462 11463 if (dtrace_retained != NULL) 11464 dtrace_retained->dten_prev = NULL; 11465 } 11466 } else { 11467 ASSERT(enab != dtrace_retained); 11468 ASSERT(dtrace_retained != NULL); 11469 enab->dten_prev->dten_next = enab->dten_next; 11470 } 11471 11472 if (enab->dten_next != NULL) { 11473 ASSERT(dtrace_retained != NULL); 11474 enab->dten_next->dten_prev = enab->dten_prev; 11475 } 11476 11477 kmem_free(enab, sizeof (dtrace_enabling_t)); 11478} 11479 11480static int 11481dtrace_enabling_retain(dtrace_enabling_t *enab) 11482{ 11483 dtrace_state_t *state; 11484 11485 ASSERT(MUTEX_HELD(&dtrace_lock)); 11486 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11487 ASSERT(enab->dten_vstate != NULL); 11488 11489 state = enab->dten_vstate->dtvs_state; 11490 ASSERT(state != NULL); 11491 11492 /* 11493 * We only allow each state to retain dtrace_retain_max enablings. 
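 *
 * (Illustrative consequence: once dts_nretained reaches the
 * dtrace_retain_max tunable, the ENOSPC below is returned to the
 * caller and the enabling is never placed on the dtrace_retained
 * list.)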
11494 */ 11495 if (state->dts_nretained >= dtrace_retain_max) 11496 return (ENOSPC); 11497 11498 state->dts_nretained++; 11499 11500 if (dtrace_retained == NULL) { 11501 dtrace_retained = enab; 11502 return (0); 11503 } 11504 11505 enab->dten_next = dtrace_retained; 11506 dtrace_retained->dten_prev = enab; 11507 dtrace_retained = enab; 11508 11509 return (0); 11510} 11511 11512static int 11513dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 11514 dtrace_probedesc_t *create) 11515{ 11516 dtrace_enabling_t *new, *enab; 11517 int found = 0, err = ENOENT; 11518 11519 ASSERT(MUTEX_HELD(&dtrace_lock)); 11520 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 11521 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 11522 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 11523 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 11524 11525 new = dtrace_enabling_create(&state->dts_vstate); 11526 11527 /* 11528 * Iterate over all retained enablings, looking for enablings that 11529 * match the specified state. 11530 */ 11531 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11532 int i; 11533 11534 /* 11535 * dtvs_state can only be NULL for helper enablings -- and 11536 * helper enablings can't be retained. 11537 */ 11538 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11539 11540 if (enab->dten_vstate->dtvs_state != state) 11541 continue; 11542 11543 /* 11544 * Now iterate over each probe description; we're looking for 11545 * an exact match to the specified probe description. 11546 */ 11547 for (i = 0; i < enab->dten_ndesc; i++) { 11548 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11549 dtrace_probedesc_t *pd = &ep->dted_probe; 11550 11551 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 11552 continue; 11553 11554 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 11555 continue; 11556 11557 if (strcmp(pd->dtpd_func, match->dtpd_func)) 11558 continue; 11559 11560 if (strcmp(pd->dtpd_name, match->dtpd_name)) 11561 continue; 11562 11563 /* 11564 * We have a winning probe! Add it to our growing 11565 * enabling. 11566 */ 11567 found = 1; 11568 dtrace_enabling_addlike(new, ep, create); 11569 } 11570 } 11571 11572 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 11573 dtrace_enabling_destroy(new); 11574 return (err); 11575 } 11576 11577 return (0); 11578} 11579 11580static void 11581dtrace_enabling_retract(dtrace_state_t *state) 11582{ 11583 dtrace_enabling_t *enab, *next; 11584 11585 ASSERT(MUTEX_HELD(&dtrace_lock)); 11586 11587 /* 11588 * Iterate over all retained enablings, destroying the enablings 11589 * retained for the specified state. 11590 */ 11591 for (enab = dtrace_retained; enab != NULL; enab = next) { 11592 next = enab->dten_next; 11593 11594 /* 11595 * dtvs_state can only be NULL for helper enablings -- and 11596 * helper enablings can't be retained. 
11597 */ 11598 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11599 11600 if (enab->dten_vstate->dtvs_state == state) { 11601 ASSERT(state->dts_nretained > 0); 11602 dtrace_enabling_destroy(enab); 11603 } 11604 } 11605 11606 ASSERT(state->dts_nretained == 0); 11607} 11608 11609static int 11610dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 11611{ 11612 int i = 0; 11613 int matched = 0; 11614 11615 ASSERT(MUTEX_HELD(&cpu_lock)); 11616 ASSERT(MUTEX_HELD(&dtrace_lock)); 11617 11618 for (i = 0; i < enab->dten_ndesc; i++) { 11619 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11620 11621 enab->dten_current = ep; 11622 enab->dten_error = 0; 11623 11624 matched += dtrace_probe_enable(&ep->dted_probe, enab); 11625 11626 if (enab->dten_error != 0) { 11627 /* 11628 * If we get an error half-way through enabling the 11629 * probes, we kick out -- perhaps with some number of 11630 * them enabled. Leaving enabled probes enabled may 11631 * be slightly confusing for user-level, but we expect 11632 * that no one will attempt to actually drive on in 11633 * the face of such errors. If this is an anonymous 11634 * enabling (indicated with a NULL nmatched pointer), 11635 * we cmn_err() a message. We aren't expecting to 11636 * get such an error -- insofar as it can exist at all, 11637 * it would be a result of corrupted DOF in the driver 11638 * properties. 11639 */ 11640 if (nmatched == NULL) { 11641 cmn_err(CE_WARN, "dtrace_enabling_match() " 11642 "error on %p: %d", (void *)ep, 11643 enab->dten_error); 11644 } 11645 11646 return (enab->dten_error); 11647 } 11648 } 11649 11650 enab->dten_probegen = dtrace_probegen; 11651 if (nmatched != NULL) 11652 *nmatched = matched; 11653 11654 return (0); 11655} 11656 11657static void 11658dtrace_enabling_matchall(void) 11659{ 11660 dtrace_enabling_t *enab; 11661 11662 mutex_enter(&cpu_lock); 11663 mutex_enter(&dtrace_lock); 11664 11665 /* 11666 * Iterate over all retained enablings to see if any probes match 11667 * against them. We only perform this operation on enablings for which 11668 * we have sufficient permissions by virtue of being in the global zone 11669 * or in the same zone as the DTrace client. Because we can be called 11670 * after dtrace_detach() has been called, we cannot assert that there 11671 * are retained enablings. We can safely load from dtrace_retained, 11672 * however: the taskq_destroy() at the end of dtrace_detach() will 11673 * block pending our completion. 11674 */ 11675 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11676#if defined(sun) 11677 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; 11678 11679 if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr)) 11680#endif 11681 (void) dtrace_enabling_match(enab, NULL); 11682 } 11683 11684 mutex_exit(&dtrace_lock); 11685 mutex_exit(&cpu_lock); 11686} 11687 11688/* 11689 * If an enabling is to be enabled without having matched probes (that is, if 11690 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 11691 * enabling must be _primed_ by creating an ECB for every ECB description. 11692 * This must be done to assure that we know the number of speculations, the 11693 * number of aggregations, the minimum buffer size needed, etc. before we 11694 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 11695 * enabling any probes, we create ECBs for every ECB description, but with a 11696 * NULL probe -- which is exactly what this function does. 
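 *
 * (An illustrative consequence, beyond the original comment: a primed
 * ECB travels the ordinary dtrace_ecb_create() path with a NULL probe,
 * so it sizes buffers and creates any aggregations just as a matched
 * enabling would -- but it can never fire, and dtrace_ecb_disable()
 * treats its NULL probe as a no-op.)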
11697 */ 11698static void 11699dtrace_enabling_prime(dtrace_state_t *state) 11700{ 11701 dtrace_enabling_t *enab; 11702 int i; 11703 11704 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11705 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11706 11707 if (enab->dten_vstate->dtvs_state != state) 11708 continue; 11709 11710 /* 11711 * We don't want to prime an enabling more than once, lest 11712 * we allow a malicious user to induce resource exhaustion. 11713 * (The ECBs that result from priming an enabling aren't 11714 * leaked -- but they also aren't deallocated until the 11715 * consumer state is destroyed.) 11716 */ 11717 if (enab->dten_primed) 11718 continue; 11719 11720 for (i = 0; i < enab->dten_ndesc; i++) { 11721 enab->dten_current = enab->dten_desc[i]; 11722 (void) dtrace_probe_enable(NULL, enab); 11723 } 11724 11725 enab->dten_primed = 1; 11726 } 11727} 11728 11729/* 11730 * Called to indicate that probes should be provided due to retained 11731 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 11732 * must take an initial lap through the enabling calling the dtps_provide() 11733 * entry point explicitly to allow for autocreated probes. 11734 */ 11735static void 11736dtrace_enabling_provide(dtrace_provider_t *prv) 11737{ 11738 int i, all = 0; 11739 dtrace_probedesc_t desc; 11740 11741 ASSERT(MUTEX_HELD(&dtrace_lock)); 11742 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 11743 11744 if (prv == NULL) { 11745 all = 1; 11746 prv = dtrace_provider; 11747 } 11748 11749 do { 11750 dtrace_enabling_t *enab = dtrace_retained; 11751 void *parg = prv->dtpv_arg; 11752 11753 for (; enab != NULL; enab = enab->dten_next) { 11754 for (i = 0; i < enab->dten_ndesc; i++) { 11755 desc = enab->dten_desc[i]->dted_probe; 11756 mutex_exit(&dtrace_lock); 11757 prv->dtpv_pops.dtps_provide(parg, &desc); 11758 mutex_enter(&dtrace_lock); 11759 } 11760 } 11761 } while (all && (prv = prv->dtpv_next) != NULL); 11762 11763 mutex_exit(&dtrace_lock); 11764 dtrace_probe_provide(NULL, all ? NULL : prv); 11765 mutex_enter(&dtrace_lock); 11766} 11767 11768/* 11769 * Called to reap ECBs that are attached to probes from defunct providers. 11770 */ 11771static void 11772dtrace_enabling_reap(void) 11773{ 11774 dtrace_provider_t *prov; 11775 dtrace_probe_t *probe; 11776 dtrace_ecb_t *ecb; 11777 hrtime_t when; 11778 int i; 11779 11780 mutex_enter(&cpu_lock); 11781 mutex_enter(&dtrace_lock); 11782 11783 for (i = 0; i < dtrace_nprobes; i++) { 11784 if ((probe = dtrace_probes[i]) == NULL) 11785 continue; 11786 11787 if (probe->dtpr_ecb == NULL) 11788 continue; 11789 11790 prov = probe->dtpr_provider; 11791 11792 if ((when = prov->dtpv_defunct) == 0) 11793 continue; 11794 11795 /* 11796 * We have ECBs on a defunct provider: we want to reap these 11797 * ECBs to allow the provider to unregister. The destruction 11798 * of these ECBs must be done carefully: if we destroy the ECB 11799 * and the consumer later wishes to consume an EPID that 11800 * corresponds to the destroyed ECB (and if the EPID metadata 11801 * has not been previously consumed), the consumer will abort 11802 * processing on the unknown EPID. To reduce (but not, sadly, 11803 * eliminate) the possibility of this, we will only destroy an 11804 * ECB for a defunct provider if, for the state that 11805 * corresponds to the ECB: 11806 * 11807 * (a) There is no speculative tracing (which can effectively 11808 * cache an EPID for an arbitrary amount of time). 
11809 * 11810 * (b) The principal buffers have been switched twice since the 11811 * provider became defunct. 11812 * 11813 * (c) The aggregation buffers are of zero size or have been 11814 * switched twice since the provider became defunct. 11815 * 11816 * We use dts_speculates to determine (a) and call a function 11817 * (dtrace_buffer_consumed()) to determine (b) and (c). Note 11818 * that as soon as we've been unable to destroy one of the ECBs 11819 * associated with the probe, we quit trying -- reaping is only 11820 * fruitful in as much as we can destroy all ECBs associated 11821 * with the defunct provider's probes. 11822 */ 11823 while ((ecb = probe->dtpr_ecb) != NULL) { 11824 dtrace_state_t *state = ecb->dte_state; 11825 dtrace_buffer_t *buf = state->dts_buffer; 11826 dtrace_buffer_t *aggbuf = state->dts_aggbuffer; 11827 11828 if (state->dts_speculates) 11829 break; 11830 11831 if (!dtrace_buffer_consumed(buf, when)) 11832 break; 11833 11834 if (!dtrace_buffer_consumed(aggbuf, when)) 11835 break; 11836 11837 dtrace_ecb_disable(ecb); 11838 ASSERT(probe->dtpr_ecb != ecb); 11839 dtrace_ecb_destroy(ecb); 11840 } 11841 } 11842 11843 mutex_exit(&dtrace_lock); 11844 mutex_exit(&cpu_lock); 11845} 11846 11847/* 11848 * DTrace DOF Functions 11849 */ 11850/*ARGSUSED*/ 11851static void 11852dtrace_dof_error(dof_hdr_t *dof, const char *str) 11853{ 11854 if (dtrace_err_verbose) 11855 cmn_err(CE_WARN, "failed to process DOF: %s", str); 11856 11857#ifdef DTRACE_ERRDEBUG 11858 dtrace_errdebug(str); 11859#endif 11860} 11861 11862/* 11863 * Create DOF out of a currently enabled state. Right now, we only create 11864 * DOF containing the run-time options -- but this could be expanded to create 11865 * complete DOF representing the enabled state. 11866 */ 11867static dof_hdr_t * 11868dtrace_dof_create(dtrace_state_t *state) 11869{ 11870 dof_hdr_t *dof; 11871 dof_sec_t *sec; 11872 dof_optdesc_t *opt; 11873 int i, len = sizeof (dof_hdr_t) + 11874 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 11875 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11876 11877 ASSERT(MUTEX_HELD(&dtrace_lock)); 11878 11879 dof = kmem_zalloc(len, KM_SLEEP); 11880 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 11881 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 11882 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 11883 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 11884 11885 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 11886 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 11887 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 11888 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 11889 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 11890 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 11891 11892 dof->dofh_flags = 0; 11893 dof->dofh_hdrsize = sizeof (dof_hdr_t); 11894 dof->dofh_secsize = sizeof (dof_sec_t); 11895 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 11896 dof->dofh_secoff = sizeof (dof_hdr_t); 11897 dof->dofh_loadsz = len; 11898 dof->dofh_filesz = len; 11899 dof->dofh_pad = 0; 11900 11901 /* 11902 * Fill in the option section header... 
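 *
 * For reference, the resulting image is laid out contiguously, per the
 * length arithmetic above:
 *
 *	+--------------------------------+ offset 0
 *	| dof_hdr_t                      |
 *	+--------------------------------+ dofh_secoff
 *	| dof_sec_t (DOF_SECT_OPTDESC)   | padded to a uint64_t boundary
 *	+--------------------------------+ sec->dofs_offset
 *	| dof_optdesc_t x DTRACEOPT_MAX  |
 *	+--------------------------------+ dofh_loadsz == dofh_filesz == len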
11903 */ 11904 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 11905 sec->dofs_type = DOF_SECT_OPTDESC; 11906 sec->dofs_align = sizeof (uint64_t); 11907 sec->dofs_flags = DOF_SECF_LOAD; 11908 sec->dofs_entsize = sizeof (dof_optdesc_t); 11909 11910 opt = (dof_optdesc_t *)((uintptr_t)sec + 11911 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 11912 11913 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 11914 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11915 11916 for (i = 0; i < DTRACEOPT_MAX; i++) { 11917 opt[i].dofo_option = i; 11918 opt[i].dofo_strtab = DOF_SECIDX_NONE; 11919 opt[i].dofo_value = state->dts_options[i]; 11920 } 11921 11922 return (dof); 11923} 11924 11925static dof_hdr_t * 11926dtrace_dof_copyin(uintptr_t uarg, int *errp) 11927{ 11928 dof_hdr_t hdr, *dof; 11929 11930 ASSERT(!MUTEX_HELD(&dtrace_lock)); 11931 11932 /* 11933 * First, we're going to copyin() the sizeof (dof_hdr_t). 11934 */ 11935 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 11936 dtrace_dof_error(NULL, "failed to copyin DOF header"); 11937 *errp = EFAULT; 11938 return (NULL); 11939 } 11940 11941 /* 11942 * Now we'll allocate the entire DOF and copy it in -- provided 11943 * that the length isn't outrageous. 11944 */ 11945 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 11946 dtrace_dof_error(&hdr, "load size exceeds maximum"); 11947 *errp = E2BIG; 11948 return (NULL); 11949 } 11950 11951 if (hdr.dofh_loadsz < sizeof (hdr)) { 11952 dtrace_dof_error(&hdr, "invalid load size"); 11953 *errp = EINVAL; 11954 return (NULL); 11955 } 11956 11957 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 11958 11959 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 11960 kmem_free(dof, hdr.dofh_loadsz); 11961 *errp = EFAULT; 11962 return (NULL); 11963 } 11964 11965 return (dof); 11966} 11967 11968#if !defined(sun) 11969static __inline uchar_t 11970dtrace_dof_char(char c) { 11971 switch (c) { 11972 case '0': 11973 case '1': 11974 case '2': 11975 case '3': 11976 case '4': 11977 case '5': 11978 case '6': 11979 case '7': 11980 case '8': 11981 case '9': 11982 return (c - '0'); 11983 case 'A': 11984 case 'B': 11985 case 'C': 11986 case 'D': 11987 case 'E': 11988 case 'F': 11989 return (c - 'A' + 10); 11990 case 'a': 11991 case 'b': 11992 case 'c': 11993 case 'd': 11994 case 'e': 11995 case 'f': 11996 return (c - 'a' + 10); 11997 } 11998 /* Should not reach here. */ 11999 return (0); 12000} 12001#endif 12002 12003static dof_hdr_t * 12004dtrace_dof_property(const char *name) 12005{ 12006 uchar_t *buf; 12007 uint64_t loadsz; 12008 unsigned int len, i; 12009 dof_hdr_t *dof; 12010 12011#if defined(sun) 12012 /* 12013 * Unfortunately, arrays of values in .conf files are always (and 12014 * only) interpreted to be integer arrays. We must read our DOF 12015 * as an integer array, and then squeeze it into a byte array. 
12016 */ 12017 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 12018 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 12019 return (NULL); 12020 12021 for (i = 0; i < len; i++) 12022 buf[i] = (uchar_t)(((int *)buf)[i]); 12023 12024 if (len < sizeof (dof_hdr_t)) { 12025 ddi_prop_free(buf); 12026 dtrace_dof_error(NULL, "truncated header"); 12027 return (NULL); 12028 } 12029 12030 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 12031 ddi_prop_free(buf); 12032 dtrace_dof_error(NULL, "truncated DOF"); 12033 return (NULL); 12034 } 12035 12036 if (loadsz >= dtrace_dof_maxsize) { 12037 ddi_prop_free(buf); 12038 dtrace_dof_error(NULL, "oversized DOF"); 12039 return (NULL); 12040 } 12041 12042 dof = kmem_alloc(loadsz, KM_SLEEP); 12043 bcopy(buf, dof, loadsz); 12044 ddi_prop_free(buf); 12045#else 12046 char *p; 12047 char *p_env; 12048 12049 if ((p_env = getenv(name)) == NULL) 12050 return (NULL); 12051 12052 len = strlen(p_env) / 2; 12053 12054 buf = kmem_alloc(len, KM_SLEEP); 12055 12056 dof = (dof_hdr_t *) buf; 12057 12058 p = p_env; 12059 12060 for (i = 0; i < len; i++) { 12061 buf[i] = (dtrace_dof_char(p[0]) << 4) | 12062 dtrace_dof_char(p[1]); 12063 p += 2; 12064 } 12065 12066 freeenv(p_env); 12067 12068 if (len < sizeof (dof_hdr_t)) { 12069 kmem_free(buf, 0); 12070 dtrace_dof_error(NULL, "truncated header"); 12071 return (NULL); 12072 } 12073 12074 if (len < (loadsz = dof->dofh_loadsz)) { 12075 kmem_free(buf, 0); 12076 dtrace_dof_error(NULL, "truncated DOF"); 12077 return (NULL); 12078 } 12079 12080 if (loadsz >= dtrace_dof_maxsize) { 12081 kmem_free(buf, 0); 12082 dtrace_dof_error(NULL, "oversized DOF"); 12083 return (NULL); 12084 } 12085#endif 12086 12087 return (dof); 12088} 12089 12090static void 12091dtrace_dof_destroy(dof_hdr_t *dof) 12092{ 12093 kmem_free(dof, dof->dofh_loadsz); 12094} 12095 12096/* 12097 * Return the dof_sec_t pointer corresponding to a given section index. If the 12098 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 12099 * a type other than DOF_SECT_NONE is specified, the header is checked against 12100 * this type and NULL is returned if the types do not match. 
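 *
 * Callers typically chain these lookups and simply propagate NULL,
 * e.g. (as in dtrace_dof_probedesc() below):
 *
 *	strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
 *	if (strtab == NULL)
 *		return (NULL);	-- dtrace_dof_error() was already called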
12101 */ 12102static dof_sec_t * 12103dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 12104{ 12105 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 12106 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 12107 12108 if (i >= dof->dofh_secnum) { 12109 dtrace_dof_error(dof, "referenced section index is invalid"); 12110 return (NULL); 12111 } 12112 12113 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 12114 dtrace_dof_error(dof, "referenced section is not loadable"); 12115 return (NULL); 12116 } 12117 12118 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 12119 dtrace_dof_error(dof, "referenced section is the wrong type"); 12120 return (NULL); 12121 } 12122 12123 return (sec); 12124} 12125 12126static dtrace_probedesc_t * 12127dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 12128{ 12129 dof_probedesc_t *probe; 12130 dof_sec_t *strtab; 12131 uintptr_t daddr = (uintptr_t)dof; 12132 uintptr_t str; 12133 size_t size; 12134 12135 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 12136 dtrace_dof_error(dof, "invalid probe section"); 12137 return (NULL); 12138 } 12139 12140 if (sec->dofs_align != sizeof (dof_secidx_t)) { 12141 dtrace_dof_error(dof, "bad alignment in probe description"); 12142 return (NULL); 12143 } 12144 12145 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 12146 dtrace_dof_error(dof, "truncated probe description"); 12147 return (NULL); 12148 } 12149 12150 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 12151 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 12152 12153 if (strtab == NULL) 12154 return (NULL); 12155 12156 str = daddr + strtab->dofs_offset; 12157 size = strtab->dofs_size; 12158 12159 if (probe->dofp_provider >= strtab->dofs_size) { 12160 dtrace_dof_error(dof, "corrupt probe provider"); 12161 return (NULL); 12162 } 12163 12164 (void) strncpy(desc->dtpd_provider, 12165 (char *)(str + probe->dofp_provider), 12166 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 12167 12168 if (probe->dofp_mod >= strtab->dofs_size) { 12169 dtrace_dof_error(dof, "corrupt probe module"); 12170 return (NULL); 12171 } 12172 12173 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 12174 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 12175 12176 if (probe->dofp_func >= strtab->dofs_size) { 12177 dtrace_dof_error(dof, "corrupt probe function"); 12178 return (NULL); 12179 } 12180 12181 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 12182 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 12183 12184 if (probe->dofp_name >= strtab->dofs_size) { 12185 dtrace_dof_error(dof, "corrupt probe name"); 12186 return (NULL); 12187 } 12188 12189 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 12190 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 12191 12192 return (desc); 12193} 12194 12195static dtrace_difo_t * 12196dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12197 cred_t *cr) 12198{ 12199 dtrace_difo_t *dp; 12200 size_t ttl = 0; 12201 dof_difohdr_t *dofd; 12202 uintptr_t daddr = (uintptr_t)dof; 12203 size_t max = dtrace_difo_maxsize; 12204 int i, l, n; 12205 12206 static const struct { 12207 int section; 12208 int bufoffs; 12209 int lenoffs; 12210 int entsize; 12211 int align; 12212 const char *msg; 12213 } difo[] = { 12214 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 12215 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 12216 sizeof (dif_instr_t), "multiple DIF sections" }, 12217 12218 { 
DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 12219 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 12220 sizeof (uint64_t), "multiple integer tables" }, 12221 12222 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 12223 offsetof(dtrace_difo_t, dtdo_strlen), 0, 12224 sizeof (char), "multiple string tables" }, 12225 12226 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 12227 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 12228 sizeof (uint_t), "multiple variable tables" }, 12229 12230 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 12231 }; 12232 12233 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 12234 dtrace_dof_error(dof, "invalid DIFO header section"); 12235 return (NULL); 12236 } 12237 12238 if (sec->dofs_align != sizeof (dof_secidx_t)) { 12239 dtrace_dof_error(dof, "bad alignment in DIFO header"); 12240 return (NULL); 12241 } 12242 12243 if (sec->dofs_size < sizeof (dof_difohdr_t) || 12244 sec->dofs_size % sizeof (dof_secidx_t)) { 12245 dtrace_dof_error(dof, "bad size in DIFO header"); 12246 return (NULL); 12247 } 12248 12249 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12250 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 12251 12252 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 12253 dp->dtdo_rtype = dofd->dofd_rtype; 12254 12255 for (l = 0; l < n; l++) { 12256 dof_sec_t *subsec; 12257 void **bufp; 12258 uint32_t *lenp; 12259 12260 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 12261 dofd->dofd_links[l])) == NULL) 12262 goto err; /* invalid section link */ 12263 12264 if (ttl + subsec->dofs_size > max) { 12265 dtrace_dof_error(dof, "exceeds maximum size"); 12266 goto err; 12267 } 12268 12269 ttl += subsec->dofs_size; 12270 12271 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 12272 if (subsec->dofs_type != difo[i].section) 12273 continue; 12274 12275 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 12276 dtrace_dof_error(dof, "section not loaded"); 12277 goto err; 12278 } 12279 12280 if (subsec->dofs_align != difo[i].align) { 12281 dtrace_dof_error(dof, "bad alignment"); 12282 goto err; 12283 } 12284 12285 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 12286 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 12287 12288 if (*bufp != NULL) { 12289 dtrace_dof_error(dof, difo[i].msg); 12290 goto err; 12291 } 12292 12293 if (difo[i].entsize != subsec->dofs_entsize) { 12294 dtrace_dof_error(dof, "entry size mismatch"); 12295 goto err; 12296 } 12297 12298 if (subsec->dofs_entsize != 0 && 12299 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 12300 dtrace_dof_error(dof, "corrupt entry size"); 12301 goto err; 12302 } 12303 12304 *lenp = subsec->dofs_size; 12305 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 12306 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 12307 *bufp, subsec->dofs_size); 12308 12309 if (subsec->dofs_entsize != 0) 12310 *lenp /= subsec->dofs_entsize; 12311 12312 break; 12313 } 12314 12315 /* 12316 * If we encounter a loadable DIFO sub-section that is not 12317 * known to us, assume this is a broken program and fail. 12318 */ 12319 if (difo[i].section == DOF_SECT_NONE && 12320 (subsec->dofs_flags & DOF_SECF_LOAD)) { 12321 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 12322 goto err; 12323 } 12324 } 12325 12326 if (dp->dtdo_buf == NULL) { 12327 /* 12328 * We can't have a DIF object without DIF text. 
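 *
 * (That is: of the subsections enumerated in the table above, only
 * DOF_SECT_DIF is mandatory -- a DIFO with no instructions is
 * meaningless, whereas empty integer, string and variable tables are
 * routine.)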
12329 */ 12330 dtrace_dof_error(dof, "missing DIF text"); 12331 goto err; 12332 } 12333 12334 /* 12335 * Before we validate the DIF object, run through the variable table 12336 * looking for the strings -- if any of their sizes are under, we'll set 12337 * their size to be the system-wide default string size. Note that 12338 * this should _not_ happen if the "strsize" option has been set -- 12339 * in this case, the compiler should have set the size to reflect the 12340 * setting of the option. 12341 */ 12342 for (i = 0; i < dp->dtdo_varlen; i++) { 12343 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 12344 dtrace_diftype_t *t = &v->dtdv_type; 12345 12346 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 12347 continue; 12348 12349 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 12350 t->dtdt_size = dtrace_strsize_default; 12351 } 12352 12353 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 12354 goto err; 12355 12356 dtrace_difo_init(dp, vstate); 12357 return (dp); 12358 12359err: 12360 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 12361 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 12362 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 12363 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 12364 12365 kmem_free(dp, sizeof (dtrace_difo_t)); 12366 return (NULL); 12367} 12368 12369static dtrace_predicate_t * 12370dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12371 cred_t *cr) 12372{ 12373 dtrace_difo_t *dp; 12374 12375 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 12376 return (NULL); 12377 12378 return (dtrace_predicate_create(dp)); 12379} 12380 12381static dtrace_actdesc_t * 12382dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12383 cred_t *cr) 12384{ 12385 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 12386 dof_actdesc_t *desc; 12387 dof_sec_t *difosec; 12388 size_t offs; 12389 uintptr_t daddr = (uintptr_t)dof; 12390 uint64_t arg; 12391 dtrace_actkind_t kind; 12392 12393 if (sec->dofs_type != DOF_SECT_ACTDESC) { 12394 dtrace_dof_error(dof, "invalid action section"); 12395 return (NULL); 12396 } 12397 12398 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 12399 dtrace_dof_error(dof, "truncated action description"); 12400 return (NULL); 12401 } 12402 12403 if (sec->dofs_align != sizeof (uint64_t)) { 12404 dtrace_dof_error(dof, "bad alignment in action description"); 12405 return (NULL); 12406 } 12407 12408 if (sec->dofs_size < sec->dofs_entsize) { 12409 dtrace_dof_error(dof, "section entry size exceeds total size"); 12410 return (NULL); 12411 } 12412 12413 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 12414 dtrace_dof_error(dof, "bad entry size in action description"); 12415 return (NULL); 12416 } 12417 12418 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 12419 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 12420 return (NULL); 12421 } 12422 12423 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 12424 desc = (dof_actdesc_t *)(daddr + 12425 (uintptr_t)sec->dofs_offset + offs); 12426 kind = (dtrace_actkind_t)desc->dofa_kind; 12427 12428 if ((DTRACEACT_ISPRINTFLIKE(kind) && 12429 (kind != DTRACEACT_PRINTA || 12430 desc->dofa_strtab != DOF_SECIDX_NONE)) || 12431 (kind == DTRACEACT_DIFEXPR && 12432 desc->dofa_strtab != DOF_SECIDX_NONE)) { 12433 dof_sec_t *strtab; 12434 char *str, *fmt; 12435 uint64_t i; 12436 12437 /* 12438 * The argument to these actions is an index into the 12439 * DOF 
string table. For printf()-like actions, this 12440 * is the format string. For print(), this is the 12441 * CTF type of the expression result. 12442 */ 12443 if ((strtab = dtrace_dof_sect(dof, 12444 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 12445 goto err; 12446 12447 str = (char *)((uintptr_t)dof + 12448 (uintptr_t)strtab->dofs_offset); 12449 12450 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 12451 if (str[i] == '\0') 12452 break; 12453 } 12454 12455 if (i >= strtab->dofs_size) { 12456 dtrace_dof_error(dof, "bogus format string"); 12457 goto err; 12458 } 12459 12460 if (i == desc->dofa_arg) { 12461 dtrace_dof_error(dof, "empty format string"); 12462 goto err; 12463 } 12464 12465 i -= desc->dofa_arg; 12466 fmt = kmem_alloc(i + 1, KM_SLEEP); 12467 bcopy(&str[desc->dofa_arg], fmt, i + 1); 12468 arg = (uint64_t)(uintptr_t)fmt; 12469 } else { 12470 if (kind == DTRACEACT_PRINTA) { 12471 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 12472 arg = 0; 12473 } else { 12474 arg = desc->dofa_arg; 12475 } 12476 } 12477 12478 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 12479 desc->dofa_uarg, arg); 12480 12481 if (last != NULL) { 12482 last->dtad_next = act; 12483 } else { 12484 first = act; 12485 } 12486 12487 last = act; 12488 12489 if (desc->dofa_difo == DOF_SECIDX_NONE) 12490 continue; 12491 12492 if ((difosec = dtrace_dof_sect(dof, 12493 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 12494 goto err; 12495 12496 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 12497 12498 if (act->dtad_difo == NULL) 12499 goto err; 12500 } 12501 12502 ASSERT(first != NULL); 12503 return (first); 12504 12505err: 12506 for (act = first; act != NULL; act = next) { 12507 next = act->dtad_next; 12508 dtrace_actdesc_release(act, vstate); 12509 } 12510 12511 return (NULL); 12512} 12513 12514static dtrace_ecbdesc_t * 12515dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12516 cred_t *cr) 12517{ 12518 dtrace_ecbdesc_t *ep; 12519 dof_ecbdesc_t *ecb; 12520 dtrace_probedesc_t *desc; 12521 dtrace_predicate_t *pred = NULL; 12522 12523 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 12524 dtrace_dof_error(dof, "truncated ECB description"); 12525 return (NULL); 12526 } 12527 12528 if (sec->dofs_align != sizeof (uint64_t)) { 12529 dtrace_dof_error(dof, "bad alignment in ECB description"); 12530 return (NULL); 12531 } 12532 12533 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 12534 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 12535 12536 if (sec == NULL) 12537 return (NULL); 12538 12539 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12540 ep->dted_uarg = ecb->dofe_uarg; 12541 desc = &ep->dted_probe; 12542 12543 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 12544 goto err; 12545 12546 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 12547 if ((sec = dtrace_dof_sect(dof, 12548 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 12549 goto err; 12550 12551 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 12552 goto err; 12553 12554 ep->dted_pred.dtpdd_predicate = pred; 12555 } 12556 12557 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 12558 if ((sec = dtrace_dof_sect(dof, 12559 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 12560 goto err; 12561 12562 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 12563 12564 if (ep->dted_action == NULL) 12565 goto err; 12566 } 12567 12568 return (ep); 12569 12570err: 12571 if (pred != NULL) 12572 dtrace_predicate_release(pred, vstate); 12573 kmem_free(ep, sizeof 
(dtrace_ecbdesc_t)); 12574 return (NULL); 12575} 12576 12577/* 12578 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 12579 * specified DOF. At present, this amounts to simply adding 'ubase' to the 12580 * site of any user SETX relocations to account for load object base address. 12581 * In the future, if we need other relocations, this function can be extended. 12582 */ 12583static int 12584dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 12585{ 12586 uintptr_t daddr = (uintptr_t)dof; 12587 dof_relohdr_t *dofr = 12588 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12589 dof_sec_t *ss, *rs, *ts; 12590 dof_relodesc_t *r; 12591 uint_t i, n; 12592 12593 if (sec->dofs_size < sizeof (dof_relohdr_t) || 12594 sec->dofs_align != sizeof (dof_secidx_t)) { 12595 dtrace_dof_error(dof, "invalid relocation header"); 12596 return (-1); 12597 } 12598 12599 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 12600 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 12601 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 12602 12603 if (ss == NULL || rs == NULL || ts == NULL) 12604 return (-1); /* dtrace_dof_error() has been called already */ 12605 12606 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 12607 rs->dofs_align != sizeof (uint64_t)) { 12608 dtrace_dof_error(dof, "invalid relocation section"); 12609 return (-1); 12610 } 12611 12612 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 12613 n = rs->dofs_size / rs->dofs_entsize; 12614 12615 for (i = 0; i < n; i++) { 12616 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 12617 12618 switch (r->dofr_type) { 12619 case DOF_RELO_NONE: 12620 break; 12621 case DOF_RELO_SETX: 12622 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 12623 sizeof (uint64_t) > ts->dofs_size) { 12624 dtrace_dof_error(dof, "bad relocation offset"); 12625 return (-1); 12626 } 12627 12628 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 12629 dtrace_dof_error(dof, "misaligned setx relo"); 12630 return (-1); 12631 } 12632 12633 *(uint64_t *)taddr += ubase; 12634 break; 12635 default: 12636 dtrace_dof_error(dof, "invalid relocation type"); 12637 return (-1); 12638 } 12639 12640 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 12641 } 12642 12643 return (0); 12644} 12645 12646/* 12647 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 12648 * header: it should be at the front of a memory region that is at least 12649 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 12650 * size. It need not be validated in any other way. 12651 */ 12652static int 12653dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 12654 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 12655{ 12656 uint64_t len = dof->dofh_loadsz, seclen; 12657 uintptr_t daddr = (uintptr_t)dof; 12658 dtrace_ecbdesc_t *ep; 12659 dtrace_enabling_t *enab; 12660 uint_t i; 12661 12662 ASSERT(MUTEX_HELD(&dtrace_lock)); 12663 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 12664 12665 /* 12666 * Check the DOF header identification bytes. In addition to checking 12667 * valid settings, we also verify that unused bits/bytes are zeroed so 12668 * we can use them later without fear of regressing existing binaries. 
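 * (In outline, the checks that follow: the magic bytes, the data model
 * (ILP32 or LP64), the byte encoding, the DOF and DIF versions, the
 * integer and tuple register counts, zeroed pad bytes, the flag word
 * against DOF_FL_VALID, and a non-zero section header size. Any
 * mismatch fails the load with a specific dtrace_dof_error() message.)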
12669 */ 12670 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 12671 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 12672 dtrace_dof_error(dof, "DOF magic string mismatch"); 12673 return (-1); 12674 } 12675 12676 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 12677 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 12678 dtrace_dof_error(dof, "DOF has invalid data model"); 12679 return (-1); 12680 } 12681 12682 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 12683 dtrace_dof_error(dof, "DOF encoding mismatch"); 12684 return (-1); 12685 } 12686 12687 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 12688 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 12689 dtrace_dof_error(dof, "DOF version mismatch"); 12690 return (-1); 12691 } 12692 12693 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 12694 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 12695 return (-1); 12696 } 12697 12698 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 12699 dtrace_dof_error(dof, "DOF uses too many integer registers"); 12700 return (-1); 12701 } 12702 12703 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 12704 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 12705 return (-1); 12706 } 12707 12708 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 12709 if (dof->dofh_ident[i] != 0) { 12710 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 12711 return (-1); 12712 } 12713 } 12714 12715 if (dof->dofh_flags & ~DOF_FL_VALID) { 12716 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 12717 return (-1); 12718 } 12719 12720 if (dof->dofh_secsize == 0) { 12721 dtrace_dof_error(dof, "zero section header size"); 12722 return (-1); 12723 } 12724 12725 /* 12726 * Check that the section headers don't exceed the amount of DOF 12727 * data. Note that we cast the section size and number of sections 12728 * to uint64_t's to prevent possible overflow in the multiplication. 12729 */ 12730 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 12731 12732 if (dof->dofh_secoff > len || seclen > len || 12733 dof->dofh_secoff + seclen > len) { 12734 dtrace_dof_error(dof, "truncated section headers"); 12735 return (-1); 12736 } 12737 12738 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 12739 dtrace_dof_error(dof, "misaligned section headers"); 12740 return (-1); 12741 } 12742 12743 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 12744 dtrace_dof_error(dof, "misaligned section size"); 12745 return (-1); 12746 } 12747 12748 /* 12749 * Take an initial pass through the section headers to be sure that 12750 * the headers don't have stray offsets. If the 'noprobes' flag is 12751 * set, do not permit sections relating to providers, probes, or args. 
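 * Concretely, every loadable section below must satisfy, with all
 * quantities unsigned: dofs_offset <= len, dofs_size <= len and
 * dofs_offset + dofs_size <= len; testing each term as well as the sum
 * keeps a wrapped addition from slipping past the bounds check. String
 * table sections must additionally be NUL-terminated.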
12752 */ 12753 for (i = 0; i < dof->dofh_secnum; i++) { 12754 dof_sec_t *sec = (dof_sec_t *)(daddr + 12755 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12756 12757 if (noprobes) { 12758 switch (sec->dofs_type) { 12759 case DOF_SECT_PROVIDER: 12760 case DOF_SECT_PROBES: 12761 case DOF_SECT_PRARGS: 12762 case DOF_SECT_PROFFS: 12763 dtrace_dof_error(dof, "illegal sections " 12764 "for enabling"); 12765 return (-1); 12766 } 12767 } 12768 12769 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12770 continue; /* just ignore non-loadable sections */ 12771 12772 if (sec->dofs_align & (sec->dofs_align - 1)) { 12773 dtrace_dof_error(dof, "bad section alignment"); 12774 return (-1); 12775 } 12776 12777 if (sec->dofs_offset & (sec->dofs_align - 1)) { 12778 dtrace_dof_error(dof, "misaligned section"); 12779 return (-1); 12780 } 12781 12782 if (sec->dofs_offset > len || sec->dofs_size > len || 12783 sec->dofs_offset + sec->dofs_size > len) { 12784 dtrace_dof_error(dof, "corrupt section header"); 12785 return (-1); 12786 } 12787 12788 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 12789 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 12790 dtrace_dof_error(dof, "non-terminating string table"); 12791 return (-1); 12792 } 12793 } 12794 12795 /* 12796 * Take a second pass through the sections and locate and perform any 12797 * relocations that are present. We do this after the first pass to 12798 * be sure that all sections have had their headers validated. 12799 */ 12800 for (i = 0; i < dof->dofh_secnum; i++) { 12801 dof_sec_t *sec = (dof_sec_t *)(daddr + 12802 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12803 12804 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12805 continue; /* skip sections that are not loadable */ 12806 12807 switch (sec->dofs_type) { 12808 case DOF_SECT_URELHDR: 12809 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 12810 return (-1); 12811 break; 12812 } 12813 } 12814 12815 if ((enab = *enabp) == NULL) 12816 enab = *enabp = dtrace_enabling_create(vstate); 12817 12818 for (i = 0; i < dof->dofh_secnum; i++) { 12819 dof_sec_t *sec = (dof_sec_t *)(daddr + 12820 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12821 12822 if (sec->dofs_type != DOF_SECT_ECBDESC) 12823 continue; 12824 12825 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 12826 dtrace_enabling_destroy(enab); 12827 *enabp = NULL; 12828 return (-1); 12829 } 12830 12831 dtrace_enabling_add(enab, ep); 12832 } 12833 12834 return (0); 12835} 12836 12837/* 12838 * Process DOF for any options. This routine assumes that the DOF has been 12839 * at least processed by dtrace_dof_slurp(). 
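 * Each DOF_SECT_OPTDESC section is an array of dof_optdesc_t records;
 * for every record we verify the section alignment and entry size,
 * that dofo_strtab is DOF_SECIDX_NONE and that dofo_value is not
 * DTRACEOPT_UNSET before handing the (dofo_option, dofo_value) pair
 * to dtrace_state_option().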
12840 */ 12841static int 12842dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 12843{ 12844 int i, rval; 12845 uint32_t entsize; 12846 size_t offs; 12847 dof_optdesc_t *desc; 12848 12849 for (i = 0; i < dof->dofh_secnum; i++) { 12850 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 12851 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12852 12853 if (sec->dofs_type != DOF_SECT_OPTDESC) 12854 continue; 12855 12856 if (sec->dofs_align != sizeof (uint64_t)) { 12857 dtrace_dof_error(dof, "bad alignment in " 12858 "option description"); 12859 return (EINVAL); 12860 } 12861 12862 if ((entsize = sec->dofs_entsize) == 0) { 12863 dtrace_dof_error(dof, "zeroed option entry size"); 12864 return (EINVAL); 12865 } 12866 12867 if (entsize < sizeof (dof_optdesc_t)) { 12868 dtrace_dof_error(dof, "bad option entry size"); 12869 return (EINVAL); 12870 } 12871 12872 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 12873 desc = (dof_optdesc_t *)((uintptr_t)dof + 12874 (uintptr_t)sec->dofs_offset + offs); 12875 12876 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 12877 dtrace_dof_error(dof, "non-zero option string"); 12878 return (EINVAL); 12879 } 12880 12881 if (desc->dofo_value == DTRACEOPT_UNSET) { 12882 dtrace_dof_error(dof, "unset option"); 12883 return (EINVAL); 12884 } 12885 12886 if ((rval = dtrace_state_option(state, 12887 desc->dofo_option, desc->dofo_value)) != 0) { 12888 dtrace_dof_error(dof, "rejected option"); 12889 return (rval); 12890 } 12891 } 12892 } 12893 12894 return (0); 12895} 12896 12897/* 12898 * DTrace Consumer State Functions 12899 */ 12900static int 12901dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 12902{ 12903 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 12904 void *base; 12905 uintptr_t limit; 12906 dtrace_dynvar_t *dvar, *next, *start; 12907 int i; 12908 12909 ASSERT(MUTEX_HELD(&dtrace_lock)); 12910 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 12911 12912 bzero(dstate, sizeof (dtrace_dstate_t)); 12913 12914 if ((dstate->dtds_chunksize = chunksize) == 0) 12915 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 12916 12917 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 12918 size = min; 12919 12920 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 12921 return (ENOMEM); 12922 12923 dstate->dtds_size = size; 12924 dstate->dtds_base = base; 12925 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 12926 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 12927 12928 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 12929 12930 if (hashsize != 1 && (hashsize & 1)) 12931 hashsize--; 12932 12933 dstate->dtds_hashsize = hashsize; 12934 dstate->dtds_hash = dstate->dtds_base; 12935 12936 /* 12937 * Set all of our hash buckets to point to the single sink, and (if 12938 * it hasn't already been set), set the sink's hash value to be the 12939 * sink sentinel value. The sink is needed for dynamic variable 12940 * lookups to know that they have iterated over an entire, valid hash 12941 * chain. 12942 */ 12943 for (i = 0; i < hashsize; i++) 12944 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 12945 12946 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 12947 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 12948 12949 /* 12950 * Determine number of active CPUs. Divide free list evenly among 12951 * active CPUs. 
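 * As an illustrative example (the numbers are not from the source):
 * with 1 MB of space left after the hash table, four CPUs and a
 * 256-byte chunk size, maxper below comes to 262144 bytes per CPU
 * (rounded down to a chunk multiple); the last CPU also absorbs any
 * remainder, and if maxper rounds down to zero, the first CPU simply
 * receives the entire free list.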
12952 */ 12953 start = (dtrace_dynvar_t *) 12954 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 12955 limit = (uintptr_t)base + size; 12956 12957 maxper = (limit - (uintptr_t)start) / NCPU; 12958 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 12959 12960#if !defined(sun) 12961 CPU_FOREACH(i) { 12962#else 12963 for (i = 0; i < NCPU; i++) { 12964#endif 12965 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 12966 12967 /* 12968 * If we don't even have enough chunks to make it once through 12969 * NCPUs, we're just going to allocate everything to the first 12970 * CPU. And if we're on the last CPU, we're going to allocate 12971 * whatever is left over. In either case, we set the limit to 12972 * be the limit of the dynamic variable space. 12973 */ 12974 if (maxper == 0 || i == NCPU - 1) { 12975 limit = (uintptr_t)base + size; 12976 start = NULL; 12977 } else { 12978 limit = (uintptr_t)start + maxper; 12979 start = (dtrace_dynvar_t *)limit; 12980 } 12981 12982 ASSERT(limit <= (uintptr_t)base + size); 12983 12984 for (;;) { 12985 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 12986 dstate->dtds_chunksize); 12987 12988 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 12989 break; 12990 12991 dvar->dtdv_next = next; 12992 dvar = next; 12993 } 12994 12995 if (maxper == 0) 12996 break; 12997 } 12998 12999 return (0); 13000} 13001 13002static void 13003dtrace_dstate_fini(dtrace_dstate_t *dstate) 13004{ 13005 ASSERT(MUTEX_HELD(&cpu_lock)); 13006 13007 if (dstate->dtds_base == NULL) 13008 return; 13009 13010 kmem_free(dstate->dtds_base, dstate->dtds_size); 13011 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 13012} 13013 13014static void 13015dtrace_vstate_fini(dtrace_vstate_t *vstate) 13016{ 13017 /* 13018 * Logical XOR, where are you? 13019 */ 13020 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 13021 13022 if (vstate->dtvs_nglobals > 0) { 13023 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 13024 sizeof (dtrace_statvar_t *)); 13025 } 13026 13027 if (vstate->dtvs_ntlocals > 0) { 13028 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 13029 sizeof (dtrace_difv_t)); 13030 } 13031 13032 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 13033 13034 if (vstate->dtvs_nlocals > 0) { 13035 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 13036 sizeof (dtrace_statvar_t *)); 13037 } 13038} 13039 13040#if defined(sun) 13041static void 13042dtrace_state_clean(dtrace_state_t *state) 13043{ 13044 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 13045 return; 13046 13047 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 13048 dtrace_speculation_clean(state); 13049} 13050 13051static void 13052dtrace_state_deadman(dtrace_state_t *state) 13053{ 13054 hrtime_t now; 13055 13056 dtrace_sync(); 13057 13058 now = dtrace_gethrtime(); 13059 13060 if (state != dtrace_anon.dta_state && 13061 now - state->dts_laststatus >= dtrace_deadman_user) 13062 return; 13063 13064 /* 13065 * We must be sure that dts_alive never appears to be less than the 13066 * value upon entry to dtrace_state_deadman(), and because we lack a 13067 * dtrace_cas64(), we cannot store to it atomically. We thus instead 13068 * store INT64_MAX to it, followed by a memory barrier, followed by 13069 * the new value. This assures that dts_alive never appears to be 13070 * less than its true value, regardless of the order in which the 13071 * stores to the underlying storage are issued. 
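 * Concretely: a reader that catches the update mid-flight sees either
 * half of INT64_MAX combined with half of the old or new value, and
 * each such combination compares at least as large as the true value
 * -- so a torn read can never make the deadman fire spuriously.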
13072 */ 13073 state->dts_alive = INT64_MAX; 13074 dtrace_membar_producer(); 13075 state->dts_alive = now; 13076} 13077#else 13078static void 13079dtrace_state_clean(void *arg) 13080{ 13081 dtrace_state_t *state = arg; 13082 dtrace_optval_t *opt = state->dts_options; 13083 13084 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 13085 return; 13086 13087 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 13088 dtrace_speculation_clean(state); 13089 13090 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13091 dtrace_state_clean, state); 13092} 13093 13094static void 13095dtrace_state_deadman(void *arg) 13096{ 13097 dtrace_state_t *state = arg; 13098 hrtime_t now; 13099 13100 dtrace_sync(); 13101 13102 dtrace_debug_output(); 13103 13104 now = dtrace_gethrtime(); 13105 13106 if (state != dtrace_anon.dta_state && 13107 now - state->dts_laststatus >= dtrace_deadman_user) 13108 return; 13109 13110 /* 13111 * We must be sure that dts_alive never appears to be less than the 13112 * value upon entry to dtrace_state_deadman(), and because we lack a 13113 * dtrace_cas64(), we cannot store to it atomically. We thus instead 13114 * store INT64_MAX to it, followed by a memory barrier, followed by 13115 * the new value. This assures that dts_alive never appears to be 13116 * less than its true value, regardless of the order in which the 13117 * stores to the underlying storage are issued. 13118 */ 13119 state->dts_alive = INT64_MAX; 13120 dtrace_membar_producer(); 13121 state->dts_alive = now; 13122 13123 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13124 dtrace_state_deadman, state); 13125} 13126#endif 13127 13128static dtrace_state_t * 13129#if defined(sun) 13130dtrace_state_create(dev_t *devp, cred_t *cr) 13131#else 13132dtrace_state_create(struct cdev *dev) 13133#endif 13134{ 13135#if defined(sun) 13136 minor_t minor; 13137 major_t major; 13138#else 13139 cred_t *cr = NULL; 13140 int m = 0; 13141#endif 13142 char c[30]; 13143 dtrace_state_t *state; 13144 dtrace_optval_t *opt; 13145 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 13146 13147 ASSERT(MUTEX_HELD(&dtrace_lock)); 13148 ASSERT(MUTEX_HELD(&cpu_lock)); 13149 13150#if defined(sun) 13151 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 13152 VM_BESTFIT | VM_SLEEP); 13153 13154 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 13155 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13156 return (NULL); 13157 } 13158 13159 state = ddi_get_soft_state(dtrace_softstate, minor); 13160#else 13161 if (dev != NULL) { 13162 cr = dev->si_cred; 13163 m = dev2unit(dev); 13164 } 13165 13166 /* Allocate memory for the state. */ 13167 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 13168#endif 13169 13170 state->dts_epid = DTRACE_EPIDNONE + 1; 13171 13172 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 13173#if defined(sun) 13174 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 13175 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 13176 13177 if (devp != NULL) { 13178 major = getemajor(*devp); 13179 } else { 13180 major = ddi_driver_major(dtrace_devi); 13181 } 13182 13183 state->dts_dev = makedevice(major, minor); 13184 13185 if (devp != NULL) 13186 *devp = state->dts_dev; 13187#else 13188 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 13189 state->dts_dev = dev; 13190#endif 13191 13192 /* 13193 * We allocate NCPU buffers. 
On the one hand, this can be quite 13194 * a bit of memory per instance (nearly 36K on a Starcat). On the 13195 * other hand, it saves an additional memory reference in the probe 13196 * path. 13197 */ 13198 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 13199 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 13200 13201#if defined(sun) 13202 state->dts_cleaner = CYCLIC_NONE; 13203 state->dts_deadman = CYCLIC_NONE; 13204#else 13205 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE); 13206 callout_init(&state->dts_deadman, CALLOUT_MPSAFE); 13207#endif 13208 state->dts_vstate.dtvs_state = state; 13209 13210 for (i = 0; i < DTRACEOPT_MAX; i++) 13211 state->dts_options[i] = DTRACEOPT_UNSET; 13212 13213 /* 13214 * Set the default options. 13215 */ 13216 opt = state->dts_options; 13217 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 13218 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 13219 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 13220 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 13221 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 13222 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 13223 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 13224 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 13225 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 13226 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 13227 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 13228 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 13229 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 13230 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 13231 13232 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 13233 13234 /* 13235 * Depending on the user credentials, we set flag bits which alter probe 13236 * visibility or the amount of destructiveness allowed. In the case of 13237 * actual anonymous tracing, or the possession of all privileges, all of 13238 * the normal checks are bypassed. 13239 */ 13240 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 13241 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 13242 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 13243 } else { 13244 /* 13245 * Set up the credentials for this instantiation. We take a 13246 * hold on the credential to prevent it from disappearing on 13247 * us; this in turn prevents the zone_t referenced by this 13248 * credential from disappearing. This means that we can 13249 * examine the credential and the zone from probe context. 13250 */ 13251 crhold(cr); 13252 state->dts_cred.dcr_cred = cr; 13253 13254 /* 13255 * CRA_PROC means "we have *some* privilege for dtrace" and 13256 * unlocks the use of variables like pid, zonename, etc. 13257 */ 13258 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 13259 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13260 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 13261 } 13262 13263 /* 13264 * dtrace_user allows use of syscall and profile providers. 13265 * If the user also has proc_owner and/or proc_zone, we 13266 * extend the scope to include additional visibility and 13267 * destructive power. 
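 * To summarize the two cases handled below: dtrace_user plus
 * proc_owner adds visibility of (and destructive actions upon) other
 * users' processes via ALLPROC/ALLUSER, while dtrace_user plus
 * proc_zone extends the same across zone boundaries via ALLZONE.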
13268 */ 13269 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 13270 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 13271 state->dts_cred.dcr_visible |= 13272 DTRACE_CRV_ALLPROC; 13273 13274 state->dts_cred.dcr_action |= 13275 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13276 } 13277 13278 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 13279 state->dts_cred.dcr_visible |= 13280 DTRACE_CRV_ALLZONE; 13281 13282 state->dts_cred.dcr_action |= 13283 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13284 } 13285 13286 /* 13287 * If we have all privs in whatever zone this is, 13288 * we can do destructive things to processes which 13289 * have altered credentials. 13290 */ 13291#if defined(sun) 13292 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13293 cr->cr_zone->zone_privset)) { 13294 state->dts_cred.dcr_action |= 13295 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13296 } 13297#endif 13298 } 13299 13300 /* 13301 * Holding the dtrace_kernel privilege also implies that 13302 * the user has the dtrace_user privilege from a visibility 13303 * perspective. But without further privileges, some 13304 * destructive actions are not available. 13305 */ 13306 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 13307 /* 13308 * Make all probes in all zones visible. However, 13309 * this doesn't mean that all actions become available 13310 * to all zones. 13311 */ 13312 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 13313 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 13314 13315 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 13316 DTRACE_CRA_PROC; 13317 /* 13318 * Holding proc_owner means that destructive actions 13319 * for *this* zone are allowed. 13320 */ 13321 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 13322 state->dts_cred.dcr_action |= 13323 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13324 13325 /* 13326 * Holding proc_zone means that destructive actions 13327 * for this user/group ID in all zones are allowed. 13328 */ 13329 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 13330 state->dts_cred.dcr_action |= 13331 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13332 13333#if defined(sun) 13334 /* 13335 * If we have all privs in whatever zone this is, 13336 * we can do destructive things to processes which 13337 * have altered credentials. 13338 */ 13339 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13340 cr->cr_zone->zone_privset)) { 13341 state->dts_cred.dcr_action |= 13342 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13343 } 13344#endif 13345 } 13346 13347 /* 13348 * Holding the dtrace_proc privilege gives control over fasttrap 13349 * and pid providers. We need to grant wider destructive 13350 * privileges in the event that the user has proc_owner and/or 13351 * proc_zone.
13352 */ 13353 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13354 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 13355 state->dts_cred.dcr_action |= 13356 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13357 13358 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 13359 state->dts_cred.dcr_action |= 13360 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13361 } 13362 } 13363 13364 return (state); 13365} 13366 13367static int 13368dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 13369{ 13370 dtrace_optval_t *opt = state->dts_options, size; 13371 processorid_t cpu = 0; 13372 int flags = 0, rval; 13373 13374 ASSERT(MUTEX_HELD(&dtrace_lock)); 13375 ASSERT(MUTEX_HELD(&cpu_lock)); 13376 ASSERT(which < DTRACEOPT_MAX); 13377 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 13378 (state == dtrace_anon.dta_state && 13379 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 13380 13381 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 13382 return (0); 13383 13384 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 13385 cpu = opt[DTRACEOPT_CPU]; 13386 13387 if (which == DTRACEOPT_SPECSIZE) 13388 flags |= DTRACEBUF_NOSWITCH; 13389 13390 if (which == DTRACEOPT_BUFSIZE) { 13391 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 13392 flags |= DTRACEBUF_RING; 13393 13394 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 13395 flags |= DTRACEBUF_FILL; 13396 13397 if (state != dtrace_anon.dta_state || 13398 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 13399 flags |= DTRACEBUF_INACTIVE; 13400 } 13401 13402 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 13403 /* 13404 * The size must be 8-byte aligned. If the size is not 8-byte 13405 * aligned, drop it down by the difference. 13406 */ 13407 if (size & (sizeof (uint64_t) - 1)) 13408 size -= size & (sizeof (uint64_t) - 1); 13409 13410 if (size < state->dts_reserve) { 13411 /* 13412 * Buffers must always be large enough to accommodate 13413 * their prereserved space. We return E2BIG instead 13414 * of ENOMEM in this case to allow for user-level 13415 * software to differentiate the cases. 13416 */ 13417 return (E2BIG); 13418 } 13419 13420 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 13421 13422 if (rval != ENOMEM) { 13423 opt[which] = size; 13424 return (rval); 13425 } 13426 13427 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13428 return (rval); 13429 } 13430 13431 return (ENOMEM); 13432} 13433 13434static int 13435dtrace_state_buffers(dtrace_state_t *state) 13436{ 13437 dtrace_speculation_t *spec = state->dts_speculations; 13438 int rval, i; 13439 13440 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 13441 DTRACEOPT_BUFSIZE)) != 0) 13442 return (rval); 13443 13444 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 13445 DTRACEOPT_AGGSIZE)) != 0) 13446 return (rval); 13447 13448 for (i = 0; i < state->dts_nspeculations; i++) { 13449 if ((rval = dtrace_state_buffer(state, 13450 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 13451 return (rval); 13452 } 13453 13454 return (0); 13455} 13456 13457static void 13458dtrace_state_prereserve(dtrace_state_t *state) 13459{ 13460 dtrace_ecb_t *ecb; 13461 dtrace_probe_t *probe; 13462 13463 state->dts_reserve = 0; 13464 13465 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 13466 return; 13467 13468 /* 13469 * If our buffer policy is a "fill" buffer policy, we need to set the 13470 * prereserved space to be the space required by the END probes.
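 * That is, summed over every ECB on the END probe that belongs to this
 * state: dts_reserve = SUM(dte_needed + dte_alignment), which is
 * exactly what the loop below computes.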
13471 */ 13472 probe = dtrace_probes[dtrace_probeid_end - 1]; 13473 ASSERT(probe != NULL); 13474 13475 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 13476 if (ecb->dte_state != state) 13477 continue; 13478 13479 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 13480 } 13481} 13482 13483static int 13484dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 13485{ 13486 dtrace_optval_t *opt = state->dts_options, sz, nspec; 13487 dtrace_speculation_t *spec; 13488 dtrace_buffer_t *buf; 13489#if defined(sun) 13490 cyc_handler_t hdlr; 13491 cyc_time_t when; 13492#endif 13493 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13494 dtrace_icookie_t cookie; 13495 13496 mutex_enter(&cpu_lock); 13497 mutex_enter(&dtrace_lock); 13498 13499 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 13500 rval = EBUSY; 13501 goto out; 13502 } 13503 13504 /* 13505 * Before we can perform any checks, we must prime all of the 13506 * retained enablings that correspond to this state. 13507 */ 13508 dtrace_enabling_prime(state); 13509 13510 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 13511 rval = EACCES; 13512 goto out; 13513 } 13514 13515 dtrace_state_prereserve(state); 13516 13517 /* 13518 * What we want to do now is try to allocate our speculations. 13519 * We do not automatically resize the number of speculations; if 13520 * this fails, we will fail the operation. 13521 */ 13522 nspec = opt[DTRACEOPT_NSPEC]; 13523 ASSERT(nspec != DTRACEOPT_UNSET); 13524 13525 if (nspec > INT_MAX) { 13526 rval = ENOMEM; 13527 goto out; 13528 } 13529 13530 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 13531 13532 if (spec == NULL) { 13533 rval = ENOMEM; 13534 goto out; 13535 } 13536 13537 state->dts_speculations = spec; 13538 state->dts_nspeculations = (int)nspec; 13539 13540 for (i = 0; i < nspec; i++) { 13541 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 13542 rval = ENOMEM; 13543 goto err; 13544 } 13545 13546 spec[i].dtsp_buffer = buf; 13547 } 13548 13549 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 13550 if (dtrace_anon.dta_state == NULL) { 13551 rval = ENOENT; 13552 goto out; 13553 } 13554 13555 if (state->dts_necbs != 0) { 13556 rval = EALREADY; 13557 goto out; 13558 } 13559 13560 state->dts_anon = dtrace_anon_grab(); 13561 ASSERT(state->dts_anon != NULL); 13562 state = state->dts_anon; 13563 13564 /* 13565 * We want "grabanon" to be set in the grabbed state, so we'll 13566 * copy that option value from the grabbing state into the 13567 * grabbed state. 13568 */ 13569 state->dts_options[DTRACEOPT_GRABANON] = 13570 opt[DTRACEOPT_GRABANON]; 13571 13572 *cpu = dtrace_anon.dta_beganon; 13573 13574 /* 13575 * If the anonymous state is active (as it almost certainly 13576 * is if the anonymous enabling ultimately matched anything), 13577 * we don't allow any further option processing -- but we 13578 * don't return failure. 13579 */ 13580 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13581 goto out; 13582 } 13583 13584 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 13585 opt[DTRACEOPT_AGGSIZE] != 0) { 13586 if (state->dts_aggregations == NULL) { 13587 /* 13588 * We're not going to create an aggregation buffer 13589 * because we don't have any ECBs that contain 13590 * aggregations -- set this option to 0. 13591 */ 13592 opt[DTRACEOPT_AGGSIZE] = 0; 13593 } else { 13594 /* 13595 * If we have an aggregation buffer, we must also have 13596 * a buffer to use as scratch.
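 * (The principal buffer doubles as the scratch space needed while
 * processing aggregations, so if the configured bufsize is unset or
 * smaller than dts_needed, it is raised to dts_needed below.)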
13597 */ 13598 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 13599 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 13600 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 13601 } 13602 } 13603 } 13604 13605 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 13606 opt[DTRACEOPT_SPECSIZE] != 0) { 13607 if (!state->dts_speculates) { 13608 /* 13609 * We're not going to create speculation buffers 13610 * because we don't have any ECBs that actually 13611 * speculate -- set the speculation size to 0. 13612 */ 13613 opt[DTRACEOPT_SPECSIZE] = 0; 13614 } 13615 } 13616 13617 /* 13618 * The bare minimum size for any buffer that we're actually going to 13619 * do anything to is sizeof (uint64_t). 13620 */ 13621 sz = sizeof (uint64_t); 13622 13623 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 13624 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 13625 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 13626 /* 13627 * A buffer size has been explicitly set to 0 (or to a size 13628 * that will be adjusted to 0) and we need the space -- we 13629 * need to return failure. We return ENOSPC to differentiate 13630 * it from failing to allocate a buffer due to failure to meet 13631 * the reserve (for which we return E2BIG). 13632 */ 13633 rval = ENOSPC; 13634 goto out; 13635 } 13636 13637 if ((rval = dtrace_state_buffers(state)) != 0) 13638 goto err; 13639 13640 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 13641 sz = dtrace_dstate_defsize; 13642 13643 do { 13644 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 13645 13646 if (rval == 0) 13647 break; 13648 13649 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13650 goto err; 13651 } while (sz >>= 1); 13652 13653 opt[DTRACEOPT_DYNVARSIZE] = sz; 13654 13655 if (rval != 0) 13656 goto err; 13657 13658 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 13659 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 13660 13661 if (opt[DTRACEOPT_CLEANRATE] == 0) 13662 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13663 13664 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 13665 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 13666 13667 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 13668 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13669 13670 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 13671#if defined(sun) 13672 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 13673 hdlr.cyh_arg = state; 13674 hdlr.cyh_level = CY_LOW_LEVEL; 13675 13676 when.cyt_when = 0; 13677 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 13678 13679 state->dts_cleaner = cyclic_add(&hdlr, &when); 13680 13681 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 13682 hdlr.cyh_arg = state; 13683 hdlr.cyh_level = CY_LOW_LEVEL; 13684 13685 when.cyt_when = 0; 13686 when.cyt_interval = dtrace_deadman_interval; 13687 13688 state->dts_deadman = cyclic_add(&hdlr, &when); 13689#else 13690 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13691 dtrace_state_clean, state); 13692 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13693 dtrace_state_deadman, state); 13694#endif 13695 13696 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 13697 13698 /* 13699 * Now it's time to actually fire the BEGIN probe. We need to disable 13700 * interrupts here both to record the CPU on which we fired the BEGIN 13701 * probe (the data from this CPU will be processed first at user 13702 * level) and to manually activate the buffer for this CPU. 
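 * (Disabling interrupts keeps us from migrating between reading curcpu
 * and firing the probe, so the CPU reported through *cpu is guaranteed
 * to be the one whose principal buffer holds the BEGIN records.)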
13703 */ 13704 cookie = dtrace_interrupt_disable(); 13705 *cpu = curcpu; 13706 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 13707 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 13708 13709 dtrace_probe(dtrace_probeid_begin, 13710 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13711 dtrace_interrupt_enable(cookie); 13712 /* 13713 * We may have had an exit action from a BEGIN probe; only change our 13714 * state to ACTIVE if we're still in WARMUP. 13715 */ 13716 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 13717 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 13718 13719 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 13720 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 13721 13722 /* 13723 * Regardless of whether we're now in ACTIVE or DRAINING, we 13724 * want each CPU to transition its principal buffer out of the 13725 * INACTIVE state. Doing this assures that no CPU will suddenly begin 13726 * processing an ECB halfway down a probe's ECB chain; all CPUs will 13727 * atomically transition from processing none of a state's ECBs to 13728 * processing all of them. 13729 */ 13730 dtrace_xcall(DTRACE_CPUALL, 13731 (dtrace_xcall_t)dtrace_buffer_activate, state); 13732 goto out; 13733 13734err: 13735 dtrace_buffer_free(state->dts_buffer); 13736 dtrace_buffer_free(state->dts_aggbuffer); 13737 13738 if ((nspec = state->dts_nspeculations) == 0) { 13739 ASSERT(state->dts_speculations == NULL); 13740 goto out; 13741 } 13742 13743 spec = state->dts_speculations; 13744 ASSERT(spec != NULL); 13745 13746 for (i = 0; i < state->dts_nspeculations; i++) { 13747 if ((buf = spec[i].dtsp_buffer) == NULL) 13748 break; 13749 13750 dtrace_buffer_free(buf); 13751 kmem_free(buf, bufsize); 13752 } 13753 13754 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13755 state->dts_nspeculations = 0; 13756 state->dts_speculations = NULL; 13757 13758out: 13759 mutex_exit(&dtrace_lock); 13760 mutex_exit(&cpu_lock); 13761 13762 return (rval); 13763} 13764 13765static int 13766dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 13767{ 13768 dtrace_icookie_t cookie; 13769 13770 ASSERT(MUTEX_HELD(&dtrace_lock)); 13771 13772 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 13773 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 13774 return (EINVAL); 13775 13776 /* 13777 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 13778 * to be sure that every CPU has seen it. See below for the details 13779 * on why this is done. 13780 */ 13781 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 13782 dtrace_sync(); 13783 13784 /* 13785 * By this point, it is impossible for any CPU to be still processing 13786 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 13787 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 13788 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 13789 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 13790 * iff we're in the END probe. 13791 */ 13792 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 13793 dtrace_sync(); 13794 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 13795 13796 /* 13797 * Finally, we can release the reserve and call the END probe. We 13798 * disable interrupts across calling the END probe to allow us to 13799 * return the CPU on which we actually called the END probe. This 13800 * allows user-land to be sure that this CPU's principal buffer is 13801 * processed last.
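 * (As with BEGIN, pinning ourselves to one CPU across the
 * dtrace_probe() call ties the END records to the buffer that user
 * level will process last.)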
13802 */ 13803 state->dts_reserve = 0; 13804 13805 cookie = dtrace_interrupt_disable(); 13806 *cpu = curcpu; 13807 dtrace_probe(dtrace_probeid_end, 13808 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13809 dtrace_interrupt_enable(cookie); 13810 13811 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 13812 dtrace_sync(); 13813 13814 return (0); 13815} 13816 13817static int 13818dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 13819 dtrace_optval_t val) 13820{ 13821 ASSERT(MUTEX_HELD(&dtrace_lock)); 13822 13823 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13824 return (EBUSY); 13825 13826 if (option >= DTRACEOPT_MAX) 13827 return (EINVAL); 13828 13829 if (option != DTRACEOPT_CPU && val < 0) 13830 return (EINVAL); 13831 13832 switch (option) { 13833 case DTRACEOPT_DESTRUCTIVE: 13834 if (dtrace_destructive_disallow) 13835 return (EACCES); 13836 13837 state->dts_cred.dcr_destructive = 1; 13838 break; 13839 13840 case DTRACEOPT_BUFSIZE: 13841 case DTRACEOPT_DYNVARSIZE: 13842 case DTRACEOPT_AGGSIZE: 13843 case DTRACEOPT_SPECSIZE: 13844 case DTRACEOPT_STRSIZE: 13845 if (val < 0) 13846 return (EINVAL); 13847 13848 if (val >= LONG_MAX) { 13849 /* 13850 * If this is an otherwise negative value, set it to 13851 * the highest multiple of 128m less than LONG_MAX. 13852 * Technically, we're adjusting the size without 13853 * regard to the buffer resizing policy, but in fact, 13854 * this has no effect -- if we set the buffer size to 13855 * ~LONG_MAX and the buffer policy is ultimately set to 13856 * be "manual", the buffer allocation is guaranteed to 13857 * fail, if only because the allocation requires two 13858 * buffers. (We set the size to the highest 13859 * multiple of 128m because it ensures that the size 13860 * will remain a multiple of a megabyte when 13861 * repeatedly halved -- all the way down to 15m.) 13862 */ 13863 val = LONG_MAX - (1 << 27) + 1; 13864 } 13865 } 13866 13867 state->dts_options[option] = val; 13868 13869 return (0); 13870} 13871 13872static void 13873dtrace_state_destroy(dtrace_state_t *state) 13874{ 13875 dtrace_ecb_t *ecb; 13876 dtrace_vstate_t *vstate = &state->dts_vstate; 13877#if defined(sun) 13878 minor_t minor = getminor(state->dts_dev); 13879#endif 13880 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13881 dtrace_speculation_t *spec = state->dts_speculations; 13882 int nspec = state->dts_nspeculations; 13883 uint32_t match; 13884 13885 ASSERT(MUTEX_HELD(&dtrace_lock)); 13886 ASSERT(MUTEX_HELD(&cpu_lock)); 13887 13888 /* 13889 * First, retract any retained enablings for this state. 13890 */ 13891 dtrace_enabling_retract(state); 13892 ASSERT(state->dts_nretained == 0); 13893 13894 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 13895 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 13896 /* 13897 * We have managed to come into dtrace_state_destroy() on a 13898 * hot enabling -- almost certainly because of a disorderly 13899 * shutdown of a consumer. (That is, a consumer that is 13900 * exiting without having called dtrace_stop().) In this case, 13901 * we're going to set our activity to be KILLED, and then 13902 * issue a sync to be sure that everyone is out of probe 13903 * context before we start blowing away ECBs. 13904 */ 13905 state->dts_activity = DTRACE_ACTIVITY_KILLED; 13906 dtrace_sync(); 13907 } 13908 13909 /* 13910 * Release the credential hold we took in dtrace_state_create().
13911 */ 13912 if (state->dts_cred.dcr_cred != NULL) 13913 crfree(state->dts_cred.dcr_cred); 13914 13915 /* 13916 * Now we can safely disable and destroy any enabled probes. Because 13917 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 13918 * (especially if they're all enabled), we take two passes through the 13919 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 13920 * in the second we disable whatever is left over. 13921 */ 13922 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 13923 for (i = 0; i < state->dts_necbs; i++) { 13924 if ((ecb = state->dts_ecbs[i]) == NULL) 13925 continue; 13926 13927 if (match && ecb->dte_probe != NULL) { 13928 dtrace_probe_t *probe = ecb->dte_probe; 13929 dtrace_provider_t *prov = probe->dtpr_provider; 13930 13931 if (!(prov->dtpv_priv.dtpp_flags & match)) 13932 continue; 13933 } 13934 13935 dtrace_ecb_disable(ecb); 13936 dtrace_ecb_destroy(ecb); 13937 } 13938 13939 if (!match) 13940 break; 13941 } 13942 13943 /* 13944 * Before we free the buffers, perform one more sync to assure that 13945 * every CPU is out of probe context. 13946 */ 13947 dtrace_sync(); 13948 13949 dtrace_buffer_free(state->dts_buffer); 13950 dtrace_buffer_free(state->dts_aggbuffer); 13951 13952 for (i = 0; i < nspec; i++) 13953 dtrace_buffer_free(spec[i].dtsp_buffer); 13954 13955#if defined(sun) 13956 if (state->dts_cleaner != CYCLIC_NONE) 13957 cyclic_remove(state->dts_cleaner); 13958 13959 if (state->dts_deadman != CYCLIC_NONE) 13960 cyclic_remove(state->dts_deadman); 13961#else 13962 callout_stop(&state->dts_cleaner); 13963 callout_drain(&state->dts_cleaner); 13964 callout_stop(&state->dts_deadman); 13965 callout_drain(&state->dts_deadman); 13966#endif 13967 13968 dtrace_dstate_fini(&vstate->dtvs_dynvars); 13969 dtrace_vstate_fini(vstate); 13970 if (state->dts_ecbs != NULL) 13971 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 13972 13973 if (state->dts_aggregations != NULL) { 13974#ifdef DEBUG 13975 for (i = 0; i < state->dts_naggregations; i++) 13976 ASSERT(state->dts_aggregations[i] == NULL); 13977#endif 13978 ASSERT(state->dts_naggregations > 0); 13979 kmem_free(state->dts_aggregations, 13980 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 13981 } 13982 13983 kmem_free(state->dts_buffer, bufsize); 13984 kmem_free(state->dts_aggbuffer, bufsize); 13985 13986 for (i = 0; i < nspec; i++) 13987 kmem_free(spec[i].dtsp_buffer, bufsize); 13988 13989 if (spec != NULL) 13990 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13991 13992 dtrace_format_destroy(state); 13993 13994 if (state->dts_aggid_arena != NULL) { 13995#if defined(sun) 13996 vmem_destroy(state->dts_aggid_arena); 13997#else 13998 delete_unrhdr(state->dts_aggid_arena); 13999#endif 14000 state->dts_aggid_arena = NULL; 14001 } 14002#if defined(sun) 14003 ddi_soft_state_free(dtrace_softstate, minor); 14004 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 14005#endif 14006} 14007 14008/* 14009 * DTrace Anonymous Enabling Functions 14010 */ 14011static dtrace_state_t * 14012dtrace_anon_grab(void) 14013{ 14014 dtrace_state_t *state; 14015 14016 ASSERT(MUTEX_HELD(&dtrace_lock)); 14017 14018 if ((state = dtrace_anon.dta_state) == NULL) { 14019 ASSERT(dtrace_anon.dta_enabling == NULL); 14020 return (NULL); 14021 } 14022 14023 ASSERT(dtrace_anon.dta_enabling != NULL); 14024 ASSERT(dtrace_retained != NULL); 14025 14026 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 14027 dtrace_anon.dta_enabling = NULL; 14028 dtrace_anon.dta_state = NULL; 
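 /* Ownership of the anonymous state now passes to the grabbing consumer; the enabling itself has served its purpose and was destroyed above. */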
14029 14030 return (state); 14031} 14032 14033static void 14034dtrace_anon_property(void) 14035{ 14036 int i, rv; 14037 dtrace_state_t *state; 14038 dof_hdr_t *dof; 14039 char c[32]; /* enough for "dof-data-" + digits */ 14040 14041 ASSERT(MUTEX_HELD(&dtrace_lock)); 14042 ASSERT(MUTEX_HELD(&cpu_lock)); 14043 14044 for (i = 0; ; i++) { 14045 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 14046 14047 dtrace_err_verbose = 1; 14048 14049 if ((dof = dtrace_dof_property(c)) == NULL) { 14050 dtrace_err_verbose = 0; 14051 break; 14052 } 14053 14054#if defined(sun) 14055 /* 14056 * We want to create anonymous state, so we need to transition 14057 * the kernel debugger to indicate that DTrace is active. If 14058 * this fails (e.g. because the debugger has modified text in 14059 * some way), we won't continue with the processing. 14060 */ 14061 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 14062 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 14063 "enabling ignored."); 14064 dtrace_dof_destroy(dof); 14065 break; 14066 } 14067#endif 14068 14069 /* 14070 * If we haven't allocated an anonymous state, we'll do so now. 14071 */ 14072 if ((state = dtrace_anon.dta_state) == NULL) { 14073#if defined(sun) 14074 state = dtrace_state_create(NULL, NULL); 14075#else 14076 state = dtrace_state_create(NULL); 14077#endif 14078 dtrace_anon.dta_state = state; 14079 14080 if (state == NULL) { 14081 /* 14082 * This basically shouldn't happen: the only 14083 * failure mode from dtrace_state_create() is a 14084 * failure of ddi_soft_state_zalloc() that 14085 * itself should never happen. Still, the 14086 * interface allows for a failure mode, and 14087 * we want to fail as gracefully as possible: 14088 * we'll emit an error message and cease 14089 * processing anonymous state in this case. 14090 */ 14091 cmn_err(CE_WARN, "failed to create " 14092 "anonymous state"); 14093 dtrace_dof_destroy(dof); 14094 break; 14095 } 14096 } 14097 14098 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 14099 &dtrace_anon.dta_enabling, 0, B_TRUE); 14100 14101 if (rv == 0) 14102 rv = dtrace_dof_options(dof, state); 14103 14104 dtrace_err_verbose = 0; 14105 dtrace_dof_destroy(dof); 14106 14107 if (rv != 0) { 14108 /* 14109 * This is malformed DOF; chuck any anonymous state 14110 * that we created. 14111 */ 14112 ASSERT(dtrace_anon.dta_enabling == NULL); 14113 dtrace_state_destroy(state); 14114 dtrace_anon.dta_state = NULL; 14115 break; 14116 } 14117 14118 ASSERT(dtrace_anon.dta_enabling != NULL); 14119 } 14120 14121 if (dtrace_anon.dta_enabling != NULL) { 14122 int rval; 14123 14124 /* 14125 * dtrace_enabling_retain() can only fail because we are 14126 * trying to retain more enablings than are allowed -- but 14127 * we only have one anonymous enabling, and we are guaranteed 14128 * to be allowed at least one retained enabling; we assert 14129 * that dtrace_enabling_retain() returns success. 
14130 */ 14131 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 14132 ASSERT(rval == 0); 14133 14134 dtrace_enabling_dump(dtrace_anon.dta_enabling); 14135 } 14136} 14137 14138/* 14139 * DTrace Helper Functions 14140 */ 14141static void 14142dtrace_helper_trace(dtrace_helper_action_t *helper, 14143 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 14144{ 14145 uint32_t size, next, nnext, i; 14146 dtrace_helptrace_t *ent; 14147 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 14148 14149 if (!dtrace_helptrace_enabled) 14150 return; 14151 14152 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 14153 14154 /* 14155 * What would a tracing framework be without its own tracing 14156 * framework? (Well, a hell of a lot simpler, for starters...) 14157 */ 14158 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 14159 sizeof (uint64_t) - sizeof (uint64_t); 14160 14161 /* 14162 * Iterate until we can allocate a slot in the trace buffer. 14163 */ 14164 do { 14165 next = dtrace_helptrace_next; 14166 14167 if (next + size < dtrace_helptrace_bufsize) { 14168 nnext = next + size; 14169 } else { 14170 nnext = size; 14171 } 14172 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 14173 14174 /* 14175 * We have our slot; fill it in. 14176 */ 14177 if (nnext == size) 14178 next = 0; 14179 14180 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 14181 ent->dtht_helper = helper; 14182 ent->dtht_where = where; 14183 ent->dtht_nlocals = vstate->dtvs_nlocals; 14184 14185 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 14186 mstate->dtms_fltoffs : -1; 14187 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 14188 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 14189 14190 for (i = 0; i < vstate->dtvs_nlocals; i++) { 14191 dtrace_statvar_t *svar; 14192 14193 if ((svar = vstate->dtvs_locals[i]) == NULL) 14194 continue; 14195 14196 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 14197 ent->dtht_locals[i] = 14198 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 14199 } 14200} 14201 14202static uint64_t 14203dtrace_helper(int which, dtrace_mstate_t *mstate, 14204 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 14205{ 14206 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 14207 uint64_t sarg0 = mstate->dtms_arg[0]; 14208 uint64_t sarg1 = mstate->dtms_arg[1]; 14209 uint64_t rval = 0; 14210 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 14211 dtrace_helper_action_t *helper; 14212 dtrace_vstate_t *vstate; 14213 dtrace_difo_t *pred; 14214 int i, trace = dtrace_helptrace_enabled; 14215 14216 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 14217 14218 if (helpers == NULL) 14219 return (0); 14220 14221 if ((helper = helpers->dthps_actions[which]) == NULL) 14222 return (0); 14223 14224 vstate = &helpers->dthps_vstate; 14225 mstate->dtms_arg[0] = arg0; 14226 mstate->dtms_arg[1] = arg1; 14227 14228 /* 14229 * Now iterate over each helper. If its predicate evaluates to 'true', 14230 * we'll call the corresponding actions. Note that the below calls 14231 * to dtrace_dif_emulate() may set faults in machine state. This is 14232 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 14233 * the stored DIF offset with its own (which is the desired behavior). 14234 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 14235 * from machine state; this is okay, too. 
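 * In outline, for each helper registered for this action: if the
 * helper has a predicate and it evaluates to zero, skip to the next
 * helper; otherwise run each of its actions in turn, bailing out (and
 * noting it in the helper trace) on any fault. The value of the last
 * action executed becomes our return value.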
14236 */ 14237 for (; helper != NULL; helper = helper->dtha_next) { 14238 if ((pred = helper->dtha_predicate) != NULL) { 14239 if (trace) 14240 dtrace_helper_trace(helper, mstate, vstate, 0); 14241 14242 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 14243 goto next; 14244 14245 if (*flags & CPU_DTRACE_FAULT) 14246 goto err; 14247 } 14248 14249 for (i = 0; i < helper->dtha_nactions; i++) { 14250 if (trace) 14251 dtrace_helper_trace(helper, 14252 mstate, vstate, i + 1); 14253 14254 rval = dtrace_dif_emulate(helper->dtha_actions[i], 14255 mstate, vstate, state); 14256 14257 if (*flags & CPU_DTRACE_FAULT) 14258 goto err; 14259 } 14260 14261next: 14262 if (trace) 14263 dtrace_helper_trace(helper, mstate, vstate, 14264 DTRACE_HELPTRACE_NEXT); 14265 } 14266 14267 if (trace) 14268 dtrace_helper_trace(helper, mstate, vstate, 14269 DTRACE_HELPTRACE_DONE); 14270 14271 /* 14272 * Restore the arg0 that we saved upon entry. 14273 */ 14274 mstate->dtms_arg[0] = sarg0; 14275 mstate->dtms_arg[1] = sarg1; 14276 14277 return (rval); 14278 14279err: 14280 if (trace) 14281 dtrace_helper_trace(helper, mstate, vstate, 14282 DTRACE_HELPTRACE_ERR); 14283 14284 /* 14285 * Restore the arg0 that we saved upon entry. 14286 */ 14287 mstate->dtms_arg[0] = sarg0; 14288 mstate->dtms_arg[1] = sarg1; 14289 14290 return (0); 14291} 14292 14293static void 14294dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 14295 dtrace_vstate_t *vstate) 14296{ 14297 int i; 14298 14299 if (helper->dtha_predicate != NULL) 14300 dtrace_difo_release(helper->dtha_predicate, vstate); 14301 14302 for (i = 0; i < helper->dtha_nactions; i++) { 14303 ASSERT(helper->dtha_actions[i] != NULL); 14304 dtrace_difo_release(helper->dtha_actions[i], vstate); 14305 } 14306 14307 kmem_free(helper->dtha_actions, 14308 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 14309 kmem_free(helper, sizeof (dtrace_helper_action_t)); 14310} 14311 14312static int 14313dtrace_helper_destroygen(int gen) 14314{ 14315 proc_t *p = curproc; 14316 dtrace_helpers_t *help = p->p_dtrace_helpers; 14317 dtrace_vstate_t *vstate; 14318 int i; 14319 14320 ASSERT(MUTEX_HELD(&dtrace_lock)); 14321 14322 if (help == NULL || gen > help->dthps_generation) 14323 return (EINVAL); 14324 14325 vstate = &help->dthps_vstate; 14326 14327 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14328 dtrace_helper_action_t *last = NULL, *h, *next; 14329 14330 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14331 next = h->dtha_next; 14332 14333 if (h->dtha_generation == gen) { 14334 if (last != NULL) { 14335 last->dtha_next = next; 14336 } else { 14337 help->dthps_actions[i] = next; 14338 } 14339 14340 dtrace_helper_action_destroy(h, vstate); 14341 } else { 14342 last = h; 14343 } 14344 } 14345 } 14346 14347 /* 14348 * Iterate until we've cleared out all helper providers with the 14349 * given generation number. 14350 */ 14351 for (;;) { 14352 dtrace_helper_provider_t *prov; 14353 14354 /* 14355 * Look for a helper provider with the right generation. We 14356 * have to start back at the beginning of the list each time 14357 * because we drop dtrace_lock. It's unlikely that we'll make 14358 * more than two passes. 14359 */ 14360 for (i = 0; i < help->dthps_nprovs; i++) { 14361 prov = help->dthps_provs[i]; 14362 14363 if (prov->dthp_generation == gen) 14364 break; 14365 } 14366 14367 /* 14368 * If there were no matches, we're done. 14369 */ 14370 if (i == help->dthps_nprovs) 14371 break; 14372 14373 /* 14374 * Move the last helper provider into this slot.
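 * (This is the usual unordered-array removal: slot i is overwritten
 * with the final element and the count shrinks by one, rather than
 * shifting the whole tail down.)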
14375 */ 14376 help->dthps_nprovs--; 14377 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 14378 help->dthps_provs[help->dthps_nprovs] = NULL; 14379 14380 mutex_exit(&dtrace_lock); 14381 14382 /* 14383 * If we have a meta provider, remove this helper provider. 14384 */ 14385 mutex_enter(&dtrace_meta_lock); 14386 if (dtrace_meta_pid != NULL) { 14387 ASSERT(dtrace_deferred_pid == NULL); 14388 dtrace_helper_provider_remove(&prov->dthp_prov, 14389 p->p_pid); 14390 } 14391 mutex_exit(&dtrace_meta_lock); 14392 14393 dtrace_helper_provider_destroy(prov); 14394 14395 mutex_enter(&dtrace_lock); 14396 } 14397 14398 return (0); 14399} 14400 14401static int 14402dtrace_helper_validate(dtrace_helper_action_t *helper) 14403{ 14404 int err = 0, i; 14405 dtrace_difo_t *dp; 14406 14407 if ((dp = helper->dtha_predicate) != NULL) 14408 err += dtrace_difo_validate_helper(dp); 14409 14410 for (i = 0; i < helper->dtha_nactions; i++) 14411 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 14412 14413 return (err == 0); 14414} 14415 14416static int 14417dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 14418{ 14419 dtrace_helpers_t *help; 14420 dtrace_helper_action_t *helper, *last; 14421 dtrace_actdesc_t *act; 14422 dtrace_vstate_t *vstate; 14423 dtrace_predicate_t *pred; 14424 int count = 0, nactions = 0, i; 14425 14426 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 14427 return (EINVAL); 14428 14429 help = curproc->p_dtrace_helpers; 14430 last = help->dthps_actions[which]; 14431 vstate = &help->dthps_vstate; 14432 14433 for (count = 0; last != NULL; last = last->dtha_next) { 14434 count++; 14435 if (last->dtha_next == NULL) 14436 break; 14437 } 14438 14439 /* 14440 * If we already have dtrace_helper_actions_max helper actions for this 14441 * helper action type, we'll refuse to add a new one. 
14442 */ 14443 if (count >= dtrace_helper_actions_max) 14444 return (ENOSPC); 14445 14446 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 14447 helper->dtha_generation = help->dthps_generation; 14448 14449 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 14450 ASSERT(pred->dtp_difo != NULL); 14451 dtrace_difo_hold(pred->dtp_difo); 14452 helper->dtha_predicate = pred->dtp_difo; 14453 } 14454 14455 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 14456 if (act->dtad_kind != DTRACEACT_DIFEXPR) 14457 goto err; 14458 14459 if (act->dtad_difo == NULL) 14460 goto err; 14461 14462 nactions++; 14463 } 14464 14465 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 14466 (helper->dtha_nactions = nactions), KM_SLEEP); 14467 14468 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 14469 dtrace_difo_hold(act->dtad_difo); 14470 helper->dtha_actions[i++] = act->dtad_difo; 14471 } 14472 14473 if (!dtrace_helper_validate(helper)) 14474 goto err; 14475 14476 if (last == NULL) { 14477 help->dthps_actions[which] = helper; 14478 } else { 14479 last->dtha_next = helper; 14480 } 14481 14482 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 14483 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 14484 dtrace_helptrace_next = 0; 14485 } 14486 14487 return (0); 14488 err: 14489 dtrace_helper_action_destroy(helper, vstate); 14490 return (EINVAL); 14491 } 14492 14493 static void 14494 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 14495 dof_helper_t *dofhp) 14496 { 14497 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 14498 14499 mutex_enter(&dtrace_meta_lock); 14500 mutex_enter(&dtrace_lock); 14501 14502 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 14503 /* 14504 * If the dtrace module is loaded but not attached, or if 14505 * there isn't a meta provider registered to deal with 14506 * these provider descriptions, we need to postpone creating 14507 * the actual providers until later. 14508 */ 14509 14510 if (help->dthps_next == NULL && help->dthps_prev == NULL && 14511 dtrace_deferred_pid != help) { 14512 help->dthps_deferred = 1; 14513 help->dthps_pid = p->p_pid; 14514 help->dthps_next = dtrace_deferred_pid; 14515 help->dthps_prev = NULL; 14516 if (dtrace_deferred_pid != NULL) 14517 dtrace_deferred_pid->dthps_prev = help; 14518 dtrace_deferred_pid = help; 14519 } 14520 14521 mutex_exit(&dtrace_lock); 14522 14523 } else if (dofhp != NULL) { 14524 /* 14525 * If the dtrace module is loaded and we have a particular 14526 * helper provider description, pass that off to the 14527 * meta provider. 14528 */ 14529 14530 mutex_exit(&dtrace_lock); 14531 14532 dtrace_helper_provide(dofhp, p->p_pid); 14533 14534 } else { 14535 /* 14536 * Otherwise, just pass all the helper provider descriptions 14537 * off to the meta provider. 14538 */ 14539 14540 int i; 14541 mutex_exit(&dtrace_lock); 14542 14543 for (i = 0; i < help->dthps_nprovs; i++) { 14544 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 14545 p->p_pid); 14546 } 14547 } 14548 14549 mutex_exit(&dtrace_meta_lock); 14550 } 14551 14552 static int 14553 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 14554 { 14555 dtrace_helpers_t *help; 14556 dtrace_helper_provider_t *hprov, **tmp_provs; 14557 uint_t tmp_maxprovs, i; 14558 14559 ASSERT(MUTEX_HELD(&dtrace_lock)); 14560 14561 help = curproc->p_dtrace_helpers; 14562 ASSERT(help != NULL); 14563 14564 /* 14565 * If we already have dtrace_helper_providers_max helper providers, 14566 * we'll refuse to add a new one.
14567 */ 14568 if (help->dthps_nprovs >= dtrace_helper_providers_max) 14569 return (ENOSPC); 14570 14571 /* 14572 * Check to make sure this isn't a duplicate. 14573 */ 14574 for (i = 0; i < help->dthps_nprovs; i++) { 14575 if (dofhp->dofhp_dof == 14576 help->dthps_provs[i]->dthp_prov.dofhp_dof) 14577 return (EALREADY); 14578 } 14579 14580 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 14581 hprov->dthp_prov = *dofhp; 14582 hprov->dthp_ref = 1; 14583 hprov->dthp_generation = gen; 14584 14585 /* 14586 * Allocate a bigger table for helper providers if it's already full. 14587 */ 14588 if (help->dthps_maxprovs == help->dthps_nprovs) { 14589 tmp_maxprovs = help->dthps_maxprovs; 14590 tmp_provs = help->dthps_provs; 14591 14592 if (help->dthps_maxprovs == 0) 14593 help->dthps_maxprovs = 2; 14594 else 14595 help->dthps_maxprovs *= 2; 14596 if (help->dthps_maxprovs > dtrace_helper_providers_max) 14597 help->dthps_maxprovs = dtrace_helper_providers_max; 14598 14599 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 14600 14601 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 14602 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14603 14604 if (tmp_provs != NULL) { 14605 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 14606 sizeof (dtrace_helper_provider_t *)); 14607 kmem_free(tmp_provs, tmp_maxprovs * 14608 sizeof (dtrace_helper_provider_t *)); 14609 } 14610 } 14611 14612 help->dthps_provs[help->dthps_nprovs] = hprov; 14613 help->dthps_nprovs++; 14614 14615 return (0); 14616} 14617 14618static void 14619dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 14620{ 14621 mutex_enter(&dtrace_lock); 14622 14623 if (--hprov->dthp_ref == 0) { 14624 dof_hdr_t *dof; 14625 mutex_exit(&dtrace_lock); 14626 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 14627 dtrace_dof_destroy(dof); 14628 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 14629 } else { 14630 mutex_exit(&dtrace_lock); 14631 } 14632} 14633 14634static int 14635dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 14636{ 14637 uintptr_t daddr = (uintptr_t)dof; 14638 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 14639 dof_provider_t *provider; 14640 dof_probe_t *probe; 14641 uint8_t *arg; 14642 char *strtab, *typestr; 14643 dof_stridx_t typeidx; 14644 size_t typesz; 14645 uint_t nprobes, j, k; 14646 14647 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 14648 14649 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 14650 dtrace_dof_error(dof, "misaligned section offset"); 14651 return (-1); 14652 } 14653 14654 /* 14655 * The section needs to be large enough to contain the DOF provider 14656 * structure appropriate for the given version. 14657 */ 14658 if (sec->dofs_size < 14659 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
14660 offsetof(dof_provider_t, dofpv_prenoffs) : 14661 sizeof (dof_provider_t))) { 14662 dtrace_dof_error(dof, "provider section too small"); 14663 return (-1); 14664 } 14665 14666 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 14667 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 14668 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 14669 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 14670 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 14671 14672 if (str_sec == NULL || prb_sec == NULL || 14673 arg_sec == NULL || off_sec == NULL) 14674 return (-1); 14675 14676 enoff_sec = NULL; 14677 14678 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14679 provider->dofpv_prenoffs != DOF_SECT_NONE && 14680 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 14681 provider->dofpv_prenoffs)) == NULL) 14682 return (-1); 14683 14684 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 14685 14686 if (provider->dofpv_name >= str_sec->dofs_size || 14687 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 14688 dtrace_dof_error(dof, "invalid provider name"); 14689 return (-1); 14690 } 14691 14692 if (prb_sec->dofs_entsize == 0 || 14693 prb_sec->dofs_entsize > prb_sec->dofs_size) { 14694 dtrace_dof_error(dof, "invalid entry size"); 14695 return (-1); 14696 } 14697 14698 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 14699 dtrace_dof_error(dof, "misaligned entry size"); 14700 return (-1); 14701 } 14702 14703 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 14704 dtrace_dof_error(dof, "invalid entry size"); 14705 return (-1); 14706 } 14707 14708 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 14709 dtrace_dof_error(dof, "misaligned section offset"); 14710 return (-1); 14711 } 14712 14713 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 14714 dtrace_dof_error(dof, "invalid entry size"); 14715 return (-1); 14716 } 14717 14718 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 14719 14720 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 14721 14722 /* 14723 * Take a pass through the probes to check for errors. 14724 */ 14725 for (j = 0; j < nprobes; j++) { 14726 probe = (dof_probe_t *)(uintptr_t)(daddr + 14727 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 14728 14729 if (probe->dofpr_func >= str_sec->dofs_size) { 14730 dtrace_dof_error(dof, "invalid function name"); 14731 return (-1); 14732 } 14733 14734 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 14735 dtrace_dof_error(dof, "function name too long"); 14736 return (-1); 14737 } 14738 14739 if (probe->dofpr_name >= str_sec->dofs_size || 14740 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 14741 dtrace_dof_error(dof, "invalid probe name"); 14742 return (-1); 14743 } 14744 14745 /* 14746 * The offset count must not wrap the index, and the offsets 14747 * must also not overflow the section's data. 14748 */ 14749 if (probe->dofpr_offidx + probe->dofpr_noffs < 14750 probe->dofpr_offidx || 14751 (probe->dofpr_offidx + probe->dofpr_noffs) * 14752 off_sec->dofs_entsize > off_sec->dofs_size) { 14753 dtrace_dof_error(dof, "invalid probe offset"); 14754 return (-1); 14755 } 14756 14757 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 14758 /* 14759 * If there's no is-enabled offset section, make sure 14760 * there aren't any is-enabled offsets. Otherwise 14761 * perform the same checks as for probe offsets 14762 * (immediately above). 
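 * (Note the shape of both checks: the index/count sum is first
 * tested for integer wraparound, and only then scaled by the entry
 * size and compared against the section bounds, so an enormous count
 * cannot wrap past the end of the section and still pass
 * validation.)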
14763 */ 14764 if (enoff_sec == NULL) { 14765 if (probe->dofpr_enoffidx != 0 || 14766 probe->dofpr_nenoffs != 0) { 14767 dtrace_dof_error(dof, "is-enabled " 14768 "offsets with null section"); 14769 return (-1); 14770 } 14771 } else if (probe->dofpr_enoffidx + 14772 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 14773 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 14774 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 14775 dtrace_dof_error(dof, "invalid is-enabled " 14776 "offset"); 14777 return (-1); 14778 } 14779 14780 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 14781 dtrace_dof_error(dof, "zero probe and " 14782 "is-enabled offsets"); 14783 return (-1); 14784 } 14785 } else if (probe->dofpr_noffs == 0) { 14786 dtrace_dof_error(dof, "zero probe offsets"); 14787 return (-1); 14788 } 14789 14790 if (probe->dofpr_argidx + probe->dofpr_xargc < 14791 probe->dofpr_argidx || 14792 (probe->dofpr_argidx + probe->dofpr_xargc) * 14793 arg_sec->dofs_entsize > arg_sec->dofs_size) { 14794 dtrace_dof_error(dof, "invalid args"); 14795 return (-1); 14796 } 14797 14798 typeidx = probe->dofpr_nargv; 14799 typestr = strtab + probe->dofpr_nargv; 14800 for (k = 0; k < probe->dofpr_nargc; k++) { 14801 if (typeidx >= str_sec->dofs_size) { 14802 dtrace_dof_error(dof, "bad " 14803 "native argument type"); 14804 return (-1); 14805 } 14806 14807 typesz = strlen(typestr) + 1; 14808 if (typesz > DTRACE_ARGTYPELEN) { 14809 dtrace_dof_error(dof, "native " 14810 "argument type too long"); 14811 return (-1); 14812 } 14813 typeidx += typesz; 14814 typestr += typesz; 14815 } 14816 14817 typeidx = probe->dofpr_xargv; 14818 typestr = strtab + probe->dofpr_xargv; 14819 for (k = 0; k < probe->dofpr_xargc; k++) { 14820 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 14821 dtrace_dof_error(dof, "bad " 14822 "native argument index"); 14823 return (-1); 14824 } 14825 14826 if (typeidx >= str_sec->dofs_size) { 14827 dtrace_dof_error(dof, "bad " 14828 "translated argument type"); 14829 return (-1); 14830 } 14831 14832 typesz = strlen(typestr) + 1; 14833 if (typesz > DTRACE_ARGTYPELEN) { 14834 dtrace_dof_error(dof, "translated argument " 14835 "type too long"); 14836 return (-1); 14837 } 14838 14839 typeidx += typesz; 14840 typestr += typesz; 14841 } 14842 } 14843 14844 return (0); 14845} 14846 14847static int 14848dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 14849{ 14850 dtrace_helpers_t *help; 14851 dtrace_vstate_t *vstate; 14852 dtrace_enabling_t *enab = NULL; 14853 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 14854 uintptr_t daddr = (uintptr_t)dof; 14855 14856 ASSERT(MUTEX_HELD(&dtrace_lock)); 14857 14858 if ((help = curproc->p_dtrace_helpers) == NULL) 14859 help = dtrace_helpers_create(curproc); 14860 14861 vstate = &help->dthps_vstate; 14862 14863 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 14864 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 14865 dtrace_dof_destroy(dof); 14866 return (rv); 14867 } 14868 14869 /* 14870 * Look for helper providers and validate their descriptions. 
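 * Validation happens before any helper state is created, so a
 * malformed provider section causes the entire DOF to be rejected
 * rather than leaving a partially-constructed provider behind.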
14871 */ 14872 if (dhp != NULL) { 14873 for (i = 0; i < dof->dofh_secnum; i++) { 14874 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 14875 dof->dofh_secoff + i * dof->dofh_secsize); 14876 14877 if (sec->dofs_type != DOF_SECT_PROVIDER) 14878 continue; 14879 14880 if (dtrace_helper_provider_validate(dof, sec) != 0) { 14881 dtrace_enabling_destroy(enab); 14882 dtrace_dof_destroy(dof); 14883 return (-1); 14884 } 14885 14886 nprovs++; 14887 } 14888 } 14889 14890 /* 14891 * Now we need to walk through the ECB descriptions in the enabling. 14892 */ 14893 for (i = 0; i < enab->dten_ndesc; i++) { 14894 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 14895 dtrace_probedesc_t *desc = &ep->dted_probe; 14896 14897 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 14898 continue; 14899 14900 if (strcmp(desc->dtpd_mod, "helper") != 0) 14901 continue; 14902 14903 if (strcmp(desc->dtpd_func, "ustack") != 0) 14904 continue; 14905 14906 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 14907 ep)) != 0) { 14908 /* 14909 * Adding this helper action failed -- we are now going 14910 * to rip out the entire generation and return failure. 14911 */ 14912 (void) dtrace_helper_destroygen(help->dthps_generation); 14913 dtrace_enabling_destroy(enab); 14914 dtrace_dof_destroy(dof); 14915 return (-1); 14916 } 14917 14918 nhelpers++; 14919 } 14920 14921 if (nhelpers < enab->dten_ndesc) 14922 dtrace_dof_error(dof, "unmatched helpers"); 14923 14924 gen = help->dthps_generation++; 14925 dtrace_enabling_destroy(enab); 14926 14927 if (dhp != NULL && nprovs > 0) { 14928 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 14929 if (dtrace_helper_provider_add(dhp, gen) == 0) { 14930 mutex_exit(&dtrace_lock); 14931 dtrace_helper_provider_register(curproc, help, dhp); 14932 mutex_enter(&dtrace_lock); 14933 14934 destroy = 0; 14935 } 14936 } 14937 14938 if (destroy) 14939 dtrace_dof_destroy(dof); 14940 14941 return (gen); 14942 } 14943 14944 static dtrace_helpers_t * 14945 dtrace_helpers_create(proc_t *p) 14946 { 14947 dtrace_helpers_t *help; 14948 14949 ASSERT(MUTEX_HELD(&dtrace_lock)); 14950 ASSERT(p->p_dtrace_helpers == NULL); 14951 14952 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 14953 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 14954 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 14955 14956 p->p_dtrace_helpers = help; 14957 dtrace_helpers++; 14958 14959 return (help); 14960 } 14961 14962 #if defined(sun) 14963 static 14964 #endif 14965 void 14966 #if defined(sun) dtrace_helpers_destroy(void) #else dtrace_helpers_destroy(proc_t *p) #endif 14967 { 14968 dtrace_helpers_t *help; 14969 dtrace_vstate_t *vstate; 14970 #if defined(sun) 14971 proc_t *p = curproc; 14972 #endif 14973 int i; 14974 14975 mutex_enter(&dtrace_lock); 14976 14977 ASSERT(p->p_dtrace_helpers != NULL); 14978 ASSERT(dtrace_helpers > 0); 14979 14980 help = p->p_dtrace_helpers; 14981 vstate = &help->dthps_vstate; 14982 14983 /* 14984 * We're now going to lose the help from this process. 14985 */ 14986 p->p_dtrace_helpers = NULL; 14987 dtrace_sync(); 14988 14989 /* 14990 * Destroy the helper actions. 14991 */ 14992 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14993 dtrace_helper_action_t *h, *next; 14994 14995 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14996 next = h->dtha_next; 14997 dtrace_helper_action_destroy(h, vstate); 14998 h = next; 14999 } 15000 } 15001 15002 mutex_exit(&dtrace_lock); 15003 15004 /* 15005 * Destroy the helper providers.
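 * Each provider is reference-counted -- a fork may have handed a
 * reference to a child via dtrace_helpers_duplicate() -- so
 * dtrace_helper_provider_destroy() frees the underlying DOF only
 * when the last reference is dropped.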
15006 */ 15007 if (help->dthps_maxprovs > 0) { 15008 mutex_enter(&dtrace_meta_lock); 15009 if (dtrace_meta_pid != NULL) { 15010 ASSERT(dtrace_deferred_pid == NULL); 15011 15012 for (i = 0; i < help->dthps_nprovs; i++) { 15013 dtrace_helper_provider_remove( 15014 &help->dthps_provs[i]->dthp_prov, p->p_pid); 15015 } 15016 } else { 15017 mutex_enter(&dtrace_lock); 15018 ASSERT(help->dthps_deferred == 0 || 15019 help->dthps_next != NULL || 15020 help->dthps_prev != NULL || 15021 help == dtrace_deferred_pid); 15022 15023 /* 15024 * Remove the helper from the deferred list. 15025 */ 15026 if (help->dthps_next != NULL) 15027 help->dthps_next->dthps_prev = help->dthps_prev; 15028 if (help->dthps_prev != NULL) 15029 help->dthps_prev->dthps_next = help->dthps_next; 15030 if (dtrace_deferred_pid == help) { 15031 dtrace_deferred_pid = help->dthps_next; 15032 ASSERT(help->dthps_prev == NULL); 15033 } 15034 15035 mutex_exit(&dtrace_lock); 15036 } 15037 15038 mutex_exit(&dtrace_meta_lock); 15039 15040 for (i = 0; i < help->dthps_nprovs; i++) { 15041 dtrace_helper_provider_destroy(help->dthps_provs[i]); 15042 } 15043 15044 kmem_free(help->dthps_provs, help->dthps_maxprovs * 15045 sizeof (dtrace_helper_provider_t *)); 15046 } 15047 15048 mutex_enter(&dtrace_lock); 15049 15050 dtrace_vstate_fini(&help->dthps_vstate); 15051 kmem_free(help->dthps_actions, 15052 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 15053 kmem_free(help, sizeof (dtrace_helpers_t)); 15054 15055 --dtrace_helpers; 15056 mutex_exit(&dtrace_lock); 15057} 15058 15059#if defined(sun) 15060static 15061#endif 15062void 15063dtrace_helpers_duplicate(proc_t *from, proc_t *to) 15064{ 15065 dtrace_helpers_t *help, *newhelp; 15066 dtrace_helper_action_t *helper, *new, *last; 15067 dtrace_difo_t *dp; 15068 dtrace_vstate_t *vstate; 15069 int i, j, sz, hasprovs = 0; 15070 15071 mutex_enter(&dtrace_lock); 15072 ASSERT(from->p_dtrace_helpers != NULL); 15073 ASSERT(dtrace_helpers > 0); 15074 15075 help = from->p_dtrace_helpers; 15076 newhelp = dtrace_helpers_create(to); 15077 ASSERT(to->p_dtrace_helpers != NULL); 15078 15079 newhelp->dthps_generation = help->dthps_generation; 15080 vstate = &newhelp->dthps_vstate; 15081 15082 /* 15083 * Duplicate the helper actions. 15084 */ 15085 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 15086 if ((helper = help->dthps_actions[i]) == NULL) 15087 continue; 15088 15089 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 15090 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 15091 KM_SLEEP); 15092 new->dtha_generation = helper->dtha_generation; 15093 15094 if ((dp = helper->dtha_predicate) != NULL) { 15095 dp = dtrace_difo_duplicate(dp, vstate); 15096 new->dtha_predicate = dp; 15097 } 15098 15099 new->dtha_nactions = helper->dtha_nactions; 15100 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 15101 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 15102 15103 for (j = 0; j < new->dtha_nactions; j++) { 15104 dtrace_difo_t *dp = helper->dtha_actions[j]; 15105 15106 ASSERT(dp != NULL); 15107 dp = dtrace_difo_duplicate(dp, vstate); 15108 new->dtha_actions[j] = dp; 15109 } 15110 15111 if (last != NULL) { 15112 last->dtha_next = new; 15113 } else { 15114 newhelp->dthps_actions[i] = new; 15115 } 15116 15117 last = new; 15118 } 15119 } 15120 15121 /* 15122 * Duplicate the helper providers and register them with the 15123 * DTrace framework. 
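 * Unlike the helper actions above, whose DIFOs are duplicated
 * outright, the providers themselves are shared: the child simply
 * takes a new reference (dthp_ref is bumped below) on each of the
 * parent's providers.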
15124 */ 15125 if (help->dthps_nprovs > 0) { 15126 newhelp->dthps_nprovs = help->dthps_nprovs; 15127 newhelp->dthps_maxprovs = help->dthps_nprovs; 15128 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 15129 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 15130 for (i = 0; i < newhelp->dthps_nprovs; i++) { 15131 newhelp->dthps_provs[i] = help->dthps_provs[i]; 15132 newhelp->dthps_provs[i]->dthp_ref++; 15133 } 15134 15135 hasprovs = 1; 15136 } 15137 15138 mutex_exit(&dtrace_lock); 15139 15140 if (hasprovs) 15141 dtrace_helper_provider_register(to, newhelp, NULL); 15142 } 15143 15144 /* 15145 * DTrace Hook Functions 15146 */ 15147 static void 15148 dtrace_module_loaded(modctl_t *ctl) 15149 { 15150 dtrace_provider_t *prv; 15151 15152 mutex_enter(&dtrace_provider_lock); 15153 #if defined(sun) 15154 mutex_enter(&mod_lock); 15155 #endif 15156 15157 #if defined(sun) 15158 ASSERT(ctl->mod_busy); 15159 #endif 15160 15161 /* 15162 * We're going to call each provider's per-module provide operation 15163 * specifying only this module. 15164 */ 15165 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 15166 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 15167 15168 #if defined(sun) 15169 mutex_exit(&mod_lock); 15170 #endif 15171 mutex_exit(&dtrace_provider_lock); 15172 15173 /* 15174 * If we have any retained enablings, we need to match against them. 15175 * Enabling probes requires that cpu_lock be held, and we cannot hold 15176 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 15177 * module. (In particular, this happens when loading scheduling 15178 * classes.) So if we have any retained enablings, we need to dispatch 15179 * our task queue to do the match for us. 15180 */ 15181 mutex_enter(&dtrace_lock); 15182 15183 if (dtrace_retained == NULL) { 15184 mutex_exit(&dtrace_lock); 15185 return; 15186 } 15187 15188 (void) taskq_dispatch(dtrace_taskq, 15189 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 15190 15191 mutex_exit(&dtrace_lock); 15192 15193 /* 15194 * And now, for a little heuristic sleaze: in general, we want to 15195 * match modules as soon as they load. However, we cannot guarantee 15196 * this, because it would lead us to the lock ordering violation 15197 * outlined above. The common case, of course, is that cpu_lock is 15198 * _not_ held -- so we delay here for a clock tick, hoping that that's 15199 * long enough for the task queue to do its work. If it's not, it's 15200 * not a serious problem -- it just means that the module that we 15201 * just loaded may not be immediately instrumentable. 15202 */ 15203 delay(1); 15204 } 15205 15206 static void 15207 #if defined(sun) 15208 dtrace_module_unloaded(modctl_t *ctl) 15209 #else 15210 dtrace_module_unloaded(modctl_t *ctl, int *error) 15211 #endif 15212 { 15213 dtrace_probe_t template, *probe, *first, *next; 15214 dtrace_provider_t *prov; 15215 #if !defined(sun) 15216 char modname[DTRACE_MODNAMELEN]; 15217 size_t len; 15218 #endif 15219 15220 #if defined(sun) 15221 template.dtpr_mod = ctl->mod_modname; 15222 #else 15223 /* Handle the fact that ctl->filename may end in ".ko".
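	 * FreeBSD kld file names carry the object suffix, so "foo.ko" must
	 * be trimmed to a bare "foo" before it can match the dtpr_mod of
	 * the module's probes.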
*/ 15224 strlcpy(modname, ctl->filename, sizeof(modname)); 15225 len = strlen(ctl->filename); 15226 if (len > 3 && strcmp(modname + len - 3, ".ko") == 0) 15227 modname[len - 3] = '\0'; 15228 template.dtpr_mod = modname; 15229#endif 15230 15231 mutex_enter(&dtrace_provider_lock); 15232#if defined(sun) 15233 mutex_enter(&mod_lock); 15234#endif 15235 mutex_enter(&dtrace_lock); 15236 15237#if !defined(sun) 15238 if (ctl->nenabled > 0) { 15239 /* Don't allow unloads if a probe is enabled. */ 15240 mutex_exit(&dtrace_provider_lock); 15241 mutex_exit(&dtrace_lock); 15242 *error = -1; 15243 printf( 15244 "kldunload: attempt to unload module that has DTrace probes enabled\n"); 15245 return; 15246 } 15247#endif 15248 15249 if (dtrace_bymod == NULL) { 15250 /* 15251 * The DTrace module is loaded (obviously) but not attached; 15252 * we don't have any work to do. 15253 */ 15254 mutex_exit(&dtrace_provider_lock); 15255#if defined(sun) 15256 mutex_exit(&mod_lock); 15257#endif 15258 mutex_exit(&dtrace_lock); 15259 return; 15260 } 15261 15262 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 15263 probe != NULL; probe = probe->dtpr_nextmod) { 15264 if (probe->dtpr_ecb != NULL) { 15265 mutex_exit(&dtrace_provider_lock); 15266#if defined(sun) 15267 mutex_exit(&mod_lock); 15268#endif 15269 mutex_exit(&dtrace_lock); 15270 15271 /* 15272 * This shouldn't _actually_ be possible -- we're 15273 * unloading a module that has an enabled probe in it. 15274 * (It's normally up to the provider to make sure that 15275 * this can't happen.) However, because dtps_enable() 15276 * doesn't have a failure mode, there can be an 15277 * enable/unload race. Upshot: we don't want to 15278 * assert, but we're not going to disable the 15279 * probe, either. 15280 */ 15281 if (dtrace_err_verbose) { 15282#if defined(sun) 15283 cmn_err(CE_WARN, "unloaded module '%s' had " 15284 "enabled probes", ctl->mod_modname); 15285#else 15286 cmn_err(CE_WARN, "unloaded module '%s' had " 15287 "enabled probes", modname); 15288#endif 15289 } 15290 15291 return; 15292 } 15293 } 15294 15295 probe = first; 15296 15297 for (first = NULL; probe != NULL; probe = next) { 15298 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 15299 15300 dtrace_probes[probe->dtpr_id - 1] = NULL; 15301 15302 next = probe->dtpr_nextmod; 15303 dtrace_hash_remove(dtrace_bymod, probe); 15304 dtrace_hash_remove(dtrace_byfunc, probe); 15305 dtrace_hash_remove(dtrace_byname, probe); 15306 15307 if (first == NULL) { 15308 first = probe; 15309 probe->dtpr_nextmod = NULL; 15310 } else { 15311 probe->dtpr_nextmod = first; 15312 first = probe; 15313 } 15314 } 15315 15316 /* 15317 * We've removed all of the module's probes from the hash chains and 15318 * from the probe array. Now issue a dtrace_sync() to be sure that 15319 * everyone has cleared out from any probe array processing. 
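 * (dtrace_sync() acts as a barrier: it cross-calls every CPU, and
 * since probe processing runs with interrupts disabled, its return
 * guarantees that no CPU still holds a reference to the probes that
 * are freed below.)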
15320 */ 15321 dtrace_sync(); 15322 15323 for (probe = first; probe != NULL; probe = first) { 15324 first = probe->dtpr_nextmod; 15325 prov = probe->dtpr_provider; 15326 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 15327 probe->dtpr_arg); 15328 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 15329 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 15330 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 15331#if defined(sun) 15332 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 15333#else 15334 free_unr(dtrace_arena, probe->dtpr_id); 15335#endif 15336 kmem_free(probe, sizeof (dtrace_probe_t)); 15337 } 15338 15339 mutex_exit(&dtrace_lock); 15340#if defined(sun) 15341 mutex_exit(&mod_lock); 15342#endif 15343 mutex_exit(&dtrace_provider_lock); 15344} 15345 15346#if !defined(sun) 15347static void 15348dtrace_kld_load(void *arg __unused, linker_file_t lf) 15349{ 15350 15351 dtrace_module_loaded(lf); 15352} 15353 15354static void 15355dtrace_kld_unload_try(void *arg __unused, linker_file_t lf, int *error) 15356{ 15357 15358 if (*error != 0) 15359 /* We already have an error, so don't do anything. */ 15360 return; 15361 dtrace_module_unloaded(lf, error); 15362} 15363#endif 15364 15365#if defined(sun) 15366static void 15367dtrace_suspend(void) 15368{ 15369 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 15370} 15371 15372static void 15373dtrace_resume(void) 15374{ 15375 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 15376} 15377#endif 15378 15379static int 15380dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 15381{ 15382 ASSERT(MUTEX_HELD(&cpu_lock)); 15383 mutex_enter(&dtrace_lock); 15384 15385 switch (what) { 15386 case CPU_CONFIG: { 15387 dtrace_state_t *state; 15388 dtrace_optval_t *opt, rs, c; 15389 15390 /* 15391 * For now, we only allocate a new buffer for anonymous state. 15392 */ 15393 if ((state = dtrace_anon.dta_state) == NULL) 15394 break; 15395 15396 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 15397 break; 15398 15399 opt = state->dts_options; 15400 c = opt[DTRACEOPT_CPU]; 15401 15402 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 15403 break; 15404 15405 /* 15406 * Regardless of what the actual policy is, we're going to 15407 * temporarily set our resize policy to be manual. We're 15408 * also going to temporarily set our CPU option to denote 15409 * the newly configured CPU. 15410 */ 15411 rs = opt[DTRACEOPT_BUFRESIZE]; 15412 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 15413 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 15414 15415 (void) dtrace_state_buffers(state); 15416 15417 opt[DTRACEOPT_BUFRESIZE] = rs; 15418 opt[DTRACEOPT_CPU] = c; 15419 15420 break; 15421 } 15422 15423 case CPU_UNCONFIG: 15424 /* 15425 * We don't free the buffer in the CPU_UNCONFIG case. (The 15426 * buffer will be freed when the consumer exits.) 
15427 */ 15428 break; 15429 15430 default: 15431 break; 15432 } 15433 15434 mutex_exit(&dtrace_lock); 15435 return (0); 15436} 15437 15438#if defined(sun) 15439static void 15440dtrace_cpu_setup_initial(processorid_t cpu) 15441{ 15442 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 15443} 15444#endif 15445 15446static void 15447dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 15448{ 15449 if (dtrace_toxranges >= dtrace_toxranges_max) { 15450 int osize, nsize; 15451 dtrace_toxrange_t *range; 15452 15453 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15454 15455 if (osize == 0) { 15456 ASSERT(dtrace_toxrange == NULL); 15457 ASSERT(dtrace_toxranges_max == 0); 15458 dtrace_toxranges_max = 1; 15459 } else { 15460 dtrace_toxranges_max <<= 1; 15461 } 15462 15463 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15464 range = kmem_zalloc(nsize, KM_SLEEP); 15465 15466 if (dtrace_toxrange != NULL) { 15467 ASSERT(osize != 0); 15468 bcopy(dtrace_toxrange, range, osize); 15469 kmem_free(dtrace_toxrange, osize); 15470 } 15471 15472 dtrace_toxrange = range; 15473 } 15474 15475 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 15476 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 15477 15478 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 15479 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 15480 dtrace_toxranges++; 15481} 15482 15483/* 15484 * DTrace Driver Cookbook Functions 15485 */ 15486#if defined(sun) 15487/*ARGSUSED*/ 15488static int 15489dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 15490{ 15491 dtrace_provider_id_t id; 15492 dtrace_state_t *state = NULL; 15493 dtrace_enabling_t *enab; 15494 15495 mutex_enter(&cpu_lock); 15496 mutex_enter(&dtrace_provider_lock); 15497 mutex_enter(&dtrace_lock); 15498 15499 if (ddi_soft_state_init(&dtrace_softstate, 15500 sizeof (dtrace_state_t), 0) != 0) { 15501 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 15502 mutex_exit(&cpu_lock); 15503 mutex_exit(&dtrace_provider_lock); 15504 mutex_exit(&dtrace_lock); 15505 return (DDI_FAILURE); 15506 } 15507 15508 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 15509 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 15510 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 15511 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 15512 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 15513 ddi_remove_minor_node(devi, NULL); 15514 ddi_soft_state_fini(&dtrace_softstate); 15515 mutex_exit(&cpu_lock); 15516 mutex_exit(&dtrace_provider_lock); 15517 mutex_exit(&dtrace_lock); 15518 return (DDI_FAILURE); 15519 } 15520 15521 ddi_report_dev(devi); 15522 dtrace_devi = devi; 15523 15524 dtrace_modload = dtrace_module_loaded; 15525 dtrace_modunload = dtrace_module_unloaded; 15526 dtrace_cpu_init = dtrace_cpu_setup_initial; 15527 dtrace_helpers_cleanup = dtrace_helpers_destroy; 15528 dtrace_helpers_fork = dtrace_helpers_duplicate; 15529 dtrace_cpustart_init = dtrace_suspend; 15530 dtrace_cpustart_fini = dtrace_resume; 15531 dtrace_debugger_init = dtrace_suspend; 15532 dtrace_debugger_fini = dtrace_resume; 15533 15534 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 15535 15536 ASSERT(MUTEX_HELD(&cpu_lock)); 15537 15538 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 15539 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 15540 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 15541 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 15542 VM_SLEEP | VMC_IDENTIFIER); 15543 
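	/*
	 * Both arenas are identifier arenas (VMC_IDENTIFIER): they hand
	 * out integer namespaces -- probe IDs and clone minor numbers,
	 * respectively -- rather than backed memory.
	 */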
dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 15544 1, INT_MAX, 0); 15545 15546 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 15547 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 15548 NULL, NULL, NULL, NULL, NULL, 0); 15549 15550 ASSERT(MUTEX_HELD(&cpu_lock)); 15551 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 15552 offsetof(dtrace_probe_t, dtpr_nextmod), 15553 offsetof(dtrace_probe_t, dtpr_prevmod)); 15554 15555 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 15556 offsetof(dtrace_probe_t, dtpr_nextfunc), 15557 offsetof(dtrace_probe_t, dtpr_prevfunc)); 15558 15559 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 15560 offsetof(dtrace_probe_t, dtpr_nextname), 15561 offsetof(dtrace_probe_t, dtpr_prevname)); 15562 15563 if (dtrace_retain_max < 1) { 15564 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 15565 "setting to 1", dtrace_retain_max); 15566 dtrace_retain_max = 1; 15567 } 15568 15569 /* 15570 * Now discover our toxic ranges. 15571 */ 15572 dtrace_toxic_ranges(dtrace_toxrange_add); 15573 15574 /* 15575 * Before we register ourselves as a provider to our own framework, 15576 * we would like to assert that dtrace_provider is NULL -- but that's 15577 * not true if we were loaded as a dependency of a DTrace provider. 15578 * Once we've registered, we can assert that dtrace_provider is our 15579 * pseudo provider. 15580 */ 15581 (void) dtrace_register("dtrace", &dtrace_provider_attr, 15582 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 15583 15584 ASSERT(dtrace_provider != NULL); 15585 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 15586 15587 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 15588 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 15589 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 15590 dtrace_provider, NULL, NULL, "END", 0, NULL); 15591 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 15592 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 15593 15594 dtrace_anon_property(); 15595 mutex_exit(&cpu_lock); 15596 15597 /* 15598 * If DTrace helper tracing is enabled, we need to allocate the 15599 * trace buffer and initialize the values. 15600 */ 15601 if (dtrace_helptrace_enabled) { 15602 ASSERT(dtrace_helptrace_buffer == NULL); 15603 dtrace_helptrace_buffer = 15604 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 15605 dtrace_helptrace_next = 0; 15606 } 15607 15608 /* 15609 * If there are already providers, we must ask them to provide their 15610 * probes, and then match any anonymous enabling against them. Note 15611 * that there should be no other retained enablings at this time: 15612 * the only retained enablings at this time should be the anonymous 15613 * enabling. 15614 */ 15615 if (dtrace_anon.dta_enabling != NULL) { 15616 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 15617 15618 dtrace_enabling_provide(NULL); 15619 state = dtrace_anon.dta_state; 15620 15621 /* 15622 * We couldn't hold cpu_lock across the above call to 15623 * dtrace_enabling_provide(), but we must hold it to actually 15624 * enable the probes. We have to drop all of our locks, pick 15625 * up cpu_lock, and regain our locks before matching the 15626 * retained anonymous enabling. 
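 * (The dance below preserves the lock ordering used throughout the
 * framework: cpu_lock before dtrace_provider_lock before
 * dtrace_lock.)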
15627 */ 15628 mutex_exit(&dtrace_lock); 15629 mutex_exit(&dtrace_provider_lock); 15630 15631 mutex_enter(&cpu_lock); 15632 mutex_enter(&dtrace_provider_lock); 15633 mutex_enter(&dtrace_lock); 15634 15635 if ((enab = dtrace_anon.dta_enabling) != NULL) 15636 (void) dtrace_enabling_match(enab, NULL); 15637 15638 mutex_exit(&cpu_lock); 15639 } 15640 15641 mutex_exit(&dtrace_lock); 15642 mutex_exit(&dtrace_provider_lock); 15643 15644 if (state != NULL) { 15645 /* 15646 * If we created any anonymous state, set it going now. 15647 */ 15648 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 15649 } 15650 15651 return (DDI_SUCCESS); 15652} 15653#endif 15654 15655#if !defined(sun) 15656#if __FreeBSD_version >= 800039 15657static void dtrace_dtr(void *); 15658#endif 15659#endif 15660 15661/*ARGSUSED*/ 15662static int 15663#if defined(sun) 15664dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 15665#else 15666dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 15667#endif 15668{ 15669 dtrace_state_t *state; 15670 uint32_t priv; 15671 uid_t uid; 15672 zoneid_t zoneid; 15673 15674#if defined(sun) 15675 if (getminor(*devp) == DTRACEMNRN_HELPER) 15676 return (0); 15677 15678 /* 15679 * If this wasn't an open with the "helper" minor, then it must be 15680 * the "dtrace" minor. 15681 */ 15682 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 15683#else 15684 cred_t *cred_p = NULL; 15685 15686#if __FreeBSD_version < 800039 15687 /* 15688 * The first minor device is the one that is cloned so there is 15689 * nothing more to do here. 15690 */ 15691 if (dev2unit(dev) == 0) 15692 return 0; 15693 15694 /* 15695 * Devices are cloned, so if the DTrace state has already 15696 * been allocated, that means this device belongs to a 15697 * different client. Each client should open '/dev/dtrace' 15698 * to get a cloned device. 15699 */ 15700 if (dev->si_drv1 != NULL) 15701 return (EBUSY); 15702#endif 15703 15704 cred_p = dev->si_cred; 15705#endif 15706 15707 /* 15708 * If no DTRACE_PRIV_* bits are set in the credential, then the 15709 * caller lacks sufficient permission to do anything with DTrace. 15710 */ 15711 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 15712 if (priv == DTRACE_PRIV_NONE) { 15713#if !defined(sun) 15714#if __FreeBSD_version < 800039 15715 /* Destroy the cloned device. */ 15716 destroy_dev(dev); 15717#endif 15718#endif 15719 15720 return (EACCES); 15721 } 15722 15723 /* 15724 * Ask all providers to provide all their probes. 15725 */ 15726 mutex_enter(&dtrace_provider_lock); 15727 dtrace_probe_provide(NULL, NULL); 15728 mutex_exit(&dtrace_provider_lock); 15729 15730 mutex_enter(&cpu_lock); 15731 mutex_enter(&dtrace_lock); 15732 dtrace_opens++; 15733 dtrace_membar_producer(); 15734 15735#if defined(sun) 15736 /* 15737 * If the kernel debugger is active (that is, if the kernel debugger 15738 * modified text in some way), we won't allow the open. 
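 * (kmdb and DTrace both patch kernel text, so the two must be
 * mutually exclusive; kdi_dtrace_set() fails if the debugger has
 * already modified text.)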
15739 */ 15740 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15741 dtrace_opens--; 15742 mutex_exit(&cpu_lock); 15743 mutex_exit(&dtrace_lock); 15744 return (EBUSY); 15745 } 15746 15747 state = dtrace_state_create(devp, cred_p); 15748#else 15749 state = dtrace_state_create(dev); 15750#if __FreeBSD_version < 800039 15751 dev->si_drv1 = state; 15752#else 15753 devfs_set_cdevpriv(state, dtrace_dtr); 15754#endif 15755#endif 15756 15757 mutex_exit(&cpu_lock); 15758 15759 if (state == NULL) { 15760#if defined(sun) 15761 if (--dtrace_opens == 0) 15762 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15763#else 15764 --dtrace_opens; 15765#endif 15766 mutex_exit(&dtrace_lock); 15767#if !defined(sun) 15768#if __FreeBSD_version < 800039 15769 /* Destroy the cloned device. */ 15770 destroy_dev(dev); 15771#endif 15772#endif 15773 return (EAGAIN); 15774 } 15775 15776 mutex_exit(&dtrace_lock); 15777 15778 return (0); 15779} 15780 15781/*ARGSUSED*/ 15782#if defined(sun) 15783static int 15784dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 15785#elif __FreeBSD_version < 800039 15786static int 15787dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td) 15788#else 15789static void 15790dtrace_dtr(void *data) 15791#endif 15792{ 15793#if defined(sun) 15794 minor_t minor = getminor(dev); 15795 dtrace_state_t *state; 15796 15797 if (minor == DTRACEMNRN_HELPER) 15798 return (0); 15799 15800 state = ddi_get_soft_state(dtrace_softstate, minor); 15801#else 15802#if __FreeBSD_version < 800039 15803 dtrace_state_t *state = dev->si_drv1; 15804 15805 /* Check if this is not a cloned device. */ 15806 if (dev2unit(dev) == 0) 15807 return (0); 15808#else 15809 dtrace_state_t *state = data; 15810#endif 15811 15812#endif 15813 15814 mutex_enter(&cpu_lock); 15815 mutex_enter(&dtrace_lock); 15816 15817 if (state != NULL) { 15818 if (state->dts_anon) { 15819 /* 15820 * There is anonymous state. Destroy that first. 15821 */ 15822 ASSERT(dtrace_anon.dta_state == NULL); 15823 dtrace_state_destroy(state->dts_anon); 15824 } 15825 15826 dtrace_state_destroy(state); 15827 15828#if !defined(sun) 15829 kmem_free(state, 0); 15830#if __FreeBSD_version < 800039 15831 dev->si_drv1 = NULL; 15832#endif 15833#endif 15834 } 15835 15836 ASSERT(dtrace_opens > 0); 15837#if defined(sun) 15838 if (--dtrace_opens == 0) 15839 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15840#else 15841 --dtrace_opens; 15842#endif 15843 15844 mutex_exit(&dtrace_lock); 15845 mutex_exit(&cpu_lock); 15846 15847#if __FreeBSD_version < 800039 15848 /* Schedule this cloned device to be destroyed. */ 15849 destroy_dev_sched(dev); 15850#endif 15851 15852#if defined(sun) || __FreeBSD_version < 800039 15853 return (0); 15854#endif 15855} 15856 15857#if defined(sun) 15858/*ARGSUSED*/ 15859static int 15860dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 15861{ 15862 int rval; 15863 dof_helper_t help, *dhp = NULL; 15864 15865 switch (cmd) { 15866 case DTRACEHIOC_ADDDOF: 15867 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 15868 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 15869 return (EFAULT); 15870 } 15871 15872 dhp = &help; 15873 arg = (intptr_t)help.dofhp_dof; 15874 /*FALLTHROUGH*/ 15875 15876 case DTRACEHIOC_ADD: { 15877 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 15878 15879 if (dof == NULL) 15880 return (rval); 15881 15882 mutex_enter(&dtrace_lock); 15883 15884 /* 15885 * dtrace_helper_slurp() takes responsibility for the dof -- 15886 * it may free it now or it may save it and free it later. 
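 * On success, dtrace_helper_slurp() returns the generation number
 * assigned to the new helpers; it is handed back to userland via
 * *rv, and a later DTRACEHIOC_REMOVE with that generation destroys
 * them. A rough consumer-side sketch (illustrative only -- see
 * libdtrace's drti.c for the real sequence):
 *
 *	dof_helper_t dh = { ... };
 *	int fd = open("/dev/dtrace/helper", O_RDWR);
 *	int gen = ioctl(fd, DTRACEHIOC_ADDDOF, &dh);
 *	...
 *	(void) ioctl(fd, DTRACEHIOC_REMOVE, gen);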
15887 */ 15888 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 15889 *rv = rval; 15890 rval = 0; 15891 } else { 15892 rval = EINVAL; 15893 } 15894 15895 mutex_exit(&dtrace_lock); 15896 return (rval); 15897 } 15898 15899 case DTRACEHIOC_REMOVE: { 15900 mutex_enter(&dtrace_lock); 15901 rval = dtrace_helper_destroygen(arg); 15902 mutex_exit(&dtrace_lock); 15903 15904 return (rval); 15905 } 15906 15907 default: 15908 break; 15909 } 15910 15911 return (ENOTTY); 15912} 15913 15914/*ARGSUSED*/ 15915static int 15916dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 15917{ 15918 minor_t minor = getminor(dev); 15919 dtrace_state_t *state; 15920 int rval; 15921 15922 if (minor == DTRACEMNRN_HELPER) 15923 return (dtrace_ioctl_helper(cmd, arg, rv)); 15924 15925 state = ddi_get_soft_state(dtrace_softstate, minor); 15926 15927 if (state->dts_anon) { 15928 ASSERT(dtrace_anon.dta_state == NULL); 15929 state = state->dts_anon; 15930 } 15931 15932 switch (cmd) { 15933 case DTRACEIOC_PROVIDER: { 15934 dtrace_providerdesc_t pvd; 15935 dtrace_provider_t *pvp; 15936 15937 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 15938 return (EFAULT); 15939 15940 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 15941 mutex_enter(&dtrace_provider_lock); 15942 15943 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 15944 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 15945 break; 15946 } 15947 15948 mutex_exit(&dtrace_provider_lock); 15949 15950 if (pvp == NULL) 15951 return (ESRCH); 15952 15953 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 15954 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 15955 15956 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 15957 return (EFAULT); 15958 15959 return (0); 15960 } 15961 15962 case DTRACEIOC_EPROBE: { 15963 dtrace_eprobedesc_t epdesc; 15964 dtrace_ecb_t *ecb; 15965 dtrace_action_t *act; 15966 void *buf; 15967 size_t size; 15968 uintptr_t dest; 15969 int nrecs; 15970 15971 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 15972 return (EFAULT); 15973 15974 mutex_enter(&dtrace_lock); 15975 15976 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 15977 mutex_exit(&dtrace_lock); 15978 return (EINVAL); 15979 } 15980 15981 if (ecb->dte_probe == NULL) { 15982 mutex_exit(&dtrace_lock); 15983 return (EINVAL); 15984 } 15985 15986 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 15987 epdesc.dtepd_uarg = ecb->dte_uarg; 15988 epdesc.dtepd_size = ecb->dte_size; 15989 15990 nrecs = epdesc.dtepd_nrecs; 15991 epdesc.dtepd_nrecs = 0; 15992 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15993 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15994 continue; 15995 15996 epdesc.dtepd_nrecs++; 15997 } 15998 15999 /* 16000 * Now that we have the size, we need to allocate a temporary 16001 * buffer in which to store the complete description. We need 16002 * the temporary buffer to be able to drop dtrace_lock() 16003 * across the copyout(), below. 
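 * (copyout() may fault on the user address and sleep, which is not
 * permitted while holding dtrace_lock -- hence the snapshot-then-copy
 * pattern here and in DTRACEIOC_AGGDESC below.)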
16004 */ 16005 size = sizeof (dtrace_eprobedesc_t) + 16006 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 16007 16008 buf = kmem_alloc(size, KM_SLEEP); 16009 dest = (uintptr_t)buf; 16010 16011 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 16012 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 16013 16014 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 16015 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 16016 continue; 16017 16018 if (nrecs-- == 0) 16019 break; 16020 16021 bcopy(&act->dta_rec, (void *)dest, 16022 sizeof (dtrace_recdesc_t)); 16023 dest += sizeof (dtrace_recdesc_t); 16024 } 16025 16026 mutex_exit(&dtrace_lock); 16027 16028 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 16029 kmem_free(buf, size); 16030 return (EFAULT); 16031 } 16032 16033 kmem_free(buf, size); 16034 return (0); 16035 } 16036 16037 case DTRACEIOC_AGGDESC: { 16038 dtrace_aggdesc_t aggdesc; 16039 dtrace_action_t *act; 16040 dtrace_aggregation_t *agg; 16041 int nrecs; 16042 uint32_t offs; 16043 dtrace_recdesc_t *lrec; 16044 void *buf; 16045 size_t size; 16046 uintptr_t dest; 16047 16048 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 16049 return (EFAULT); 16050 16051 mutex_enter(&dtrace_lock); 16052 16053 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 16054 mutex_exit(&dtrace_lock); 16055 return (EINVAL); 16056 } 16057 16058 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 16059 16060 nrecs = aggdesc.dtagd_nrecs; 16061 aggdesc.dtagd_nrecs = 0; 16062 16063 offs = agg->dtag_base; 16064 lrec = &agg->dtag_action.dta_rec; 16065 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 16066 16067 for (act = agg->dtag_first; ; act = act->dta_next) { 16068 ASSERT(act->dta_intuple || 16069 DTRACEACT_ISAGG(act->dta_kind)); 16070 16071 /* 16072 * If this action has a record size of zero, it 16073 * denotes an argument to the aggregating action. 16074 * Because the presence of this record doesn't (or 16075 * shouldn't) affect the way the data is interpreted, 16076 * we don't copy it out to save user-level the 16077 * confusion of dealing with a zero-length record. 16078 */ 16079 if (act->dta_rec.dtrd_size == 0) { 16080 ASSERT(agg->dtag_hasarg); 16081 continue; 16082 } 16083 16084 aggdesc.dtagd_nrecs++; 16085 16086 if (act == &agg->dtag_action) 16087 break; 16088 } 16089 16090 /* 16091 * Now that we have the size, we need to allocate a temporary 16092 * buffer in which to store the complete description. We need 16093 * the temporary buffer to be able to drop dtrace_lock() 16094 * across the copyout(), below. 16095 */ 16096 size = sizeof (dtrace_aggdesc_t) + 16097 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 16098 16099 buf = kmem_alloc(size, KM_SLEEP); 16100 dest = (uintptr_t)buf; 16101 16102 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 16103 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 16104 16105 for (act = agg->dtag_first; ; act = act->dta_next) { 16106 dtrace_recdesc_t rec = act->dta_rec; 16107 16108 /* 16109 * See the comment in the above loop for why we pass 16110 * over zero-length records. 
16111 */ 16112 if (rec.dtrd_size == 0) { 16113 ASSERT(agg->dtag_hasarg); 16114 continue; 16115 } 16116 16117 if (nrecs-- == 0) 16118 break; 16119 16120 rec.dtrd_offset -= offs; 16121 bcopy(&rec, (void *)dest, sizeof (rec)); 16122 dest += sizeof (dtrace_recdesc_t); 16123 16124 if (act == &agg->dtag_action) 16125 break; 16126 } 16127 16128 mutex_exit(&dtrace_lock); 16129 16130 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 16131 kmem_free(buf, size); 16132 return (EFAULT); 16133 } 16134 16135 kmem_free(buf, size); 16136 return (0); 16137 } 16138 16139 case DTRACEIOC_ENABLE: { 16140 dof_hdr_t *dof; 16141 dtrace_enabling_t *enab = NULL; 16142 dtrace_vstate_t *vstate; 16143 int err = 0; 16144 16145 *rv = 0; 16146 16147 /* 16148 * If a NULL argument has been passed, we take this as our 16149 * cue to reevaluate our enablings. 16150 */ 16151 if (arg == NULL) { 16152 dtrace_enabling_matchall(); 16153 16154 return (0); 16155 } 16156 16157 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 16158 return (rval); 16159 16160 mutex_enter(&cpu_lock); 16161 mutex_enter(&dtrace_lock); 16162 vstate = &state->dts_vstate; 16163 16164 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 16165 mutex_exit(&dtrace_lock); 16166 mutex_exit(&cpu_lock); 16167 dtrace_dof_destroy(dof); 16168 return (EBUSY); 16169 } 16170 16171 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 16172 mutex_exit(&dtrace_lock); 16173 mutex_exit(&cpu_lock); 16174 dtrace_dof_destroy(dof); 16175 return (EINVAL); 16176 } 16177 16178 if ((rval = dtrace_dof_options(dof, state)) != 0) { 16179 dtrace_enabling_destroy(enab); 16180 mutex_exit(&dtrace_lock); 16181 mutex_exit(&cpu_lock); 16182 dtrace_dof_destroy(dof); 16183 return (rval); 16184 } 16185 16186 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 16187 err = dtrace_enabling_retain(enab); 16188 } else { 16189 dtrace_enabling_destroy(enab); 16190 } 16191 16192 mutex_exit(&cpu_lock); 16193 mutex_exit(&dtrace_lock); 16194 dtrace_dof_destroy(dof); 16195 16196 return (err); 16197 } 16198 16199 case DTRACEIOC_REPLICATE: { 16200 dtrace_repldesc_t desc; 16201 dtrace_probedesc_t *match = &desc.dtrpd_match; 16202 dtrace_probedesc_t *create = &desc.dtrpd_create; 16203 int err; 16204 16205 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16206 return (EFAULT); 16207 16208 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 16209 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 16210 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 16211 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 16212 16213 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 16214 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 16215 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 16216 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 16217 16218 mutex_enter(&dtrace_lock); 16219 err = dtrace_enabling_replicate(state, match, create); 16220 mutex_exit(&dtrace_lock); 16221 16222 return (err); 16223 } 16224 16225 case DTRACEIOC_PROBEMATCH: 16226 case DTRACEIOC_PROBES: { 16227 dtrace_probe_t *probe = NULL; 16228 dtrace_probedesc_t desc; 16229 dtrace_probekey_t pkey; 16230 dtrace_id_t i; 16231 int m = 0; 16232 uint32_t priv; 16233 uid_t uid; 16234 zoneid_t zoneid; 16235 16236 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16237 return (EFAULT); 16238 16239 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 16240 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 16241 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 16242 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 16243 16244 /* 16245 * Before we attempt to 
match this probe, we want to give 16246 * all providers the opportunity to provide it. 16247 */ 16248 if (desc.dtpd_id == DTRACE_IDNONE) { 16249 mutex_enter(&dtrace_provider_lock); 16250 dtrace_probe_provide(&desc, NULL); 16251 mutex_exit(&dtrace_provider_lock); 16252 desc.dtpd_id++; 16253 } 16254 16255 if (cmd == DTRACEIOC_PROBEMATCH) { 16256 dtrace_probekey(&desc, &pkey); 16257 pkey.dtpk_id = DTRACE_IDNONE; 16258 } 16259 16260 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 16261 16262 mutex_enter(&dtrace_lock); 16263 16264 if (cmd == DTRACEIOC_PROBEMATCH) { 16265 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 16266 if ((probe = dtrace_probes[i - 1]) != NULL && 16267 (m = dtrace_match_probe(probe, &pkey, 16268 priv, uid, zoneid)) != 0) 16269 break; 16270 } 16271 16272 if (m < 0) { 16273 mutex_exit(&dtrace_lock); 16274 return (EINVAL); 16275 } 16276 16277 } else { 16278 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 16279 if ((probe = dtrace_probes[i - 1]) != NULL && 16280 dtrace_match_priv(probe, priv, uid, zoneid)) 16281 break; 16282 } 16283 } 16284 16285 if (probe == NULL) { 16286 mutex_exit(&dtrace_lock); 16287 return (ESRCH); 16288 } 16289 16290 dtrace_probe_description(probe, &desc); 16291 mutex_exit(&dtrace_lock); 16292 16293 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16294 return (EFAULT); 16295 16296 return (0); 16297 } 16298 16299 case DTRACEIOC_PROBEARG: { 16300 dtrace_argdesc_t desc; 16301 dtrace_probe_t *probe; 16302 dtrace_provider_t *prov; 16303 16304 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16305 return (EFAULT); 16306 16307 if (desc.dtargd_id == DTRACE_IDNONE) 16308 return (EINVAL); 16309 16310 if (desc.dtargd_ndx == DTRACE_ARGNONE) 16311 return (EINVAL); 16312 16313 mutex_enter(&dtrace_provider_lock); 16314 mutex_enter(&mod_lock); 16315 mutex_enter(&dtrace_lock); 16316 16317 if (desc.dtargd_id > dtrace_nprobes) { 16318 mutex_exit(&dtrace_lock); 16319 mutex_exit(&mod_lock); 16320 mutex_exit(&dtrace_provider_lock); 16321 return (EINVAL); 16322 } 16323 16324 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 16325 mutex_exit(&dtrace_lock); 16326 mutex_exit(&mod_lock); 16327 mutex_exit(&dtrace_provider_lock); 16328 return (EINVAL); 16329 } 16330 16331 mutex_exit(&dtrace_lock); 16332 16333 prov = probe->dtpr_provider; 16334 16335 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 16336 /* 16337 * There isn't any typed information for this probe. 16338 * Set the argument number to DTRACE_ARGNONE. 
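 * (DTRACE_ARGNONE in dtargd_ndx signals to the consumer that no
 * argument description is available for the requested index.)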
16339 */ 16340 desc.dtargd_ndx = DTRACE_ARGNONE; 16341 } else { 16342 desc.dtargd_native[0] = '\0'; 16343 desc.dtargd_xlate[0] = '\0'; 16344 desc.dtargd_mapping = desc.dtargd_ndx; 16345 16346 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 16347 probe->dtpr_id, probe->dtpr_arg, &desc); 16348 } 16349 16350 mutex_exit(&mod_lock); 16351 mutex_exit(&dtrace_provider_lock); 16352 16353 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16354 return (EFAULT); 16355 16356 return (0); 16357 } 16358 16359 case DTRACEIOC_GO: { 16360 processorid_t cpuid; 16361 rval = dtrace_state_go(state, &cpuid); 16362 16363 if (rval != 0) 16364 return (rval); 16365 16366 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 16367 return (EFAULT); 16368 16369 return (0); 16370 } 16371 16372 case DTRACEIOC_STOP: { 16373 processorid_t cpuid; 16374 16375 mutex_enter(&dtrace_lock); 16376 rval = dtrace_state_stop(state, &cpuid); 16377 mutex_exit(&dtrace_lock); 16378 16379 if (rval != 0) 16380 return (rval); 16381 16382 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 16383 return (EFAULT); 16384 16385 return (0); 16386 } 16387 16388 case DTRACEIOC_DOFGET: { 16389 dof_hdr_t hdr, *dof; 16390 uint64_t len; 16391 16392 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 16393 return (EFAULT); 16394 16395 mutex_enter(&dtrace_lock); 16396 dof = dtrace_dof_create(state); 16397 mutex_exit(&dtrace_lock); 16398 16399 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 16400 rval = copyout(dof, (void *)arg, len); 16401 dtrace_dof_destroy(dof); 16402 16403 return (rval == 0 ? 0 : EFAULT); 16404 } 16405 16406 case DTRACEIOC_AGGSNAP: 16407 case DTRACEIOC_BUFSNAP: { 16408 dtrace_bufdesc_t desc; 16409 caddr_t cached; 16410 dtrace_buffer_t *buf; 16411 16412 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16413 return (EFAULT); 16414 16415 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 16416 return (EINVAL); 16417 16418 mutex_enter(&dtrace_lock); 16419 16420 if (cmd == DTRACEIOC_BUFSNAP) { 16421 buf = &state->dts_buffer[desc.dtbd_cpu]; 16422 } else { 16423 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 16424 } 16425 16426 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 16427 size_t sz = buf->dtb_offset; 16428 16429 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 16430 mutex_exit(&dtrace_lock); 16431 return (EBUSY); 16432 } 16433 16434 /* 16435 * If this buffer has already been consumed, we're 16436 * going to indicate that there's nothing left here 16437 * to consume. 16438 */ 16439 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 16440 mutex_exit(&dtrace_lock); 16441 16442 desc.dtbd_size = 0; 16443 desc.dtbd_drops = 0; 16444 desc.dtbd_errors = 0; 16445 desc.dtbd_oldest = 0; 16446 sz = sizeof (desc); 16447 16448 if (copyout(&desc, (void *)arg, sz) != 0) 16449 return (EFAULT); 16450 16451 return (0); 16452 } 16453 16454 /* 16455 * If this is a ring buffer that has wrapped, we want 16456 * to copy the whole thing out. 
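 * (dtrace_buffer_polish() tidies the wrapped region first so the
 * consumer sees a well-formed buffer; dtbd_oldest, set below to the
 * wrapped offset, tells it where the oldest record begins.)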
16457 */ 16458 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 16459 dtrace_buffer_polish(buf); 16460 sz = buf->dtb_size; 16461 } 16462 16463 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 16464 mutex_exit(&dtrace_lock); 16465 return (EFAULT); 16466 } 16467 16468 desc.dtbd_size = sz; 16469 desc.dtbd_drops = buf->dtb_drops; 16470 desc.dtbd_errors = buf->dtb_errors; 16471 desc.dtbd_oldest = buf->dtb_xamot_offset; 16472 desc.dtbd_timestamp = dtrace_gethrtime(); 16473 16474 mutex_exit(&dtrace_lock); 16475 16476 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16477 return (EFAULT); 16478 16479 buf->dtb_flags |= DTRACEBUF_CONSUMED; 16480 16481 return (0); 16482 } 16483 16484 if (buf->dtb_tomax == NULL) { 16485 ASSERT(buf->dtb_xamot == NULL); 16486 mutex_exit(&dtrace_lock); 16487 return (ENOENT); 16488 } 16489 16490 cached = buf->dtb_tomax; 16491 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 16492 16493 dtrace_xcall(desc.dtbd_cpu, 16494 (dtrace_xcall_t)dtrace_buffer_switch, buf); 16495 16496 state->dts_errors += buf->dtb_xamot_errors; 16497 16498 /* 16499 * If the buffers did not actually switch, then the cross call 16500 * did not take place -- presumably because the given CPU is 16501 * not in the ready set. If this is the case, we'll return 16502 * ENOENT. 16503 */ 16504 if (buf->dtb_tomax == cached) { 16505 ASSERT(buf->dtb_xamot != cached); 16506 mutex_exit(&dtrace_lock); 16507 return (ENOENT); 16508 } 16509 16510 ASSERT(cached == buf->dtb_xamot); 16511 16512 /* 16513 * We have our snapshot; now copy it out. 16514 */ 16515 if (copyout(buf->dtb_xamot, desc.dtbd_data, 16516 buf->dtb_xamot_offset) != 0) { 16517 mutex_exit(&dtrace_lock); 16518 return (EFAULT); 16519 } 16520 16521 desc.dtbd_size = buf->dtb_xamot_offset; 16522 desc.dtbd_drops = buf->dtb_xamot_drops; 16523 desc.dtbd_errors = buf->dtb_xamot_errors; 16524 desc.dtbd_oldest = 0; 16525 desc.dtbd_timestamp = buf->dtb_switched; 16526 16527 mutex_exit(&dtrace_lock); 16528 16529 /* 16530 * Finally, copy out the buffer description. 16531 */ 16532 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16533 return (EFAULT); 16534 16535 return (0); 16536 } 16537 16538 case DTRACEIOC_CONF: { 16539 dtrace_conf_t conf; 16540 16541 bzero(&conf, sizeof (conf)); 16542 conf.dtc_difversion = DIF_VERSION; 16543 conf.dtc_difintregs = DIF_DIR_NREGS; 16544 conf.dtc_diftupregs = DIF_DTR_NREGS; 16545 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 16546 16547 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 16548 return (EFAULT); 16549 16550 return (0); 16551 } 16552 16553 case DTRACEIOC_STATUS: { 16554 dtrace_status_t stat; 16555 dtrace_dstate_t *dstate; 16556 int i, j; 16557 uint64_t nerrs; 16558 16559 /* 16560 * See the comment in dtrace_state_deadman() for the reason 16561 * for setting dts_laststatus to INT64_MAX before setting 16562 * it to the correct value. 
16563 */ 16564 state->dts_laststatus = INT64_MAX; 16565 dtrace_membar_producer(); 16566 state->dts_laststatus = dtrace_gethrtime(); 16567 16568 bzero(&stat, sizeof (stat)); 16569 16570 mutex_enter(&dtrace_lock); 16571 16572 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 16573 mutex_exit(&dtrace_lock); 16574 return (ENOENT); 16575 } 16576 16577 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 16578 stat.dtst_exiting = 1; 16579 16580 nerrs = state->dts_errors; 16581 dstate = &state->dts_vstate.dtvs_dynvars; 16582 16583 for (i = 0; i < NCPU; i++) { 16584 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 16585 16586 stat.dtst_dyndrops += dcpu->dtdsc_drops; 16587 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 16588 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 16589 16590 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 16591 stat.dtst_filled++; 16592 16593 nerrs += state->dts_buffer[i].dtb_errors; 16594 16595 for (j = 0; j < state->dts_nspeculations; j++) { 16596 dtrace_speculation_t *spec; 16597 dtrace_buffer_t *buf; 16598 16599 spec = &state->dts_speculations[j]; 16600 buf = &spec->dtsp_buffer[i]; 16601 stat.dtst_specdrops += buf->dtb_xamot_drops; 16602 } 16603 } 16604 16605 stat.dtst_specdrops_busy = state->dts_speculations_busy; 16606 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 16607 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 16608 stat.dtst_dblerrors = state->dts_dblerrors; 16609 stat.dtst_killed = 16610 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 16611 stat.dtst_errors = nerrs; 16612 16613 mutex_exit(&dtrace_lock); 16614 16615 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 16616 return (EFAULT); 16617 16618 return (0); 16619 } 16620 16621 case DTRACEIOC_FORMAT: { 16622 dtrace_fmtdesc_t fmt; 16623 char *str; 16624 int len; 16625 16626 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 16627 return (EFAULT); 16628 16629 mutex_enter(&dtrace_lock); 16630 16631 if (fmt.dtfd_format == 0 || 16632 fmt.dtfd_format > state->dts_nformats) { 16633 mutex_exit(&dtrace_lock); 16634 return (EINVAL); 16635 } 16636 16637 /* 16638 * Format strings are allocated contiguously and they are 16639 * never freed; if a format index is less than the number 16640 * of formats, we can assert that the format map is non-NULL 16641 * and that the format for the specified index is non-NULL. 
16642 */ 16643 ASSERT(state->dts_formats != NULL); 16644 str = state->dts_formats[fmt.dtfd_format - 1]; 16645 ASSERT(str != NULL); 16646 16647 len = strlen(str) + 1; 16648 16649 if (len > fmt.dtfd_length) { 16650 fmt.dtfd_length = len; 16651 16652 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { 16653 mutex_exit(&dtrace_lock); 16654 return (EINVAL); 16655 } 16656 } else { 16657 if (copyout(str, fmt.dtfd_string, len) != 0) { 16658 mutex_exit(&dtrace_lock); 16659 return (EINVAL); 16660 } 16661 } 16662 16663 mutex_exit(&dtrace_lock); 16664 return (0); 16665 } 16666 16667 default: 16668 break; 16669 } 16670 16671 return (ENOTTY); 16672} 16673 16674/*ARGSUSED*/ 16675static int 16676dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 16677{ 16678 dtrace_state_t *state; 16679 16680 switch (cmd) { 16681 case DDI_DETACH: 16682 break; 16683 16684 case DDI_SUSPEND: 16685 return (DDI_SUCCESS); 16686 16687 default: 16688 return (DDI_FAILURE); 16689 } 16690 16691 mutex_enter(&cpu_lock); 16692 mutex_enter(&dtrace_provider_lock); 16693 mutex_enter(&dtrace_lock); 16694 16695 ASSERT(dtrace_opens == 0); 16696 16697 if (dtrace_helpers > 0) { 16698 mutex_exit(&dtrace_provider_lock); 16699 mutex_exit(&dtrace_lock); 16700 mutex_exit(&cpu_lock); 16701 return (DDI_FAILURE); 16702 } 16703 16704 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) { 16705 mutex_exit(&dtrace_provider_lock); 16706 mutex_exit(&dtrace_lock); 16707 mutex_exit(&cpu_lock); 16708 return (DDI_FAILURE); 16709 } 16710 16711 dtrace_provider = NULL; 16712 16713 if ((state = dtrace_anon_grab()) != NULL) { 16714 /* 16715 * If there were ECBs on this state, the provider should 16716 * have not been allowed to detach; assert that there is 16717 * none. 16718 */ 16719 ASSERT(state->dts_necbs == 0); 16720 dtrace_state_destroy(state); 16721 16722 /* 16723 * If we're being detached with anonymous state, we need to 16724 * indicate to the kernel debugger that DTrace is now inactive. 
16725 */ 16726 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 16727 } 16728 16729 bzero(&dtrace_anon, sizeof (dtrace_anon_t)); 16730 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 16731 dtrace_cpu_init = NULL; 16732 dtrace_helpers_cleanup = NULL; 16733 dtrace_helpers_fork = NULL; 16734 dtrace_cpustart_init = NULL; 16735 dtrace_cpustart_fini = NULL; 16736 dtrace_debugger_init = NULL; 16737 dtrace_debugger_fini = NULL; 16738 dtrace_modload = NULL; 16739 dtrace_modunload = NULL; 16740 16741 mutex_exit(&cpu_lock); 16742 16743 if (dtrace_helptrace_enabled) { 16744 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize); 16745 dtrace_helptrace_buffer = NULL; 16746 } 16747 16748 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *)); 16749 dtrace_probes = NULL; 16750 dtrace_nprobes = 0; 16751 16752 dtrace_hash_destroy(dtrace_bymod); 16753 dtrace_hash_destroy(dtrace_byfunc); 16754 dtrace_hash_destroy(dtrace_byname); 16755 dtrace_bymod = NULL; 16756 dtrace_byfunc = NULL; 16757 dtrace_byname = NULL; 16758 16759 kmem_cache_destroy(dtrace_state_cache); 16760 vmem_destroy(dtrace_minor); 16761 vmem_destroy(dtrace_arena); 16762 16763 if (dtrace_toxrange != NULL) { 16764 kmem_free(dtrace_toxrange, 16765 dtrace_toxranges_max * sizeof (dtrace_toxrange_t)); 16766 dtrace_toxrange = NULL; 16767 dtrace_toxranges = 0; 16768 dtrace_toxranges_max = 0; 16769 } 16770 16771 ddi_remove_minor_node(dtrace_devi, NULL); 16772 dtrace_devi = NULL; 16773 16774 ddi_soft_state_fini(&dtrace_softstate); 16775 16776 ASSERT(dtrace_vtime_references == 0); 16777 ASSERT(dtrace_opens == 0); 16778 ASSERT(dtrace_retained == NULL); 16779 16780 mutex_exit(&dtrace_lock); 16781 mutex_exit(&dtrace_provider_lock); 16782 16783 /* 16784 * We don't destroy the task queue until after we have dropped our 16785 * locks (taskq_destroy() may block on running tasks). To prevent 16786 * attempting to do work after we have effectively detached but before 16787 * the task queue has been destroyed, all tasks dispatched via the 16788 * task queue must check that DTrace is still attached before 16789 * performing any operation. 
16790 */ 16791 taskq_destroy(dtrace_taskq); 16792 dtrace_taskq = NULL; 16793 16794 return (DDI_SUCCESS); 16795} 16796#endif 16797 16798#if defined(sun) 16799/*ARGSUSED*/ 16800static int 16801dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 16802{ 16803 int error; 16804 16805 switch (infocmd) { 16806 case DDI_INFO_DEVT2DEVINFO: 16807 *result = (void *)dtrace_devi; 16808 error = DDI_SUCCESS; 16809 break; 16810 case DDI_INFO_DEVT2INSTANCE: 16811 *result = (void *)0; 16812 error = DDI_SUCCESS; 16813 break; 16814 default: 16815 error = DDI_FAILURE; 16816 } 16817 return (error); 16818} 16819#endif 16820 16821#if defined(sun) 16822static struct cb_ops dtrace_cb_ops = { 16823 dtrace_open, /* open */ 16824 dtrace_close, /* close */ 16825 nulldev, /* strategy */ 16826 nulldev, /* print */ 16827 nodev, /* dump */ 16828 nodev, /* read */ 16829 nodev, /* write */ 16830 dtrace_ioctl, /* ioctl */ 16831 nodev, /* devmap */ 16832 nodev, /* mmap */ 16833 nodev, /* segmap */ 16834 nochpoll, /* poll */ 16835 ddi_prop_op, /* cb_prop_op */ 16836 0, /* streamtab */ 16837 D_NEW | D_MP /* Driver compatibility flag */ 16838}; 16839 16840static struct dev_ops dtrace_ops = { 16841 DEVO_REV, /* devo_rev */ 16842 0, /* refcnt */ 16843 dtrace_info, /* get_dev_info */ 16844 nulldev, /* identify */ 16845 nulldev, /* probe */ 16846 dtrace_attach, /* attach */ 16847 dtrace_detach, /* detach */ 16848 nodev, /* reset */ 16849 &dtrace_cb_ops, /* driver operations */ 16850 NULL, /* bus operations */ 16851 nodev /* dev power */ 16852}; 16853 16854static struct modldrv modldrv = { 16855 &mod_driverops, /* module type (this is a pseudo driver) */ 16856 "Dynamic Tracing", /* name of module */ 16857 &dtrace_ops, /* driver ops */ 16858}; 16859 16860static struct modlinkage modlinkage = { 16861 MODREV_1, 16862 (void *)&modldrv, 16863 NULL 16864}; 16865 16866int 16867_init(void) 16868{ 16869 return (mod_install(&modlinkage)); 16870} 16871 16872int 16873_info(struct modinfo *modinfop) 16874{ 16875 return (mod_info(&modlinkage, modinfop)); 16876} 16877 16878int 16879_fini(void) 16880{ 16881 return (mod_remove(&modlinkage)); 16882} 16883#else 16884 16885static d_ioctl_t dtrace_ioctl; 16886static d_ioctl_t dtrace_ioctl_helper; 16887static void dtrace_load(void *); 16888static int dtrace_unload(void); 16889#if __FreeBSD_version < 800039 16890static void dtrace_clone(void *, struct ucred *, char *, int , struct cdev **); 16891static struct clonedevs *dtrace_clones; /* Ptr to the array of cloned devices. */ 16892static eventhandler_tag eh_tag; /* Event handler tag. 

SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST,
    dtrace_anon_init, NULL);

DEV_MODULE(dtrace, dtrace_modevent, NULL);
MODULE_VERSION(dtrace, 1);
MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
#endif
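
/*
 * The DEV_MODULE() declaration above registers the pseudo device, and the
 * MODULE_DEPEND() lines record hard dependencies on the cyclic and
 * opensolaris modules, which are assumed to supply the cyclic timer backend
 * and the Solaris compatibility shims (mutexes, kmem, taskq, and so on)
 * used throughout this file.
 */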