sharedRuntime.cpp revision 116:018d5b58dd4f
/*
 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_sharedRuntime.cpp.incl"
#include <math.h>

HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t);
HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int,
                      char*, int, char*, int, char*, int);
HS_DTRACE_PROBE_DECL7(hotspot, method__return, int,
                      char*, int, char*, int, char*, int);

// Implementation of SharedRuntime

#ifndef PRODUCT
// For statistics
int SharedRuntime::_ic_miss_ctr = 0;
int SharedRuntime::_wrong_method_ctr = 0;
int SharedRuntime::_resolve_static_ctr = 0;
int SharedRuntime::_resolve_virtual_ctr = 0;
int SharedRuntime::_resolve_opt_virtual_ctr = 0;
int SharedRuntime::_implicit_null_throws = 0;
int SharedRuntime::_implicit_div0_throws = 0;
int SharedRuntime::_throw_null_ctr = 0;

int SharedRuntime::_nof_normal_calls = 0;
int SharedRuntime::_nof_optimized_calls = 0;
int SharedRuntime::_nof_inlined_calls = 0;
int SharedRuntime::_nof_megamorphic_calls = 0;
int SharedRuntime::_nof_static_calls = 0;
int SharedRuntime::_nof_inlined_static_calls = 0;
int SharedRuntime::_nof_interface_calls = 0;
int SharedRuntime::_nof_optimized_interface_calls = 0;
int SharedRuntime::_nof_inlined_interface_calls = 0;
int SharedRuntime::_nof_megamorphic_interface_calls = 0;
int SharedRuntime::_nof_removable_exceptions = 0;

int SharedRuntime::_new_instance_ctr=0;
int SharedRuntime::_new_array_ctr=0;
int SharedRuntime::_multi1_ctr=0;
int SharedRuntime::_multi2_ctr=0;
int SharedRuntime::_multi3_ctr=0;
int SharedRuntime::_multi4_ctr=0;
int SharedRuntime::_multi5_ctr=0;
int SharedRuntime::_mon_enter_stub_ctr=0;
int SharedRuntime::_mon_exit_stub_ctr=0;
int SharedRuntime::_mon_enter_ctr=0;
int SharedRuntime::_mon_exit_ctr=0;
int SharedRuntime::_partial_subtype_ctr=0;
int SharedRuntime::_jbyte_array_copy_ctr=0;
int SharedRuntime::_jshort_array_copy_ctr=0;
int SharedRuntime::_jint_array_copy_ctr=0;
int SharedRuntime::_jlong_array_copy_ctr=0;
int SharedRuntime::_oop_array_copy_ctr=0;
int SharedRuntime::_checkcast_array_copy_ctr=0;
int SharedRuntime::_unsafe_array_copy_ctr=0;
int SharedRuntime::_generic_array_copy_ctr=0;
int SharedRuntime::_slow_array_copy_ctr=0;
int SharedRuntime::_find_handler_ctr=0;
int SharedRuntime::_rethrow_ctr=0;

int SharedRuntime::_ICmiss_index = 0;
int SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
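// _ICmiss_count[i] (above) and _ICmiss_at[i] (below) form a parallel-array
// histogram: the PC of each distinct IC miss site and the number of misses
// observed there. Once the table fills, trace_ic_miss reuses the last slot.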
address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];

void SharedRuntime::trace_ic_miss(address at) {
  for (int i = 0; i < _ICmiss_index; i++) {
    if (_ICmiss_at[i] == at) {
      _ICmiss_count[i]++;
      return;
    }
  }
  int index = _ICmiss_index++;
  if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
  _ICmiss_at[index] = at;
  _ICmiss_count[index] = 1;
}

void SharedRuntime::print_ic_miss_histogram() {
  if (ICMissHistogram) {
    tty->print_cr("IC Miss Histogram:");
    int tot_misses = 0;
    for (int i = 0; i < _ICmiss_index; i++) {
      tty->print_cr("  at: " INTPTR_FORMAT "  nof: %d", _ICmiss_at[i], _ICmiss_count[i]);
      tot_misses += _ICmiss_count[i];
    }
    tty->print_cr("Total IC misses: %7d", tot_misses);
  }
}
#endif // PRODUCT


JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
  return x * y;
JRT_END


JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
  if (x == min_jlong && y == CONST64(-1)) {
    return x;
  } else {
    return x / y;
  }
JRT_END


JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
  if (x == min_jlong && y == CONST64(-1)) {
    return 0;
  } else {
    return x % y;
  }
JRT_END


const juint  float_sign_mask  = 0x7FFFFFFF;
const juint  float_infinity   = 0x7F800000;
const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
const julong double_infinity  = CONST64(0x7FF0000000000000);

JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y))
#ifdef _WIN64
  // 64-bit Windows on amd64 returns the wrong values for
  // infinity operands.
  union { jfloat f; juint i; } xbits, ybits;
  xbits.f = x;
  ybits.f = y;
  // x Mod Infinity == x unless x is infinity
  if ( ((xbits.i & float_sign_mask) != float_infinity) &&
       ((ybits.i & float_sign_mask) == float_infinity) ) {
    return x;
  }
#endif
  return ((jfloat)fmod((double)x, (double)y));
JRT_END


JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
#ifdef _WIN64
  union { jdouble d; julong l; } xbits, ybits;
  xbits.d = x;
  ybits.d = y;
  // x Mod Infinity == x unless x is infinity
  if ( ((xbits.l & double_sign_mask) != double_infinity) &&
       ((ybits.l & double_sign_mask) == double_infinity) ) {
    return x;
  }
#endif
  return ((jdouble)fmod((double)x, (double)y));
JRT_END


JRT_LEAF(jint, SharedRuntime::f2i(jfloat x))
  if (g_isnan(x)) {return 0;}
  jlong lltmp = (jlong)x;
  jint ltmp   = (jint)lltmp;
  if (ltmp == lltmp) {
    return ltmp;
  } else {
    if (x < 0) {
      return min_jint;
    } else {
      return max_jint;
    }
  }
JRT_END


JRT_LEAF(jlong, SharedRuntime::f2l(jfloat x))
  if (g_isnan(x)) {return 0;}
  jlong lltmp = (jlong)x;
  if (lltmp != min_jlong) {
    return lltmp;
  } else {
    if (x < 0) {
      return min_jlong;
    } else {
      return max_jlong;
    }
  }
JRT_END


JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
  if (g_isnan(x)) {return 0;}
  jlong lltmp = (jlong)x;
  jint ltmp   = (jint)lltmp;
  if (ltmp == lltmp) {
    return ltmp;
  } else {
    if (x < 0) {
      return min_jint;
    } else {
      return max_jint;
    }
  }
JRT_END


JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
  if (g_isnan(x)) {return 0;}
  jlong lltmp = (jlong)x;
  if (lltmp != min_jlong) {
    return lltmp;
  } else {
    if (x < 0) {
      return min_jlong;
    } else {
      return max_jlong;
    }
  }
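  // Per the JLS rules for d2l: NaN converts to 0 and out-of-range values
  // saturate to min_jlong/max_jlong, matching what f2i, f2l and d2i above
  // do for their respective target types.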
JRT_END


JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
  return (jfloat)x;
JRT_END


JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
  return (jfloat)x;
JRT_END


JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
  return (jdouble)x;
JRT_END

// Exception handling across interpreter/compiler boundaries
//
// exception_handler_for_return_address(...) returns the continuation address.
// The continuation address is the entry point of the exception handler of the
// previous frame, selected according to the return address.

address SharedRuntime::raw_exception_handler_for_return_address(address return_address) {
  assert(frame::verify_return_pc(return_address), "must be a return pc");

  // the fastest case first
  CodeBlob* blob = CodeCache::find_blob(return_address);
  if (blob != NULL && blob->is_nmethod()) {
    nmethod* code = (nmethod*)blob;
    assert(code != NULL, "nmethod must be present");
    // native nmethods don't have exception handlers
    assert(!code->is_native_method(), "no exception handler");
    assert(code->header_begin() != code->exception_begin(), "no exception handler");
    if (code->is_deopt_pc(return_address)) {
      return SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {
      return code->exception_begin();
    }
  }

  // Entry code
  if (StubRoutines::returns_to_call_stub(return_address)) {
    return StubRoutines::catch_exception_entry();
  }
  // Interpreted code
  if (Interpreter::contains(return_address)) {
    return Interpreter::rethrow_exception_entry();
  }

  // Compiled code
  if (CodeCache::contains(return_address)) {
    CodeBlob* blob = CodeCache::find_blob(return_address);
    if (blob->is_nmethod()) {
      nmethod* code = (nmethod*)blob;
      assert(code != NULL, "nmethod must be present");
      assert(code->header_begin() != code->exception_begin(), "no exception handler");
      return code->exception_begin();
    }
    if (blob->is_runtime_stub()) {
      ShouldNotReachHere(); // callers are responsible for skipping runtime stub frames
    }
  }
  guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
#ifndef PRODUCT
  { ResourceMark rm;
    tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address);
    tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
    tty->print_cr("b) other problem");
  }
#endif // PRODUCT
  ShouldNotReachHere();
  return NULL;
}


JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(address return_address))
  return raw_exception_handler_for_return_address(return_address);
JRT_END

address SharedRuntime::get_poll_stub(address pc) {
  address stub;
  // Look up the code blob
  CodeBlob *cb = CodeCache::find_blob(pc);

  // Should be an nmethod
  assert( cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod" );

  // Look up the relocation information
  assert( ((nmethod*)cb)->is_at_poll_or_poll_return(pc),
          "safepoint polling: type must be poll" );

  assert( ((NativeInstruction*)pc)->is_safepoint_poll(),
          "Only polling locations are used for safepoint");

  bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc);
  if (at_poll_return) {
    assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
           "polling page return stub not created yet");
    stub = SharedRuntime::polling_page_return_handler_blob()->instructions_begin();
  } else {
    assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
           "polling page safepoint stub not created yet");
    stub = SharedRuntime::polling_page_safepoint_handler_blob()->instructions_begin();
  }
#ifndef PRODUCT
  if( TraceSafepoint ) {
    char buf[256];
    jio_snprintf(buf, sizeof(buf),
                 "... found polling page %s exception at pc = "
                 INTPTR_FORMAT ", stub =" INTPTR_FORMAT,
                 at_poll_return ? "return" : "loop",
                 (intptr_t)pc, (intptr_t)stub);
    tty->print_raw_cr(buf);
  }
#endif // PRODUCT
  return stub;
}


oop SharedRuntime::retrieve_receiver( symbolHandle sig, frame caller ) {
  assert(caller.is_interpreted_frame(), "");
  int args_size = ArgumentSizeComputer(sig).size() + 1;
  assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack");
  oop result = (oop) *caller.interpreter_frame_tos_at(args_size - 1);
  assert(Universe::heap()->is_in(result) && result->is_oop(), "receiver must be an oop");
  return result;
}


void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception) {
  if (JvmtiExport::can_post_exceptions()) {
    vframeStream vfst(thread, true);
    methodHandle method = methodHandle(thread, vfst.method());
    address bcp = method()->bcp_from(vfst.bci());
    JvmtiExport::post_exception_throw(thread, method(), bcp, h_exception());
  }
  Exceptions::_throw(thread, __FILE__, __LINE__, h_exception);
}

void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, symbolOop name, const char *message) {
  Handle h_exception = Exceptions::new_exception(thread, name, message);
  throw_and_post_jvmti_exception(thread, h_exception);
}

// ret_pc points into caller; we are returning caller's exception handler
// for given exception
address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
                                                    bool force_unwind, bool top_frame_only) {
  assert(nm != NULL, "must exist");
  ResourceMark rm;

  ScopeDesc* sd = nm->scope_desc_at(ret_pc);
  // determine handler bci, if any
  EXCEPTION_MARK;

  int handler_bci = -1;
  int scope_depth = 0;
  if (!force_unwind) {
    int bci = sd->bci();
    do {
      bool skip_scope_increment = false;
      // exception handler lookup
      KlassHandle ek (THREAD, exception->klass());
      handler_bci = sd->method()->fast_exception_handler_bci_for(ek, bci, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        // We threw an exception while trying to find the exception handler.
        // Transfer the new exception to the exception handle which will
        // be set into thread local storage, and do another lookup for an
        // exception handler for this exception, this time starting at the
        // BCI of the exception handler which caused the exception to be
        // thrown (bugs 4307310 and 4546590). Set "exception" reference
        // argument to ensure that the correct exception is thrown (4870175).
        exception = Handle(THREAD, PENDING_EXCEPTION);
        CLEAR_PENDING_EXCEPTION;
        if (handler_bci >= 0) {
          bci = handler_bci;
          handler_bci = -1;
          skip_scope_increment = true;
        }
      }
      if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
        sd = sd->sender();
        if (sd != NULL) {
          bci = sd->bci();
        }
        ++scope_depth;
      }
    } while (!top_frame_only && handler_bci < 0 && sd != NULL);
  }

  // found handling method => lookup exception handler
  int catch_pco = ret_pc - nm->instructions_begin();

  ExceptionHandlerTable table(nm);
  HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
  if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
    // Allow abbreviated catch tables. The idea is to allow a method
    // to materialize its exceptions without committing to the exact
    // routing of exceptions. In particular this is needed for adding
    // a synthetic handler to unlock monitors when inlining
    // synchronized methods since the unlock path isn't represented in
    // the bytecodes.
    t = table.entry_for(catch_pco, -1, 0);
  }

#ifdef COMPILER1
  if (nm->is_compiled_by_c1() && t == NULL && handler_bci == -1) {
    // Exception is not handled by this frame so unwind. Note that
    // this is not the same as how C2 does this. C2 emits a table
    // entry that dispatches to the unwind code in the nmethod.
    return NULL;
  }
#endif /* COMPILER1 */


  if (t == NULL) {
    tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci);
    tty->print_cr("   Exception:");
    exception->print();
    tty->cr();
    tty->print_cr(" Compiled exception table :");
    table.print();
    nm->print_code();
    guarantee(false, "missing exception handler");
    return NULL;
  }

  return nm->instructions_begin() + t->pco();
}

JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread))
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* thread))
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* thread))
  // This entry point is effectively only used for NullPointerExceptions which occur at inline
  // cache sites (when the callee activation is not yet set up) so we are at a call site
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
  // We avoid using the normal exception construction in this case because
  // it performs an upcall to Java, and we're already out of stack space.
  klassOop k = SystemDictionary::StackOverflowError_klass();
  oop exception_oop = instanceKlass::cast(k)->allocate_instance(CHECK);
  Handle exception (thread, exception_oop);
  if (StackTraceInThrowable) {
    java_lang_Throwable::fill_in_stack_trace(exception);
  }
  throw_and_post_jvmti_exception(thread, exception);
JRT_END

address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
                                                           address pc,
                                                           SharedRuntime::ImplicitExceptionKind exception_kind)
{
  address target_pc = NULL;

  if (Interpreter::contains(pc)) {
#ifdef CC_INTERP
    // C++ interpreter doesn't throw implicit exceptions
    ShouldNotReachHere();
#else
    switch (exception_kind) {
      case IMPLICIT_NULL:           return Interpreter::throw_NullPointerException_entry();
      case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
      case STACK_OVERFLOW:          return Interpreter::throw_StackOverflowError_entry();
      default:                      ShouldNotReachHere();
    }
#endif // !CC_INTERP
  } else {
    switch (exception_kind) {
      case STACK_OVERFLOW: {
        // Stack overflow only occurs upon frame setup; the callee is
        // going to be unwound. Dispatch to a shared runtime stub
        // which will cause the StackOverflowError to be fabricated
        // and processed.
        // For stack overflow in deoptimization blob, cleanup thread.
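        // A non-NULL deopt_mark presumably means the overflow happened while
        // the deoptimization blob was building interpreter frames, so drop
        // the per-thread deopt bookkeeping before unwinding.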
        if (thread->deopt_mark() != NULL) {
          Deoptimization::cleanup_deopt_info(thread, NULL);
        }
        return StubRoutines::throw_StackOverflowError_entry();
      }

      case IMPLICIT_NULL: {
        if (VtableStubs::contains(pc)) {
          // We haven't yet entered the callee frame. Fabricate an
          // exception and begin dispatching it in the caller. Since
          // the caller was at a call site, it's safe to destroy all
          // caller-saved registers, as these entry points do.
          VtableStub* vt_stub = VtableStubs::stub_containing(pc);
          guarantee(vt_stub != NULL, "unable to find SEGVing vtable stub");
          if (vt_stub->is_abstract_method_error(pc)) {
            assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
            return StubRoutines::throw_AbstractMethodError_entry();
          } else {
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }
        } else {
          CodeBlob* cb = CodeCache::find_blob(pc);
          guarantee(cb != NULL, "exception happened outside interpreter, nmethods and vtable stubs (1)");

          // Exception happened in CodeCache. Must be either:
          // 1. Inline-cache check in C2I handler blob,
          // 2. Inline-cache check in nmethod, or
          // 3. Implicit null exception in nmethod

          if (!cb->is_nmethod()) {
            guarantee(cb->is_adapter_blob(),
                      "exception happened outside interpreter, nmethods and vtable stubs (2)");
            // There is no handler here, so we will simply unwind.
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }

          // Otherwise, it's an nmethod. Consult its exception handlers.
          nmethod* nm = (nmethod*)cb;
          if (nm->inlinecache_check_contains(pc)) {
            // exception happened inside inline-cache check code
            // => the nmethod is not yet active (i.e., the frame
            // is not set up yet) => use return address pushed by
            // caller => don't push another return address
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }

#ifndef PRODUCT
          _implicit_null_throws++;
#endif
          target_pc = nm->continuation_for_implicit_exception(pc);
          guarantee(target_pc != 0, "must have a continuation point");
        }

        break; // fall through
      }


      case IMPLICIT_DIVIDE_BY_ZERO: {
        nmethod* nm = CodeCache::find_nmethod(pc);
        guarantee(nm != NULL, "must have containing nmethod for implicit division-by-zero exceptions");
#ifndef PRODUCT
        _implicit_div0_throws++;
#endif
        target_pc = nm->continuation_for_implicit_exception(pc);
        guarantee(target_pc != 0, "must have a continuation point");
        break; // fall through
      }

      default: ShouldNotReachHere();
    }

    guarantee(target_pc != NULL, "must have computed destination PC for implicit exception");
    assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");

    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort("java.lang.NullPointerException"));
    if (exception_kind == IMPLICIT_NULL) {
      Events::log("Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
    } else {
      Events::log("Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
    }
    return target_pc;
  }

  ShouldNotReachHere();
  return NULL;
}


JNI_ENTRY(void, throw_unsatisfied_link_error(JNIEnv* env, ...))
{
  THROW(vmSymbols::java_lang_UnsatisfiedLinkError());
}
JNI_END


address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
  return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
}


#ifndef PRODUCT
JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
  const frame f = thread->last_frame();
  assert(f.is_interpreted_frame(), "must be an interpreted frame");
#ifndef PRODUCT
  methodHandle mh(THREAD, f.interpreter_frame_method());
  BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2);
#endif // !PRODUCT
  return preserve_this_value;
JRT_END
#endif // !PRODUCT


JRT_ENTRY(void, SharedRuntime::yield_all(JavaThread* thread, int attempts))
  os::yield_all(attempts);
JRT_END


// ---------------------------------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

void SharedRuntime::verify_caller_frame(frame caller_frame, methodHandle callee_method) {
  ResourceMark rm;
  assert (caller_frame.is_interpreted_frame(), "sanity check");
  assert (callee_method->has_compiled_code(), "callee must be compiled");
  methodHandle caller_method (Thread::current(), caller_frame.interpreter_frame_method());
  jint bci = caller_frame.interpreter_frame_bci();
  methodHandle method = find_callee_method_inside_interpreter(caller_frame, caller_method, bci);
  assert (callee_method == method, "incorrect method");
}

methodHandle SharedRuntime::find_callee_method_inside_interpreter(frame caller_frame, methodHandle caller_method, int bci) {
  EXCEPTION_MARK;
  Bytecode_invoke* bytecode = Bytecode_invoke_at(caller_method, bci);
  methodHandle staticCallee = bytecode->static_target(CATCH); // Non-product code

  bytecode = Bytecode_invoke_at(caller_method, bci);
  int bytecode_index = bytecode->index();
  Bytecodes::Code bc = bytecode->adjusted_invoke_code();

  Handle receiver;
  if (bc == Bytecodes::_invokeinterface ||
      bc == Bytecodes::_invokevirtual ||
      bc == Bytecodes::_invokespecial) {
    symbolHandle signature (THREAD, staticCallee->signature());
    receiver = Handle(THREAD, retrieve_receiver(signature, caller_frame));
  } else {
    receiver = Handle();
  }
  CallInfo result;
  constantPoolHandle constants (THREAD, caller_method->constants());
  LinkResolver::resolve_invoke(result, receiver, constants, bytecode_index, bc, CATCH); // Non-product code
  methodHandle calleeMethod = result.selected_method();
  return calleeMethod;
}

#endif // PRODUCT


JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
  assert(obj->is_oop(), "must be a valid oop");
  assert(obj->klass()->klass_part()->has_finalizer(), "shouldn't be here otherwise");
  instanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END


jlong SharedRuntime::get_java_tid(Thread* thread) {
  if (thread != NULL) {
    if (thread->is_Java_thread()) {
      oop obj = ((JavaThread*)thread)->threadObj();
      return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
    }
  }
  return 0;
}

/**
 * This function ought to be a void function, but cannot be because
 * it gets turned into a tail-call on sparc, which runs into dtrace bug
 * 6254741. Once that is fixed we can remove the dummy return value.
 */
int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
  return dtrace_object_alloc_base(Thread::current(), o);
}

int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o) {
  assert(DTraceAllocProbes, "wrong call");
  Klass* klass = o->blueprint();
  int size = o->size();
  symbolOop name = klass->name();
  HS_DTRACE_PROBE4(hotspot, object__alloc, get_java_tid(thread),
                   name->bytes(), name->utf8_length(), size * HeapWordSize);
  return 0;
}

JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
    JavaThread* thread, methodOopDesc* method))
  assert(DTraceMethodProbes, "wrong call");
  symbolOop kname = method->klass_name();
  symbolOop name  = method->name();
  symbolOop sig   = method->signature();
  HS_DTRACE_PROBE7(hotspot, method__entry, get_java_tid(thread),
                   kname->bytes(), kname->utf8_length(),
                   name->bytes(), name->utf8_length(),
                   sig->bytes(), sig->utf8_length());
  return 0;
JRT_END

JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
    JavaThread* thread, methodOopDesc* method))
  assert(DTraceMethodProbes, "wrong call");
  symbolOop kname = method->klass_name();
  symbolOop name  = method->name();
  symbolOop sig   = method->signature();
  HS_DTRACE_PROBE7(hotspot, method__return, get_java_tid(thread),
                   kname->bytes(), kname->utf8_length(),
                   name->bytes(), name->utf8_length(),
                   sig->bytes(), sig->utf8_length());
  return 0;
JRT_END


// Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., arguments have been pushed on stack
// but callee has not been invoked yet. Used by: resolve virtual/static,
// vtable updates, etc. Caller frame must be compiled.
Handle SharedRuntime::find_callee_info(JavaThread* thread, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
  ResourceMark rm(THREAD);

  // last java frame on stack (which includes native call frames)
  vframeStream vfst(thread, true); // Do not skip any javaCalls

  return find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(Handle()));
}


// Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., arguments have been pushed on stack
// but callee has not been invoked yet. Caller frame must be compiled.
Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
                                              vframeStream& vfst,
                                              Bytecodes::Code& bc,
                                              CallInfo& callinfo, TRAPS) {
  Handle receiver;
  Handle nullHandle; // create a handy null handle for exception returns

  assert(!vfst.at_end(), "Java frame must exist");

  // Find caller and bci from vframe
  methodHandle caller (THREAD, vfst.method());
  int bci = vfst.bci();

  // Find bytecode
  Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci);
  bc = bytecode->adjusted_invoke_code();
  int bytecode_index = bytecode->index();

  // Find receiver for non-static call
  if (bc != Bytecodes::_invokestatic) {
    // This register map must be updated since we need to find the receiver for
    // compiled frames. The receiver might be in a register.
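    // The RegisterMap lets frame::sender() and retrieve_receiver() locate
    // values that live in registers rather than on the stack.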
    RegisterMap reg_map2(thread);
    frame stubFrame   = thread->last_frame();
    // Caller-frame is a compiled frame
    frame callerFrame = stubFrame.sender(&reg_map2);

    methodHandle callee = bytecode->static_target(CHECK_(nullHandle));
    if (callee.is_null()) {
      THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
    }
    // Retrieve from a compiled argument list
    receiver = Handle(THREAD, callerFrame.retrieve_receiver(&reg_map2));

    if (receiver.is_null()) {
      THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
    }
  }

  // Resolve method. This is parameterized by bytecode.
  constantPoolHandle constants (THREAD, caller->constants());
  assert (receiver.is_null() || receiver->is_oop(), "wrong receiver");
  LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_(nullHandle));

#ifdef ASSERT
  // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
  if (bc != Bytecodes::_invokestatic) {
    assert(receiver.not_null(), "should have thrown exception");
    KlassHandle receiver_klass (THREAD, receiver->klass());
    klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
    // klass is already loaded
    KlassHandle static_receiver_klass (THREAD, rk);
    assert(receiver_klass->is_subtype_of(static_receiver_klass()), "actual receiver must be subclass of static receiver klass");
    if (receiver_klass->oop_is_instance()) {
      if (instanceKlass::cast(receiver_klass())->is_not_initialized()) {
        tty->print_cr("ERROR: Klass not yet initialized!!");
        receiver_klass.print();
      }
      assert (!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized");
    }
  }
#endif

  return receiver;
}

methodHandle SharedRuntime::find_callee_method(JavaThread* thread, TRAPS) {
  ResourceMark rm(THREAD);
  // We need first to check if any Java activations (compiled, interpreted)
  // exist on the stack since last JavaCall. If not, we need
  // to get the target method from the JavaCall wrapper.
  vframeStream vfst(thread, true); // Do not skip any javaCalls
  methodHandle callee_method;
  if (vfst.at_end()) {
    // No Java frames were found on stack since we did the JavaCall.
    // Hence the stack can only contain an entry_frame. We need to
    // find the target method from the stub frame.
    RegisterMap reg_map(thread, false);
    frame fr = thread->last_frame();
    assert(fr.is_runtime_frame(), "must be a runtimeStub");
    fr = fr.sender(&reg_map);
    assert(fr.is_entry_frame(), "must be");
    // fr is now pointing to the entry frame.
    callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());
    assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??");
  } else {
    Bytecodes::Code bc;
    CallInfo callinfo;
    find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle()));
    callee_method = callinfo.selected_method();
  }
  assert(callee_method()->is_method(), "must be");
  return callee_method;
}

// Resolves a call.
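// When JVMTI hotswap is possible, resolution is retried (see the loop below)
// until it yields a method that is not an obsolete ("old") redefined version.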
methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
                                           bool is_virtual,
                                           bool is_optimized, TRAPS) {
  methodHandle callee_method;
  callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
  if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
    int retry_count = 0;
    while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
           callee_method->method_holder() != SystemDictionary::object_klass()) {
      // If there is a pending exception then there is no need to re-try to
      // resolve this method.
      // If the method has been redefined, we need to try again.
      // Hack: we have no way to update the vtables of arrays, so don't
      // require that java.lang.Object has been updated.

      // It is very unlikely that a method is redefined more than 100 times
      // in the middle of resolve. If it loops here more than 100 times,
      // there could be a bug.
      guarantee((retry_count++ < 100),
                "Could not resolve to latest version of redefined method");
      // method is redefined in the middle of resolve so re-try.
      callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
    }
  }
  return callee_method;
}

// Resolves a call. The compilers generate code for calls that go here
// and are patched with the real destination of the call.
methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
                                               bool is_virtual,
                                               bool is_optimized, TRAPS) {

  ResourceMark rm(thread);
  RegisterMap cbl_map(thread, false);
  frame caller_frame = thread->last_frame().sender(&cbl_map);

  CodeBlob* cb = caller_frame.cb();
  guarantee(cb != NULL && cb->is_nmethod(), "must be called from nmethod");
  // make sure caller is not getting deoptimized
  // and removed before we are done with it.
  // CLEANUP - with lazy deopt shouldn't need this lock
  nmethodLocker caller_lock((nmethod*)cb);


  // determine call info & receiver
  // note: a) receiver is NULL for static calls
  //       b) an exception is thrown if receiver is NULL for non-static calls
  CallInfo call_info;
  Bytecodes::Code invoke_code = Bytecodes::_illegal;
  Handle receiver = find_callee_info(thread, invoke_code,
                                     call_info, CHECK_(methodHandle()));
  methodHandle callee_method = call_info.selected_method();

  assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) ||
         ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode");

#ifndef PRODUCT
  // tracing/debugging/statistics
  int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
              (is_virtual) ? (&_resolve_virtual_ctr) :
              (&_resolve_static_ctr);
  Atomic::inc(addr);

  if (TraceCallFixup) {
    ResourceMark rm(thread);
    tty->print("resolving %s%s (%s) call to",
               (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
               Bytecodes::name(invoke_code));
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  }
#endif

  // Compute entry points. This might require generation of C2I converter
  // frames, so we cannot be holding any locks here. Furthermore, the
  // computation of the entry points is independent of patching the call. We
  // always return the entry-point, but we only patch the stub if the call has
  // not been deoptimized. Return values: For a virtual call this is a
  // (cached_oop, destination address) pair. For a static call/optimized
  // virtual this is just a destination address.

  StaticCallInfo static_call_info;
  CompiledICInfo virtual_call_info;


  // Make sure the callee nmethod does not get deoptimized and removed before
  // we are done patching the code.
  nmethod* nm = callee_method->code();
  nmethodLocker nl_callee(nm);
#ifdef ASSERT
  address dest_entry_point = nm == NULL ? 0 : nm->entry_point(); // used below
#endif

  if (is_virtual) {
    assert(receiver.not_null(), "sanity check");
    bool static_bound = call_info.resolved_method()->can_be_statically_bound();
    KlassHandle h_klass(THREAD, receiver->klass());
    CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
                                          is_optimized, static_bound, virtual_call_info,
                                          CHECK_(methodHandle()));
  } else {
    // static call
    CompiledStaticCall::compute_entry(callee_method, static_call_info);
  }

  // grab lock, check for deoptimization and potentially patch caller
  {
    MutexLocker ml_patch(CompiledIC_lock);

    // Now that we are ready to patch, if the methodOop was redefined then
    // don't update the call site and let the caller retry.

    if (!callee_method->is_old()) {
#ifdef ASSERT
      // We must not try to patch to jump to an already unloaded method.
      if (dest_entry_point != 0) {
        assert(CodeCache::find_blob(dest_entry_point) != NULL,
               "should not unload nmethod while locked");
      }
#endif
      if (is_virtual) {
        CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
        if (inline_cache->is_clean()) {
          inline_cache->set_to_monomorphic(virtual_call_info);
        }
      } else {
        CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
        if (ssc->is_clean()) ssc->set(static_call_info);
      }
    }

  } // unlock CompiledIC_lock

  return callee_method;
}


// Inline caches exist only in compiled code
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
#ifdef ASSERT
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
#endif /* ASSERT */

  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
    // Return methodOop through TLS
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


// Handle call site that has been made non-entrant
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
  // 6243940 We might end up in here if the callee is deoptimized
  // as we race to call it. We don't want to take a safepoint if
  // the caller was interpreted because the caller frame will look
  // interpreted to the stack walkers and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. The i2c path will
  // place the callee method in the callee_target. It is stashed
  // there because if we try to find the callee by normal means a
  // safepoint is possible and we would have trouble GC'ing the
  // compiled args.
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  if (caller_frame.is_interpreted_frame() || caller_frame.is_entry_frame() ) {
    methodOop callee = thread->callee_target();
    guarantee(callee != NULL && callee->is_method(), "bad handshake");
    thread->set_vm_result(callee);
    thread->set_callee_target(NULL);
    return callee->get_c2i_entry();
  }

  // Must be compiled to compiled path which is safe to stackwalk
  methodHandle callee_method;
  JRT_BLOCK
    // Force resolving of caller (if we called from compiled frame)
    callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


// resolve a static call and patch code
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread ))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


// resolve virtual call and update inline cache to monomorphic
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread ))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


// Resolve a virtual call that can be statically bound (e.g., always
// monomorphic, so it has no inline cache). Patch code to resolved target.
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END




methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
  ResourceMark rm(thread);
  CallInfo call_info;
  Bytecodes::Code bc;

  // receiver is NULL for static calls. An exception is thrown for NULL
  // receivers for non-static calls
  Handle receiver = find_callee_info(thread, bc, call_info,
                                     CHECK_(methodHandle()));
  // Compiler1 can produce virtual call sites that can actually be statically bound.
  // If we fell through to below we would think that the site was going megamorphic
  // when in fact the site can never miss. Worse, because we'd think it was megamorphic
  // we'd try to do a vtable dispatch; however, methods that can be statically bound
  // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
  // reresolution of the call site (as if we did a handle_wrong_method and not a
  // plain ic_miss) and the site will be converted to an optimized virtual call site
  // never to miss again. I don't believe C2 will produce code like this but if it
  // did this would still be the correct thing to do for it too, hence no ifdef.
  //
  if (call_info.resolved_method()->can_be_statically_bound()) {
    methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle()));
    if (TraceCallFixup) {
      RegisterMap reg_map(thread, false);
      frame caller_frame = thread->last_frame().sender(&reg_map);
      ResourceMark rm(thread);
      tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
      callee_method->print_short_name(tty);
      tty->print_cr(" from pc: " INTPTR_FORMAT, caller_frame.pc());
      tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
    }
    return callee_method;
  }

  methodHandle callee_method = call_info.selected_method();

  bool should_be_mono = false;

#ifndef PRODUCT
  Atomic::inc(&_ic_miss_ctr);

  // Statistics & Tracing
  if (TraceCallFixup) {
    ResourceMark rm(thread);
    tty->print("IC miss (%s) call to", Bytecodes::name(bc));
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  }

  if (ICMissHistogram) {
    MutexLocker m(VMStatistic_lock);
    RegisterMap reg_map(thread, false);
    frame f = thread->last_frame().real_sender(&reg_map); // skip runtime stub
    // produce statistics under the lock
    trace_ic_miss(f.pc());
  }
#endif

  // install an event collector so that when a vtable stub is created the
  // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
  // event can't be posted when the stub is created as locks are held
  // - instead the event will be deferred until the event collector goes
  // out of scope.
  JvmtiDynamicCodeEventCollector event_collector;

  // Update inline cache to megamorphic. Skip update if caller has been
  // made non-entrant or we are called from interpreted.
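  // All IC transitions below (clean -> monomorphic -> megamorphic) happen
  // under CompiledIC_lock, so only one thread patches a given site at a time.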
  { MutexLocker ml_patch (CompiledIC_lock);
    RegisterMap reg_map(thread, false);
    frame caller_frame = thread->last_frame().sender(&reg_map);
    CodeBlob* cb = caller_frame.cb();
    if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) {
      // Not a non-entrant nmethod, so find inline_cache
      CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
      bool should_be_mono = false;
      if (inline_cache->is_optimized()) {
        if (TraceCallFixup) {
          ResourceMark rm(thread);
          tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
          callee_method->print_short_name(tty);
          tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
        }
        should_be_mono = true;
      } else {
        compiledICHolderOop ic_oop = (compiledICHolderOop) inline_cache->cached_oop();
        if ( ic_oop != NULL && ic_oop->is_compiledICHolder()) {

          if (receiver()->klass() == ic_oop->holder_klass()) {
            // This isn't a real miss. We must have seen that compiled code
            // is now available and we want the call site converted to a
            // monomorphic compiled call site.
            // We can't assert for callee_method->code() != NULL because it
            // could have been deoptimized in the meantime
            if (TraceCallFixup) {
              ResourceMark rm(thread);
              tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
              callee_method->print_short_name(tty);
              tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
            }
            should_be_mono = true;
          }
        }
      }

      if (should_be_mono) {

        // We have a path that was monomorphic but was going interpreted
        // and now we have (or had) a compiled entry. We correct the IC
        // by using a new icBuffer.
        CompiledICInfo info;
        KlassHandle receiver_klass(THREAD, receiver()->klass());
        inline_cache->compute_monomorphic_entry(callee_method,
                                                receiver_klass,
                                                inline_cache->is_optimized(),
                                                false,
                                                info, CHECK_(methodHandle()));
        inline_cache->set_to_monomorphic(info);
      } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
        // Change to megamorphic
        inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
      } else {
        // Either clean or megamorphic
      }
    }
  } // Release CompiledIC_lock

  return callee_method;
}

//
// Resets a call-site in compiled code so it will get resolved again.
// This routine handles virtual call sites, optimized virtual call
// sites, and static call sites. Typically used to change a call site's
// destination from compiled to interpreted.
//
methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
  ResourceMark rm(thread);
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
  frame caller = stub_frame.sender(&reg_map);

  // Do nothing if the frame isn't a live compiled frame.
  // nmethod could be deoptimized by the time we get here
  // so no update to the caller is needed.

  if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {

    address pc = caller.pc();
    Events::log("update call-site at pc " INTPTR_FORMAT, pc);

    // Default call_addr is the location of the "basic" call.
    // Determine the address of the call we are reresolving. With
    // Inline Caches we will always find a recognizable call.
    // With Inline Caches disabled we may or may not find a
    // recognizable call. We will always find a call for static
    // calls and for optimized virtual calls. For vanilla virtual
    // calls it depends on the state of the UseInlineCaches switch.
    //
    // With Inline Caches disabled we can get here for a virtual call
    // for two reasons:
    //   1 - calling an abstract method. The vtable for abstract methods
    //       will run us thru handle_wrong_method and we will eventually
    //       end up in the interpreter to throw the ame.
    //   2 - a racing deoptimization. We could be doing a vanilla vtable
    //       call and between the time we fetch the entry address and
    //       we jump to it the target gets deoptimized. Similar to 1
    //       we will wind up in the interpreter (thru a c2i with c2).
    //
    address call_addr = NULL;
    {
      // Get call instruction under lock because another thread may be
      // busy patching it.
      MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
      // Location of call instruction
      if (NativeCall::is_call_before(pc)) {
        NativeCall *ncall = nativeCall_before(pc);
        call_addr = ncall->instruction_address();
      }
    }

    // Check for static or virtual call
    bool is_static_call = false;
    nmethod* caller_nm = CodeCache::find_nmethod(pc);
    // Make sure nmethod doesn't get deoptimized and removed until
    // this is done with it.
    // CLEANUP - with lazy deopt shouldn't need this lock
    nmethodLocker nmlock(caller_nm);

    if (call_addr != NULL) {
      RelocIterator iter(caller_nm, call_addr, call_addr+1);
      int ret = iter.next(); // Get item
      if (ret) {
        assert(iter.addr() == call_addr, "must find call");
        if (iter.type() == relocInfo::static_call_type) {
          is_static_call = true;
        } else {
          assert(iter.type() == relocInfo::virtual_call_type ||
                 iter.type() == relocInfo::opt_virtual_call_type
                 , "unexpected relocInfo. type");
        }
      } else {
        assert(!UseInlineCaches, "relocation info. must exist for this address");
      }

      // Cleaning the inline cache will force a new resolve. This is more robust
      // than directly setting it to the new destination, since resolving of calls
      // is always done through the same code path. (experience shows that it
      // leads to very hard to track down bugs, if an inline cache gets updated
      // to a wrong method). It should not be performance critical, since the
      // resolve is only done once.
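      // (caller_nm stays alive across the patching below thanks to the
      // nmethodLocker acquired above.)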

      MutexLocker ml(CompiledIC_lock);
      //
      // We do not patch the call site if the nmethod has been made non-entrant
      // as it is a waste of time
      //
      if (caller_nm->is_in_use()) {
        if (is_static_call) {
          CompiledStaticCall* ssc = compiledStaticCall_at(call_addr);
          ssc->set_to_clean();
        } else {
          // compiled, dispatched call (which used to call an interpreted method)
          CompiledIC* inline_cache = CompiledIC_at(call_addr);
          inline_cache->set_to_clean();
        }
      }
    }

  }

  methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));


#ifndef PRODUCT
  Atomic::inc(&_wrong_method_ctr);

  if (TraceCallFixup) {
    ResourceMark rm(thread);
    tty->print("handle_wrong_method reresolving call to");
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  }
#endif

  return callee_method;
}

// ---------------------------------------------------------------------------
// We are calling the interpreter via a c2i. Normally this would mean that
// we were called by a compiled method. However we could have lost a race
// where we went int -> i2c -> c2i and so the caller could in fact be
// interpreted. If the caller is compiled we attempt to patch the caller
// so it no longer calls into the interpreter.
IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc))
  methodOop moop(method);

  address entry_point = moop->from_compiled_entry();

  // It's possible that deoptimization can occur at a call site which hasn't
  // been resolved yet, in which case this function will be called from
  // an nmethod that has been patched for deopt and we can ignore the
  // request for a fixup.
  // Also it is possible that we lost a race in that from_compiled_entry
  // is now back to the i2c; in that case we don't need to patch and if
  // we did we'd leap into space because the callsite needs to use
  // the "to interpreter" stub in order to load up the methodOop. Don't
  // ask me how I know this...
  //

  CodeBlob* cb = CodeCache::find_blob(caller_pc);
  if ( !cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
    return;
  }

  // There is a benign race here. We could be attempting to patch to a compiled
  // entry point at the same time the callee is being deoptimized. If that is
  // the case then entry_point may in fact point to a c2i and we'd patch the
  // call site with the same old data. clear_code will set code() to NULL
  // at the end of it. If we happen to see that NULL then we can skip trying
  // to patch. If we hit the window where the callee has a c2i in the
  // from_compiled_entry and the NULL isn't present yet then we lose the race
  // and patch the code with the same old data. Such is life.

  if (moop->code() == NULL) return;

  if (((nmethod*)cb)->is_in_use()) {

    // Expect to find a native call there (unless it was a no-inline-cache vtable dispatch)
    MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
    if (NativeCall::is_call_before(caller_pc + frame::pc_return_offset)) {
      NativeCall *call = nativeCall_before(caller_pc + frame::pc_return_offset);
      //
      // bug 6281185. We might get here after resolving a call site to a vanilla
      // virtual call. Because the resolvee uses the verified entry it may then
      // see compiled code and attempt to patch the site by calling us. This would
      // then incorrectly convert the call site to optimized and it's downhill from
      // there. If you're lucky you'll get the assert in the bugid, if not you've
      // just made a call site that could be megamorphic into a monomorphic site
      // for the rest of its life! Just another racing bug in the life of
      // fixup_callers_callsite ...
      //
      RelocIterator iter(cb, call->instruction_address(), call->next_instruction_address());
      iter.next();
      assert(iter.has_current(), "must have a reloc at java call site");
      relocInfo::relocType typ = iter.reloc()->type();
      if ( typ != relocInfo::static_call_type &&
           typ != relocInfo::opt_virtual_call_type &&
           typ != relocInfo::static_stub_type) {
        return;
      }
      address destination = call->destination();
      if (destination != entry_point) {
        CodeBlob* callee = CodeCache::find_blob(destination);
        // callee == cb seems weird. It means calling interpreter thru stub.
        if (callee == cb || callee->is_adapter_blob()) {
          // static call or optimized virtual
          if (TraceCallFixup) {
            tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
            moop->print_short_name(tty);
            tty->print_cr(" to " INTPTR_FORMAT, entry_point);
          }
          call->set_destination_mt_safe(entry_point);
        } else {
          if (TraceCallFixup) {
            tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
            moop->print_short_name(tty);
            tty->print_cr(" to " INTPTR_FORMAT, entry_point);
          }
          // assert is too strong; could also be resolve destinations.
          // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
        }
      } else {
        if (TraceCallFixup) {
          tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
          moop->print_short_name(tty);
          tty->print_cr(" to " INTPTR_FORMAT, entry_point);
        }
      }
    }
  }

IRT_END


// same as JVM_Arraycopy, but called directly from compiled code
JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
                                                oopDesc* dest, jint dest_pos,
                                                jint length,
                                                JavaThread* thread)) {
#ifndef PRODUCT
  _slow_array_copy_ctr++;
#endif
  // Check if we have null pointers
  if (src == NULL || dest == NULL) {
    THROW(vmSymbols::java_lang_NullPointerException());
  }
  // Do the copy. The casts to arrayOop are necessary to the copy_array API,
  // even though the copy_array API also performs dynamic checks to ensure
  // that src and dest are truly arrays (and are conformable).
  // The copy_array mechanism is awkward and could be removed, but
  // the compilers don't call this function except as a last resort,
  // so it probably doesn't matter.
  Klass::cast(src->klass())->copy_array((arrayOopDesc*)src, src_pos,
                                        (arrayOopDesc*)dest, dest_pos,
                                        length, thread);
}
JRT_END

char* SharedRuntime::generate_class_cast_message(
    JavaThread* thread, const char* objName) {

  // Get target class name from the checkcast instruction
  vframeStream vfst(thread, true);
  assert(!vfst.at_end(), "Java frame must exist");
  Bytecode_checkcast* cc = Bytecode_checkcast_at(
    vfst.method()->bcp_from(vfst.bci()));
  Klass* targetKlass = Klass::cast(vfst.method()->constants()->klass_at(
    cc->index(), thread));
  return generate_class_cast_message(objName, targetKlass->external_name());
}

char* SharedRuntime::generate_class_cast_message(
    const char* objName, const char* targetKlassName) {
  const char* desc = " cannot be cast to ";
  size_t msglen = strlen(objName) + strlen(desc) + strlen(targetKlassName) + 1;

  char* message = NEW_RESOURCE_ARRAY(char, msglen);
  if (NULL == message) {
    // Shouldn't happen, but don't cause even more problems if it does
    message = const_cast<char*>(objName);
  } else {
    jio_snprintf(message, msglen, "%s%s%s", objName, desc, targetKlassName);
  }
  return message;
}

JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
  (void) JavaThread::current()->reguard_stack();
JRT_END


// Handles the uncommon case in locking, i.e., contention or an inflated lock.
#ifndef PRODUCT
int SharedRuntime::_monitor_enter_ctr = 0;
#endif
JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread))
  oop obj(_obj);
#ifndef PRODUCT
  _monitor_enter_ctr++;             // monitor enter slow
#endif
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  }
  Handle h_obj(THREAD, obj);
  if (UseBiasedLocking) {
    // Retry fast entry if bias is revoked to avoid unnecessary inflation
    ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
  } else {
    ObjectSynchronizer::slow_enter(h_obj, lock, CHECK);
  }
  assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
JRT_END

#ifndef PRODUCT
int SharedRuntime::_monitor_exit_ctr = 0;
#endif
// Handles the uncommon cases of monitor unlocking in compiled code
JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock))
  oop obj(_obj);
#ifndef PRODUCT
  _monitor_exit_ctr++;              // monitor exit slow
#endif
  Thread* THREAD = JavaThread::current();
  // I'm not convinced we need the code guarded by MIGHT_HAVE_PENDING anymore;
  // testing was never able to fire the assert that guarded it, so I have removed it.
  assert(!HAS_PENDING_EXCEPTION, "Do we need code below anymore?");
#undef MIGHT_HAVE_PENDING
#ifdef MIGHT_HAVE_PENDING
  // Save and restore any pending_exception around the exception mark.
  // While the slow_exit must not throw an exception, we could come into
  // this routine with one set.
  oop pending_excep = NULL;
  const char* pending_file;
  int pending_line;
  if (HAS_PENDING_EXCEPTION) {
    pending_excep = PENDING_EXCEPTION;
    pending_file  = THREAD->exception_file();
    pending_line  = THREAD->exception_line();
    CLEAR_PENDING_EXCEPTION;
  }
#endif /* MIGHT_HAVE_PENDING */

  {
    // Exit must be non-blocking, and therefore no exceptions can be thrown.
    EXCEPTION_MARK;
    ObjectSynchronizer::slow_exit(obj, lock, THREAD);
  }

#ifdef MIGHT_HAVE_PENDING
  if (pending_excep != NULL) {
    THREAD->set_pending_exception(pending_excep, pending_file, pending_line);
  }
#endif /* MIGHT_HAVE_PENDING */
JRT_END

#ifndef PRODUCT

void SharedRuntime::print_statistics() {
  ttyLocker ttyl;
  if (xtty != NULL) xtty->head("statistics type='SharedRuntime'");

  if (_monitor_enter_ctr) tty->print_cr("%5d monitor enter slow", _monitor_enter_ctr);
  if (_monitor_exit_ctr)  tty->print_cr("%5d monitor exit slow",  _monitor_exit_ctr);
  if (_throw_null_ctr)    tty->print_cr("%5d implicit null throw", _throw_null_ctr);

  SharedRuntime::print_ic_miss_histogram();

  if (CountRemovableExceptions) {
    if (_nof_removable_exceptions > 0) {
      Unimplemented(); // this counter is not yet incremented
      tty->print_cr("Removable exceptions: %d", _nof_removable_exceptions);
    }
  }

  // Dump the JRT_ENTRY counters
  if (_new_instance_ctr) tty->print_cr("%5d new instance requires GC", _new_instance_ctr);
  if (_new_array_ctr)    tty->print_cr("%5d new array requires GC", _new_array_ctr);
  if (_multi1_ctr)       tty->print_cr("%5d multianewarray 1 dim", _multi1_ctr);
  if (_multi2_ctr)       tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr);
  if (_multi3_ctr)       tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr);
  if (_multi4_ctr)       tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr);
  if (_multi5_ctr)       tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr);

  tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr);
  tty->print_cr("%5d wrong method", _wrong_method_ctr);
  tty->print_cr("%5d unresolved static call site", _resolve_static_ctr);
  tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr);
  tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr);

  if (_mon_enter_stub_ctr)       tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr);
  if (_mon_exit_stub_ctr)        tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr);
  if (_mon_enter_ctr)            tty->print_cr("%5d monitor enter slow", _mon_enter_ctr);
  if (_mon_exit_ctr)             tty->print_cr("%5d monitor exit slow", _mon_exit_ctr);
  if (_partial_subtype_ctr)      tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr);
  if (_jbyte_array_copy_ctr)     tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr);
  if (_jshort_array_copy_ctr)    tty->print_cr("%5d short array copies", _jshort_array_copy_ctr);
  if (_jint_array_copy_ctr)      tty->print_cr("%5d int array copies", _jint_array_copy_ctr);
  if (_jlong_array_copy_ctr)     tty->print_cr("%5d long array copies", _jlong_array_copy_ctr);
  if (_oop_array_copy_ctr)       tty->print_cr("%5d oop array copies", _oop_array_copy_ctr);
  if (_checkcast_array_copy_ctr) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr);
  if (_unsafe_array_copy_ctr)    tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr);
  if (_generic_array_copy_ctr)   tty->print_cr("%5d generic array copies", _generic_array_copy_ctr);
  if (_slow_array_copy_ctr)      tty->print_cr("%5d slow array copies", _slow_array_copy_ctr);
  if (_find_handler_ctr)         tty->print_cr("%5d find exception handler", _find_handler_ctr);
  if (_rethrow_ctr)              tty->print_cr("%5d rethrow handler", _rethrow_ctr);

  if (xtty != NULL) xtty->tail("statistics");
}

inline double percent(int x, int y) {
  return 100.0 * x / MAX2(y, 1);
}

class MethodArityHistogram {
 public:
  enum { MAX_ARITY = 256 };
 private:
  static int _arity_histogram[MAX_ARITY]; // histogram of #args
  static int _size_histogram[MAX_ARITY];  // histogram of arg size in words
  static int _max_arity;                  // max. arity seen
  static int _max_size;                   // max. arg size seen

  static void add_method_to_histogram(nmethod* nm) {
    methodOop m = nm->method();
    ArgumentCount args(m->signature());
    int arity   = args.size() + (m->is_static() ? 0 : 1);
    int argsize = m->size_of_parameters();
    arity   = MIN2(arity,   MAX_ARITY-1);
    argsize = MIN2(argsize, MAX_ARITY-1);
    int count = nm->method()->compiled_invocation_count();
    _arity_histogram[arity]  += count;
    _size_histogram[argsize] += count;
    _max_arity = MAX2(_max_arity, arity);
    _max_size  = MAX2(_max_size,  argsize);
  }

  void print_histogram_helper(int n, int* histo, const char* name) {
    const int N = MIN2(5, n);
    double sum = 0;
    double weighted_sum = 0;
    int i;
    for (i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; }
    double rest = sum;
    double percent = sum / 100;
    for (i = 0; i <= N; i++) {
      rest -= histo[i];
      tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent);
    }
    tty->print_cr("rest: %7d (%5.1f%%)", (int)rest, rest / percent);
    tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
  }

  void print_histogram() {
    tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
    print_histogram_helper(_max_arity, _arity_histogram, "arity");
    tty->print_cr("\nSame for parameter size (in words):");
    print_histogram_helper(_max_size, _size_histogram, "size");
    tty->cr();
  }

 public:
  MethodArityHistogram() {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _max_arity = _max_size = 0;
    for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
    CodeCache::nmethods_do(add_method_to_histogram);
    print_histogram();
  }
};

int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
int MethodArityHistogram::_max_arity;
int MethodArityHistogram::_max_size;

void SharedRuntime::print_call_statistics(int comp_total) {
  tty->print_cr("Calls from compiled code:");
  int total  = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
  int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls;
  int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls;
  tty->print_cr("\t%9d   (%4.1f%%) total non-inlined   ", total, percent(total, total));
  tty->print_cr("\t%9d   (%4.1f%%) virtual calls       ", _nof_normal_calls, percent(_nof_normal_calls, total));
  tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
  tty->print_cr("\t  %9d  (%3.0f%%)   optimized        ", _nof_optimized_calls, percent(_nof_optimized_calls, _nof_normal_calls));
  tty->print_cr("\t  %9d  (%3.0f%%)   monomorphic      ", mono_c, percent(mono_c, _nof_normal_calls));
  tty->print_cr("\t  %9d  (%3.0f%%)   megamorphic      ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
  tty->print_cr("\t%9d   (%4.1f%%) interface calls     ", _nof_interface_calls, percent(_nof_interface_calls, total));
  tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
  tty->print_cr("\t  %9d  (%3.0f%%)   optimized        ", _nof_optimized_interface_calls, percent(_nof_optimized_interface_calls, _nof_interface_calls));
  tty->print_cr("\t  %9d  (%3.0f%%)   monomorphic      ", mono_i, percent(mono_i, _nof_interface_calls));
  tty->print_cr("\t  %9d  (%3.0f%%)   megamorphic      ", _nof_megamorphic_interface_calls, percent(_nof_megamorphic_interface_calls, _nof_interface_calls));
  tty->print_cr("\t%9d   (%4.1f%%) static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
  tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
  tty->cr();
  tty->print_cr("Note 1: counter updates are not MT-safe.");
  tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
  tty->print_cr("        %% in nested categories are relative to their category");
  tty->print_cr("        (and thus add up to more than 100%% with inlining)");
  tty->cr();

  MethodArityHistogram h;
}
#endif


// ---------------------------------------------------------------------------
// Implementation of AdapterHandlerLibrary
const char* AdapterHandlerEntry::name = "I2C/C2I adapters";
GrowableArray<uint64_t>* AdapterHandlerLibrary::_fingerprints = NULL;
GrowableArray<AdapterHandlerEntry*>* AdapterHandlerLibrary::_handlers = NULL;
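// A sketch of the cache discipline used below (restating the code, not new
// behavior): _fingerprints and _handlers are parallel arrays, so a lookup
// finds a signature's fingerprint in one array and reads the adapter entry
// at the same index in the other:
//
//   int index = _fingerprints->find(fp);
//   AdapterHandlerEntry* entry = (index >= 0) ? _handlers->at(index) : NULL;
//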
const int AdapterHandlerLibrary_size = 16*K;
u_char AdapterHandlerLibrary::_buffer[AdapterHandlerLibrary_size + 32];

void AdapterHandlerLibrary::initialize() {
  if (_fingerprints != NULL) return;
  _fingerprints = new (ResourceObj::C_HEAP) GrowableArray<uint64_t>(32, true);
  _handlers = new (ResourceObj::C_HEAP) GrowableArray<AdapterHandlerEntry*>(32, true);
  // Index 0 reserved for the slow path handler
  _fingerprints->append(0/*the never-allowed 0 fingerprint*/);
  _handlers->append(NULL);

  // Create a special handler for abstract methods. Abstract methods
  // are never compiled so an i2c entry is somewhat meaningless, but
  // fill it in with something appropriate just in case. Pass the
  // handle-wrong-method stub for the c2i transitions.
  address wrong_method = SharedRuntime::get_handle_wrong_method_stub();
  _fingerprints->append(0/*the never-allowed 0 fingerprint*/);
  assert(_handlers->length() == AbstractMethodHandler, "in wrong slot");
  _handlers->append(new AdapterHandlerEntry(StubRoutines::throw_AbstractMethodError_entry(),
                                            wrong_method, wrong_method));
}

int AdapterHandlerLibrary::get_create_adapter_index(methodHandle method) {
  // Use customized signature handler. Need to lock around updates to the
  // _fingerprints array (it is not safe for concurrent readers and a single
  // writer; this can be fixed if it becomes a problem).

  // Get the address of the ic_miss handler before we grab the
  // AdapterHandlerLibrary_lock. This fixes bug 6236259, which
  // was caused by the initialization of the stubs happening
  // while we held the lock and then notifying jvmti while
  // holding it. This just forces the initialization to be a little
  // earlier.
  address ic_miss = SharedRuntime::get_ic_miss_stub();
  assert(ic_miss != NULL, "must have handler");

  int result;
  BufferBlob* B = NULL;
  uint64_t fingerprint;
  {
    MutexLocker mu(AdapterHandlerLibrary_lock);
    // make sure data structure is initialized
    initialize();

    if (method->is_abstract()) {
      return AbstractMethodHandler;
    }

    // Lookup method signature's fingerprint
    fingerprint = Fingerprinter(method).fingerprint();
    assert(fingerprint != CONST64(0), "no zero fingerprints allowed");
    // Fingerprints are small fixed-size condensed representations of
    // signatures. If the signature is too large, it won't fit in a
    // fingerprint. Signatures which cannot support a fingerprint get a new
    // i2c adapter generated each time, instead of searching the cache for
    // one. This -1 game could be avoided by comparing signatures instead of
    // using fingerprints. However, -1 fingerprints are very rare.
    if (fingerprint != UCONST64(-1)) { // If this is a cache-able fingerprint
      // Turns out i2c adapters do not care what the return value is. Mask it
      // out so signatures that only differ in return type will share the same
      // adapter.
      fingerprint &= ~(SignatureIterator::result_feature_mask << SignatureIterator::static_feature_size);
      // Search for a prior existing i2c/c2i adapter
      int index = _fingerprints->find(fingerprint);
      if (index >= 0) return index; // Found existing handlers?
    } else {
      // Annoyingly, -1 fingerprints end up added to the array of handlers
      // as well, because a unique handler index is needed. The array cannot
      // be scanned for them because all -1's look alike. Instead, the
      // matching index is passed out and immediately used to collect the
      // two return values (the c2i and i2c adapters).
    }

    // Create I2C & C2I handlers
    ResourceMark rm;
    // Improve alignment slightly
    u_char* buf = (u_char*)(((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1));
    CodeBuffer buffer(buf, AdapterHandlerLibrary_size);
    short buffer_locs[20];
    buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
                                           sizeof(buffer_locs)/sizeof(relocInfo));
    MacroAssembler _masm(&buffer);

    // Fill in the signature array, for the calling-convention call.
    int total_args_passed = method->size_of_parameters(); // All args on stack

    BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
    VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
    int i = 0;
    if (!method->is_static())   // Pass in receiver first
      sig_bt[i++] = T_OBJECT;
    for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
      sig_bt[i++] = ss.type();  // Collect remaining bits of signature
      if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
        sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
    }
    assert(i == total_args_passed, "");

    // Now get the re-packed compiled-Java layout.
    int comp_args_on_stack;

    // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
    comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);

    AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
                                                                        total_args_passed,
                                                                        comp_args_on_stack,
                                                                        sig_bt,
                                                                        regs);

    B = BufferBlob::create(AdapterHandlerEntry::name, &buffer);
    if (B == NULL) {
      // CodeCache is full, disable compilation.
      // Ought to log this but the compile log is only per compile thread
      // and we're some nondescript Java thread.
      UseInterpreter = true;
      if (UseCompiler || AlwaysCompileLoopMethods) {
#ifndef PRODUCT
        warning("CodeCache is full. Compiler has been disabled");
        if (CompileTheWorld || ExitOnFullCodeCache) {
          before_exit(JavaThread::current());
          exit_globals(); // will delete tty
          vm_direct_exit(CompileTheWorld ? 0 : 1);
        }
#endif
        UseCompiler = false;
        AlwaysCompileLoopMethods = false;
      }
      return 0; // Out of CodeCache space (_handlers[0] == NULL)
    }
    entry->relocate(B->instructions_begin());
#ifndef PRODUCT
    // debugging support
    if (PrintAdapterHandlers) {
      tty->cr();
      tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = 0x%llx, %d bytes generated)",
                    _handlers->length(), (method->is_static() ? "static" : "receiver"),
                    method->signature()->as_C_string(), fingerprint, buffer.code_size());
      tty->print_cr("c2i argument handler starts at %p", entry->get_c2i_entry());
      Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + buffer.code_size());
    }
#endif

    // add handlers to library
    _fingerprints->append(fingerprint);
    _handlers->append(entry);
    // set handler index
    assert(_fingerprints->length() == _handlers->length(), "sanity check");
    result = _fingerprints->length() - 1;
  }
  // Outside of the lock
  if (B != NULL) {
    char blob_id[256];
    jio_snprintf(blob_id,
                 sizeof(blob_id),
                 "%s(" PTR64_FORMAT ")@" PTR_FORMAT,
                 AdapterHandlerEntry::name,
                 fingerprint,
                 B->instructions_begin());
    VTune::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
    Forte::register_stub(blob_id, B->instructions_begin(), B->instructions_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated(blob_id,
                                               B->instructions_begin(),
                                               B->instructions_end());
    }
  }
  return result;
}

void AdapterHandlerEntry::relocate(address new_base) {
  ptrdiff_t delta = new_base - _i2c_entry;
  _i2c_entry            += delta;
  _c2i_entry            += delta;
  _c2i_unverified_entry += delta;
}

// Create a native wrapper for this native method. The wrapper converts the
// java compiled calling convention to the native convention, handlizes
// arguments, and transitions to native. On return from the native we transition
// back to java blocking if a safepoint is in progress.
nmethod* AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
  ResourceMark rm;
  nmethod* nm = NULL;

  if (PrintCompilation) {
    ttyLocker ttyl;
    tty->print("---   n%s ", (method->is_synchronized() ? "s" : " "));
    method->print_short_name(tty);
    if (method->is_static()) {
      tty->print(" (static)");
    }
    tty->cr();
  }

  assert(method->has_native_function(), "must have something valid to call!");

  {
    // perform the work while holding the lock, but perform any printing outside the lock
    MutexLocker mu(AdapterHandlerLibrary_lock);
    // See if somebody beat us to it
    nm = method->code();
    if (nm) {
      return nm;
    }

    // Improve alignment slightly
    u_char* buf = (u_char*)(((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1));
    CodeBuffer buffer(buf, AdapterHandlerLibrary_size);
    // Need a few relocation entries
    double locs_buf[20];
    buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
    MacroAssembler _masm(&buffer);

    // Fill in the signature array, for the calling-convention call.
    int total_args_passed = method->size_of_parameters();

    BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
    VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
    int i = 0;
    if (!method->is_static())   // Pass in receiver first
      sig_bt[i++] = T_OBJECT;
    SignatureStream ss(method->signature());
    for (; !ss.at_return_type(); ss.next()) {
      sig_bt[i++] = ss.type();  // Collect remaining bits of signature
      if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
        sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
    }
    assert(i == total_args_passed, "");
    BasicType ret_type = ss.type();

    // Now get the compiled-Java layout as input arguments
    int comp_args_on_stack;
    comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);

    // Generate the compiled-to-native wrapper code
    nm = SharedRuntime::generate_native_wrapper(&_masm,
                                                method,
                                                total_args_passed,
                                                comp_args_on_stack,
                                                sig_bt, regs,
                                                ret_type);
  }

  // Must unlock before calling set_code
  // Install the generated code.
  if (nm != NULL) {
    method->set_code(method, nm);
    nm->post_compiled_method_load_event();
  } else {
    // CodeCache is full, disable compilation.
    // Ought to log this but the compile log is only per compile thread
    // and we're some nondescript Java thread.
    UseInterpreter = true;
    if (UseCompiler || AlwaysCompileLoopMethods) {
#ifndef PRODUCT
      warning("CodeCache is full. Compiler has been disabled");
      if (CompileTheWorld || ExitOnFullCodeCache) {
        before_exit(JavaThread::current());
        exit_globals(); // will delete tty
        vm_direct_exit(CompileTheWorld ? 0 : 1);
      }
#endif
      UseCompiler = false;
      AlwaysCompileLoopMethods = false;
    }
  }
  return nm;
}

#ifdef HAVE_DTRACE_H
// Create a dtrace nmethod for this method. The wrapper converts the
// java compiled calling convention to the native convention and makes a dummy
// call (actually nops for the size of the call instruction, which become a
// trap if the probe is enabled). It then returns to the caller. Since this all
// looks like a leaf, no thread transition is needed.
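// For illustration (an assumption about typical use, not code in this file):
// once the dtrace nmethod is installed, an operator could arm the generated
// probe sites with a one-liner along the lines of
//
//   dtrace -n 'hotspot$target:::method-entry { @[pid] = count(); }' -p <pid>
//
// which is what turns the nop region described above into an active trap.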

nmethod* AdapterHandlerLibrary::create_dtrace_nmethod(methodHandle method) {
  ResourceMark rm;
  nmethod* nm = NULL;

  if (PrintCompilation) {
    ttyLocker ttyl;
    tty->print("---   n%s ", (method->is_synchronized() ? "s" : " "));
    method->print_short_name(tty);
    if (method->is_static()) {
      tty->print(" (static)");
    }
    tty->cr();
  }

  {
    // perform the work while holding the lock, but perform any printing
    // outside the lock
    MutexLocker mu(AdapterHandlerLibrary_lock);
    // See if somebody beat us to it
    nm = method->code();
    if (nm) {
      return nm;
    }

    // Improve alignment slightly
    u_char* buf = (u_char*)
      (((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1));
    CodeBuffer buffer(buf, AdapterHandlerLibrary_size);
    // Need a few relocation entries
    double locs_buf[20];
    buffer.insts()->initialize_shared_locs(
      (relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
    MacroAssembler _masm(&buffer);

    // Generate the compiled-to-native wrapper code
    nm = SharedRuntime::generate_dtrace_nmethod(&_masm, method);
  }
  return nm;
}

// The dtrace method needs to convert a java.lang.String to a utf8 string.
void SharedRuntime::get_utf(oopDesc* src, address dst) {
  typeArrayOop jlsValue  = java_lang_String::value(src);
  int          jlsOffset = java_lang_String::offset(src);
  int          jlsLen    = java_lang_String::length(src);
  jchar*       jlsPos    = (jlsLen == 0) ? NULL :
                           jlsValue->char_at_addr(jlsOffset);
  (void) UNICODE::as_utf8(jlsPos, jlsLen, (char*)dst, max_dtrace_string_size);
}
#endif // def HAVE_DTRACE_H

// -------------------------------------------------------------------------
// Java-Java calling convention
// (what you use when Java calls Java)

//------------------------------name_for_receiver----------------------------------
// For a given signature, return the VMReg for parameter 0.
VMReg SharedRuntime::name_for_receiver() {
  VMRegPair regs;
  BasicType sig_bt = T_OBJECT;
  (void) java_calling_convention(&sig_bt, &regs, 1, true);
  // Return argument 0 register. In the LP64 build pointers
  // take 2 registers, but the VM wants only the 'main' name.
  return regs.first();
}

VMRegPair* SharedRuntime::find_callee_arguments(symbolOop sig, bool is_static, int* arg_size) {
  // This method returns a data structure allocated as a
  // ResourceObject, so do not put any ResourceMarks in here.
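  // Worked example (illustrative only): for sig = "(I[Ljava/lang/String;J)V"
  // with is_static == false, the parsing loop below produces
  //   sig_bt = { T_OBJECT /* receiver */, T_INT, T_ARRAY, T_LONG, T_VOID }
  // and *arg_size is set to 5 (Java slots; the long's second slot counts).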
  char* s = sig->as_C_string();
  int len = (int)strlen(s);
  s++; len--;                   // Skip opening paren
  char* t = s + len;
  while (*(--t) != ')') ;       // Find close paren

  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, 256);
  VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, 256);
  int cnt = 0;
  if (!is_static) {
    sig_bt[cnt++] = T_OBJECT;   // Receiver is argument 0; not in signature
  }

  while (s < t) {
    switch (*s++) {             // Switch on signature character
    case 'B': sig_bt[cnt++] = T_BYTE;    break;
    case 'C': sig_bt[cnt++] = T_CHAR;    break;
    case 'D': sig_bt[cnt++] = T_DOUBLE;  sig_bt[cnt++] = T_VOID; break;
    case 'F': sig_bt[cnt++] = T_FLOAT;   break;
    case 'I': sig_bt[cnt++] = T_INT;     break;
    case 'J': sig_bt[cnt++] = T_LONG;    sig_bt[cnt++] = T_VOID; break;
    case 'S': sig_bt[cnt++] = T_SHORT;   break;
    case 'Z': sig_bt[cnt++] = T_BOOLEAN; break;
    case 'V': sig_bt[cnt++] = T_VOID;    break;
    case 'L': // Oop
      while (*s++ != ';') ;     // Skip signature
      sig_bt[cnt++] = T_OBJECT;
      break;
    case '[': {                 // Array
      do {                      // Skip optional size
        while (*s >= '0' && *s <= '9') s++;
      } while (*s++ == '[');    // Nested arrays?
      // Skip element type
      if (s[-1] == 'L')
        while (*s++ != ';') ;   // Skip signature
      sig_bt[cnt++] = T_ARRAY;
      break;
    }
    default: ShouldNotReachHere();
    }
  }
  assert(cnt < 256, "grow table size");

  int comp_args_on_stack;
  comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true);

  // The calling convention doesn't count out_preserve_stack_slots, so
  // we must add that in to get "true" stack offsets.

  if (comp_args_on_stack) {
    for (int i = 0; i < cnt; i++) {
      VMReg reg1 = regs[i].first();
      if (reg1->is_stack()) {
        // Yuck
        reg1 = reg1->bias(out_preserve_stack_slots());
      }
      VMReg reg2 = regs[i].second();
      if (reg2->is_stack()) {
        // Yuck
        reg2 = reg2->bias(out_preserve_stack_slots());
      }
      regs[i].set_pair(reg2, reg1);
    }
  }

  // results
  *arg_size = cnt;
  return regs;
}

// OSR Migration Code
//
// This code is used to convert interpreter frames into compiled frames. It is
// called from the very start of a compiled OSR nmethod. A temp array is
// allocated to hold the interesting bits of the interpreter frame. All
// active locks are inflated to allow them to move. The displaced headers and
// active interpreter locals are copied into the temp buffer. Then we return
// back to the compiled code. The compiled code then pops the current
// interpreter frame off the stack and pushes a new compiled frame. Then it
// copies the interpreter locals and displaced headers where it wants.
// Finally it calls back to free the temp buffer.
//
// All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.

JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin(JavaThread* thread))

#ifdef IA64
  ShouldNotReachHere(); // NYI
#endif /* IA64 */

  //
  // This code depends on the memory layout of the interpreter local
  // array and the monitors. On all of our platforms the layout is identical,
  // so this code is shared. If some platform lays its arrays out
  // differently then this code could move to platform-specific code, or
  // the code here could be modified to copy items one at a time using
  // frame accessor methods and be platform independent.

  frame fr = thread->last_frame();
  assert(fr.is_interpreted_frame(), "");
  assert(fr.interpreter_frame_expression_stack_size() == 0, "only handle empty stacks");

  // Figure out how many monitors are active.
  int active_monitor_count = 0;
  for (BasicObjectLock* kptr = fr.interpreter_frame_monitor_end();
       kptr < fr.interpreter_frame_monitor_begin();
       kptr = fr.next_monitor_in_interpreter_frame(kptr)) {
    if (kptr->obj() != NULL) active_monitor_count++;
  }

  // QQQ we could place the number of active monitors in the array so that
  // compiled code could double check it.

  methodOop moop = fr.interpreter_frame_method();
  int max_locals = moop->max_locals();
  // Allocate temp buffer, 1 word per local & 2 per active monitor
  int buf_size_words = max_locals + active_monitor_count*2;
  intptr_t* buf = NEW_C_HEAP_ARRAY(intptr_t, buf_size_words);

  // Copy the locals. Order is preserved so that loading of longs works.
  // Since there's no GC I can copy the oops blindly.
  assert(sizeof(HeapWord) == sizeof(intptr_t), "fix this code");
  if (TaggedStackInterpreter) {
    for (int i = 0; i < max_locals; i++) {
      // copy only each local separately to the buffer avoiding the tag
      buf[i] = *fr.interpreter_frame_local_at(max_locals-i-1);
    }
  } else {
    Copy::disjoint_words(
      (HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
      (HeapWord*)&buf[0],
      max_locals);
  }

  // Inflate locks. Copy the displaced headers. Be careful, there can be holes.
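  // Layout of the migration buffer after both copy loops (restating the
  // invariant the asserts check, not new behavior):
  //
  //   buf: [ local_0 .. local_{max_locals-1} | hdr_0, obj_0 | hdr_1, obj_1 | .. ]
  //
  // i.e. max_locals words of locals followed by a (displaced header, object)
  // pair for each active monitor.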
  int i = max_locals;
  for (BasicObjectLock* kptr2 = fr.interpreter_frame_monitor_end();
       kptr2 < fr.interpreter_frame_monitor_begin();
       kptr2 = fr.next_monitor_in_interpreter_frame(kptr2)) {
    if (kptr2->obj() != NULL) { // Avoid 'holes' in the monitor array
      BasicLock* lock = kptr2->lock();
      // Inflate so the displaced header becomes position-independent
      if (lock->displaced_header()->is_unlocked())
        ObjectSynchronizer::inflate_helper(kptr2->obj());
      // Now the displaced header is free to move
      buf[i++] = (intptr_t)lock->displaced_header();
      buf[i++] = (intptr_t)kptr2->obj();
    }
  }
  assert(i - max_locals == active_monitor_count*2, "found the expected number of monitors");

  return buf;
JRT_END

JRT_LEAF(void, SharedRuntime::OSR_migration_end(intptr_t* buf))
  FREE_C_HEAP_ARRAY(intptr_t, buf);
JRT_END

#ifndef PRODUCT
bool AdapterHandlerLibrary::contains(CodeBlob* b) {

  for (int i = 0; i < _handlers->length(); i++) {
    AdapterHandlerEntry* a = get_entry(i);
    if (a != NULL && b == CodeCache::find_blob(a->get_i2c_entry())) return true;
  }
  return false;
}

void AdapterHandlerLibrary::print_handler(CodeBlob* b) {

  for (int i = 0; i < _handlers->length(); i++) {
    AdapterHandlerEntry* a = get_entry(i);
    if (a != NULL && b == CodeCache::find_blob(a->get_i2c_entry())) {
      tty->print("Adapter for signature: ");
      // Fingerprinter::print(_fingerprints->at(i));
      tty->print("0x%" FORMAT64_MODIFIER "x", _fingerprints->at(i));
      tty->print_cr(" i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
                    a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry());

      return;
    }
  }
  assert(false, "Should have found handler");
}
#endif /* PRODUCT */
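// Debugging note (an assumption about typical use, not code in this file):
// the not-product helpers above are meant to be driven from other debug
// printing paths or from a debugger, roughly:
//
//   CodeBlob* b = CodeCache::find_blob(some_pc);
//   if (AdapterHandlerLibrary::contains(b)) {
//     AdapterHandlerLibrary::print_handler(b);  // fingerprint + entry points
//   }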