sharedRuntime.cpp revision 1426:2338d41fbd81
1/* 2 * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 20 * CA 95054 USA or visit www.sun.com if you need additional information or 21 * have any questions. 22 * 23 */ 24 25#include "incls/_precompiled.incl" 26#include "incls/_sharedRuntime.cpp.incl" 27#include <math.h> 28 29HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t); 30HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int, 31 char*, int, char*, int, char*, int); 32HS_DTRACE_PROBE_DECL7(hotspot, method__return, int, 33 char*, int, char*, int, char*, int); 34 35// Implementation of SharedRuntime 36 37#ifndef PRODUCT 38// For statistics 39int SharedRuntime::_ic_miss_ctr = 0; 40int SharedRuntime::_wrong_method_ctr = 0; 41int SharedRuntime::_resolve_static_ctr = 0; 42int SharedRuntime::_resolve_virtual_ctr = 0; 43int SharedRuntime::_resolve_opt_virtual_ctr = 0; 44int SharedRuntime::_implicit_null_throws = 0; 45int SharedRuntime::_implicit_div0_throws = 0; 46int SharedRuntime::_throw_null_ctr = 0; 47 48int SharedRuntime::_nof_normal_calls = 0; 49int SharedRuntime::_nof_optimized_calls = 0; 50int SharedRuntime::_nof_inlined_calls = 0; 51int SharedRuntime::_nof_megamorphic_calls = 0; 52int SharedRuntime::_nof_static_calls = 0; 53int SharedRuntime::_nof_inlined_static_calls = 0; 54int SharedRuntime::_nof_interface_calls = 0; 55int SharedRuntime::_nof_optimized_interface_calls = 0; 56int SharedRuntime::_nof_inlined_interface_calls = 0; 57int SharedRuntime::_nof_megamorphic_interface_calls = 0; 58int SharedRuntime::_nof_removable_exceptions = 0; 59 60int SharedRuntime::_new_instance_ctr=0; 61int SharedRuntime::_new_array_ctr=0; 62int SharedRuntime::_multi1_ctr=0; 63int SharedRuntime::_multi2_ctr=0; 64int SharedRuntime::_multi3_ctr=0; 65int SharedRuntime::_multi4_ctr=0; 66int SharedRuntime::_multi5_ctr=0; 67int SharedRuntime::_mon_enter_stub_ctr=0; 68int SharedRuntime::_mon_exit_stub_ctr=0; 69int SharedRuntime::_mon_enter_ctr=0; 70int SharedRuntime::_mon_exit_ctr=0; 71int SharedRuntime::_partial_subtype_ctr=0; 72int SharedRuntime::_jbyte_array_copy_ctr=0; 73int SharedRuntime::_jshort_array_copy_ctr=0; 74int SharedRuntime::_jint_array_copy_ctr=0; 75int SharedRuntime::_jlong_array_copy_ctr=0; 76int SharedRuntime::_oop_array_copy_ctr=0; 77int SharedRuntime::_checkcast_array_copy_ctr=0; 78int SharedRuntime::_unsafe_array_copy_ctr=0; 79int SharedRuntime::_generic_array_copy_ctr=0; 80int SharedRuntime::_slow_array_copy_ctr=0; 81int SharedRuntime::_find_handler_ctr=0; 82int SharedRuntime::_rethrow_ctr=0; 83 84int SharedRuntime::_ICmiss_index = 0; 85int SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count]; 86address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count]; 87 88void SharedRuntime::trace_ic_miss(address at) { 89 for (int i = 0; i < _ICmiss_index; i++) { 90 if (_ICmiss_at[i] == at) { 91 _ICmiss_count[i]++; 92 return; 93 } 94 } 95 int index = _ICmiss_index++; 96 if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1; 97 _ICmiss_at[index] = at; 98 _ICmiss_count[index] = 1; 99} 100 101void SharedRuntime::print_ic_miss_histogram() { 102 if (ICMissHistogram) { 103 tty->print_cr ("IC Miss Histogram:"); 104 int tot_misses = 0; 105 for (int i = 0; i < _ICmiss_index; i++) { 106 tty->print_cr(" at: " INTPTR_FORMAT " nof: %d", _ICmiss_at[i], _ICmiss_count[i]); 107 tot_misses += _ICmiss_count[i]; 108 } 109 tty->print_cr ("Total IC misses: %7d", tot_misses); 110 } 111} 112#endif // PRODUCT 113 114#ifndef SERIALGC 115 116// G1 write-barrier pre: executed before a pointer store. 117JRT_LEAF(void, SharedRuntime::g1_wb_pre(oopDesc* orig, JavaThread *thread)) 118 if (orig == NULL) { 119 assert(false, "should be optimized out"); 120 return; 121 } 122 assert(orig->is_oop(true /* ignore mark word */), "Error"); 123 // store the original value that was in the field reference 124 thread->satb_mark_queue().enqueue(orig); 125JRT_END 126 127// G1 write-barrier post: executed after a pointer store. 128JRT_LEAF(void, SharedRuntime::g1_wb_post(void* card_addr, JavaThread* thread)) 129 thread->dirty_card_queue().enqueue(card_addr); 130JRT_END 131 132#endif // !SERIALGC 133 134 135JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x)) 136 return x * y; 137JRT_END 138 139 140JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x)) 141 if (x == min_jlong && y == CONST64(-1)) { 142 return x; 143 } else { 144 return x / y; 145 } 146JRT_END 147 148 149JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x)) 150 if (x == min_jlong && y == CONST64(-1)) { 151 return 0; 152 } else { 153 return x % y; 154 } 155JRT_END 156 157 158const juint float_sign_mask = 0x7FFFFFFF; 159const juint float_infinity = 0x7F800000; 160const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF); 161const julong double_infinity = CONST64(0x7FF0000000000000); 162 163JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y)) 164#ifdef _WIN64 165 // 64-bit Windows on amd64 returns the wrong values for 166 // infinity operands. 167 union { jfloat f; juint i; } xbits, ybits; 168 xbits.f = x; 169 ybits.f = y; 170 // x Mod Infinity == x unless x is infinity 171 if ( ((xbits.i & float_sign_mask) != float_infinity) && 172 ((ybits.i & float_sign_mask) == float_infinity) ) { 173 return x; 174 } 175#endif 176 return ((jfloat)fmod((double)x,(double)y)); 177JRT_END 178 179 180JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y)) 181#ifdef _WIN64 182 union { jdouble d; julong l; } xbits, ybits; 183 xbits.d = x; 184 ybits.d = y; 185 // x Mod Infinity == x unless x is infinity 186 if ( ((xbits.l & double_sign_mask) != double_infinity) && 187 ((ybits.l & double_sign_mask) == double_infinity) ) { 188 return x; 189 } 190#endif 191 return ((jdouble)fmod((double)x,(double)y)); 192JRT_END 193 194 195JRT_LEAF(jint, SharedRuntime::f2i(jfloat x)) 196 if (g_isnan(x)) 197 return 0; 198 if (x >= (jfloat) max_jint) 199 return max_jint; 200 if (x <= (jfloat) min_jint) 201 return min_jint; 202 return (jint) x; 203JRT_END 204 205 206JRT_LEAF(jlong, SharedRuntime::f2l(jfloat x)) 207 if (g_isnan(x)) 208 return 0; 209 if (x >= (jfloat) max_jlong) 210 return max_jlong; 211 if (x <= (jfloat) min_jlong) 212 return min_jlong; 213 return (jlong) x; 214JRT_END 215 216 217JRT_LEAF(jint, SharedRuntime::d2i(jdouble x)) 218 if (g_isnan(x)) 219 return 0; 220 if (x >= (jdouble) max_jint) 221 return max_jint; 222 if (x <= (jdouble) min_jint) 223 return min_jint; 224 return (jint) x; 225JRT_END 226 227 228JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x)) 229 if (g_isnan(x)) 230 return 0; 231 if (x >= (jdouble) max_jlong) 232 return max_jlong; 233 if (x <= (jdouble) min_jlong) 234 return min_jlong; 235 return (jlong) x; 236JRT_END 237 238 239JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x)) 240 return (jfloat)x; 241JRT_END 242 243 244JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x)) 245 return (jfloat)x; 246JRT_END 247 248 249JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x)) 250 return (jdouble)x; 251JRT_END 252 253// Exception handling accross interpreter/compiler boundaries 254// 255// exception_handler_for_return_address(...) returns the continuation address. 256// The continuation address is the entry point of the exception handler of the 257// previous frame depending on the return address. 258 259address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) { 260 assert(frame::verify_return_pc(return_address), "must be a return pc"); 261 262 // Reset MethodHandle flag. 263 thread->set_is_method_handle_return(false); 264 265 // the fastest case first 266 CodeBlob* blob = CodeCache::find_blob(return_address); 267 if (blob != NULL && blob->is_nmethod()) { 268 nmethod* code = (nmethod*)blob; 269 assert(code != NULL, "nmethod must be present"); 270 // Check if the return address is a MethodHandle call site. 271 thread->set_is_method_handle_return(code->is_method_handle_return(return_address)); 272 // native nmethods don't have exception handlers 273 assert(!code->is_native_method(), "no exception handler"); 274 assert(code->header_begin() != code->exception_begin(), "no exception handler"); 275 if (code->is_deopt_pc(return_address)) { 276 return SharedRuntime::deopt_blob()->unpack_with_exception(); 277 } else { 278 return code->exception_begin(); 279 } 280 } 281 282 // Entry code 283 if (StubRoutines::returns_to_call_stub(return_address)) { 284 return StubRoutines::catch_exception_entry(); 285 } 286 // Interpreted code 287 if (Interpreter::contains(return_address)) { 288 return Interpreter::rethrow_exception_entry(); 289 } 290 291 // Compiled code 292 if (CodeCache::contains(return_address)) { 293 CodeBlob* blob = CodeCache::find_blob(return_address); 294 if (blob->is_nmethod()) { 295 nmethod* code = (nmethod*)blob; 296 assert(code != NULL, "nmethod must be present"); 297 // Check if the return address is a MethodHandle call site. 298 thread->set_is_method_handle_return(code->is_method_handle_return(return_address)); 299 assert(code->header_begin() != code->exception_begin(), "no exception handler"); 300 return code->exception_begin(); 301 } 302 if (blob->is_runtime_stub()) { 303 ShouldNotReachHere(); // callers are responsible for skipping runtime stub frames 304 } 305 } 306 guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!"); 307#ifndef PRODUCT 308 { ResourceMark rm; 309 tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address); 310 tty->print_cr("a) exception happened in (new?) code stubs/buffers that is not handled here"); 311 tty->print_cr("b) other problem"); 312 } 313#endif // PRODUCT 314 ShouldNotReachHere(); 315 return NULL; 316} 317 318 319JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* thread, address return_address)) 320 return raw_exception_handler_for_return_address(thread, return_address); 321JRT_END 322 323 324address SharedRuntime::get_poll_stub(address pc) { 325 address stub; 326 // Look up the code blob 327 CodeBlob *cb = CodeCache::find_blob(pc); 328 329 // Should be an nmethod 330 assert( cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod" ); 331 332 // Look up the relocation information 333 assert( ((nmethod*)cb)->is_at_poll_or_poll_return(pc), 334 "safepoint polling: type must be poll" ); 335 336 assert( ((NativeInstruction*)pc)->is_safepoint_poll(), 337 "Only polling locations are used for safepoint"); 338 339 bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc); 340 if (at_poll_return) { 341 assert(SharedRuntime::polling_page_return_handler_blob() != NULL, 342 "polling page return stub not created yet"); 343 stub = SharedRuntime::polling_page_return_handler_blob()->instructions_begin(); 344 } else { 345 assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL, 346 "polling page safepoint stub not created yet"); 347 stub = SharedRuntime::polling_page_safepoint_handler_blob()->instructions_begin(); 348 } 349#ifndef PRODUCT 350 if( TraceSafepoint ) { 351 char buf[256]; 352 jio_snprintf(buf, sizeof(buf), 353 "... found polling page %s exception at pc = " 354 INTPTR_FORMAT ", stub =" INTPTR_FORMAT, 355 at_poll_return ? "return" : "loop", 356 (intptr_t)pc, (intptr_t)stub); 357 tty->print_raw_cr(buf); 358 } 359#endif // PRODUCT 360 return stub; 361} 362 363 364oop SharedRuntime::retrieve_receiver( symbolHandle sig, frame caller ) { 365 assert(caller.is_interpreted_frame(), ""); 366 int args_size = ArgumentSizeComputer(sig).size() + 1; 367 assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack"); 368 oop result = (oop) *caller.interpreter_frame_tos_at(args_size - 1); 369 assert(Universe::heap()->is_in(result) && result->is_oop(), "receiver must be an oop"); 370 return result; 371} 372 373 374void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception) { 375 if (JvmtiExport::can_post_on_exceptions()) { 376 vframeStream vfst(thread, true); 377 methodHandle method = methodHandle(thread, vfst.method()); 378 address bcp = method()->bcp_from(vfst.bci()); 379 JvmtiExport::post_exception_throw(thread, method(), bcp, h_exception()); 380 } 381 Exceptions::_throw(thread, __FILE__, __LINE__, h_exception); 382} 383 384void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, symbolOop name, const char *message) { 385 Handle h_exception = Exceptions::new_exception(thread, name, message); 386 throw_and_post_jvmti_exception(thread, h_exception); 387} 388 389// The interpreter code to call this tracing function is only 390// called/generated when TraceRedefineClasses has the right bits 391// set. Since obsolete methods are never compiled, we don't have 392// to modify the compilers to generate calls to this function. 393// 394JRT_LEAF(int, SharedRuntime::rc_trace_method_entry( 395 JavaThread* thread, methodOopDesc* method)) 396 assert(RC_TRACE_IN_RANGE(0x00001000, 0x00002000), "wrong call"); 397 398 if (method->is_obsolete()) { 399 // We are calling an obsolete method, but this is not necessarily 400 // an error. Our method could have been redefined just after we 401 // fetched the methodOop from the constant pool. 402 403 // RC_TRACE macro has an embedded ResourceMark 404 RC_TRACE_WITH_THREAD(0x00001000, thread, 405 ("calling obsolete method '%s'", 406 method->name_and_sig_as_C_string())); 407 if (RC_TRACE_ENABLED(0x00002000)) { 408 // this option is provided to debug calls to obsolete methods 409 guarantee(false, "faulting at call to an obsolete method."); 410 } 411 } 412 return 0; 413JRT_END 414 415// ret_pc points into caller; we are returning caller's exception handler 416// for given exception 417address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception, 418 bool force_unwind, bool top_frame_only) { 419 assert(nm != NULL, "must exist"); 420 ResourceMark rm; 421 422 ScopeDesc* sd = nm->scope_desc_at(ret_pc); 423 // determine handler bci, if any 424 EXCEPTION_MARK; 425 426 int handler_bci = -1; 427 int scope_depth = 0; 428 if (!force_unwind) { 429 int bci = sd->bci(); 430 do { 431 bool skip_scope_increment = false; 432 // exception handler lookup 433 KlassHandle ek (THREAD, exception->klass()); 434 handler_bci = sd->method()->fast_exception_handler_bci_for(ek, bci, THREAD); 435 if (HAS_PENDING_EXCEPTION) { 436 // We threw an exception while trying to find the exception handler. 437 // Transfer the new exception to the exception handle which will 438 // be set into thread local storage, and do another lookup for an 439 // exception handler for this exception, this time starting at the 440 // BCI of the exception handler which caused the exception to be 441 // thrown (bugs 4307310 and 4546590). Set "exception" reference 442 // argument to ensure that the correct exception is thrown (4870175). 443 exception = Handle(THREAD, PENDING_EXCEPTION); 444 CLEAR_PENDING_EXCEPTION; 445 if (handler_bci >= 0) { 446 bci = handler_bci; 447 handler_bci = -1; 448 skip_scope_increment = true; 449 } 450 } 451 if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) { 452 sd = sd->sender(); 453 if (sd != NULL) { 454 bci = sd->bci(); 455 } 456 ++scope_depth; 457 } 458 } while (!top_frame_only && handler_bci < 0 && sd != NULL); 459 } 460 461 // found handling method => lookup exception handler 462 int catch_pco = ret_pc - nm->instructions_begin(); 463 464 ExceptionHandlerTable table(nm); 465 HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth); 466 if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) { 467 // Allow abbreviated catch tables. The idea is to allow a method 468 // to materialize its exceptions without committing to the exact 469 // routing of exceptions. In particular this is needed for adding 470 // a synthethic handler to unlock monitors when inlining 471 // synchonized methods since the unlock path isn't represented in 472 // the bytecodes. 473 t = table.entry_for(catch_pco, -1, 0); 474 } 475 476#ifdef COMPILER1 477 if (t == NULL && nm->is_compiled_by_c1()) { 478 assert(nm->unwind_handler_begin() != NULL, ""); 479 return nm->unwind_handler_begin(); 480 } 481#endif 482 483 if (t == NULL) { 484 tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci); 485 tty->print_cr(" Exception:"); 486 exception->print(); 487 tty->cr(); 488 tty->print_cr(" Compiled exception table :"); 489 table.print(); 490 nm->print_code(); 491 guarantee(false, "missing exception handler"); 492 return NULL; 493 } 494 495 return nm->instructions_begin() + t->pco(); 496} 497 498JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread)) 499 // These errors occur only at call sites 500 throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError()); 501JRT_END 502 503JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* thread)) 504 // These errors occur only at call sites 505 throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub"); 506JRT_END 507 508JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread)) 509 throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero"); 510JRT_END 511 512JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* thread)) 513 throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException()); 514JRT_END 515 516JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* thread)) 517 // This entry point is effectively only used for NullPointerExceptions which occur at inline 518 // cache sites (when the callee activation is not yet set up) so we are at a call site 519 throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException()); 520JRT_END 521 522JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread)) 523 // We avoid using the normal exception construction in this case because 524 // it performs an upcall to Java, and we're already out of stack space. 525 klassOop k = SystemDictionary::StackOverflowError_klass(); 526 oop exception_oop = instanceKlass::cast(k)->allocate_instance(CHECK); 527 Handle exception (thread, exception_oop); 528 if (StackTraceInThrowable) { 529 java_lang_Throwable::fill_in_stack_trace(exception); 530 } 531 throw_and_post_jvmti_exception(thread, exception); 532JRT_END 533 534address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread, 535 address pc, 536 SharedRuntime::ImplicitExceptionKind exception_kind) 537{ 538 address target_pc = NULL; 539 540 if (Interpreter::contains(pc)) { 541#ifdef CC_INTERP 542 // C++ interpreter doesn't throw implicit exceptions 543 ShouldNotReachHere(); 544#else 545 switch (exception_kind) { 546 case IMPLICIT_NULL: return Interpreter::throw_NullPointerException_entry(); 547 case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry(); 548 case STACK_OVERFLOW: return Interpreter::throw_StackOverflowError_entry(); 549 default: ShouldNotReachHere(); 550 } 551#endif // !CC_INTERP 552 } else { 553 switch (exception_kind) { 554 case STACK_OVERFLOW: { 555 // Stack overflow only occurs upon frame setup; the callee is 556 // going to be unwound. Dispatch to a shared runtime stub 557 // which will cause the StackOverflowError to be fabricated 558 // and processed. 559 // For stack overflow in deoptimization blob, cleanup thread. 560 if (thread->deopt_mark() != NULL) { 561 Deoptimization::cleanup_deopt_info(thread, NULL); 562 } 563 return StubRoutines::throw_StackOverflowError_entry(); 564 } 565 566 case IMPLICIT_NULL: { 567 if (VtableStubs::contains(pc)) { 568 // We haven't yet entered the callee frame. Fabricate an 569 // exception and begin dispatching it in the caller. Since 570 // the caller was at a call site, it's safe to destroy all 571 // caller-saved registers, as these entry points do. 572 VtableStub* vt_stub = VtableStubs::stub_containing(pc); 573 574 // If vt_stub is NULL, then return NULL to signal handler to report the SEGV error. 575 if (vt_stub == NULL) return NULL; 576 577 if (vt_stub->is_abstract_method_error(pc)) { 578 assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs"); 579 return StubRoutines::throw_AbstractMethodError_entry(); 580 } else { 581 return StubRoutines::throw_NullPointerException_at_call_entry(); 582 } 583 } else { 584 CodeBlob* cb = CodeCache::find_blob(pc); 585 586 // If code blob is NULL, then return NULL to signal handler to report the SEGV error. 587 if (cb == NULL) return NULL; 588 589 // Exception happened in CodeCache. Must be either: 590 // 1. Inline-cache check in C2I handler blob, 591 // 2. Inline-cache check in nmethod, or 592 // 3. Implict null exception in nmethod 593 594 if (!cb->is_nmethod()) { 595 guarantee(cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(), 596 "exception happened outside interpreter, nmethods and vtable stubs (1)"); 597 // There is no handler here, so we will simply unwind. 598 return StubRoutines::throw_NullPointerException_at_call_entry(); 599 } 600 601 // Otherwise, it's an nmethod. Consult its exception handlers. 602 nmethod* nm = (nmethod*)cb; 603 if (nm->inlinecache_check_contains(pc)) { 604 // exception happened inside inline-cache check code 605 // => the nmethod is not yet active (i.e., the frame 606 // is not set up yet) => use return address pushed by 607 // caller => don't push another return address 608 return StubRoutines::throw_NullPointerException_at_call_entry(); 609 } 610 611#ifndef PRODUCT 612 _implicit_null_throws++; 613#endif 614 target_pc = nm->continuation_for_implicit_exception(pc); 615 // If there's an unexpected fault, target_pc might be NULL, 616 // in which case we want to fall through into the normal 617 // error handling code. 618 } 619 620 break; // fall through 621 } 622 623 624 case IMPLICIT_DIVIDE_BY_ZERO: { 625 nmethod* nm = CodeCache::find_nmethod(pc); 626 guarantee(nm != NULL, "must have containing nmethod for implicit division-by-zero exceptions"); 627#ifndef PRODUCT 628 _implicit_div0_throws++; 629#endif 630 target_pc = nm->continuation_for_implicit_exception(pc); 631 // If there's an unexpected fault, target_pc might be NULL, 632 // in which case we want to fall through into the normal 633 // error handling code. 634 break; // fall through 635 } 636 637 default: ShouldNotReachHere(); 638 } 639 640 assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind"); 641 642 // for AbortVMOnException flag 643 NOT_PRODUCT(Exceptions::debug_check_abort("java.lang.NullPointerException")); 644 if (exception_kind == IMPLICIT_NULL) { 645 Events::log("Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc); 646 } else { 647 Events::log("Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc); 648 } 649 return target_pc; 650 } 651 652 ShouldNotReachHere(); 653 return NULL; 654} 655 656 657JNI_ENTRY(void, throw_unsatisfied_link_error(JNIEnv* env, ...)) 658{ 659 THROW(vmSymbols::java_lang_UnsatisfiedLinkError()); 660} 661JNI_END 662 663 664address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() { 665 return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error); 666} 667 668 669#ifndef PRODUCT 670JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2)) 671 const frame f = thread->last_frame(); 672 assert(f.is_interpreted_frame(), "must be an interpreted frame"); 673#ifndef PRODUCT 674 methodHandle mh(THREAD, f.interpreter_frame_method()); 675 BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2); 676#endif // !PRODUCT 677 return preserve_this_value; 678JRT_END 679#endif // !PRODUCT 680 681 682JRT_ENTRY(void, SharedRuntime::yield_all(JavaThread* thread, int attempts)) 683 os::yield_all(attempts); 684JRT_END 685 686 687JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj)) 688 assert(obj->is_oop(), "must be a valid oop"); 689 assert(obj->klass()->klass_part()->has_finalizer(), "shouldn't be here otherwise"); 690 instanceKlass::register_finalizer(instanceOop(obj), CHECK); 691JRT_END 692 693 694jlong SharedRuntime::get_java_tid(Thread* thread) { 695 if (thread != NULL) { 696 if (thread->is_Java_thread()) { 697 oop obj = ((JavaThread*)thread)->threadObj(); 698 return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj); 699 } 700 } 701 return 0; 702} 703 704/** 705 * This function ought to be a void function, but cannot be because 706 * it gets turned into a tail-call on sparc, which runs into dtrace bug 707 * 6254741. Once that is fixed we can remove the dummy return value. 708 */ 709int SharedRuntime::dtrace_object_alloc(oopDesc* o) { 710 return dtrace_object_alloc_base(Thread::current(), o); 711} 712 713int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o) { 714 assert(DTraceAllocProbes, "wrong call"); 715 Klass* klass = o->blueprint(); 716 int size = o->size(); 717 symbolOop name = klass->name(); 718 HS_DTRACE_PROBE4(hotspot, object__alloc, get_java_tid(thread), 719 name->bytes(), name->utf8_length(), size * HeapWordSize); 720 return 0; 721} 722 723JRT_LEAF(int, SharedRuntime::dtrace_method_entry( 724 JavaThread* thread, methodOopDesc* method)) 725 assert(DTraceMethodProbes, "wrong call"); 726 symbolOop kname = method->klass_name(); 727 symbolOop name = method->name(); 728 symbolOop sig = method->signature(); 729 HS_DTRACE_PROBE7(hotspot, method__entry, get_java_tid(thread), 730 kname->bytes(), kname->utf8_length(), 731 name->bytes(), name->utf8_length(), 732 sig->bytes(), sig->utf8_length()); 733 return 0; 734JRT_END 735 736JRT_LEAF(int, SharedRuntime::dtrace_method_exit( 737 JavaThread* thread, methodOopDesc* method)) 738 assert(DTraceMethodProbes, "wrong call"); 739 symbolOop kname = method->klass_name(); 740 symbolOop name = method->name(); 741 symbolOop sig = method->signature(); 742 HS_DTRACE_PROBE7(hotspot, method__return, get_java_tid(thread), 743 kname->bytes(), kname->utf8_length(), 744 name->bytes(), name->utf8_length(), 745 sig->bytes(), sig->utf8_length()); 746 return 0; 747JRT_END 748 749 750// Finds receiver, CallInfo (i.e. receiver method), and calling bytecode) 751// for a call current in progress, i.e., arguments has been pushed on stack 752// put callee has not been invoked yet. Used by: resolve virtual/static, 753// vtable updates, etc. Caller frame must be compiled. 754Handle SharedRuntime::find_callee_info(JavaThread* thread, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) { 755 ResourceMark rm(THREAD); 756 757 // last java frame on stack (which includes native call frames) 758 vframeStream vfst(thread, true); // Do not skip and javaCalls 759 760 return find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(Handle())); 761} 762 763 764// Finds receiver, CallInfo (i.e. receiver method), and calling bytecode 765// for a call current in progress, i.e., arguments has been pushed on stack 766// but callee has not been invoked yet. Caller frame must be compiled. 767Handle SharedRuntime::find_callee_info_helper(JavaThread* thread, 768 vframeStream& vfst, 769 Bytecodes::Code& bc, 770 CallInfo& callinfo, TRAPS) { 771 Handle receiver; 772 Handle nullHandle; //create a handy null handle for exception returns 773 774 assert(!vfst.at_end(), "Java frame must exist"); 775 776 // Find caller and bci from vframe 777 methodHandle caller (THREAD, vfst.method()); 778 int bci = vfst.bci(); 779 780 // Find bytecode 781 Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci); 782 bc = bytecode->adjusted_invoke_code(); 783 int bytecode_index = bytecode->index(); 784 785 // Find receiver for non-static call 786 if (bc != Bytecodes::_invokestatic) { 787 // This register map must be update since we need to find the receiver for 788 // compiled frames. The receiver might be in a register. 789 RegisterMap reg_map2(thread); 790 frame stubFrame = thread->last_frame(); 791 // Caller-frame is a compiled frame 792 frame callerFrame = stubFrame.sender(®_map2); 793 794 methodHandle callee = bytecode->static_target(CHECK_(nullHandle)); 795 if (callee.is_null()) { 796 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle); 797 } 798 // Retrieve from a compiled argument list 799 receiver = Handle(THREAD, callerFrame.retrieve_receiver(®_map2)); 800 801 if (receiver.is_null()) { 802 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle); 803 } 804 } 805 806 // Resolve method. This is parameterized by bytecode. 807 constantPoolHandle constants (THREAD, caller->constants()); 808 assert (receiver.is_null() || receiver->is_oop(), "wrong receiver"); 809 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_(nullHandle)); 810 811#ifdef ASSERT 812 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls 813 if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) { 814 assert(receiver.not_null(), "should have thrown exception"); 815 KlassHandle receiver_klass (THREAD, receiver->klass()); 816 klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle)); 817 // klass is already loaded 818 KlassHandle static_receiver_klass (THREAD, rk); 819 assert(receiver_klass->is_subtype_of(static_receiver_klass()), "actual receiver must be subclass of static receiver klass"); 820 if (receiver_klass->oop_is_instance()) { 821 if (instanceKlass::cast(receiver_klass())->is_not_initialized()) { 822 tty->print_cr("ERROR: Klass not yet initialized!!"); 823 receiver_klass.print(); 824 } 825 assert (!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized"); 826 } 827 } 828#endif 829 830 return receiver; 831} 832 833methodHandle SharedRuntime::find_callee_method(JavaThread* thread, TRAPS) { 834 ResourceMark rm(THREAD); 835 // We need first to check if any Java activations (compiled, interpreted) 836 // exist on the stack since last JavaCall. If not, we need 837 // to get the target method from the JavaCall wrapper. 838 vframeStream vfst(thread, true); // Do not skip any javaCalls 839 methodHandle callee_method; 840 if (vfst.at_end()) { 841 // No Java frames were found on stack since we did the JavaCall. 842 // Hence the stack can only contain an entry_frame. We need to 843 // find the target method from the stub frame. 844 RegisterMap reg_map(thread, false); 845 frame fr = thread->last_frame(); 846 assert(fr.is_runtime_frame(), "must be a runtimeStub"); 847 fr = fr.sender(®_map); 848 assert(fr.is_entry_frame(), "must be"); 849 // fr is now pointing to the entry frame. 850 callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method()); 851 assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??"); 852 } else { 853 Bytecodes::Code bc; 854 CallInfo callinfo; 855 find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle())); 856 callee_method = callinfo.selected_method(); 857 } 858 assert(callee_method()->is_method(), "must be"); 859 return callee_method; 860} 861 862// Resolves a call. 863methodHandle SharedRuntime::resolve_helper(JavaThread *thread, 864 bool is_virtual, 865 bool is_optimized, TRAPS) { 866 methodHandle callee_method; 867 callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD); 868 if (JvmtiExport::can_hotswap_or_post_breakpoint()) { 869 int retry_count = 0; 870 while (!HAS_PENDING_EXCEPTION && callee_method->is_old() && 871 callee_method->method_holder() != SystemDictionary::Object_klass()) { 872 // If has a pending exception then there is no need to re-try to 873 // resolve this method. 874 // If the method has been redefined, we need to try again. 875 // Hack: we have no way to update the vtables of arrays, so don't 876 // require that java.lang.Object has been updated. 877 878 // It is very unlikely that method is redefined more than 100 times 879 // in the middle of resolve. If it is looping here more than 100 times 880 // means then there could be a bug here. 881 guarantee((retry_count++ < 100), 882 "Could not resolve to latest version of redefined method"); 883 // method is redefined in the middle of resolve so re-try. 884 callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD); 885 } 886 } 887 return callee_method; 888} 889 890// Resolves a call. The compilers generate code for calls that go here 891// and are patched with the real destination of the call. 892methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread, 893 bool is_virtual, 894 bool is_optimized, TRAPS) { 895 896 ResourceMark rm(thread); 897 RegisterMap cbl_map(thread, false); 898 frame caller_frame = thread->last_frame().sender(&cbl_map); 899 900 CodeBlob* caller_cb = caller_frame.cb(); 901 guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod"); 902 nmethod* caller_nm = caller_cb->as_nmethod_or_null(); 903 // make sure caller is not getting deoptimized 904 // and removed before we are done with it. 905 // CLEANUP - with lazy deopt shouldn't need this lock 906 nmethodLocker caller_lock(caller_nm); 907 908 909 // determine call info & receiver 910 // note: a) receiver is NULL for static calls 911 // b) an exception is thrown if receiver is NULL for non-static calls 912 CallInfo call_info; 913 Bytecodes::Code invoke_code = Bytecodes::_illegal; 914 Handle receiver = find_callee_info(thread, invoke_code, 915 call_info, CHECK_(methodHandle())); 916 methodHandle callee_method = call_info.selected_method(); 917 918 assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) || 919 ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode"); 920 921#ifndef PRODUCT 922 // tracing/debugging/statistics 923 int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) : 924 (is_virtual) ? (&_resolve_virtual_ctr) : 925 (&_resolve_static_ctr); 926 Atomic::inc(addr); 927 928 if (TraceCallFixup) { 929 ResourceMark rm(thread); 930 tty->print("resolving %s%s (%s) call to", 931 (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static", 932 Bytecodes::name(invoke_code)); 933 callee_method->print_short_name(tty); 934 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code()); 935 } 936#endif 937 938 // JSR 292 939 // If the resolved method is a MethodHandle invoke target the call 940 // site must be a MethodHandle call site. 941 if (callee_method->is_method_handle_invoke()) { 942 assert(caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site"); 943 } 944 945 // Compute entry points. This might require generation of C2I converter 946 // frames, so we cannot be holding any locks here. Furthermore, the 947 // computation of the entry points is independent of patching the call. We 948 // always return the entry-point, but we only patch the stub if the call has 949 // not been deoptimized. Return values: For a virtual call this is an 950 // (cached_oop, destination address) pair. For a static call/optimized 951 // virtual this is just a destination address. 952 953 StaticCallInfo static_call_info; 954 CompiledICInfo virtual_call_info; 955 956 // Make sure the callee nmethod does not get deoptimized and removed before 957 // we are done patching the code. 958 nmethod* callee_nm = callee_method->code(); 959 nmethodLocker nl_callee(callee_nm); 960#ifdef ASSERT 961 address dest_entry_point = callee_nm == NULL ? 0 : callee_nm->entry_point(); // used below 962#endif 963 964 if (is_virtual) { 965 assert(receiver.not_null(), "sanity check"); 966 bool static_bound = call_info.resolved_method()->can_be_statically_bound(); 967 KlassHandle h_klass(THREAD, receiver->klass()); 968 CompiledIC::compute_monomorphic_entry(callee_method, h_klass, 969 is_optimized, static_bound, virtual_call_info, 970 CHECK_(methodHandle())); 971 } else { 972 // static call 973 CompiledStaticCall::compute_entry(callee_method, static_call_info); 974 } 975 976 // grab lock, check for deoptimization and potentially patch caller 977 { 978 MutexLocker ml_patch(CompiledIC_lock); 979 980 // Now that we are ready to patch if the methodOop was redefined then 981 // don't update call site and let the caller retry. 982 983 if (!callee_method->is_old()) { 984#ifdef ASSERT 985 // We must not try to patch to jump to an already unloaded method. 986 if (dest_entry_point != 0) { 987 assert(CodeCache::find_blob(dest_entry_point) != NULL, 988 "should not unload nmethod while locked"); 989 } 990#endif 991 if (is_virtual) { 992 CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc()); 993 if (inline_cache->is_clean()) { 994 inline_cache->set_to_monomorphic(virtual_call_info); 995 } 996 } else { 997 CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc()); 998 if (ssc->is_clean()) ssc->set(static_call_info); 999 } 1000 } 1001 1002 } // unlock CompiledIC_lock 1003 1004 return callee_method; 1005} 1006 1007 1008// Inline caches exist only in compiled code 1009JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread)) 1010#ifdef ASSERT 1011 RegisterMap reg_map(thread, false); 1012 frame stub_frame = thread->last_frame(); 1013 assert(stub_frame.is_runtime_frame(), "sanity check"); 1014 frame caller_frame = stub_frame.sender(®_map); 1015 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame"); 1016#endif /* ASSERT */ 1017 1018 methodHandle callee_method; 1019 JRT_BLOCK 1020 callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL); 1021 // Return methodOop through TLS 1022 thread->set_vm_result(callee_method()); 1023 JRT_BLOCK_END 1024 // return compiled code entry point after potential safepoints 1025 assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); 1026 return callee_method->verified_code_entry(); 1027JRT_END 1028 1029 1030// Handle call site that has been made non-entrant 1031JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread)) 1032 // 6243940 We might end up in here if the callee is deoptimized 1033 // as we race to call it. We don't want to take a safepoint if 1034 // the caller was interpreted because the caller frame will look 1035 // interpreted to the stack walkers and arguments are now 1036 // "compiled" so it is much better to make this transition 1037 // invisible to the stack walking code. The i2c path will 1038 // place the callee method in the callee_target. It is stashed 1039 // there because if we try and find the callee by normal means a 1040 // safepoint is possible and have trouble gc'ing the compiled args. 1041 RegisterMap reg_map(thread, false); 1042 frame stub_frame = thread->last_frame(); 1043 assert(stub_frame.is_runtime_frame(), "sanity check"); 1044 frame caller_frame = stub_frame.sender(®_map); 1045 1046 // MethodHandle invokes don't have a CompiledIC and should always 1047 // simply redispatch to the callee_target. 1048 address sender_pc = caller_frame.pc(); 1049 CodeBlob* sender_cb = caller_frame.cb(); 1050 nmethod* sender_nm = sender_cb->as_nmethod_or_null(); 1051 bool is_mh_invoke_via_adapter = false; // Direct c2c call or via adapter? 1052 if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) { 1053 // If the callee_target is set, then we have come here via an i2c 1054 // adapter. 1055 methodOop callee = thread->callee_target(); 1056 if (callee != NULL) { 1057 assert(callee->is_method(), "sanity"); 1058 is_mh_invoke_via_adapter = true; 1059 } 1060 } 1061 1062 if (caller_frame.is_interpreted_frame() || 1063 caller_frame.is_entry_frame() || 1064 is_mh_invoke_via_adapter) { 1065 methodOop callee = thread->callee_target(); 1066 guarantee(callee != NULL && callee->is_method(), "bad handshake"); 1067 thread->set_vm_result(callee); 1068 thread->set_callee_target(NULL); 1069 return callee->get_c2i_entry(); 1070 } 1071 1072 // Must be compiled to compiled path which is safe to stackwalk 1073 methodHandle callee_method; 1074 JRT_BLOCK 1075 // Force resolving of caller (if we called from compiled frame) 1076 callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL); 1077 thread->set_vm_result(callee_method()); 1078 JRT_BLOCK_END 1079 // return compiled code entry point after potential safepoints 1080 assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); 1081 return callee_method->verified_code_entry(); 1082JRT_END 1083 1084 1085// resolve a static call and patch code 1086JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread )) 1087 methodHandle callee_method; 1088 JRT_BLOCK 1089 callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL); 1090 thread->set_vm_result(callee_method()); 1091 JRT_BLOCK_END 1092 // return compiled code entry point after potential safepoints 1093 assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); 1094 return callee_method->verified_code_entry(); 1095JRT_END 1096 1097 1098// resolve virtual call and update inline cache to monomorphic 1099JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread )) 1100 methodHandle callee_method; 1101 JRT_BLOCK 1102 callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL); 1103 thread->set_vm_result(callee_method()); 1104 JRT_BLOCK_END 1105 // return compiled code entry point after potential safepoints 1106 assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); 1107 return callee_method->verified_code_entry(); 1108JRT_END 1109 1110 1111// Resolve a virtual call that can be statically bound (e.g., always 1112// monomorphic, so it has no inline cache). Patch code to resolved target. 1113JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread)) 1114 methodHandle callee_method; 1115 JRT_BLOCK 1116 callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL); 1117 thread->set_vm_result(callee_method()); 1118 JRT_BLOCK_END 1119 // return compiled code entry point after potential safepoints 1120 assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); 1121 return callee_method->verified_code_entry(); 1122JRT_END 1123 1124 1125 1126 1127 1128methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) { 1129 ResourceMark rm(thread); 1130 CallInfo call_info; 1131 Bytecodes::Code bc; 1132 1133 // receiver is NULL for static calls. An exception is thrown for NULL 1134 // receivers for non-static calls 1135 Handle receiver = find_callee_info(thread, bc, call_info, 1136 CHECK_(methodHandle())); 1137 // Compiler1 can produce virtual call sites that can actually be statically bound 1138 // If we fell thru to below we would think that the site was going megamorphic 1139 // when in fact the site can never miss. Worse because we'd think it was megamorphic 1140 // we'd try and do a vtable dispatch however methods that can be statically bound 1141 // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a 1142 // reresolution of the call site (as if we did a handle_wrong_method and not an 1143 // plain ic_miss) and the site will be converted to an optimized virtual call site 1144 // never to miss again. I don't believe C2 will produce code like this but if it 1145 // did this would still be the correct thing to do for it too, hence no ifdef. 1146 // 1147 if (call_info.resolved_method()->can_be_statically_bound()) { 1148 methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle())); 1149 if (TraceCallFixup) { 1150 RegisterMap reg_map(thread, false); 1151 frame caller_frame = thread->last_frame().sender(®_map); 1152 ResourceMark rm(thread); 1153 tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc)); 1154 callee_method->print_short_name(tty); 1155 tty->print_cr(" from pc: " INTPTR_FORMAT, caller_frame.pc()); 1156 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code()); 1157 } 1158 return callee_method; 1159 } 1160 1161 methodHandle callee_method = call_info.selected_method(); 1162 1163 bool should_be_mono = false; 1164 1165#ifndef PRODUCT 1166 Atomic::inc(&_ic_miss_ctr); 1167 1168 // Statistics & Tracing 1169 if (TraceCallFixup) { 1170 ResourceMark rm(thread); 1171 tty->print("IC miss (%s) call to", Bytecodes::name(bc)); 1172 callee_method->print_short_name(tty); 1173 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code()); 1174 } 1175 1176 if (ICMissHistogram) { 1177 MutexLocker m(VMStatistic_lock); 1178 RegisterMap reg_map(thread, false); 1179 frame f = thread->last_frame().real_sender(®_map);// skip runtime stub 1180 // produce statistics under the lock 1181 trace_ic_miss(f.pc()); 1182 } 1183#endif 1184 1185 // install an event collector so that when a vtable stub is created the 1186 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The 1187 // event can't be posted when the stub is created as locks are held 1188 // - instead the event will be deferred until the event collector goes 1189 // out of scope. 1190 JvmtiDynamicCodeEventCollector event_collector; 1191 1192 // Update inline cache to megamorphic. Skip update if caller has been 1193 // made non-entrant or we are called from interpreted. 1194 { MutexLocker ml_patch (CompiledIC_lock); 1195 RegisterMap reg_map(thread, false); 1196 frame caller_frame = thread->last_frame().sender(®_map); 1197 CodeBlob* cb = caller_frame.cb(); 1198 if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) { 1199 // Not a non-entrant nmethod, so find inline_cache 1200 CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc()); 1201 bool should_be_mono = false; 1202 if (inline_cache->is_optimized()) { 1203 if (TraceCallFixup) { 1204 ResourceMark rm(thread); 1205 tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc)); 1206 callee_method->print_short_name(tty); 1207 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code()); 1208 } 1209 should_be_mono = true; 1210 } else { 1211 compiledICHolderOop ic_oop = (compiledICHolderOop) inline_cache->cached_oop(); 1212 if ( ic_oop != NULL && ic_oop->is_compiledICHolder()) { 1213 1214 if (receiver()->klass() == ic_oop->holder_klass()) { 1215 // This isn't a real miss. We must have seen that compiled code 1216 // is now available and we want the call site converted to a 1217 // monomorphic compiled call site. 1218 // We can't assert for callee_method->code() != NULL because it 1219 // could have been deoptimized in the meantime 1220 if (TraceCallFixup) { 1221 ResourceMark rm(thread); 1222 tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc)); 1223 callee_method->print_short_name(tty); 1224 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code()); 1225 } 1226 should_be_mono = true; 1227 } 1228 } 1229 } 1230 1231 if (should_be_mono) { 1232 1233 // We have a path that was monomorphic but was going interpreted 1234 // and now we have (or had) a compiled entry. We correct the IC 1235 // by using a new icBuffer. 1236 CompiledICInfo info; 1237 KlassHandle receiver_klass(THREAD, receiver()->klass()); 1238 inline_cache->compute_monomorphic_entry(callee_method, 1239 receiver_klass, 1240 inline_cache->is_optimized(), 1241 false, 1242 info, CHECK_(methodHandle())); 1243 inline_cache->set_to_monomorphic(info); 1244 } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) { 1245 // Change to megamorphic 1246 inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle())); 1247 } else { 1248 // Either clean or megamorphic 1249 } 1250 } 1251 } // Release CompiledIC_lock 1252 1253 return callee_method; 1254} 1255 1256// 1257// Resets a call-site in compiled code so it will get resolved again. 1258// This routines handles both virtual call sites, optimized virtual call 1259// sites, and static call sites. Typically used to change a call sites 1260// destination from compiled to interpreted. 1261// 1262methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) { 1263 ResourceMark rm(thread); 1264 RegisterMap reg_map(thread, false); 1265 frame stub_frame = thread->last_frame(); 1266 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub"); 1267 frame caller = stub_frame.sender(®_map); 1268 1269 // Do nothing if the frame isn't a live compiled frame. 1270 // nmethod could be deoptimized by the time we get here 1271 // so no update to the caller is needed. 1272 1273 if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) { 1274 1275 address pc = caller.pc(); 1276 Events::log("update call-site at pc " INTPTR_FORMAT, pc); 1277 1278 // Default call_addr is the location of the "basic" call. 1279 // Determine the address of the call we a reresolving. With 1280 // Inline Caches we will always find a recognizable call. 1281 // With Inline Caches disabled we may or may not find a 1282 // recognizable call. We will always find a call for static 1283 // calls and for optimized virtual calls. For vanilla virtual 1284 // calls it depends on the state of the UseInlineCaches switch. 1285 // 1286 // With Inline Caches disabled we can get here for a virtual call 1287 // for two reasons: 1288 // 1 - calling an abstract method. The vtable for abstract methods 1289 // will run us thru handle_wrong_method and we will eventually 1290 // end up in the interpreter to throw the ame. 1291 // 2 - a racing deoptimization. We could be doing a vanilla vtable 1292 // call and between the time we fetch the entry address and 1293 // we jump to it the target gets deoptimized. Similar to 1 1294 // we will wind up in the interprter (thru a c2i with c2). 1295 // 1296 address call_addr = NULL; 1297 { 1298 // Get call instruction under lock because another thread may be 1299 // busy patching it. 1300 MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag); 1301 // Location of call instruction 1302 if (NativeCall::is_call_before(pc)) { 1303 NativeCall *ncall = nativeCall_before(pc); 1304 call_addr = ncall->instruction_address(); 1305 } 1306 } 1307 1308 // Check for static or virtual call 1309 bool is_static_call = false; 1310 nmethod* caller_nm = CodeCache::find_nmethod(pc); 1311 // Make sure nmethod doesn't get deoptimized and removed until 1312 // this is done with it. 1313 // CLEANUP - with lazy deopt shouldn't need this lock 1314 nmethodLocker nmlock(caller_nm); 1315 1316 if (call_addr != NULL) { 1317 RelocIterator iter(caller_nm, call_addr, call_addr+1); 1318 int ret = iter.next(); // Get item 1319 if (ret) { 1320 assert(iter.addr() == call_addr, "must find call"); 1321 if (iter.type() == relocInfo::static_call_type) { 1322 is_static_call = true; 1323 } else { 1324 assert(iter.type() == relocInfo::virtual_call_type || 1325 iter.type() == relocInfo::opt_virtual_call_type 1326 , "unexpected relocInfo. type"); 1327 } 1328 } else { 1329 assert(!UseInlineCaches, "relocation info. must exist for this address"); 1330 } 1331 1332 // Cleaning the inline cache will force a new resolve. This is more robust 1333 // than directly setting it to the new destination, since resolving of calls 1334 // is always done through the same code path. (experience shows that it 1335 // leads to very hard to track down bugs, if an inline cache gets updated 1336 // to a wrong method). It should not be performance critical, since the 1337 // resolve is only done once. 1338 1339 MutexLocker ml(CompiledIC_lock); 1340 // 1341 // We do not patch the call site if the nmethod has been made non-entrant 1342 // as it is a waste of time 1343 // 1344 if (caller_nm->is_in_use()) { 1345 if (is_static_call) { 1346 CompiledStaticCall* ssc= compiledStaticCall_at(call_addr); 1347 ssc->set_to_clean(); 1348 } else { 1349 // compiled, dispatched call (which used to call an interpreted method) 1350 CompiledIC* inline_cache = CompiledIC_at(call_addr); 1351 inline_cache->set_to_clean(); 1352 } 1353 } 1354 } 1355 1356 } 1357 1358 methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle())); 1359 1360 1361#ifndef PRODUCT 1362 Atomic::inc(&_wrong_method_ctr); 1363 1364 if (TraceCallFixup) { 1365 ResourceMark rm(thread); 1366 tty->print("handle_wrong_method reresolving call to"); 1367 callee_method->print_short_name(tty); 1368 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code()); 1369 } 1370#endif 1371 1372 return callee_method; 1373} 1374 1375// --------------------------------------------------------------------------- 1376// We are calling the interpreter via a c2i. Normally this would mean that 1377// we were called by a compiled method. However we could have lost a race 1378// where we went int -> i2c -> c2i and so the caller could in fact be 1379// interpreted. If the caller is compiled we attempt to patch the caller 1380// so he no longer calls into the interpreter. 1381IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc)) 1382 methodOop moop(method); 1383 1384 address entry_point = moop->from_compiled_entry(); 1385 1386 // It's possible that deoptimization can occur at a call site which hasn't 1387 // been resolved yet, in which case this function will be called from 1388 // an nmethod that has been patched for deopt and we can ignore the 1389 // request for a fixup. 1390 // Also it is possible that we lost a race in that from_compiled_entry 1391 // is now back to the i2c in that case we don't need to patch and if 1392 // we did we'd leap into space because the callsite needs to use 1393 // "to interpreter" stub in order to load up the methodOop. Don't 1394 // ask me how I know this... 1395 1396 CodeBlob* cb = CodeCache::find_blob(caller_pc); 1397 if (!cb->is_nmethod() || entry_point == moop->get_c2i_entry()) { 1398 return; 1399 } 1400 1401 // The check above makes sure this is a nmethod. 1402 nmethod* nm = cb->as_nmethod_or_null(); 1403 assert(nm, "must be"); 1404 1405 // Don't fixup MethodHandle call sites as c2i/i2c adapters are used 1406 // to implement MethodHandle actions. 1407 if (nm->is_method_handle_return(caller_pc)) { 1408 return; 1409 } 1410 1411 // There is a benign race here. We could be attempting to patch to a compiled 1412 // entry point at the same time the callee is being deoptimized. If that is 1413 // the case then entry_point may in fact point to a c2i and we'd patch the 1414 // call site with the same old data. clear_code will set code() to NULL 1415 // at the end of it. If we happen to see that NULL then we can skip trying 1416 // to patch. If we hit the window where the callee has a c2i in the 1417 // from_compiled_entry and the NULL isn't present yet then we lose the race 1418 // and patch the code with the same old data. Asi es la vida. 1419 1420 if (moop->code() == NULL) return; 1421 1422 if (nm->is_in_use()) { 1423 1424 // Expect to find a native call there (unless it was no-inline cache vtable dispatch) 1425 MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag); 1426 if (NativeCall::is_call_before(caller_pc + frame::pc_return_offset)) { 1427 NativeCall *call = nativeCall_before(caller_pc + frame::pc_return_offset); 1428 // 1429 // bug 6281185. We might get here after resolving a call site to a vanilla 1430 // virtual call. Because the resolvee uses the verified entry it may then 1431 // see compiled code and attempt to patch the site by calling us. This would 1432 // then incorrectly convert the call site to optimized and its downhill from 1433 // there. If you're lucky you'll get the assert in the bugid, if not you've 1434 // just made a call site that could be megamorphic into a monomorphic site 1435 // for the rest of its life! Just another racing bug in the life of 1436 // fixup_callers_callsite ... 1437 // 1438 RelocIterator iter(cb, call->instruction_address(), call->next_instruction_address()); 1439 iter.next(); 1440 assert(iter.has_current(), "must have a reloc at java call site"); 1441 relocInfo::relocType typ = iter.reloc()->type(); 1442 if ( typ != relocInfo::static_call_type && 1443 typ != relocInfo::opt_virtual_call_type && 1444 typ != relocInfo::static_stub_type) { 1445 return; 1446 } 1447 address destination = call->destination(); 1448 if (destination != entry_point) { 1449 CodeBlob* callee = CodeCache::find_blob(destination); 1450 // callee == cb seems weird. It means calling interpreter thru stub. 1451 if (callee == cb || callee->is_adapter_blob()) { 1452 // static call or optimized virtual 1453 if (TraceCallFixup) { 1454 tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc); 1455 moop->print_short_name(tty); 1456 tty->print_cr(" to " INTPTR_FORMAT, entry_point); 1457 } 1458 call->set_destination_mt_safe(entry_point); 1459 } else { 1460 if (TraceCallFixup) { 1461 tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc); 1462 moop->print_short_name(tty); 1463 tty->print_cr(" to " INTPTR_FORMAT, entry_point); 1464 } 1465 // assert is too strong could also be resolve destinations. 1466 // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be"); 1467 } 1468 } else { 1469 if (TraceCallFixup) { 1470 tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", caller_pc); 1471 moop->print_short_name(tty); 1472 tty->print_cr(" to " INTPTR_FORMAT, entry_point); 1473 } 1474 } 1475 } 1476 } 1477 1478IRT_END 1479 1480 1481// same as JVM_Arraycopy, but called directly from compiled code 1482JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos, 1483 oopDesc* dest, jint dest_pos, 1484 jint length, 1485 JavaThread* thread)) { 1486#ifndef PRODUCT 1487 _slow_array_copy_ctr++; 1488#endif 1489 // Check if we have null pointers 1490 if (src == NULL || dest == NULL) { 1491 THROW(vmSymbols::java_lang_NullPointerException()); 1492 } 1493 // Do the copy. The casts to arrayOop are necessary to the copy_array API, 1494 // even though the copy_array API also performs dynamic checks to ensure 1495 // that src and dest are truly arrays (and are conformable). 1496 // The copy_array mechanism is awkward and could be removed, but 1497 // the compilers don't call this function except as a last resort, 1498 // so it probably doesn't matter. 1499 Klass::cast(src->klass())->copy_array((arrayOopDesc*)src, src_pos, 1500 (arrayOopDesc*)dest, dest_pos, 1501 length, thread); 1502} 1503JRT_END 1504 1505char* SharedRuntime::generate_class_cast_message( 1506 JavaThread* thread, const char* objName) { 1507 1508 // Get target class name from the checkcast instruction 1509 vframeStream vfst(thread, true); 1510 assert(!vfst.at_end(), "Java frame must exist"); 1511 Bytecode_checkcast* cc = Bytecode_checkcast_at( 1512 vfst.method()->bcp_from(vfst.bci())); 1513 Klass* targetKlass = Klass::cast(vfst.method()->constants()->klass_at( 1514 cc->index(), thread)); 1515 return generate_class_cast_message(objName, targetKlass->external_name()); 1516} 1517 1518char* SharedRuntime::generate_wrong_method_type_message(JavaThread* thread, 1519 oopDesc* required, 1520 oopDesc* actual) { 1521 assert(EnableMethodHandles, ""); 1522 oop singleKlass = wrong_method_type_is_for_single_argument(thread, required); 1523 if (singleKlass != NULL) { 1524 const char* objName = "argument or return value"; 1525 if (actual != NULL) { 1526 // be flexible about the junk passed in: 1527 klassOop ak = (actual->is_klass() 1528 ? (klassOop)actual 1529 : actual->klass()); 1530 objName = Klass::cast(ak)->external_name(); 1531 } 1532 Klass* targetKlass = Klass::cast(required->is_klass() 1533 ? (klassOop)required 1534 : java_lang_Class::as_klassOop(required)); 1535 return generate_class_cast_message(objName, targetKlass->external_name()); 1536 } else { 1537 // %%% need to get the MethodType string, without messing around too much 1538 // Get a signature from the invoke instruction 1539 const char* mhName = "method handle"; 1540 const char* targetType = "the required signature"; 1541 vframeStream vfst(thread, true); 1542 if (!vfst.at_end()) { 1543 Bytecode_invoke* call = Bytecode_invoke_at(vfst.method(), vfst.bci()); 1544 methodHandle target; 1545 { 1546 EXCEPTION_MARK; 1547 target = call->static_target(THREAD); 1548 if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; } 1549 } 1550 if (target.not_null() 1551 && target->is_method_handle_invoke() 1552 && required == target->method_handle_type()) { 1553 targetType = target->signature()->as_C_string(); 1554 } 1555 } 1556 klassOop kignore; int fignore; 1557 methodOop actual_method = MethodHandles::decode_method(actual, 1558 kignore, fignore); 1559 if (actual_method != NULL) { 1560 if (actual_method->name() == vmSymbols::invoke_name()) 1561 mhName = "$"; 1562 else 1563 mhName = actual_method->signature()->as_C_string(); 1564 if (mhName[0] == '$') 1565 mhName = actual_method->signature()->as_C_string(); 1566 } 1567 return generate_class_cast_message(mhName, targetType, 1568 " cannot be called as "); 1569 } 1570} 1571 1572oop SharedRuntime::wrong_method_type_is_for_single_argument(JavaThread* thr, 1573 oopDesc* required) { 1574 if (required == NULL) return NULL; 1575 if (required->klass() == SystemDictionary::Class_klass()) 1576 return required; 1577 if (required->is_klass()) 1578 return Klass::cast(klassOop(required))->java_mirror(); 1579 return NULL; 1580} 1581 1582 1583char* SharedRuntime::generate_class_cast_message( 1584 const char* objName, const char* targetKlassName, const char* desc) { 1585 size_t msglen = strlen(objName) + strlen(desc) + strlen(targetKlassName) + 1; 1586 1587 char* message = NEW_RESOURCE_ARRAY(char, msglen); 1588 if (NULL == message) { 1589 // Shouldn't happen, but don't cause even more problems if it does 1590 message = const_cast<char*>(objName); 1591 } else { 1592 jio_snprintf(message, msglen, "%s%s%s", objName, desc, targetKlassName); 1593 } 1594 return message; 1595} 1596 1597JRT_LEAF(void, SharedRuntime::reguard_yellow_pages()) 1598 (void) JavaThread::current()->reguard_stack(); 1599JRT_END 1600 1601 1602// Handles the uncommon case in locking, i.e., contention or an inflated lock. 1603#ifndef PRODUCT 1604int SharedRuntime::_monitor_enter_ctr=0; 1605#endif 1606JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread)) 1607 oop obj(_obj); 1608#ifndef PRODUCT 1609 _monitor_enter_ctr++; // monitor enter slow 1610#endif 1611 if (PrintBiasedLockingStatistics) { 1612 Atomic::inc(BiasedLocking::slow_path_entry_count_addr()); 1613 } 1614 Handle h_obj(THREAD, obj); 1615 if (UseBiasedLocking) { 1616 // Retry fast entry if bias is revoked to avoid unnecessary inflation 1617 ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK); 1618 } else { 1619 ObjectSynchronizer::slow_enter(h_obj, lock, CHECK); 1620 } 1621 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here"); 1622JRT_END 1623 1624#ifndef PRODUCT 1625int SharedRuntime::_monitor_exit_ctr=0; 1626#endif 1627// Handles the uncommon cases of monitor unlocking in compiled code 1628JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock)) 1629 oop obj(_obj); 1630#ifndef PRODUCT 1631 _monitor_exit_ctr++; // monitor exit slow 1632#endif 1633 Thread* THREAD = JavaThread::current(); 1634 // I'm not convinced we need the code contained by MIGHT_HAVE_PENDING anymore 1635 // testing was unable to ever fire the assert that guarded it so I have removed it. 1636 assert(!HAS_PENDING_EXCEPTION, "Do we need code below anymore?"); 1637#undef MIGHT_HAVE_PENDING 1638#ifdef MIGHT_HAVE_PENDING 1639 // Save and restore any pending_exception around the exception mark. 1640 // While the slow_exit must not throw an exception, we could come into 1641 // this routine with one set. 1642 oop pending_excep = NULL; 1643 const char* pending_file; 1644 int pending_line; 1645 if (HAS_PENDING_EXCEPTION) { 1646 pending_excep = PENDING_EXCEPTION; 1647 pending_file = THREAD->exception_file(); 1648 pending_line = THREAD->exception_line(); 1649 CLEAR_PENDING_EXCEPTION; 1650 } 1651#endif /* MIGHT_HAVE_PENDING */ 1652 1653 { 1654 // Exit must be non-blocking, and therefore no exceptions can be thrown. 1655 EXCEPTION_MARK; 1656 ObjectSynchronizer::slow_exit(obj, lock, THREAD); 1657 } 1658 1659#ifdef MIGHT_HAVE_PENDING 1660 if (pending_excep != NULL) { 1661 THREAD->set_pending_exception(pending_excep, pending_file, pending_line); 1662 } 1663#endif /* MIGHT_HAVE_PENDING */ 1664JRT_END 1665 1666#ifndef PRODUCT 1667 1668void SharedRuntime::print_statistics() { 1669 ttyLocker ttyl; 1670 if (xtty != NULL) xtty->head("statistics type='SharedRuntime'"); 1671 1672 if (_monitor_enter_ctr ) tty->print_cr("%5d monitor enter slow", _monitor_enter_ctr); 1673 if (_monitor_exit_ctr ) tty->print_cr("%5d monitor exit slow", _monitor_exit_ctr); 1674 if (_throw_null_ctr) tty->print_cr("%5d implicit null throw", _throw_null_ctr); 1675 1676 SharedRuntime::print_ic_miss_histogram(); 1677 1678 if (CountRemovableExceptions) { 1679 if (_nof_removable_exceptions > 0) { 1680 Unimplemented(); // this counter is not yet incremented 1681 tty->print_cr("Removable exceptions: %d", _nof_removable_exceptions); 1682 } 1683 } 1684 1685 // Dump the JRT_ENTRY counters 1686 if( _new_instance_ctr ) tty->print_cr("%5d new instance requires GC", _new_instance_ctr); 1687 if( _new_array_ctr ) tty->print_cr("%5d new array requires GC", _new_array_ctr); 1688 if( _multi1_ctr ) tty->print_cr("%5d multianewarray 1 dim", _multi1_ctr); 1689 if( _multi2_ctr ) tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr); 1690 if( _multi3_ctr ) tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr); 1691 if( _multi4_ctr ) tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr); 1692 if( _multi5_ctr ) tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr); 1693 1694 tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr ); 1695 tty->print_cr("%5d wrong method", _wrong_method_ctr ); 1696 tty->print_cr("%5d unresolved static call site", _resolve_static_ctr ); 1697 tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr ); 1698 tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr ); 1699 1700 if( _mon_enter_stub_ctr ) tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr ); 1701 if( _mon_exit_stub_ctr ) tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr ); 1702 if( _mon_enter_ctr ) tty->print_cr("%5d monitor enter slow", _mon_enter_ctr ); 1703 if( _mon_exit_ctr ) tty->print_cr("%5d monitor exit slow", _mon_exit_ctr ); 1704 if( _partial_subtype_ctr) tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr ); 1705 if( _jbyte_array_copy_ctr ) tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr ); 1706 if( _jshort_array_copy_ctr ) tty->print_cr("%5d short array copies", _jshort_array_copy_ctr ); 1707 if( _jint_array_copy_ctr ) tty->print_cr("%5d int array copies", _jint_array_copy_ctr ); 1708 if( _jlong_array_copy_ctr ) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr ); 1709 if( _oop_array_copy_ctr ) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr ); 1710 if( _checkcast_array_copy_ctr ) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr ); 1711 if( _unsafe_array_copy_ctr ) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr ); 1712 if( _generic_array_copy_ctr ) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr ); 1713 if( _slow_array_copy_ctr ) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr ); 1714 if( _find_handler_ctr ) tty->print_cr("%5d find exception handler", _find_handler_ctr ); 1715 if( _rethrow_ctr ) tty->print_cr("%5d rethrow handler", _rethrow_ctr ); 1716 1717 AdapterHandlerLibrary::print_statistics(); 1718 1719 if (xtty != NULL) xtty->tail("statistics"); 1720} 1721 1722inline double percent(int x, int y) { 1723 return 100.0 * x / MAX2(y, 1); 1724} 1725 1726class MethodArityHistogram { 1727 public: 1728 enum { MAX_ARITY = 256 }; 1729 private: 1730 static int _arity_histogram[MAX_ARITY]; // histogram of #args 1731 static int _size_histogram[MAX_ARITY]; // histogram of arg size in words 1732 static int _max_arity; // max. arity seen 1733 static int _max_size; // max. arg size seen 1734 1735 static void add_method_to_histogram(nmethod* nm) { 1736 methodOop m = nm->method(); 1737 ArgumentCount args(m->signature()); 1738 int arity = args.size() + (m->is_static() ? 0 : 1); 1739 int argsize = m->size_of_parameters(); 1740 arity = MIN2(arity, MAX_ARITY-1); 1741 argsize = MIN2(argsize, MAX_ARITY-1); 1742 int count = nm->method()->compiled_invocation_count(); 1743 _arity_histogram[arity] += count; 1744 _size_histogram[argsize] += count; 1745 _max_arity = MAX2(_max_arity, arity); 1746 _max_size = MAX2(_max_size, argsize); 1747 } 1748 1749 void print_histogram_helper(int n, int* histo, const char* name) { 1750 const int N = MIN2(5, n); 1751 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):"); 1752 double sum = 0; 1753 double weighted_sum = 0; 1754 int i; 1755 for (i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; } 1756 double rest = sum; 1757 double percent = sum / 100; 1758 for (i = 0; i <= N; i++) { 1759 rest -= histo[i]; 1760 tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent); 1761 } 1762 tty->print_cr("rest: %7d (%5.1f%%))", (int)rest, rest / percent); 1763 tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n); 1764 } 1765 1766 void print_histogram() { 1767 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):"); 1768 print_histogram_helper(_max_arity, _arity_histogram, "arity"); 1769 tty->print_cr("\nSame for parameter size (in words):"); 1770 print_histogram_helper(_max_size, _size_histogram, "size"); 1771 tty->cr(); 1772 } 1773 1774 public: 1775 MethodArityHistogram() { 1776 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1777 _max_arity = _max_size = 0; 1778 for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram [i] = 0; 1779 CodeCache::nmethods_do(add_method_to_histogram); 1780 print_histogram(); 1781 } 1782}; 1783 1784int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY]; 1785int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY]; 1786int MethodArityHistogram::_max_arity; 1787int MethodArityHistogram::_max_size; 1788 1789void SharedRuntime::print_call_statistics(int comp_total) { 1790 tty->print_cr("Calls from compiled code:"); 1791 int total = _nof_normal_calls + _nof_interface_calls + _nof_static_calls; 1792 int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls; 1793 int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls; 1794 tty->print_cr("\t%9d (%4.1f%%) total non-inlined ", total, percent(total, total)); 1795 tty->print_cr("\t%9d (%4.1f%%) virtual calls ", _nof_normal_calls, percent(_nof_normal_calls, total)); 1796 tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls)); 1797 tty->print_cr("\t %9d (%3.0f%%) optimized ", _nof_optimized_calls, percent(_nof_optimized_calls, _nof_normal_calls)); 1798 tty->print_cr("\t %9d (%3.0f%%) monomorphic ", mono_c, percent(mono_c, _nof_normal_calls)); 1799 tty->print_cr("\t %9d (%3.0f%%) megamorphic ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls)); 1800 tty->print_cr("\t%9d (%4.1f%%) interface calls ", _nof_interface_calls, percent(_nof_interface_calls, total)); 1801 tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls)); 1802 tty->print_cr("\t %9d (%3.0f%%) optimized ", _nof_optimized_interface_calls, percent(_nof_optimized_interface_calls, _nof_interface_calls)); 1803 tty->print_cr("\t %9d (%3.0f%%) monomorphic ", mono_i, percent(mono_i, _nof_interface_calls)); 1804 tty->print_cr("\t %9d (%3.0f%%) megamorphic ", _nof_megamorphic_interface_calls, percent(_nof_megamorphic_interface_calls, _nof_interface_calls)); 1805 tty->print_cr("\t%9d (%4.1f%%) static/special calls", _nof_static_calls, percent(_nof_static_calls, total)); 1806 tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls)); 1807 tty->cr(); 1808 tty->print_cr("Note 1: counter updates are not MT-safe."); 1809 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;"); 1810 tty->print_cr(" %% in nested categories are relative to their category"); 1811 tty->print_cr(" (and thus add up to more than 100%% with inlining)"); 1812 tty->cr(); 1813 1814 MethodArityHistogram h; 1815} 1816#endif 1817 1818 1819// A simple wrapper class around the calling convention information 1820// that allows sharing of adapters for the same calling convention. 1821class AdapterFingerPrint : public CHeapObj { 1822 private: 1823 union { 1824 int _compact[3]; 1825 int* _fingerprint; 1826 } _value; 1827 int _length; // A negative length indicates the fingerprint is in the compact form, 1828 // Otherwise _value._fingerprint is the array. 1829 1830 // Remap BasicTypes that are handled equivalently by the adapters. 1831 // These are correct for the current system but someday it might be 1832 // necessary to make this mapping platform dependent. 1833 static BasicType adapter_encoding(BasicType in) { 1834 assert((~0xf & in) == 0, "must fit in 4 bits"); 1835 switch(in) { 1836 case T_BOOLEAN: 1837 case T_BYTE: 1838 case T_SHORT: 1839 case T_CHAR: 1840 // There are all promoted to T_INT in the calling convention 1841 return T_INT; 1842 1843 case T_OBJECT: 1844 case T_ARRAY: 1845#ifdef _LP64 1846 return T_LONG; 1847#else 1848 return T_INT; 1849#endif 1850 1851 case T_INT: 1852 case T_LONG: 1853 case T_FLOAT: 1854 case T_DOUBLE: 1855 case T_VOID: 1856 return in; 1857 1858 default: 1859 ShouldNotReachHere(); 1860 return T_CONFLICT; 1861 } 1862 } 1863 1864 public: 1865 AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) { 1866 // The fingerprint is based on the BasicType signature encoded 1867 // into an array of ints with four entries per int. 1868 int* ptr; 1869 int len = (total_args_passed + 3) >> 2; 1870 if (len <= (int)(sizeof(_value._compact) / sizeof(int))) { 1871 _value._compact[0] = _value._compact[1] = _value._compact[2] = 0; 1872 // Storing the signature encoded as signed chars hits about 98% 1873 // of the time. 1874 _length = -len; 1875 ptr = _value._compact; 1876 } else { 1877 _length = len; 1878 _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length); 1879 ptr = _value._fingerprint; 1880 } 1881 1882 // Now pack the BasicTypes with 4 per int 1883 int sig_index = 0; 1884 for (int index = 0; index < len; index++) { 1885 int value = 0; 1886 for (int byte = 0; byte < 4; byte++) { 1887 if (sig_index < total_args_passed) { 1888 value = (value << 4) | adapter_encoding(sig_bt[sig_index++]); 1889 } 1890 } 1891 ptr[index] = value; 1892 } 1893 } 1894 1895 ~AdapterFingerPrint() { 1896 if (_length > 0) { 1897 FREE_C_HEAP_ARRAY(int, _value._fingerprint); 1898 } 1899 } 1900 1901 int value(int index) { 1902 if (_length < 0) { 1903 return _value._compact[index]; 1904 } 1905 return _value._fingerprint[index]; 1906 } 1907 int length() { 1908 if (_length < 0) return -_length; 1909 return _length; 1910 } 1911 1912 bool is_compact() { 1913 return _length <= 0; 1914 } 1915 1916 unsigned int compute_hash() { 1917 int hash = 0; 1918 for (int i = 0; i < length(); i++) { 1919 int v = value(i); 1920 hash = (hash << 8) ^ v ^ (hash >> 5); 1921 } 1922 return (unsigned int)hash; 1923 } 1924 1925 const char* as_string() { 1926 stringStream st; 1927 for (int i = 0; i < length(); i++) { 1928 st.print(PTR_FORMAT, value(i)); 1929 } 1930 return st.as_string(); 1931 } 1932 1933 bool equals(AdapterFingerPrint* other) { 1934 if (other->_length != _length) { 1935 return false; 1936 } 1937 if (_length < 0) { 1938 return _value._compact[0] == other->_value._compact[0] && 1939 _value._compact[1] == other->_value._compact[1] && 1940 _value._compact[2] == other->_value._compact[2]; 1941 } else { 1942 for (int i = 0; i < _length; i++) { 1943 if (_value._fingerprint[i] != other->_value._fingerprint[i]) { 1944 return false; 1945 } 1946 } 1947 } 1948 return true; 1949 } 1950}; 1951 1952 1953// A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries 1954class AdapterHandlerTable : public BasicHashtable { 1955 friend class AdapterHandlerTableIterator; 1956 1957 private: 1958 1959#ifndef PRODUCT 1960 static int _lookups; // number of calls to lookup 1961 static int _buckets; // number of buckets checked 1962 static int _equals; // number of buckets checked with matching hash 1963 static int _hits; // number of successful lookups 1964 static int _compact; // number of equals calls with compact signature 1965#endif 1966 1967 AdapterHandlerEntry* bucket(int i) { 1968 return (AdapterHandlerEntry*)BasicHashtable::bucket(i); 1969 } 1970 1971 public: 1972 AdapterHandlerTable() 1973 : BasicHashtable(293, sizeof(AdapterHandlerEntry)) { } 1974 1975 // Create a new entry suitable for insertion in the table 1976 AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) { 1977 AdapterHandlerEntry* entry = (AdapterHandlerEntry*)BasicHashtable::new_entry(fingerprint->compute_hash()); 1978 entry->init(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry); 1979 return entry; 1980 } 1981 1982 // Insert an entry into the table 1983 void add(AdapterHandlerEntry* entry) { 1984 int index = hash_to_index(entry->hash()); 1985 add_entry(index, entry); 1986 } 1987 1988 void free_entry(AdapterHandlerEntry* entry) { 1989 entry->deallocate(); 1990 BasicHashtable::free_entry(entry); 1991 } 1992 1993 // Find a entry with the same fingerprint if it exists 1994 AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) { 1995 NOT_PRODUCT(_lookups++); 1996 AdapterFingerPrint fp(total_args_passed, sig_bt); 1997 unsigned int hash = fp.compute_hash(); 1998 int index = hash_to_index(hash); 1999 for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) { 2000 NOT_PRODUCT(_buckets++); 2001 if (e->hash() == hash) { 2002 NOT_PRODUCT(_equals++); 2003 if (fp.equals(e->fingerprint())) { 2004#ifndef PRODUCT 2005 if (fp.is_compact()) _compact++; 2006 _hits++; 2007#endif 2008 return e; 2009 } 2010 } 2011 } 2012 return NULL; 2013 } 2014 2015#ifndef PRODUCT 2016 void print_statistics() { 2017 ResourceMark rm; 2018 int longest = 0; 2019 int empty = 0; 2020 int total = 0; 2021 int nonempty = 0; 2022 for (int index = 0; index < table_size(); index++) { 2023 int count = 0; 2024 for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) { 2025 count++; 2026 } 2027 if (count != 0) nonempty++; 2028 if (count == 0) empty++; 2029 if (count > longest) longest = count; 2030 total += count; 2031 } 2032 tty->print_cr("AdapterHandlerTable: empty %d longest %d total %d average %f", 2033 empty, longest, total, total / (double)nonempty); 2034 tty->print_cr("AdapterHandlerTable: lookups %d buckets %d equals %d hits %d compact %d", 2035 _lookups, _buckets, _equals, _hits, _compact); 2036 } 2037#endif 2038}; 2039 2040 2041#ifndef PRODUCT 2042 2043int AdapterHandlerTable::_lookups; 2044int AdapterHandlerTable::_buckets; 2045int AdapterHandlerTable::_equals; 2046int AdapterHandlerTable::_hits; 2047int AdapterHandlerTable::_compact; 2048 2049class AdapterHandlerTableIterator : public StackObj { 2050 private: 2051 AdapterHandlerTable* _table; 2052 int _index; 2053 AdapterHandlerEntry* _current; 2054 2055 void scan() { 2056 while (_index < _table->table_size()) { 2057 AdapterHandlerEntry* a = _table->bucket(_index); 2058 if (a != NULL) { 2059 _current = a; 2060 return; 2061 } 2062 _index++; 2063 } 2064 } 2065 2066 public: 2067 AdapterHandlerTableIterator(AdapterHandlerTable* table): _table(table), _index(0), _current(NULL) { 2068 scan(); 2069 } 2070 bool has_next() { 2071 return _current != NULL; 2072 } 2073 AdapterHandlerEntry* next() { 2074 if (_current != NULL) { 2075 AdapterHandlerEntry* result = _current; 2076 _current = _current->next(); 2077 if (_current == NULL) scan(); 2078 return result; 2079 } else { 2080 return NULL; 2081 } 2082 } 2083}; 2084#endif 2085 2086 2087// --------------------------------------------------------------------------- 2088// Implementation of AdapterHandlerLibrary 2089AdapterHandlerTable* AdapterHandlerLibrary::_adapters = NULL; 2090AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL; 2091const int AdapterHandlerLibrary_size = 16*K; 2092BufferBlob* AdapterHandlerLibrary::_buffer = NULL; 2093 2094BufferBlob* AdapterHandlerLibrary::buffer_blob() { 2095 // Should be called only when AdapterHandlerLibrary_lock is active. 2096 if (_buffer == NULL) // Initialize lazily 2097 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size); 2098 return _buffer; 2099} 2100 2101void AdapterHandlerLibrary::initialize() { 2102 if (_adapters != NULL) return; 2103 _adapters = new AdapterHandlerTable(); 2104 2105 // Create a special handler for abstract methods. Abstract methods 2106 // are never compiled so an i2c entry is somewhat meaningless, but 2107 // fill it in with something appropriate just in case. Pass handle 2108 // wrong method for the c2i transitions. 2109 address wrong_method = SharedRuntime::get_handle_wrong_method_stub(); 2110 _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL), 2111 StubRoutines::throw_AbstractMethodError_entry(), 2112 wrong_method, wrong_method); 2113} 2114 2115AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint, 2116 address i2c_entry, 2117 address c2i_entry, 2118 address c2i_unverified_entry) { 2119 return _adapters->new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry); 2120} 2121 2122AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) { 2123 // Use customized signature handler. Need to lock around updates to 2124 // the AdapterHandlerTable (it is not safe for concurrent readers 2125 // and a single writer: this could be fixed if it becomes a 2126 // problem). 2127 2128 // Get the address of the ic_miss handlers before we grab the 2129 // AdapterHandlerLibrary_lock. This fixes bug 6236259 which 2130 // was caused by the initialization of the stubs happening 2131 // while we held the lock and then notifying jvmti while 2132 // holding it. This just forces the initialization to be a little 2133 // earlier. 2134 address ic_miss = SharedRuntime::get_ic_miss_stub(); 2135 assert(ic_miss != NULL, "must have handler"); 2136 2137 ResourceMark rm; 2138 2139 NOT_PRODUCT(int code_size); 2140 AdapterBlob* B = NULL; 2141 AdapterHandlerEntry* entry = NULL; 2142 AdapterFingerPrint* fingerprint = NULL; 2143 { 2144 MutexLocker mu(AdapterHandlerLibrary_lock); 2145 // make sure data structure is initialized 2146 initialize(); 2147 2148 if (method->is_abstract()) { 2149 return _abstract_method_handler; 2150 } 2151 2152 // Fill in the signature array, for the calling-convention call. 2153 int total_args_passed = method->size_of_parameters(); // All args on stack 2154 2155 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed); 2156 VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed); 2157 int i = 0; 2158 if (!method->is_static()) // Pass in receiver first 2159 sig_bt[i++] = T_OBJECT; 2160 for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) { 2161 sig_bt[i++] = ss.type(); // Collect remaining bits of signature 2162 if (ss.type() == T_LONG || ss.type() == T_DOUBLE) 2163 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots 2164 } 2165 assert(i == total_args_passed, ""); 2166 2167 // Lookup method signature's fingerprint 2168 entry = _adapters->lookup(total_args_passed, sig_bt); 2169 2170#ifdef ASSERT 2171 AdapterHandlerEntry* shared_entry = NULL; 2172 if (VerifyAdapterSharing && entry != NULL) { 2173 shared_entry = entry; 2174 entry = NULL; 2175 } 2176#endif 2177 2178 if (entry != NULL) { 2179 return entry; 2180 } 2181 2182 // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage 2183 int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false); 2184 2185 // Make a C heap allocated version of the fingerprint to store in the adapter 2186 fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt); 2187 2188 // Create I2C & C2I handlers 2189 2190 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache 2191 if (buf != NULL) { 2192 CodeBuffer buffer(buf->instructions_begin(), buf->instructions_size()); 2193 short buffer_locs[20]; 2194 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs, 2195 sizeof(buffer_locs)/sizeof(relocInfo)); 2196 MacroAssembler _masm(&buffer); 2197 2198 entry = SharedRuntime::generate_i2c2i_adapters(&_masm, 2199 total_args_passed, 2200 comp_args_on_stack, 2201 sig_bt, 2202 regs, 2203 fingerprint); 2204 2205#ifdef ASSERT 2206 if (VerifyAdapterSharing) { 2207 if (shared_entry != NULL) { 2208 assert(shared_entry->compare_code(buf->instructions_begin(), buffer.code_size(), total_args_passed, sig_bt), 2209 "code must match"); 2210 // Release the one just created and return the original 2211 _adapters->free_entry(entry); 2212 return shared_entry; 2213 } else { 2214 entry->save_code(buf->instructions_begin(), buffer.code_size(), total_args_passed, sig_bt); 2215 } 2216 } 2217#endif 2218 2219 B = AdapterBlob::create(&buffer); 2220 NOT_PRODUCT(code_size = buffer.code_size()); 2221 } 2222 if (B == NULL) { 2223 // CodeCache is full, disable compilation 2224 // Ought to log this but compile log is only per compile thread 2225 // and we're some non descript Java thread. 2226 MutexUnlocker mu(AdapterHandlerLibrary_lock); 2227 CompileBroker::handle_full_code_cache(); 2228 return NULL; // Out of CodeCache space 2229 } 2230 entry->relocate(B->instructions_begin()); 2231#ifndef PRODUCT 2232 // debugging suppport 2233 if (PrintAdapterHandlers) { 2234 tty->cr(); 2235 tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = %s, %d bytes generated)", 2236 _adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"), 2237 method->signature()->as_C_string(), fingerprint->as_string(), code_size ); 2238 tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry()); 2239 Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + code_size); 2240 } 2241#endif 2242 2243 _adapters->add(entry); 2244 } 2245 // Outside of the lock 2246 if (B != NULL) { 2247 char blob_id[256]; 2248 jio_snprintf(blob_id, 2249 sizeof(blob_id), 2250 "%s(%s)@" PTR_FORMAT, 2251 B->name(), 2252 fingerprint->as_string(), 2253 B->instructions_begin()); 2254 VTune::register_stub(blob_id, B->instructions_begin(), B->instructions_end()); 2255 Forte::register_stub(blob_id, B->instructions_begin(), B->instructions_end()); 2256 2257 if (JvmtiExport::should_post_dynamic_code_generated()) { 2258 JvmtiExport::post_dynamic_code_generated(blob_id, 2259 B->instructions_begin(), 2260 B->instructions_end()); 2261 } 2262 } 2263 return entry; 2264} 2265 2266void AdapterHandlerEntry::relocate(address new_base) { 2267 ptrdiff_t delta = new_base - _i2c_entry; 2268 _i2c_entry += delta; 2269 _c2i_entry += delta; 2270 _c2i_unverified_entry += delta; 2271} 2272 2273 2274void AdapterHandlerEntry::deallocate() { 2275 delete _fingerprint; 2276#ifdef ASSERT 2277 if (_saved_code) FREE_C_HEAP_ARRAY(unsigned char, _saved_code); 2278 if (_saved_sig) FREE_C_HEAP_ARRAY(Basictype, _saved_sig); 2279#endif 2280} 2281 2282 2283#ifdef ASSERT 2284// Capture the code before relocation so that it can be compared 2285// against other versions. If the code is captured after relocation 2286// then relative instructions won't be equivalent. 2287void AdapterHandlerEntry::save_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) { 2288 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length); 2289 _code_length = length; 2290 memcpy(_saved_code, buffer, length); 2291 _total_args_passed = total_args_passed; 2292 _saved_sig = NEW_C_HEAP_ARRAY(BasicType, _total_args_passed); 2293 memcpy(_saved_sig, sig_bt, _total_args_passed * sizeof(BasicType)); 2294} 2295 2296 2297bool AdapterHandlerEntry::compare_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) { 2298 if (length != _code_length) { 2299 return false; 2300 } 2301 for (int i = 0; i < length; i++) { 2302 if (buffer[i] != _saved_code[i]) { 2303 return false; 2304 } 2305 } 2306 return true; 2307} 2308#endif 2309 2310 2311// Create a native wrapper for this native method. The wrapper converts the 2312// java compiled calling convention to the native convention, handlizes 2313// arguments, and transitions to native. On return from the native we transition 2314// back to java blocking if a safepoint is in progress. 2315nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method) { 2316 ResourceMark rm; 2317 nmethod* nm = NULL; 2318 2319 if (PrintCompilation) { 2320 ttyLocker ttyl; 2321 tty->print("--- n%s ", (method->is_synchronized() ? "s" : " ")); 2322 method->print_short_name(tty); 2323 if (method->is_static()) { 2324 tty->print(" (static)"); 2325 } 2326 tty->cr(); 2327 } 2328 2329 assert(method->has_native_function(), "must have something valid to call!"); 2330 2331 { 2332 // perform the work while holding the lock, but perform any printing outside the lock 2333 MutexLocker mu(AdapterHandlerLibrary_lock); 2334 // See if somebody beat us to it 2335 nm = method->code(); 2336 if (nm) { 2337 return nm; 2338 } 2339 2340 ResourceMark rm; 2341 2342 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache 2343 if (buf != NULL) { 2344 CodeBuffer buffer(buf->instructions_begin(), buf->instructions_size()); 2345 double locs_buf[20]; 2346 buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo)); 2347 MacroAssembler _masm(&buffer); 2348 2349 // Fill in the signature array, for the calling-convention call. 2350 int total_args_passed = method->size_of_parameters(); 2351 2352 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed); 2353 VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair,total_args_passed); 2354 int i=0; 2355 if( !method->is_static() ) // Pass in receiver first 2356 sig_bt[i++] = T_OBJECT; 2357 SignatureStream ss(method->signature()); 2358 for( ; !ss.at_return_type(); ss.next()) { 2359 sig_bt[i++] = ss.type(); // Collect remaining bits of signature 2360 if( ss.type() == T_LONG || ss.type() == T_DOUBLE ) 2361 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots 2362 } 2363 assert( i==total_args_passed, "" ); 2364 BasicType ret_type = ss.type(); 2365 2366 // Now get the compiled-Java layout as input arguments 2367 int comp_args_on_stack; 2368 comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false); 2369 2370 // Generate the compiled-to-native wrapper code 2371 nm = SharedRuntime::generate_native_wrapper(&_masm, 2372 method, 2373 total_args_passed, 2374 comp_args_on_stack, 2375 sig_bt,regs, 2376 ret_type); 2377 } 2378 } 2379 2380 // Must unlock before calling set_code 2381 // Install the generated code. 2382 if (nm != NULL) { 2383 method->set_code(method, nm); 2384 nm->post_compiled_method_load_event(); 2385 } else { 2386 // CodeCache is full, disable compilation 2387 // Ought to log this but compile log is only per compile thread 2388 // and we're some non descript Java thread. 2389 MutexUnlocker mu(AdapterHandlerLibrary_lock); 2390 CompileBroker::handle_full_code_cache(); 2391 } 2392 return nm; 2393} 2394 2395#ifdef HAVE_DTRACE_H 2396// Create a dtrace nmethod for this method. The wrapper converts the 2397// java compiled calling convention to the native convention, makes a dummy call 2398// (actually nops for the size of the call instruction, which become a trap if 2399// probe is enabled). The returns to the caller. Since this all looks like a 2400// leaf no thread transition is needed. 2401 2402nmethod *AdapterHandlerLibrary::create_dtrace_nmethod(methodHandle method) { 2403 ResourceMark rm; 2404 nmethod* nm = NULL; 2405 2406 if (PrintCompilation) { 2407 ttyLocker ttyl; 2408 tty->print("--- n%s "); 2409 method->print_short_name(tty); 2410 if (method->is_static()) { 2411 tty->print(" (static)"); 2412 } 2413 tty->cr(); 2414 } 2415 2416 { 2417 // perform the work while holding the lock, but perform any printing 2418 // outside the lock 2419 MutexLocker mu(AdapterHandlerLibrary_lock); 2420 // See if somebody beat us to it 2421 nm = method->code(); 2422 if (nm) { 2423 return nm; 2424 } 2425 2426 ResourceMark rm; 2427 2428 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache 2429 if (buf != NULL) { 2430 CodeBuffer buffer(buf->instructions_begin(), buf->instructions_size()); 2431 // Need a few relocation entries 2432 double locs_buf[20]; 2433 buffer.insts()->initialize_shared_locs( 2434 (relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo)); 2435 MacroAssembler _masm(&buffer); 2436 2437 // Generate the compiled-to-native wrapper code 2438 nm = SharedRuntime::generate_dtrace_nmethod(&_masm, method); 2439 } 2440 } 2441 return nm; 2442} 2443 2444// the dtrace method needs to convert java lang string to utf8 string. 2445void SharedRuntime::get_utf(oopDesc* src, address dst) { 2446 typeArrayOop jlsValue = java_lang_String::value(src); 2447 int jlsOffset = java_lang_String::offset(src); 2448 int jlsLen = java_lang_String::length(src); 2449 jchar* jlsPos = (jlsLen == 0) ? NULL : 2450 jlsValue->char_at_addr(jlsOffset); 2451 (void) UNICODE::as_utf8(jlsPos, jlsLen, (char *)dst, max_dtrace_string_size); 2452} 2453#endif // ndef HAVE_DTRACE_H 2454 2455// ------------------------------------------------------------------------- 2456// Java-Java calling convention 2457// (what you use when Java calls Java) 2458 2459//------------------------------name_for_receiver---------------------------------- 2460// For a given signature, return the VMReg for parameter 0. 2461VMReg SharedRuntime::name_for_receiver() { 2462 VMRegPair regs; 2463 BasicType sig_bt = T_OBJECT; 2464 (void) java_calling_convention(&sig_bt, ®s, 1, true); 2465 // Return argument 0 register. In the LP64 build pointers 2466 // take 2 registers, but the VM wants only the 'main' name. 2467 return regs.first(); 2468} 2469 2470VMRegPair *SharedRuntime::find_callee_arguments(symbolOop sig, bool has_receiver, int* arg_size) { 2471 // This method is returning a data structure allocating as a 2472 // ResourceObject, so do not put any ResourceMarks in here. 2473 char *s = sig->as_C_string(); 2474 int len = (int)strlen(s); 2475 *s++; len--; // Skip opening paren 2476 char *t = s+len; 2477 while( *(--t) != ')' ) ; // Find close paren 2478 2479 BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, 256 ); 2480 VMRegPair *regs = NEW_RESOURCE_ARRAY( VMRegPair, 256 ); 2481 int cnt = 0; 2482 if (has_receiver) { 2483 sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature 2484 } 2485 2486 while( s < t ) { 2487 switch( *s++ ) { // Switch on signature character 2488 case 'B': sig_bt[cnt++] = T_BYTE; break; 2489 case 'C': sig_bt[cnt++] = T_CHAR; break; 2490 case 'D': sig_bt[cnt++] = T_DOUBLE; sig_bt[cnt++] = T_VOID; break; 2491 case 'F': sig_bt[cnt++] = T_FLOAT; break; 2492 case 'I': sig_bt[cnt++] = T_INT; break; 2493 case 'J': sig_bt[cnt++] = T_LONG; sig_bt[cnt++] = T_VOID; break; 2494 case 'S': sig_bt[cnt++] = T_SHORT; break; 2495 case 'Z': sig_bt[cnt++] = T_BOOLEAN; break; 2496 case 'V': sig_bt[cnt++] = T_VOID; break; 2497 case 'L': // Oop 2498 while( *s++ != ';' ) ; // Skip signature 2499 sig_bt[cnt++] = T_OBJECT; 2500 break; 2501 case '[': { // Array 2502 do { // Skip optional size 2503 while( *s >= '0' && *s <= '9' ) s++; 2504 } while( *s++ == '[' ); // Nested arrays? 2505 // Skip element type 2506 if( s[-1] == 'L' ) 2507 while( *s++ != ';' ) ; // Skip signature 2508 sig_bt[cnt++] = T_ARRAY; 2509 break; 2510 } 2511 default : ShouldNotReachHere(); 2512 } 2513 } 2514 assert( cnt < 256, "grow table size" ); 2515 2516 int comp_args_on_stack; 2517 comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true); 2518 2519 // the calling convention doesn't count out_preserve_stack_slots so 2520 // we must add that in to get "true" stack offsets. 2521 2522 if (comp_args_on_stack) { 2523 for (int i = 0; i < cnt; i++) { 2524 VMReg reg1 = regs[i].first(); 2525 if( reg1->is_stack()) { 2526 // Yuck 2527 reg1 = reg1->bias(out_preserve_stack_slots()); 2528 } 2529 VMReg reg2 = regs[i].second(); 2530 if( reg2->is_stack()) { 2531 // Yuck 2532 reg2 = reg2->bias(out_preserve_stack_slots()); 2533 } 2534 regs[i].set_pair(reg2, reg1); 2535 } 2536 } 2537 2538 // results 2539 *arg_size = cnt; 2540 return regs; 2541} 2542 2543// OSR Migration Code 2544// 2545// This code is used convert interpreter frames into compiled frames. It is 2546// called from very start of a compiled OSR nmethod. A temp array is 2547// allocated to hold the interesting bits of the interpreter frame. All 2548// active locks are inflated to allow them to move. The displaced headers and 2549// active interpeter locals are copied into the temp buffer. Then we return 2550// back to the compiled code. The compiled code then pops the current 2551// interpreter frame off the stack and pushes a new compiled frame. Then it 2552// copies the interpreter locals and displaced headers where it wants. 2553// Finally it calls back to free the temp buffer. 2554// 2555// All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed. 2556 2557JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) ) 2558 2559#ifdef IA64 2560 ShouldNotReachHere(); // NYI 2561#endif /* IA64 */ 2562 2563 // 2564 // This code is dependent on the memory layout of the interpreter local 2565 // array and the monitors. On all of our platforms the layout is identical 2566 // so this code is shared. If some platform lays the their arrays out 2567 // differently then this code could move to platform specific code or 2568 // the code here could be modified to copy items one at a time using 2569 // frame accessor methods and be platform independent. 2570 2571 frame fr = thread->last_frame(); 2572 assert( fr.is_interpreted_frame(), "" ); 2573 assert( fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks" ); 2574 2575 // Figure out how many monitors are active. 2576 int active_monitor_count = 0; 2577 for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end(); 2578 kptr < fr.interpreter_frame_monitor_begin(); 2579 kptr = fr.next_monitor_in_interpreter_frame(kptr) ) { 2580 if( kptr->obj() != NULL ) active_monitor_count++; 2581 } 2582 2583 // QQQ we could place number of active monitors in the array so that compiled code 2584 // could double check it. 2585 2586 methodOop moop = fr.interpreter_frame_method(); 2587 int max_locals = moop->max_locals(); 2588 // Allocate temp buffer, 1 word per local & 2 per active monitor 2589 int buf_size_words = max_locals + active_monitor_count*2; 2590 intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words); 2591 2592 // Copy the locals. Order is preserved so that loading of longs works. 2593 // Since there's no GC I can copy the oops blindly. 2594 assert( sizeof(HeapWord)==sizeof(intptr_t), "fix this code"); 2595 Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1), 2596 (HeapWord*)&buf[0], 2597 max_locals); 2598 2599 // Inflate locks. Copy the displaced headers. Be careful, there can be holes. 2600 int i = max_locals; 2601 for( BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end(); 2602 kptr2 < fr.interpreter_frame_monitor_begin(); 2603 kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) { 2604 if( kptr2->obj() != NULL) { // Avoid 'holes' in the monitor array 2605 BasicLock *lock = kptr2->lock(); 2606 // Inflate so the displaced header becomes position-independent 2607 if (lock->displaced_header()->is_unlocked()) 2608 ObjectSynchronizer::inflate_helper(kptr2->obj()); 2609 // Now the displaced header is free to move 2610 buf[i++] = (intptr_t)lock->displaced_header(); 2611 buf[i++] = (intptr_t)kptr2->obj(); 2612 } 2613 } 2614 assert( i - max_locals == active_monitor_count*2, "found the expected number of monitors" ); 2615 2616 return buf; 2617JRT_END 2618 2619JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) ) 2620 FREE_C_HEAP_ARRAY(intptr_t,buf); 2621JRT_END 2622 2623#ifndef PRODUCT 2624bool AdapterHandlerLibrary::contains(CodeBlob* b) { 2625 AdapterHandlerTableIterator iter(_adapters); 2626 while (iter.has_next()) { 2627 AdapterHandlerEntry* a = iter.next(); 2628 if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) return true; 2629 } 2630 return false; 2631} 2632 2633void AdapterHandlerLibrary::print_handler(CodeBlob* b) { 2634 AdapterHandlerTableIterator iter(_adapters); 2635 while (iter.has_next()) { 2636 AdapterHandlerEntry* a = iter.next(); 2637 if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) { 2638 tty->print("Adapter for signature: "); 2639 tty->print_cr("%s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT, 2640 a->fingerprint()->as_string(), 2641 a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry()); 2642 return; 2643 } 2644 } 2645 assert(false, "Should have found handler"); 2646} 2647 2648void AdapterHandlerLibrary::print_statistics() { 2649 _adapters->print_statistics(); 2650} 2651 2652#endif /* PRODUCT */ 2653