1/* 2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25#include "precompiled.hpp" 26#include "aot/aotLoader.hpp" 27#include "code/codeBlob.hpp" 28#include "code/codeCache.hpp" 29#include "code/compiledIC.hpp" 30#include "code/dependencies.hpp" 31#include "code/icBuffer.hpp" 32#include "code/nmethod.hpp" 33#include "code/pcDesc.hpp" 34#include "compiler/compileBroker.hpp" 35#include "gc/shared/gcLocker.hpp" 36#include "memory/allocation.inline.hpp" 37#include "memory/iterator.hpp" 38#include "memory/resourceArea.hpp" 39#include "oops/method.hpp" 40#include "oops/objArrayOop.hpp" 41#include "oops/oop.inline.hpp" 42#include "oops/verifyOopClosure.hpp" 43#include "runtime/arguments.hpp" 44#include "runtime/compilationPolicy.hpp" 45#include "runtime/deoptimization.hpp" 46#include "runtime/handles.inline.hpp" 47#include "runtime/icache.hpp" 48#include "runtime/java.hpp" 49#include "runtime/mutexLocker.hpp" 50#include "runtime/sweeper.hpp" 51#include "services/memoryService.hpp" 52#include "trace/tracing.hpp" 53#include "utilities/xmlstream.hpp" 54#ifdef COMPILER1 55#include "c1/c1_Compilation.hpp" 56#include "c1/c1_Compiler.hpp" 57#endif 58#ifdef COMPILER2 59#include "opto/c2compiler.hpp" 60#include "opto/compile.hpp" 61#include "opto/node.hpp" 62#endif 63 64// Helper class for printing in CodeCache 65class CodeBlob_sizes { 66 private: 67 int count; 68 int total_size; 69 int header_size; 70 int code_size; 71 int stub_size; 72 int relocation_size; 73 int scopes_oop_size; 74 int scopes_metadata_size; 75 int scopes_data_size; 76 int scopes_pcs_size; 77 78 public: 79 CodeBlob_sizes() { 80 count = 0; 81 total_size = 0; 82 header_size = 0; 83 code_size = 0; 84 stub_size = 0; 85 relocation_size = 0; 86 scopes_oop_size = 0; 87 scopes_metadata_size = 0; 88 scopes_data_size = 0; 89 scopes_pcs_size = 0; 90 } 91 92 int total() { return total_size; } 93 bool is_empty() { return count == 0; } 94 95 void print(const char* title) { 96 tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])", 97 count, 98 title, 99 (int)(total() / K), 100 header_size * 100 / total_size, 101 relocation_size * 100 / total_size, 102 code_size * 100 / total_size, 103 stub_size * 100 / total_size, 104 scopes_oop_size * 100 / total_size, 105 scopes_metadata_size * 100 / total_size, 106 scopes_data_size * 100 / total_size, 107 scopes_pcs_size * 100 / total_size); 108 } 109 110 void add(CodeBlob* cb) { 111 count++; 112 total_size += cb->size(); 113 header_size += cb->header_size(); 114 relocation_size += cb->relocation_size(); 115 if (cb->is_nmethod()) { 116 nmethod* nm = cb->as_nmethod_or_null(); 117 code_size += nm->insts_size(); 118 stub_size += nm->stub_size(); 119 120 scopes_oop_size += nm->oops_size(); 121 scopes_metadata_size += nm->metadata_size(); 122 scopes_data_size += nm->scopes_data_size(); 123 scopes_pcs_size += nm->scopes_pcs_size(); 124 } else { 125 code_size += cb->code_size(); 126 } 127 } 128}; 129 130// Iterate over all CodeHeaps 131#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap) 132#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap) 133#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap) 134 135// Iterate over all CodeBlobs (cb) on the given CodeHeap 136#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb)) 137 138address CodeCache::_low_bound = 0; 139address CodeCache::_high_bound = 0; 140int CodeCache::_number_of_nmethods_with_dependencies = 0; 141bool CodeCache::_needs_cache_clean = false; 142nmethod* CodeCache::_scavenge_root_nmethods = NULL; 143 144// Initialize arrays of CodeHeap subsets 145GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true); 146GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true); 147GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true); 148GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true); 149 150void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) { 151 size_t total_size = non_nmethod_size + profiled_size + non_profiled_size; 152 // Prepare error message 153 const char* error = "Invalid code heap sizes"; 154 err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)" 155 " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K", 156 non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K); 157 158 if (total_size > cache_size) { 159 // Some code heap sizes were explicitly set: total_size must be <= cache_size 160 message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K); 161 vm_exit_during_initialization(error, message); 162 } else if (all_set && total_size != cache_size) { 163 // All code heap sizes were explicitly set: total_size must equal cache_size 164 message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K); 165 vm_exit_during_initialization(error, message); 166 } 167} 168 169void CodeCache::initialize_heaps() { 170 bool non_nmethod_set = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize); 171 bool profiled_set = FLAG_IS_CMDLINE(ProfiledCodeHeapSize); 172 bool non_profiled_set = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize); 173 size_t min_size = os::vm_page_size(); 174 size_t cache_size = ReservedCodeCacheSize; 175 size_t non_nmethod_size = NonNMethodCodeHeapSize; 176 size_t profiled_size = ProfiledCodeHeapSize; 177 size_t non_profiled_size = NonProfiledCodeHeapSize; 178 // Check if total size set via command line flags exceeds the reserved size 179 check_heap_sizes((non_nmethod_set ? non_nmethod_size : min_size), 180 (profiled_set ? profiled_size : min_size), 181 (non_profiled_set ? non_profiled_size : min_size), 182 cache_size, 183 non_nmethod_set && profiled_set && non_profiled_set); 184 185 // Determine size of compiler buffers 186 size_t code_buffers_size = 0; 187#ifdef COMPILER1 188 // C1 temporary code buffers (see Compiler::init_buffer_blob()) 189 const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple); 190 code_buffers_size += c1_count * Compiler::code_buffer_size(); 191#endif 192#ifdef COMPILER2 193 // C2 scratch buffers (see Compile::init_scratch_buffer_blob()) 194 const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization); 195 // Initial size of constant table (this may be increased if a compiled method needs more space) 196 code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size(); 197#endif 198 199 // Increase default non_nmethod_size to account for compiler buffers 200 if (!non_nmethod_set) { 201 non_nmethod_size += code_buffers_size; 202 } 203 // Calculate default CodeHeap sizes if not set by user 204 if (!non_nmethod_set && !profiled_set && !non_profiled_set) { 205 // Check if we have enough space for the non-nmethod code heap 206 if (cache_size > non_nmethod_size) { 207 // Use the default value for non_nmethod_size and one half of the 208 // remaining size for non-profiled and one half for profiled methods 209 size_t remaining_size = cache_size - non_nmethod_size; 210 profiled_size = remaining_size / 2; 211 non_profiled_size = remaining_size - profiled_size; 212 } else { 213 // Use all space for the non-nmethod heap and set other heaps to minimal size 214 non_nmethod_size = cache_size - 2 * min_size; 215 profiled_size = min_size; 216 non_profiled_size = min_size; 217 } 218 } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) { 219 // The user explicitly set some code heap sizes. Increase or decrease the (default) 220 // sizes of the other code heaps accordingly. First adapt non-profiled and profiled 221 // code heap sizes and then only change non-nmethod code heap size if still necessary. 222 intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size); 223 if (non_profiled_set) { 224 if (!profiled_set) { 225 // Adapt size of profiled code heap 226 if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) { 227 // Not enough space available, set to minimum size 228 diff_size += profiled_size - min_size; 229 profiled_size = min_size; 230 } else { 231 profiled_size += diff_size; 232 diff_size = 0; 233 } 234 } 235 } else if (profiled_set) { 236 // Adapt size of non-profiled code heap 237 if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) { 238 // Not enough space available, set to minimum size 239 diff_size += non_profiled_size - min_size; 240 non_profiled_size = min_size; 241 } else { 242 non_profiled_size += diff_size; 243 diff_size = 0; 244 } 245 } else if (non_nmethod_set) { 246 // Distribute remaining size between profiled and non-profiled code heaps 247 diff_size = cache_size - non_nmethod_size; 248 profiled_size = diff_size / 2; 249 non_profiled_size = diff_size - profiled_size; 250 diff_size = 0; 251 } 252 if (diff_size != 0) { 253 // Use non-nmethod code heap for remaining space requirements 254 assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity"); 255 non_nmethod_size += diff_size; 256 } 257 } 258 259 // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap 260 if(!heap_available(CodeBlobType::MethodProfiled)) { 261 non_profiled_size += profiled_size; 262 profiled_size = 0; 263 } 264 // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap 265 if(!heap_available(CodeBlobType::MethodNonProfiled)) { 266 non_nmethod_size += non_profiled_size; 267 non_profiled_size = 0; 268 } 269 // Make sure we have enough space for VM internal code 270 uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); 271 if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) { 272 vm_exit_during_initialization(err_msg( 273 "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K", 274 non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K)); 275 } 276 277 // Verify sizes and update flag values 278 assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes"); 279 FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size); 280 FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size); 281 FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size); 282 283 // Align CodeHeaps 284 size_t alignment = heap_alignment(); 285 non_nmethod_size = align_size_up(non_nmethod_size, alignment); 286 profiled_size = align_size_down(profiled_size, alignment); 287 288 // Reserve one continuous chunk of memory for CodeHeaps and split it into 289 // parts for the individual heaps. The memory layout looks like this: 290 // ---------- high ----------- 291 // Non-profiled nmethods 292 // Profiled nmethods 293 // Non-nmethods 294 // ---------- low ------------ 295 ReservedCodeSpace rs = reserve_heap_memory(cache_size); 296 ReservedSpace non_method_space = rs.first_part(non_nmethod_size); 297 ReservedSpace rest = rs.last_part(non_nmethod_size); 298 ReservedSpace profiled_space = rest.first_part(profiled_size); 299 ReservedSpace non_profiled_space = rest.last_part(profiled_size); 300 301 // Non-nmethods (stubs, adapters, ...) 302 add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod); 303 // Tier 2 and tier 3 (profiled) methods 304 add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled); 305 // Tier 1 and tier 4 (non-profiled) methods and native methods 306 add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled); 307} 308 309size_t CodeCache::heap_alignment() { 310 // If large page support is enabled, align code heaps according to large 311 // page size to make sure that code cache is covered by large pages. 312 const size_t page_size = os::can_execute_large_page_memory() ? 313 os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8) : 314 os::vm_page_size(); 315 return MAX2(page_size, (size_t) os::vm_allocation_granularity()); 316} 317 318ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) { 319 // Determine alignment 320 const size_t page_size = os::can_execute_large_page_memory() ? 321 MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8), 322 os::page_size_for_region_aligned(size, 8)) : 323 os::vm_page_size(); 324 const size_t granularity = os::vm_allocation_granularity(); 325 const size_t r_align = MAX2(page_size, granularity); 326 const size_t r_size = align_size_up(size, r_align); 327 const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 : 328 MAX2(page_size, granularity); 329 330 ReservedCodeSpace rs(r_size, rs_align, rs_align > 0); 331 332 if (!rs.is_reserved()) { 333 vm_exit_during_initialization("Could not reserve enough space for code cache"); 334 } 335 336 // Initialize bounds 337 _low_bound = (address)rs.base(); 338 _high_bound = _low_bound + rs.size(); 339 340 return rs; 341} 342 343// Heaps available for allocation 344bool CodeCache::heap_available(int code_blob_type) { 345 if (!SegmentedCodeCache) { 346 // No segmentation: use a single code heap 347 return (code_blob_type == CodeBlobType::All); 348 } else if (Arguments::is_interpreter_only()) { 349 // Interpreter only: we don't need any method code heaps 350 return (code_blob_type == CodeBlobType::NonNMethod); 351 } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) { 352 // Tiered compilation: use all code heaps 353 return (code_blob_type < CodeBlobType::All); 354 } else { 355 // No TieredCompilation: we only need the non-nmethod and non-profiled code heap 356 return (code_blob_type == CodeBlobType::NonNMethod) || 357 (code_blob_type == CodeBlobType::MethodNonProfiled); 358 } 359} 360 361const char* CodeCache::get_code_heap_flag_name(int code_blob_type) { 362 switch(code_blob_type) { 363 case CodeBlobType::NonNMethod: 364 return "NonNMethodCodeHeapSize"; 365 break; 366 case CodeBlobType::MethodNonProfiled: 367 return "NonProfiledCodeHeapSize"; 368 break; 369 case CodeBlobType::MethodProfiled: 370 return "ProfiledCodeHeapSize"; 371 break; 372 } 373 ShouldNotReachHere(); 374 return NULL; 375} 376 377int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) { 378 if (lhs->code_blob_type() == rhs->code_blob_type()) { 379 return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0); 380 } else { 381 return lhs->code_blob_type() - rhs->code_blob_type(); 382 } 383} 384 385void CodeCache::add_heap(CodeHeap* heap) { 386 assert(!Universe::is_fully_initialized(), "late heap addition?"); 387 388 _heaps->insert_sorted<code_heap_compare>(heap); 389 390 int type = heap->code_blob_type(); 391 if (code_blob_type_accepts_compiled(type)) { 392 _compiled_heaps->insert_sorted<code_heap_compare>(heap); 393 } 394 if (code_blob_type_accepts_nmethod(type)) { 395 _nmethod_heaps->insert_sorted<code_heap_compare>(heap); 396 } 397 if (code_blob_type_accepts_allocable(type)) { 398 _allocable_heaps->insert_sorted<code_heap_compare>(heap); 399 } 400} 401 402void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) { 403 // Check if heap is needed 404 if (!heap_available(code_blob_type)) { 405 return; 406 } 407 408 // Create CodeHeap 409 CodeHeap* heap = new CodeHeap(name, code_blob_type); 410 add_heap(heap); 411 412 // Reserve Space 413 size_t size_initial = MIN2(InitialCodeCacheSize, rs.size()); 414 size_initial = round_to(size_initial, os::vm_page_size()); 415 if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) { 416 vm_exit_during_initialization("Could not reserve enough space for code cache"); 417 } 418 419 // Register the CodeHeap 420 MemoryService::add_code_heap_memory_pool(heap, name); 421} 422 423CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) { 424 assert(cb != NULL, "CodeBlob is null"); 425 FOR_ALL_HEAPS(heap) { 426 if ((*heap)->contains_blob(cb)) { 427 return *heap; 428 } 429 } 430 ShouldNotReachHere(); 431 return NULL; 432} 433 434CodeHeap* CodeCache::get_code_heap(int code_blob_type) { 435 FOR_ALL_HEAPS(heap) { 436 if ((*heap)->accepts(code_blob_type)) { 437 return *heap; 438 } 439 } 440 return NULL; 441} 442 443CodeBlob* CodeCache::first_blob(CodeHeap* heap) { 444 assert_locked_or_safepoint(CodeCache_lock); 445 assert(heap != NULL, "heap is null"); 446 return (CodeBlob*)heap->first(); 447} 448 449CodeBlob* CodeCache::first_blob(int code_blob_type) { 450 if (heap_available(code_blob_type)) { 451 return first_blob(get_code_heap(code_blob_type)); 452 } else { 453 return NULL; 454 } 455} 456 457CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) { 458 assert_locked_or_safepoint(CodeCache_lock); 459 assert(heap != NULL, "heap is null"); 460 return (CodeBlob*)heap->next(cb); 461} 462 463/** 464 * Do not seize the CodeCache lock here--if the caller has not 465 * already done so, we are going to lose bigtime, since the code 466 * cache will contain a garbage CodeBlob until the caller can 467 * run the constructor for the CodeBlob subclass he is busy 468 * instantiating. 469 */ 470CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) { 471 // Possibly wakes up the sweeper thread. 472 NMethodSweeper::notify(code_blob_type); 473 assert_locked_or_safepoint(CodeCache_lock); 474 assert(size > 0, "Code cache allocation request must be > 0 but is %d", size); 475 if (size <= 0) { 476 return NULL; 477 } 478 CodeBlob* cb = NULL; 479 480 // Get CodeHeap for the given CodeBlobType 481 CodeHeap* heap = get_code_heap(code_blob_type); 482 assert(heap != NULL, "heap is null"); 483 484 while (true) { 485 cb = (CodeBlob*)heap->allocate(size); 486 if (cb != NULL) break; 487 if (!heap->expand_by(CodeCacheExpansionSize)) { 488 // Save original type for error reporting 489 if (orig_code_blob_type == CodeBlobType::All) { 490 orig_code_blob_type = code_blob_type; 491 } 492 // Expansion failed 493 if (SegmentedCodeCache) { 494 // Fallback solution: Try to store code in another code heap. 495 // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled) 496 // Note that in the sweeper, we check the reverse_free_ratio of the code heap 497 // and force stack scanning if less than 10% of the code heap are free. 498 int type = code_blob_type; 499 switch (type) { 500 case CodeBlobType::NonNMethod: 501 type = CodeBlobType::MethodNonProfiled; 502 break; 503 case CodeBlobType::MethodNonProfiled: 504 type = CodeBlobType::MethodProfiled; 505 break; 506 case CodeBlobType::MethodProfiled: 507 // Avoid loop if we already tried that code heap 508 if (type == orig_code_blob_type) { 509 type = CodeBlobType::MethodNonProfiled; 510 } 511 break; 512 } 513 if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) { 514 if (PrintCodeCacheExtension) { 515 tty->print_cr("Extension of %s failed. Trying to allocate in %s.", 516 heap->name(), get_code_heap(type)->name()); 517 } 518 return allocate(size, type, orig_code_blob_type); 519 } 520 } 521 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 522 CompileBroker::handle_full_code_cache(orig_code_blob_type); 523 return NULL; 524 } 525 if (PrintCodeCacheExtension) { 526 ResourceMark rm; 527 if (_nmethod_heaps->length() >= 1) { 528 tty->print("%s", heap->name()); 529 } else { 530 tty->print("CodeCache"); 531 } 532 tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)", 533 (intptr_t)heap->low_boundary(), (intptr_t)heap->high(), 534 (address)heap->high() - (address)heap->low_boundary()); 535 } 536 } 537 print_trace("allocation", cb, size); 538 return cb; 539} 540 541void CodeCache::free(CodeBlob* cb) { 542 assert_locked_or_safepoint(CodeCache_lock); 543 CodeHeap* heap = get_code_heap(cb); 544 print_trace("free", cb); 545 if (cb->is_nmethod()) { 546 heap->set_nmethod_count(heap->nmethod_count() - 1); 547 if (((nmethod *)cb)->has_dependencies()) { 548 _number_of_nmethods_with_dependencies--; 549 } 550 } 551 if (cb->is_adapter_blob()) { 552 heap->set_adapter_count(heap->adapter_count() - 1); 553 } 554 555 // Get heap for given CodeBlob and deallocate 556 get_code_heap(cb)->deallocate(cb); 557 558 assert(heap->blob_count() >= 0, "sanity check"); 559} 560 561void CodeCache::commit(CodeBlob* cb) { 562 // this is called by nmethod::nmethod, which must already own CodeCache_lock 563 assert_locked_or_safepoint(CodeCache_lock); 564 CodeHeap* heap = get_code_heap(cb); 565 if (cb->is_nmethod()) { 566 heap->set_nmethod_count(heap->nmethod_count() + 1); 567 if (((nmethod *)cb)->has_dependencies()) { 568 _number_of_nmethods_with_dependencies++; 569 } 570 } 571 if (cb->is_adapter_blob()) { 572 heap->set_adapter_count(heap->adapter_count() + 1); 573 } 574 575 // flush the hardware I-cache 576 ICache::invalidate_range(cb->content_begin(), cb->content_size()); 577} 578 579bool CodeCache::contains(void *p) { 580 // S390 uses contains() in current_frame(), which is used before 581 // code cache initialization if NativeMemoryTracking=detail is set. 582 S390_ONLY(if (_heaps == NULL) return false;) 583 // It should be ok to call contains without holding a lock. 584 FOR_ALL_HEAPS(heap) { 585 if ((*heap)->contains(p)) { 586 return true; 587 } 588 } 589 return false; 590} 591 592bool CodeCache::contains(nmethod *nm) { 593 return contains((void *)nm); 594} 595 596// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not 597// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain 598// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled. 599CodeBlob* CodeCache::find_blob(void* start) { 600 CodeBlob* result = find_blob_unsafe(start); 601 // We could potentially look up non_entrant methods 602 guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method"); 603 return result; 604} 605 606// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know 607// what you are doing) 608CodeBlob* CodeCache::find_blob_unsafe(void* start) { 609 // NMT can walk the stack before code cache is created 610 if (_heaps != NULL && !_heaps->is_empty()) { 611 FOR_ALL_HEAPS(heap) { 612 CodeBlob* result = (*heap)->find_blob_unsafe(start); 613 if (result != NULL) { 614 return result; 615 } 616 } 617 } 618 return NULL; 619} 620 621nmethod* CodeCache::find_nmethod(void* start) { 622 CodeBlob* cb = find_blob(start); 623 assert(cb->is_nmethod(), "did not find an nmethod"); 624 return (nmethod*)cb; 625} 626 627void CodeCache::blobs_do(void f(CodeBlob* nm)) { 628 assert_locked_or_safepoint(CodeCache_lock); 629 FOR_ALL_HEAPS(heap) { 630 FOR_ALL_BLOBS(cb, *heap) { 631 f(cb); 632 } 633 } 634} 635 636void CodeCache::nmethods_do(void f(nmethod* nm)) { 637 assert_locked_or_safepoint(CodeCache_lock); 638 NMethodIterator iter; 639 while(iter.next()) { 640 f(iter.method()); 641 } 642} 643 644void CodeCache::metadata_do(void f(Metadata* m)) { 645 assert_locked_or_safepoint(CodeCache_lock); 646 NMethodIterator iter; 647 while(iter.next_alive()) { 648 iter.method()->metadata_do(f); 649 } 650 AOTLoader::metadata_do(f); 651} 652 653int CodeCache::alignment_unit() { 654 return (int)_heaps->first()->alignment_unit(); 655} 656 657int CodeCache::alignment_offset() { 658 return (int)_heaps->first()->alignment_offset(); 659} 660 661// Mark nmethods for unloading if they contain otherwise unreachable oops. 662void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) { 663 assert_locked_or_safepoint(CodeCache_lock); 664 CompiledMethodIterator iter; 665 while(iter.next_alive()) { 666 iter.method()->do_unloading(is_alive, unloading_occurred); 667 } 668} 669 670void CodeCache::blobs_do(CodeBlobClosure* f) { 671 assert_locked_or_safepoint(CodeCache_lock); 672 FOR_ALL_ALLOCABLE_HEAPS(heap) { 673 FOR_ALL_BLOBS(cb, *heap) { 674 if (cb->is_alive()) { 675 f->do_code_blob(cb); 676#ifdef ASSERT 677 if (cb->is_nmethod()) 678 ((nmethod*)cb)->verify_scavenge_root_oops(); 679#endif //ASSERT 680 } 681 } 682 } 683} 684 685// Walk the list of methods which might contain non-perm oops. 686void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) { 687 assert_locked_or_safepoint(CodeCache_lock); 688 689 if (UseG1GC) { 690 return; 691 } 692 693 const bool fix_relocations = f->fix_relocations(); 694 debug_only(mark_scavenge_root_nmethods()); 695 696 nmethod* prev = NULL; 697 nmethod* cur = scavenge_root_nmethods(); 698 while (cur != NULL) { 699 debug_only(cur->clear_scavenge_root_marked()); 700 assert(cur->scavenge_root_not_marked(), ""); 701 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list"); 702 703 bool is_live = (!cur->is_zombie() && !cur->is_unloaded()); 704 if (TraceScavenge) { 705 cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr(); 706 } 707 if (is_live) { 708 // Perform cur->oops_do(f), maybe just once per nmethod. 709 f->do_code_blob(cur); 710 } 711 nmethod* const next = cur->scavenge_root_link(); 712 // The scavengable nmethod list must contain all methods with scavengable 713 // oops. It is safe to include more nmethod on the list, but we do not 714 // expect any live non-scavengable nmethods on the list. 715 if (fix_relocations) { 716 if (!is_live || !cur->detect_scavenge_root_oops()) { 717 unlink_scavenge_root_nmethod(cur, prev); 718 } else { 719 prev = cur; 720 } 721 } 722 cur = next; 723 } 724 725 // Check for stray marks. 726 debug_only(verify_perm_nmethods(NULL)); 727} 728 729void CodeCache::add_scavenge_root_nmethod(nmethod* nm) { 730 assert_locked_or_safepoint(CodeCache_lock); 731 732 if (UseG1GC) { 733 return; 734 } 735 736 nm->set_on_scavenge_root_list(); 737 nm->set_scavenge_root_link(_scavenge_root_nmethods); 738 set_scavenge_root_nmethods(nm); 739 print_trace("add_scavenge_root", nm); 740} 741 742void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) { 743 assert_locked_or_safepoint(CodeCache_lock); 744 745 assert((prev == NULL && scavenge_root_nmethods() == nm) || 746 (prev != NULL && prev->scavenge_root_link() == nm), "precondition"); 747 748 assert(!UseG1GC, "G1 does not use the scavenge_root_nmethods list"); 749 750 print_trace("unlink_scavenge_root", nm); 751 if (prev == NULL) { 752 set_scavenge_root_nmethods(nm->scavenge_root_link()); 753 } else { 754 prev->set_scavenge_root_link(nm->scavenge_root_link()); 755 } 756 nm->set_scavenge_root_link(NULL); 757 nm->clear_on_scavenge_root_list(); 758} 759 760void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) { 761 assert_locked_or_safepoint(CodeCache_lock); 762 763 if (UseG1GC) { 764 return; 765 } 766 767 print_trace("drop_scavenge_root", nm); 768 nmethod* prev = NULL; 769 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) { 770 if (cur == nm) { 771 unlink_scavenge_root_nmethod(cur, prev); 772 return; 773 } 774 prev = cur; 775 } 776 assert(false, "should have been on list"); 777} 778 779void CodeCache::prune_scavenge_root_nmethods() { 780 assert_locked_or_safepoint(CodeCache_lock); 781 782 if (UseG1GC) { 783 return; 784 } 785 786 debug_only(mark_scavenge_root_nmethods()); 787 788 nmethod* last = NULL; 789 nmethod* cur = scavenge_root_nmethods(); 790 while (cur != NULL) { 791 nmethod* next = cur->scavenge_root_link(); 792 debug_only(cur->clear_scavenge_root_marked()); 793 assert(cur->scavenge_root_not_marked(), ""); 794 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list"); 795 796 if (!cur->is_zombie() && !cur->is_unloaded() 797 && cur->detect_scavenge_root_oops()) { 798 // Keep it. Advance 'last' to prevent deletion. 799 last = cur; 800 } else { 801 // Prune it from the list, so we don't have to look at it any more. 802 print_trace("prune_scavenge_root", cur); 803 unlink_scavenge_root_nmethod(cur, last); 804 } 805 cur = next; 806 } 807 808 // Check for stray marks. 809 debug_only(verify_perm_nmethods(NULL)); 810} 811 812#ifndef PRODUCT 813void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) { 814 if (UseG1GC) { 815 return; 816 } 817 818 // While we are here, verify the integrity of the list. 819 mark_scavenge_root_nmethods(); 820 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) { 821 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list"); 822 cur->clear_scavenge_root_marked(); 823 } 824 verify_perm_nmethods(f); 825} 826 827// Temporarily mark nmethods that are claimed to be on the non-perm list. 828void CodeCache::mark_scavenge_root_nmethods() { 829 NMethodIterator iter; 830 while(iter.next_alive()) { 831 nmethod* nm = iter.method(); 832 assert(nm->scavenge_root_not_marked(), "clean state"); 833 if (nm->on_scavenge_root_list()) 834 nm->set_scavenge_root_marked(); 835 } 836} 837 838// If the closure is given, run it on the unlisted nmethods. 839// Also make sure that the effects of mark_scavenge_root_nmethods is gone. 840void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) { 841 NMethodIterator iter; 842 while(iter.next_alive()) { 843 nmethod* nm = iter.method(); 844 bool call_f = (f_or_null != NULL); 845 assert(nm->scavenge_root_not_marked(), "must be already processed"); 846 if (nm->on_scavenge_root_list()) 847 call_f = false; // don't show this one to the client 848 nm->verify_scavenge_root_oops(); 849 if (call_f) f_or_null->do_code_blob(nm); 850 } 851} 852#endif //PRODUCT 853 854void CodeCache::verify_clean_inline_caches() { 855#ifdef ASSERT 856 NMethodIterator iter; 857 while(iter.next_alive()) { 858 nmethod* nm = iter.method(); 859 assert(!nm->is_unloaded(), "Tautology"); 860 nm->verify_clean_inline_caches(); 861 nm->verify(); 862 } 863#endif 864} 865 866void CodeCache::verify_icholder_relocations() { 867#ifdef ASSERT 868 // make sure that we aren't leaking icholders 869 int count = 0; 870 FOR_ALL_HEAPS(heap) { 871 FOR_ALL_BLOBS(cb, *heap) { 872 CompiledMethod *nm = cb->as_compiled_method_or_null(); 873 if (nm != NULL) { 874 count += nm->verify_icholder_relocations(); 875 } 876 } 877 } 878 assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() == 879 CompiledICHolder::live_count(), "must agree"); 880#endif 881} 882 883void CodeCache::gc_prologue() { 884} 885 886void CodeCache::gc_epilogue() { 887 assert_locked_or_safepoint(CodeCache_lock); 888 NOT_DEBUG(if (needs_cache_clean())) { 889 CompiledMethodIterator iter; 890 while(iter.next_alive()) { 891 CompiledMethod* cm = iter.method(); 892 assert(!cm->is_unloaded(), "Tautology"); 893 DEBUG_ONLY(if (needs_cache_clean())) { 894 cm->cleanup_inline_caches(); 895 } 896 DEBUG_ONLY(cm->verify()); 897 DEBUG_ONLY(cm->verify_oop_relocations()); 898 } 899 } 900 901 set_needs_cache_clean(false); 902 prune_scavenge_root_nmethods(); 903 904 verify_icholder_relocations(); 905} 906 907void CodeCache::verify_oops() { 908 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 909 VerifyOopClosure voc; 910 NMethodIterator iter; 911 while(iter.next_alive()) { 912 nmethod* nm = iter.method(); 913 nm->oops_do(&voc); 914 nm->verify_oop_relocations(); 915 } 916} 917 918int CodeCache::blob_count(int code_blob_type) { 919 CodeHeap* heap = get_code_heap(code_blob_type); 920 return (heap != NULL) ? heap->blob_count() : 0; 921} 922 923int CodeCache::blob_count() { 924 int count = 0; 925 FOR_ALL_HEAPS(heap) { 926 count += (*heap)->blob_count(); 927 } 928 return count; 929} 930 931int CodeCache::nmethod_count(int code_blob_type) { 932 CodeHeap* heap = get_code_heap(code_blob_type); 933 return (heap != NULL) ? heap->nmethod_count() : 0; 934} 935 936int CodeCache::nmethod_count() { 937 int count = 0; 938 FOR_ALL_NMETHOD_HEAPS(heap) { 939 count += (*heap)->nmethod_count(); 940 } 941 return count; 942} 943 944int CodeCache::adapter_count(int code_blob_type) { 945 CodeHeap* heap = get_code_heap(code_blob_type); 946 return (heap != NULL) ? heap->adapter_count() : 0; 947} 948 949int CodeCache::adapter_count() { 950 int count = 0; 951 FOR_ALL_HEAPS(heap) { 952 count += (*heap)->adapter_count(); 953 } 954 return count; 955} 956 957address CodeCache::low_bound(int code_blob_type) { 958 CodeHeap* heap = get_code_heap(code_blob_type); 959 return (heap != NULL) ? (address)heap->low_boundary() : NULL; 960} 961 962address CodeCache::high_bound(int code_blob_type) { 963 CodeHeap* heap = get_code_heap(code_blob_type); 964 return (heap != NULL) ? (address)heap->high_boundary() : NULL; 965} 966 967size_t CodeCache::capacity() { 968 size_t cap = 0; 969 FOR_ALL_ALLOCABLE_HEAPS(heap) { 970 cap += (*heap)->capacity(); 971 } 972 return cap; 973} 974 975size_t CodeCache::unallocated_capacity(int code_blob_type) { 976 CodeHeap* heap = get_code_heap(code_blob_type); 977 return (heap != NULL) ? heap->unallocated_capacity() : 0; 978} 979 980size_t CodeCache::unallocated_capacity() { 981 size_t unallocated_cap = 0; 982 FOR_ALL_ALLOCABLE_HEAPS(heap) { 983 unallocated_cap += (*heap)->unallocated_capacity(); 984 } 985 return unallocated_cap; 986} 987 988size_t CodeCache::max_capacity() { 989 size_t max_cap = 0; 990 FOR_ALL_ALLOCABLE_HEAPS(heap) { 991 max_cap += (*heap)->max_capacity(); 992 } 993 return max_cap; 994} 995 996/** 997 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap 998 * is free, reverse_free_ratio() returns 4. 999 */ 1000double CodeCache::reverse_free_ratio(int code_blob_type) { 1001 CodeHeap* heap = get_code_heap(code_blob_type); 1002 if (heap == NULL) { 1003 return 0; 1004 } 1005 1006 double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0; 1007 double max_capacity = (double)heap->max_capacity(); 1008 double result = max_capacity / unallocated_capacity; 1009 assert (max_capacity >= unallocated_capacity, "Must be"); 1010 assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result); 1011 return result; 1012} 1013 1014size_t CodeCache::bytes_allocated_in_freelists() { 1015 size_t allocated_bytes = 0; 1016 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1017 allocated_bytes += (*heap)->allocated_in_freelist(); 1018 } 1019 return allocated_bytes; 1020} 1021 1022int CodeCache::allocated_segments() { 1023 int number_of_segments = 0; 1024 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1025 number_of_segments += (*heap)->allocated_segments(); 1026 } 1027 return number_of_segments; 1028} 1029 1030size_t CodeCache::freelists_length() { 1031 size_t length = 0; 1032 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1033 length += (*heap)->freelist_length(); 1034 } 1035 return length; 1036} 1037 1038void icache_init(); 1039 1040void CodeCache::initialize() { 1041 assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points"); 1042#ifdef COMPILER2 1043 assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops"); 1044#endif 1045 assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants"); 1046 // This was originally just a check of the alignment, causing failure, instead, round 1047 // the code cache to the page size. In particular, Solaris is moving to a larger 1048 // default page size. 1049 CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size()); 1050 1051 if (SegmentedCodeCache) { 1052 // Use multiple code heaps 1053 initialize_heaps(); 1054 } else { 1055 // Use a single code heap 1056 FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0); 1057 FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0); 1058 FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0); 1059 ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize); 1060 add_heap(rs, "CodeCache", CodeBlobType::All); 1061 } 1062 1063 // Initialize ICache flush mechanism 1064 // This service is needed for os::register_code_area 1065 icache_init(); 1066 1067 // Give OS a chance to register generated code area. 1068 // This is used on Windows 64 bit platforms to register 1069 // Structured Exception Handlers for our generated code. 1070 os::register_code_area((char*)low_bound(), (char*)high_bound()); 1071} 1072 1073void codeCache_init() { 1074 CodeCache::initialize(); 1075 // Load AOT libraries and add AOT code heaps. 1076 AOTLoader::initialize(); 1077} 1078 1079//------------------------------------------------------------------------------------------------ 1080 1081int CodeCache::number_of_nmethods_with_dependencies() { 1082 return _number_of_nmethods_with_dependencies; 1083} 1084 1085void CodeCache::clear_inline_caches() { 1086 assert_locked_or_safepoint(CodeCache_lock); 1087 CompiledMethodIterator iter; 1088 while(iter.next_alive()) { 1089 iter.method()->clear_inline_caches(); 1090 } 1091} 1092 1093void CodeCache::cleanup_inline_caches() { 1094 assert_locked_or_safepoint(CodeCache_lock); 1095 NMethodIterator iter; 1096 while(iter.next_alive()) { 1097 iter.method()->cleanup_inline_caches(/*clean_all=*/true); 1098 } 1099} 1100 1101// Keeps track of time spent for checking dependencies 1102NOT_PRODUCT(static elapsedTimer dependentCheckTime;) 1103 1104int CodeCache::mark_for_deoptimization(KlassDepChange& changes) { 1105 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1106 int number_of_marked_CodeBlobs = 0; 1107 1108 // search the hierarchy looking for nmethods which are affected by the loading of this class 1109 1110 // then search the interfaces this class implements looking for nmethods 1111 // which might be dependent of the fact that an interface only had one 1112 // implementor. 1113 // nmethod::check_all_dependencies works only correctly, if no safepoint 1114 // can happen 1115 NoSafepointVerifier nsv; 1116 for (DepChange::ContextStream str(changes, nsv); str.next(); ) { 1117 Klass* d = str.klass(); 1118 number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes); 1119 } 1120 1121#ifndef PRODUCT 1122 if (VerifyDependencies) { 1123 // Object pointers are used as unique identifiers for dependency arguments. This 1124 // is only possible if no safepoint, i.e., GC occurs during the verification code. 1125 dependentCheckTime.start(); 1126 nmethod::check_all_dependencies(changes); 1127 dependentCheckTime.stop(); 1128 } 1129#endif 1130 1131 return number_of_marked_CodeBlobs; 1132} 1133 1134CompiledMethod* CodeCache::find_compiled(void* start) { 1135 CodeBlob *cb = find_blob(start); 1136 assert(cb == NULL || cb->is_compiled(), "did not find an compiled_method"); 1137 return (CompiledMethod*)cb; 1138} 1139 1140bool CodeCache::is_far_target(address target) { 1141#if INCLUDE_AOT 1142 return NativeCall::is_far_call(_low_bound, target) || 1143 NativeCall::is_far_call(_high_bound, target); 1144#else 1145 return false; 1146#endif 1147} 1148 1149#ifdef HOTSWAP 1150int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) { 1151 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1152 int number_of_marked_CodeBlobs = 0; 1153 1154 // Deoptimize all methods of the evolving class itself 1155 Array<Method*>* old_methods = dependee->methods(); 1156 for (int i = 0; i < old_methods->length(); i++) { 1157 ResourceMark rm; 1158 Method* old_method = old_methods->at(i); 1159 CompiledMethod* nm = old_method->code(); 1160 if (nm != NULL) { 1161 nm->mark_for_deoptimization(); 1162 number_of_marked_CodeBlobs++; 1163 } 1164 } 1165 1166 CompiledMethodIterator iter; 1167 while(iter.next_alive()) { 1168 CompiledMethod* nm = iter.method(); 1169 if (nm->is_marked_for_deoptimization()) { 1170 // ...Already marked in the previous pass; don't count it again. 1171 } else if (nm->is_evol_dependent_on(dependee())) { 1172 ResourceMark rm; 1173 nm->mark_for_deoptimization(); 1174 number_of_marked_CodeBlobs++; 1175 } else { 1176 // flush caches in case they refer to a redefined Method* 1177 nm->clear_inline_caches(); 1178 } 1179 } 1180 1181 return number_of_marked_CodeBlobs; 1182} 1183#endif // HOTSWAP 1184 1185 1186// Deoptimize all methods 1187void CodeCache::mark_all_nmethods_for_deoptimization() { 1188 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1189 CompiledMethodIterator iter; 1190 while(iter.next_alive()) { 1191 CompiledMethod* nm = iter.method(); 1192 if (!nm->method()->is_method_handle_intrinsic()) { 1193 nm->mark_for_deoptimization(); 1194 } 1195 } 1196} 1197 1198int CodeCache::mark_for_deoptimization(Method* dependee) { 1199 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1200 int number_of_marked_CodeBlobs = 0; 1201 1202 CompiledMethodIterator iter; 1203 while(iter.next_alive()) { 1204 CompiledMethod* nm = iter.method(); 1205 if (nm->is_dependent_on_method(dependee)) { 1206 ResourceMark rm; 1207 nm->mark_for_deoptimization(); 1208 number_of_marked_CodeBlobs++; 1209 } 1210 } 1211 1212 return number_of_marked_CodeBlobs; 1213} 1214 1215void CodeCache::make_marked_nmethods_not_entrant() { 1216 assert_locked_or_safepoint(CodeCache_lock); 1217 CompiledMethodIterator iter; 1218 while(iter.next_alive()) { 1219 CompiledMethod* nm = iter.method(); 1220 if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) { 1221 nm->make_not_entrant(); 1222 } 1223 } 1224} 1225 1226// Flushes compiled methods dependent on dependee. 1227void CodeCache::flush_dependents_on(instanceKlassHandle dependee) { 1228 assert_lock_strong(Compile_lock); 1229 1230 if (number_of_nmethods_with_dependencies() == 0) return; 1231 1232 // CodeCache can only be updated by a thread_in_VM and they will all be 1233 // stopped during the safepoint so CodeCache will be safe to update without 1234 // holding the CodeCache_lock. 1235 1236 KlassDepChange changes(dependee); 1237 1238 // Compute the dependent nmethods 1239 if (mark_for_deoptimization(changes) > 0) { 1240 // At least one nmethod has been marked for deoptimization 1241 VM_Deoptimize op; 1242 VMThread::execute(&op); 1243 } 1244} 1245 1246#ifdef HOTSWAP 1247// Flushes compiled methods dependent on dependee in the evolutionary sense 1248void CodeCache::flush_evol_dependents_on(instanceKlassHandle ev_k_h) { 1249 // --- Compile_lock is not held. However we are at a safepoint. 1250 assert_locked_or_safepoint(Compile_lock); 1251 if (number_of_nmethods_with_dependencies() == 0 && !UseAOT) return; 1252 1253 // CodeCache can only be updated by a thread_in_VM and they will all be 1254 // stopped during the safepoint so CodeCache will be safe to update without 1255 // holding the CodeCache_lock. 1256 1257 // Compute the dependent nmethods 1258 if (mark_for_evol_deoptimization(ev_k_h) > 0) { 1259 // At least one nmethod has been marked for deoptimization 1260 1261 // All this already happens inside a VM_Operation, so we'll do all the work here. 1262 // Stuff copied from VM_Deoptimize and modified slightly. 1263 1264 // We do not want any GCs to happen while we are in the middle of this VM operation 1265 ResourceMark rm; 1266 DeoptimizationMarker dm; 1267 1268 // Deoptimize all activations depending on marked nmethods 1269 Deoptimization::deoptimize_dependents(); 1270 1271 // Make the dependent methods not entrant 1272 make_marked_nmethods_not_entrant(); 1273 } 1274} 1275#endif // HOTSWAP 1276 1277 1278// Flushes compiled methods dependent on dependee 1279void CodeCache::flush_dependents_on_method(methodHandle m_h) { 1280 // --- Compile_lock is not held. However we are at a safepoint. 1281 assert_locked_or_safepoint(Compile_lock); 1282 1283 // CodeCache can only be updated by a thread_in_VM and they will all be 1284 // stopped dring the safepoint so CodeCache will be safe to update without 1285 // holding the CodeCache_lock. 1286 1287 // Compute the dependent nmethods 1288 if (mark_for_deoptimization(m_h()) > 0) { 1289 // At least one nmethod has been marked for deoptimization 1290 1291 // All this already happens inside a VM_Operation, so we'll do all the work here. 1292 // Stuff copied from VM_Deoptimize and modified slightly. 1293 1294 // We do not want any GCs to happen while we are in the middle of this VM operation 1295 ResourceMark rm; 1296 DeoptimizationMarker dm; 1297 1298 // Deoptimize all activations depending on marked nmethods 1299 Deoptimization::deoptimize_dependents(); 1300 1301 // Make the dependent methods not entrant 1302 make_marked_nmethods_not_entrant(); 1303 } 1304} 1305 1306void CodeCache::verify() { 1307 assert_locked_or_safepoint(CodeCache_lock); 1308 FOR_ALL_HEAPS(heap) { 1309 (*heap)->verify(); 1310 FOR_ALL_BLOBS(cb, *heap) { 1311 if (cb->is_alive()) { 1312 cb->verify(); 1313 } 1314 } 1315 } 1316} 1317 1318// A CodeHeap is full. Print out warning and report event. 1319void CodeCache::report_codemem_full(int code_blob_type, bool print) { 1320 // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event 1321 CodeHeap* heap = get_code_heap(code_blob_type); 1322 assert(heap != NULL, "heap is null"); 1323 1324 if ((heap->full_count() == 0) || print) { 1325 // Not yet reported for this heap, report 1326 if (SegmentedCodeCache) { 1327 warning("%s is full. Compiler has been disabled.", get_code_heap_name(code_blob_type)); 1328 warning("Try increasing the code heap size using -XX:%s=", get_code_heap_flag_name(code_blob_type)); 1329 } else { 1330 warning("CodeCache is full. Compiler has been disabled."); 1331 warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize="); 1332 } 1333 ResourceMark rm; 1334 stringStream s; 1335 // Dump code cache into a buffer before locking the tty, 1336 { 1337 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1338 print_summary(&s); 1339 } 1340 ttyLocker ttyl; 1341 tty->print("%s", s.as_string()); 1342 } 1343 1344 heap->report_full(); 1345 1346 EventCodeCacheFull event; 1347 if (event.should_commit()) { 1348 event.set_codeBlobType((u1)code_blob_type); 1349 event.set_startAddress((u8)heap->low_boundary()); 1350 event.set_commitedTopAddress((u8)heap->high()); 1351 event.set_reservedTopAddress((u8)heap->high_boundary()); 1352 event.set_entryCount(heap->blob_count()); 1353 event.set_methodCount(heap->nmethod_count()); 1354 event.set_adaptorCount(heap->adapter_count()); 1355 event.set_unallocatedCapacity(heap->unallocated_capacity()); 1356 event.set_fullCount(heap->full_count()); 1357 event.commit(); 1358 } 1359} 1360 1361void CodeCache::print_memory_overhead() { 1362 size_t wasted_bytes = 0; 1363 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1364 CodeHeap* curr_heap = *heap; 1365 for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) { 1366 HeapBlock* heap_block = ((HeapBlock*)cb) - 1; 1367 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size(); 1368 } 1369 } 1370 // Print bytes that are allocated in the freelist 1371 ttyLocker ttl; 1372 tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length()); 1373 tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K); 1374 tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K)); 1375 tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment 1376} 1377 1378//------------------------------------------------------------------------------------------------ 1379// Non-product version 1380 1381#ifndef PRODUCT 1382 1383void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) { 1384 if (PrintCodeCache2) { // Need to add a new flag 1385 ResourceMark rm; 1386 if (size == 0) size = cb->size(); 1387 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size); 1388 } 1389} 1390 1391void CodeCache::print_internals() { 1392 int nmethodCount = 0; 1393 int runtimeStubCount = 0; 1394 int adapterCount = 0; 1395 int deoptimizationStubCount = 0; 1396 int uncommonTrapStubCount = 0; 1397 int bufferBlobCount = 0; 1398 int total = 0; 1399 int nmethodAlive = 0; 1400 int nmethodNotEntrant = 0; 1401 int nmethodZombie = 0; 1402 int nmethodUnloaded = 0; 1403 int nmethodJava = 0; 1404 int nmethodNative = 0; 1405 int max_nm_size = 0; 1406 ResourceMark rm; 1407 1408 int i = 0; 1409 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1410 if ((_nmethod_heaps->length() >= 1) && Verbose) { 1411 tty->print_cr("-- %s --", (*heap)->name()); 1412 } 1413 FOR_ALL_BLOBS(cb, *heap) { 1414 total++; 1415 if (cb->is_nmethod()) { 1416 nmethod* nm = (nmethod*)cb; 1417 1418 if (Verbose && nm->method() != NULL) { 1419 ResourceMark rm; 1420 char *method_name = nm->method()->name_and_sig_as_C_string(); 1421 tty->print("%s", method_name); 1422 if(nm->is_alive()) { tty->print_cr(" alive"); } 1423 if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); } 1424 if(nm->is_zombie()) { tty->print_cr(" zombie"); } 1425 } 1426 1427 nmethodCount++; 1428 1429 if(nm->is_alive()) { nmethodAlive++; } 1430 if(nm->is_not_entrant()) { nmethodNotEntrant++; } 1431 if(nm->is_zombie()) { nmethodZombie++; } 1432 if(nm->is_unloaded()) { nmethodUnloaded++; } 1433 if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; } 1434 1435 if(nm->method() != NULL && nm->is_java_method()) { 1436 nmethodJava++; 1437 max_nm_size = MAX2(max_nm_size, nm->size()); 1438 } 1439 } else if (cb->is_runtime_stub()) { 1440 runtimeStubCount++; 1441 } else if (cb->is_deoptimization_stub()) { 1442 deoptimizationStubCount++; 1443 } else if (cb->is_uncommon_trap_stub()) { 1444 uncommonTrapStubCount++; 1445 } else if (cb->is_adapter_blob()) { 1446 adapterCount++; 1447 } else if (cb->is_buffer_blob()) { 1448 bufferBlobCount++; 1449 } 1450 } 1451 } 1452 1453 int bucketSize = 512; 1454 int bucketLimit = max_nm_size / bucketSize + 1; 1455 int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode); 1456 memset(buckets, 0, sizeof(int) * bucketLimit); 1457 1458 NMethodIterator iter; 1459 while(iter.next()) { 1460 nmethod* nm = iter.method(); 1461 if(nm->method() != NULL && nm->is_java_method()) { 1462 buckets[nm->size() / bucketSize]++; 1463 } 1464 } 1465 1466 tty->print_cr("Code Cache Entries (total of %d)",total); 1467 tty->print_cr("-------------------------------------------------"); 1468 tty->print_cr("nmethods: %d",nmethodCount); 1469 tty->print_cr("\talive: %d",nmethodAlive); 1470 tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant); 1471 tty->print_cr("\tzombie: %d",nmethodZombie); 1472 tty->print_cr("\tunloaded: %d",nmethodUnloaded); 1473 tty->print_cr("\tjava: %d",nmethodJava); 1474 tty->print_cr("\tnative: %d",nmethodNative); 1475 tty->print_cr("runtime_stubs: %d",runtimeStubCount); 1476 tty->print_cr("adapters: %d",adapterCount); 1477 tty->print_cr("buffer blobs: %d",bufferBlobCount); 1478 tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount); 1479 tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount); 1480 tty->print_cr("\nnmethod size distribution (non-zombie java)"); 1481 tty->print_cr("-------------------------------------------------"); 1482 1483 for(int i=0; i<bucketLimit; i++) { 1484 if(buckets[i] != 0) { 1485 tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize); 1486 tty->fill_to(40); 1487 tty->print_cr("%d",buckets[i]); 1488 } 1489 } 1490 1491 FREE_C_HEAP_ARRAY(int, buckets); 1492 print_memory_overhead(); 1493} 1494 1495#endif // !PRODUCT 1496 1497void CodeCache::print() { 1498 print_summary(tty); 1499 1500#ifndef PRODUCT 1501 if (!Verbose) return; 1502 1503 CodeBlob_sizes live; 1504 CodeBlob_sizes dead; 1505 1506 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1507 FOR_ALL_BLOBS(cb, *heap) { 1508 if (!cb->is_alive()) { 1509 dead.add(cb); 1510 } else { 1511 live.add(cb); 1512 } 1513 } 1514 } 1515 1516 tty->print_cr("CodeCache:"); 1517 tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds()); 1518 1519 if (!live.is_empty()) { 1520 live.print("live"); 1521 } 1522 if (!dead.is_empty()) { 1523 dead.print("dead"); 1524 } 1525 1526 if (WizardMode) { 1527 // print the oop_map usage 1528 int code_size = 0; 1529 int number_of_blobs = 0; 1530 int number_of_oop_maps = 0; 1531 int map_size = 0; 1532 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1533 FOR_ALL_BLOBS(cb, *heap) { 1534 if (cb->is_alive()) { 1535 number_of_blobs++; 1536 code_size += cb->code_size(); 1537 ImmutableOopMapSet* set = cb->oop_maps(); 1538 if (set != NULL) { 1539 number_of_oop_maps += set->count(); 1540 map_size += set->nr_of_bytes(); 1541 } 1542 } 1543 } 1544 } 1545 tty->print_cr("OopMaps"); 1546 tty->print_cr(" #blobs = %d", number_of_blobs); 1547 tty->print_cr(" code size = %d", code_size); 1548 tty->print_cr(" #oop_maps = %d", number_of_oop_maps); 1549 tty->print_cr(" map size = %d", map_size); 1550 } 1551 1552#endif // !PRODUCT 1553} 1554 1555void CodeCache::print_summary(outputStream* st, bool detailed) { 1556 FOR_ALL_HEAPS(heap_iterator) { 1557 CodeHeap* heap = (*heap_iterator); 1558 size_t total = (heap->high_boundary() - heap->low_boundary()); 1559 if (_heaps->length() >= 1) { 1560 st->print("%s:", heap->name()); 1561 } else { 1562 st->print("CodeCache:"); 1563 } 1564 st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT 1565 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb", 1566 total/K, (total - heap->unallocated_capacity())/K, 1567 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K); 1568 1569 if (detailed) { 1570 st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]", 1571 p2i(heap->low_boundary()), 1572 p2i(heap->high()), 1573 p2i(heap->high_boundary())); 1574 } 1575 } 1576 1577 if (detailed) { 1578 st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT 1579 " adapters=" UINT32_FORMAT, 1580 blob_count(), nmethod_count(), adapter_count()); 1581 st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ? 1582 "enabled" : Arguments::mode() == Arguments::_int ? 1583 "disabled (interpreter mode)" : 1584 "disabled (not enough contiguous free space left)"); 1585 } 1586} 1587 1588void CodeCache::print_codelist(outputStream* st) { 1589 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1590 1591 NMethodIterator iter; 1592 while(iter.next_alive()) { 1593 nmethod* nm = iter.method(); 1594 ResourceMark rm; 1595 char *method_name = nm->method()->name_and_sig_as_C_string(); 1596 st->print_cr("%d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]", 1597 nm->compile_id(), nm->comp_level(), method_name, (intptr_t)nm->header_begin(), 1598 (intptr_t)nm->code_begin(), (intptr_t)nm->code_end()); 1599 } 1600} 1601 1602void CodeCache::print_layout(outputStream* st) { 1603 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1604 ResourceMark rm; 1605 print_summary(st, true); 1606} 1607 1608void CodeCache::log_state(outputStream* st) { 1609 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" 1610 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'", 1611 blob_count(), nmethod_count(), adapter_count(), 1612 unallocated_capacity()); 1613} 1614 1615