codeCache.cpp revision 12253:59da89afe788
1/*
2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "code/codeBlob.hpp"
27#include "code/codeCache.hpp"
28#include "code/compiledIC.hpp"
29#include "code/dependencies.hpp"
30#include "code/icBuffer.hpp"
31#include "code/nmethod.hpp"
32#include "code/pcDesc.hpp"
33#include "compiler/compileBroker.hpp"
34#include "gc/shared/gcLocker.hpp"
35#include "memory/allocation.inline.hpp"
36#include "memory/iterator.hpp"
37#include "memory/resourceArea.hpp"
38#include "oops/method.hpp"
39#include "oops/objArrayOop.hpp"
40#include "oops/oop.inline.hpp"
41#include "oops/verifyOopClosure.hpp"
42#include "runtime/arguments.hpp"
43#include "runtime/compilationPolicy.hpp"
44#include "runtime/deoptimization.hpp"
45#include "runtime/handles.inline.hpp"
46#include "runtime/icache.hpp"
47#include "runtime/java.hpp"
48#include "runtime/mutexLocker.hpp"
49#include "runtime/sweeper.hpp"
50#include "services/memoryService.hpp"
51#include "trace/tracing.hpp"
52#include "utilities/xmlstream.hpp"
53#ifdef COMPILER1
54#include "c1/c1_Compilation.hpp"
55#include "c1/c1_Compiler.hpp"
56#endif
57#ifdef COMPILER2
58#include "opto/c2compiler.hpp"
59#include "opto/compile.hpp"
60#include "opto/node.hpp"
61#endif
62
63// Helper class for printing in CodeCache
64class CodeBlob_sizes {
65 private:
66  int count;
67  int total_size;
68  int header_size;
69  int code_size;
70  int stub_size;
71  int relocation_size;
72  int scopes_oop_size;
73  int scopes_metadata_size;
74  int scopes_data_size;
75  int scopes_pcs_size;
76
77 public:
78  CodeBlob_sizes() {
79    count            = 0;
80    total_size       = 0;
81    header_size      = 0;
82    code_size        = 0;
83    stub_size        = 0;
84    relocation_size  = 0;
85    scopes_oop_size  = 0;
86    scopes_metadata_size  = 0;
87    scopes_data_size = 0;
88    scopes_pcs_size  = 0;
89  }
90
91  int total()                                    { return total_size; }
92  bool is_empty()                                { return count == 0; }
93
94  void print(const char* title) {
95    tty->print_cr(" #%d %s = %dK (hdr %d%%,  loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
96                  count,
97                  title,
98                  (int)(total() / K),
99                  header_size             * 100 / total_size,
100                  relocation_size         * 100 / total_size,
101                  code_size               * 100 / total_size,
102                  stub_size               * 100 / total_size,
103                  scopes_oop_size         * 100 / total_size,
104                  scopes_metadata_size    * 100 / total_size,
105                  scopes_data_size        * 100 / total_size,
106                  scopes_pcs_size         * 100 / total_size);
107  }
108
109  void add(CodeBlob* cb) {
110    count++;
111    total_size       += cb->size();
112    header_size      += cb->header_size();
113    relocation_size  += cb->relocation_size();
114    if (cb->is_nmethod()) {
115      nmethod* nm = cb->as_nmethod_or_null();
116      code_size        += nm->insts_size();
117      stub_size        += nm->stub_size();
118
119      scopes_oop_size  += nm->oops_size();
120      scopes_metadata_size  += nm->metadata_size();
121      scopes_data_size += nm->scopes_data_size();
122      scopes_pcs_size  += nm->scopes_pcs_size();
123    } else {
124      code_size        += cb->code_size();
125    }
126  }
127};
128
129// Iterate over all CodeHeaps
130#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
131// Iterate over all CodeBlobs (cb) on the given CodeHeap
132#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
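// Example (illustrative): the two macros are designed to nest. FOR_ALL_HEAPS
// binds 'heap' to a GrowableArrayIterator, so the current CodeHeap is reached
// by dereferencing the iterator, as in this sketch that counts live blobs:
//
//   int live = 0;
//   FOR_ALL_HEAPS(heap) {
//     FOR_ALL_BLOBS(cb, *heap) {
//       if (cb->is_alive()) live++;
//     }
//   }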
133
134address CodeCache::_low_bound = 0;
135address CodeCache::_high_bound = 0;
136int CodeCache::_number_of_nmethods_with_dependencies = 0;
137bool CodeCache::_needs_cache_clean = false;
138nmethod* CodeCache::_scavenge_root_nmethods = NULL;
139
140// Initialize array of CodeHeaps
141GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
142
143void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
144  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
145  // Prepare error message
146  const char* error = "Invalid code heap sizes";
147  err_msg message("NonNMethodCodeHeapSize (%zuK) + ProfiledCodeHeapSize (%zuK) + NonProfiledCodeHeapSize (%zuK) = %zuK",
148          non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);
149
150  if (total_size > cache_size) {
151    // Some code heap sizes were explicitly set: total_size must be <= cache_size
152    message.append(" is greater than ReservedCodeCacheSize (%zuK).", cache_size/K);
153    vm_exit_during_initialization(error, message);
154  } else if (all_set && total_size != cache_size) {
155    // All code heap sizes were explicitly set: total_size must equal cache_size
156    message.append(" is not equal to ReservedCodeCacheSize (%zuK).", cache_size/K);
157    vm_exit_during_initialization(error, message);
158  }
159}
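// Worked example (hypothetical flag values): with -XX:ReservedCodeCacheSize=100m
// and all three code heap sizes set to 40m on the command line, total_size is
// 120m > cache_size and the first branch exits the VM. With all three set to
// 30m, total_size is 90m < cache_size, but since all_set is true the second
// branch exits instead, because explicitly set sizes must add up exactly.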
160
161void CodeCache::initialize_heaps() {
162  bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
163  bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
164  bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
165  size_t min_size           = os::vm_page_size();
166  size_t cache_size         = ReservedCodeCacheSize;
167  size_t non_nmethod_size   = NonNMethodCodeHeapSize;
168  size_t profiled_size      = ProfiledCodeHeapSize;
169  size_t non_profiled_size  = NonProfiledCodeHeapSize;
170  // Check if total size set via command line flags exceeds the reserved size
171  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
172                   (profiled_set     ? profiled_size     : min_size),
173                   (non_profiled_set ? non_profiled_size : min_size),
174                   cache_size,
175                   non_nmethod_set && profiled_set && non_profiled_set);
176
177  // Determine size of compiler buffers
178  size_t code_buffers_size = 0;
179#ifdef COMPILER1
180  // C1 temporary code buffers (see Compiler::init_buffer_blob())
181  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
182  code_buffers_size += c1_count * Compiler::code_buffer_size();
183#endif
184#ifdef COMPILER2
185  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
186  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
187  // Initial size of constant table (this may be increased if a compiled method needs more space)
188  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
189#endif
190
191  // Increase default non_nmethod_size to account for compiler buffers
192  if (!non_nmethod_set) {
193    non_nmethod_size += code_buffers_size;
194  }
195  // Calculate default CodeHeap sizes if not set by user
196  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
197    // Check if we have enough space for the non-nmethod code heap
198    if (cache_size > non_nmethod_size) {
 199      // Use the default value for non_nmethod_size and split the remaining
 200      // size evenly between the profiled and non-profiled code heaps
201      size_t remaining_size = cache_size - non_nmethod_size;
202      profiled_size = remaining_size / 2;
203      non_profiled_size = remaining_size - profiled_size;
204    } else {
205      // Use all space for the non-nmethod heap and set other heaps to minimal size
206      non_nmethod_size = cache_size - 2 * min_size;
207      profiled_size = min_size;
208      non_profiled_size = min_size;
209    }
210  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
211    // The user explicitly set some code heap sizes. Increase or decrease the (default)
212    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
213    // code heap sizes and then only change non-nmethod code heap size if still necessary.
214    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
215    if (non_profiled_set) {
216      if (!profiled_set) {
217        // Adapt size of profiled code heap
218        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
219          // Not enough space available, set to minimum size
220          diff_size += profiled_size - min_size;
221          profiled_size = min_size;
222        } else {
223          profiled_size += diff_size;
224          diff_size = 0;
225        }
226      }
227    } else if (profiled_set) {
228      // Adapt size of non-profiled code heap
229      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
230        // Not enough space available, set to minimum size
231        diff_size += non_profiled_size - min_size;
232        non_profiled_size = min_size;
233      } else {
234        non_profiled_size += diff_size;
235        diff_size = 0;
236      }
237    } else if (non_nmethod_set) {
238      // Distribute remaining size between profiled and non-profiled code heaps
239      diff_size = cache_size - non_nmethod_size;
240      profiled_size = diff_size / 2;
241      non_profiled_size = diff_size - profiled_size;
242      diff_size = 0;
243    }
244    if (diff_size != 0) {
245      // Use non-nmethod code heap for remaining space requirements
246      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
247      non_nmethod_size += diff_size;
248    }
249  }
250
 251  // We do not need the profiled CodeHeap; use all space for the non-profiled CodeHeap
 252  if (!heap_available(CodeBlobType::MethodProfiled)) {
253    non_profiled_size += profiled_size;
254    profiled_size = 0;
255  }
 256  // We do not need the non-profiled CodeHeap; use all space for the non-nmethod CodeHeap
 257  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
258    non_nmethod_size += non_profiled_size;
259    non_profiled_size = 0;
260  }
261  // Make sure we have enough space for VM internal code
262  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
263  if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
264    vm_exit_during_initialization(err_msg(
265        "Not enough space in non-nmethod code heap to run VM: %zuK < %zuK",
266        non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
267  }
268
269  // Verify sizes and update flag values
270  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
271  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
272  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
273  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
274
275  // Align CodeHeaps
276  size_t alignment = heap_alignment();
277  non_nmethod_size = align_size_up(non_nmethod_size, alignment);
278  profiled_size   = align_size_down(profiled_size, alignment);
279
280  // Reserve one continuous chunk of memory for CodeHeaps and split it into
281  // parts for the individual heaps. The memory layout looks like this:
282  // ---------- high -----------
283  //    Non-profiled nmethods
284  //      Profiled nmethods
285  //         Non-nmethods
286  // ---------- low ------------
287  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
288  ReservedSpace non_method_space    = rs.first_part(non_nmethod_size);
289  ReservedSpace rest                = rs.last_part(non_nmethod_size);
290  ReservedSpace profiled_space      = rest.first_part(profiled_size);
291  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);
292
293  // Non-nmethods (stubs, adapters, ...)
294  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
295  // Tier 2 and tier 3 (profiled) methods
296  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
297  // Tier 1 and tier 4 (non-profiled) methods and native methods
298  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
299}
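// Worked example (hypothetical values): with -XX:ReservedCodeCacheSize=128m and
// no code heap sizes on the command line, suppose the default non_nmethod_size
// plus the compiler buffers comes to 8M. The remaining 120M is then split
// evenly, giving profiled_size = 60M and non_profiled_size = 60M. If some heap
// sizes are set explicitly, diff_size is absorbed first by the unset method
// heaps and only then by the non-nmethod heap, as implemented above.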
300
301size_t CodeCache::heap_alignment() {
302  // If large page support is enabled, align code heaps according to large
303  // page size to make sure that code cache is covered by large pages.
304  const size_t page_size = os::can_execute_large_page_memory() ?
305             os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8) :
306             os::vm_page_size();
307  return MAX2(page_size, (size_t) os::vm_allocation_granularity());
308}
309
310ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
311  // Determine alignment
312  const size_t page_size = os::can_execute_large_page_memory() ?
313          MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
314               os::page_size_for_region_aligned(size, 8)) :
315          os::vm_page_size();
316  const size_t granularity = os::vm_allocation_granularity();
317  const size_t r_align = MAX2(page_size, granularity);
318  const size_t r_size = align_size_up(size, r_align);
319  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
320    MAX2(page_size, granularity);
321
322  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
323
324  if (!rs.is_reserved()) {
325    vm_exit_during_initialization("Could not reserve enough space for code cache");
326  }
327
328  // Initialize bounds
329  _low_bound = (address)rs.base();
330  _high_bound = _low_bound + rs.size();
331
332  return rs;
333}
334
335bool CodeCache::heap_available(int code_blob_type) {
336  if (!SegmentedCodeCache) {
337    // No segmentation: use a single code heap
338    return (code_blob_type == CodeBlobType::All);
339  } else if (Arguments::is_interpreter_only()) {
340    // Interpreter only: we don't need any method code heaps
341    return (code_blob_type == CodeBlobType::NonNMethod);
342  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
343    // Tiered compilation: use all code heaps
344    return (code_blob_type < CodeBlobType::All);
345  } else {
346    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
347    return (code_blob_type == CodeBlobType::NonNMethod) ||
348           (code_blob_type == CodeBlobType::MethodNonProfiled);
349  }
350}
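// Summary of the cases above, derived directly from the code:
//
//   !SegmentedCodeCache                       -> CodeBlobType::All only
//   interpreter-only                          -> NonNMethod only
//   tiered, TieredStopAtLevel > CompLevel_simple -> NonNMethod, MethodProfiled and MethodNonProfiled
//   otherwise                                 -> NonNMethod and MethodNonProfiled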
351
352const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
353  switch(code_blob_type) {
354  case CodeBlobType::NonNMethod:
355    return "NonNMethodCodeHeapSize";
356    break;
357  case CodeBlobType::MethodNonProfiled:
358    return "NonProfiledCodeHeapSize";
359    break;
360  case CodeBlobType::MethodProfiled:
361    return "ProfiledCodeHeapSize";
362    break;
363  }
364  ShouldNotReachHere();
365  return NULL;
366}
367
368void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
369  // Check if heap is needed
370  if (!heap_available(code_blob_type)) {
371    return;
372  }
373
374  // Create CodeHeap
375  CodeHeap* heap = new CodeHeap(name, code_blob_type);
376  _heaps->append(heap);
377
378  // Reserve Space
379  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
380  size_initial = round_to(size_initial, os::vm_page_size());
381  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
382    vm_exit_during_initialization("Could not reserve enough space for code cache");
383  }
384
385  // Register the CodeHeap
386  MemoryService::add_code_heap_memory_pool(heap, name);
387}
388
389CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
390  assert(cb != NULL, "CodeBlob is null");
391  FOR_ALL_HEAPS(heap) {
392    if ((*heap)->contains(cb)) {
393      return *heap;
394    }
395  }
396  ShouldNotReachHere();
397  return NULL;
398}
399
400CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
401  FOR_ALL_HEAPS(heap) {
402    if ((*heap)->accepts(code_blob_type)) {
403      return *heap;
404    }
405  }
406  return NULL;
407}
408
409CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
410  assert_locked_or_safepoint(CodeCache_lock);
411  assert(heap != NULL, "heap is null");
412  return (CodeBlob*)heap->first();
413}
414
415CodeBlob* CodeCache::first_blob(int code_blob_type) {
416  if (heap_available(code_blob_type)) {
417    return first_blob(get_code_heap(code_blob_type));
418  } else {
419    return NULL;
420  }
421}
422
423CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
424  assert_locked_or_safepoint(CodeCache_lock);
425  assert(heap != NULL, "heap is null");
426  return (CodeBlob*)heap->next(cb);
427}
428
429CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
430  return next_blob(get_code_heap(cb), cb);
431}
432
433/**
434 * Do not seize the CodeCache lock here--if the caller has not
435 * already done so, we are going to lose bigtime, since the code
436 * cache will contain a garbage CodeBlob until the caller can
437 * run the constructor for the CodeBlob subclass he is busy
438 * instantiating.
439 */
440CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
441  // Possibly wakes up the sweeper thread.
442  NMethodSweeper::notify(code_blob_type);
443  assert_locked_or_safepoint(CodeCache_lock);
444  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
445  if (size <= 0) {
446    return NULL;
447  }
448  CodeBlob* cb = NULL;
449
450  // Get CodeHeap for the given CodeBlobType
451  CodeHeap* heap = get_code_heap(code_blob_type);
452  assert(heap != NULL, "heap is null");
453
454  while (true) {
455    cb = (CodeBlob*)heap->allocate(size);
456    if (cb != NULL) break;
457    if (!heap->expand_by(CodeCacheExpansionSize)) {
458      // Save original type for error reporting
459      if (orig_code_blob_type == CodeBlobType::All) {
460        orig_code_blob_type = code_blob_type;
461      }
462      // Expansion failed
463      if (SegmentedCodeCache) {
464        // Fallback solution: Try to store code in another code heap.
465        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
466        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
467        // and force stack scanning if less than 10% of the code heap are free.
468        int type = code_blob_type;
469        switch (type) {
470        case CodeBlobType::NonNMethod:
471          type = CodeBlobType::MethodNonProfiled;
472          break;
473        case CodeBlobType::MethodNonProfiled:
474          type = CodeBlobType::MethodProfiled;
475          break;
476        case CodeBlobType::MethodProfiled:
477          // Avoid loop if we already tried that code heap
478          if (type == orig_code_blob_type) {
479            type = CodeBlobType::MethodNonProfiled;
480          }
481          break;
482        }
483        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
484          if (PrintCodeCacheExtension) {
485            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
486                          heap->name(), get_code_heap(type)->name());
487          }
488          return allocate(size, type, orig_code_blob_type);
489        }
490      }
491      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
492      CompileBroker::handle_full_code_cache(orig_code_blob_type);
493      return NULL;
494    }
495    if (PrintCodeCacheExtension) {
496      ResourceMark rm;
497      if (_heaps->length() >= 1) {
498        tty->print("%s", heap->name());
499      } else {
500        tty->print("CodeCache");
501      }
502      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
503                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
504                    (address)heap->high() - (address)heap->low_boundary());
505    }
506  }
507  print_trace("allocation", cb, size);
508  return cb;
509}
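// Example (illustrative): if a profiled nmethod no longer fits in its heap and
// the heap cannot be expanded, the fallback chain above retries as
//
//   allocate(size, CodeBlobType::MethodProfiled, CodeBlobType::All)
//     -> allocate(size, CodeBlobType::MethodNonProfiled, CodeBlobType::MethodProfiled)
//
// and gives up (reporting the original heap as full) once the next heap in the
// chain equals orig_code_blob_type or is not available.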
510
511void CodeCache::free(CodeBlob* cb) {
512  assert_locked_or_safepoint(CodeCache_lock);
513  CodeHeap* heap = get_code_heap(cb);
514  print_trace("free", cb);
515  if (cb->is_nmethod()) {
516    heap->set_nmethod_count(heap->nmethod_count() - 1);
517    if (((nmethod *)cb)->has_dependencies()) {
518      _number_of_nmethods_with_dependencies--;
519    }
520  }
521  if (cb->is_adapter_blob()) {
522    heap->set_adapter_count(heap->adapter_count() - 1);
523  }
524
525  // Get heap for given CodeBlob and deallocate
526  get_code_heap(cb)->deallocate(cb);
527
528  assert(heap->blob_count() >= 0, "sanity check");
529}
530
531void CodeCache::commit(CodeBlob* cb) {
532  // this is called by nmethod::nmethod, which must already own CodeCache_lock
533  assert_locked_or_safepoint(CodeCache_lock);
534  CodeHeap* heap = get_code_heap(cb);
535  if (cb->is_nmethod()) {
536    heap->set_nmethod_count(heap->nmethod_count() + 1);
537    if (((nmethod *)cb)->has_dependencies()) {
538      _number_of_nmethods_with_dependencies++;
539    }
540  }
541  if (cb->is_adapter_blob()) {
542    heap->set_adapter_count(heap->adapter_count() + 1);
543  }
544
545  // flush the hardware I-cache
546  ICache::invalidate_range(cb->content_begin(), cb->content_size());
547}
548
549bool CodeCache::contains(void *p) {
550  // S390 uses contains() in current_frame(), which is used before
551  // code cache initialization if NativeMemoryTracking=detail is set.
552  S390_ONLY(if (_heaps == NULL) return false;)
553  // It should be ok to call contains without holding a lock.
554  FOR_ALL_HEAPS(heap) {
555    if ((*heap)->contains(p)) {
556      return true;
557    }
558  }
559  return false;
560}
561
562// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
563// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap containing
564// valid indices, which it always does, as long as the CodeBlob is not in the process of being recycled.
565CodeBlob* CodeCache::find_blob(void* start) {
566  CodeBlob* result = find_blob_unsafe(start);
567  // We could potentially look up non_entrant methods
568  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
569  return result;
570}
571
572// Lookup that does not fail if you look up a zombie method (if you call this, be sure to know
573// what you are doing)
574CodeBlob* CodeCache::find_blob_unsafe(void* start) {
575  // NMT can walk the stack before code cache is created
576  if (_heaps != NULL && !_heaps->is_empty()) {
577    FOR_ALL_HEAPS(heap) {
578      CodeBlob* result = (CodeBlob*) (*heap)->find_start(start);
579      if (result != NULL && result->blob_contains((address)start)) {
580        return result;
581      }
582    }
583  }
584  return NULL;
585}
586
587nmethod* CodeCache::find_nmethod(void* start) {
588  CodeBlob* cb = find_blob(start);
589  assert(cb->is_nmethod(), "did not find an nmethod");
590  return (nmethod*)cb;
591}
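// Example (illustrative): a typical lookup when mapping a pc inside generated
// code back to its enclosing method, e.g. during stack walking:
//
//   CodeBlob* cb = CodeCache::find_blob(pc);
//   if (cb != NULL && cb->is_nmethod()) {
//     nmethod* nm = (nmethod*)cb;
//     // ... inspect nm ...
//   }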
592
593void CodeCache::blobs_do(void f(CodeBlob* nm)) {
594  assert_locked_or_safepoint(CodeCache_lock);
595  FOR_ALL_HEAPS(heap) {
596    FOR_ALL_BLOBS(cb, *heap) {
597      f(cb);
598    }
599  }
600}
601
602void CodeCache::nmethods_do(void f(nmethod* nm)) {
603  assert_locked_or_safepoint(CodeCache_lock);
604  NMethodIterator iter;
605  while(iter.next()) {
606    f(iter.method());
607  }
608}
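// Note: nmethods_do() above visits every nmethod, including zombies, because it
// uses iter.next(); metadata_do() below skips dead ones via next_alive().
// Example (illustrative) of the function-pointer interface:
//
//   static void count_nmethod(nmethod* nm) { /* ... */ }
//   CodeCache::nmethods_do(count_nmethod);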
609
610void CodeCache::metadata_do(void f(Metadata* m)) {
611  assert_locked_or_safepoint(CodeCache_lock);
612  NMethodIterator iter;
613  while(iter.next_alive()) {
614    iter.method()->metadata_do(f);
615  }
616}
617
618int CodeCache::alignment_unit() {
619  return (int)_heaps->first()->alignment_unit();
620}
621
622int CodeCache::alignment_offset() {
623  return (int)_heaps->first()->alignment_offset();
624}
625
626// Mark nmethods for unloading if they contain otherwise unreachable oops.
627void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
628  assert_locked_or_safepoint(CodeCache_lock);
629  CompiledMethodIterator iter;
630  while(iter.next_alive()) {
631    iter.method()->do_unloading(is_alive, unloading_occurred);
632  }
633}
634
635void CodeCache::blobs_do(CodeBlobClosure* f) {
636  assert_locked_or_safepoint(CodeCache_lock);
637  FOR_ALL_HEAPS(heap) {
638    FOR_ALL_BLOBS(cb, *heap) {
639      if (cb->is_alive()) {
640        f->do_code_blob(cb);
641
642#ifdef ASSERT
643        if (cb->is_nmethod())
 644          ((nmethod*)cb)->verify_scavenge_root_oops();
645#endif //ASSERT
646      }
647    }
648  }
649}
650
651// Walk the list of methods which might contain non-perm oops.
652void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
653  assert_locked_or_safepoint(CodeCache_lock);
654
655  if (UseG1GC) {
656    return;
657  }
658
659  const bool fix_relocations = f->fix_relocations();
660  debug_only(mark_scavenge_root_nmethods());
661
662  nmethod* prev = NULL;
663  nmethod* cur = scavenge_root_nmethods();
664  while (cur != NULL) {
665    debug_only(cur->clear_scavenge_root_marked());
666    assert(cur->scavenge_root_not_marked(), "");
667    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
668
669    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
670    if (TraceScavenge) {
671      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
672    }
673    if (is_live) {
674      // Perform cur->oops_do(f), maybe just once per nmethod.
675      f->do_code_blob(cur);
676    }
677    nmethod* const next = cur->scavenge_root_link();
678    // The scavengable nmethod list must contain all methods with scavengable
 679    // oops. It is safe to include more nmethods on the list, but we do not
680    // expect any live non-scavengable nmethods on the list.
681    if (fix_relocations) {
682      if (!is_live || !cur->detect_scavenge_root_oops()) {
683        unlink_scavenge_root_nmethod(cur, prev);
684      } else {
685        prev = cur;
686      }
687    }
688    cur = next;
689  }
690
691  // Check for stray marks.
692  debug_only(verify_perm_nmethods(NULL));
693}
694
695void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
696  assert_locked_or_safepoint(CodeCache_lock);
697
698  if (UseG1GC) {
699    return;
700  }
701
702  nm->set_on_scavenge_root_list();
703  nm->set_scavenge_root_link(_scavenge_root_nmethods);
704  set_scavenge_root_nmethods(nm);
705  print_trace("add_scavenge_root", nm);
706}
707
708void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
709  assert_locked_or_safepoint(CodeCache_lock);
710
711  assert((prev == NULL && scavenge_root_nmethods() == nm) ||
712         (prev != NULL && prev->scavenge_root_link() == nm), "precondition");
713
714  assert(!UseG1GC, "G1 does not use the scavenge_root_nmethods list");
715
716  print_trace("unlink_scavenge_root", nm);
717  if (prev == NULL) {
718    set_scavenge_root_nmethods(nm->scavenge_root_link());
719  } else {
720    prev->set_scavenge_root_link(nm->scavenge_root_link());
721  }
722  nm->set_scavenge_root_link(NULL);
723  nm->clear_on_scavenge_root_list();
724}
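// Illustration: with the list head -> A -> B -> C, unlinking B passes prev == A
// and rewires A's link to C; unlinking the head A passes prev == NULL and makes
// B the new list head. In both cases the unlinked nmethod's link and on-list
// bit are cleared above.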
725
726void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
727  assert_locked_or_safepoint(CodeCache_lock);
728
729  if (UseG1GC) {
730    return;
731  }
732
733  print_trace("drop_scavenge_root", nm);
734  nmethod* prev = NULL;
735  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
736    if (cur == nm) {
737      unlink_scavenge_root_nmethod(cur, prev);
738      return;
739    }
740    prev = cur;
741  }
742  assert(false, "should have been on list");
743}
744
745void CodeCache::prune_scavenge_root_nmethods() {
746  assert_locked_or_safepoint(CodeCache_lock);
747
748  if (UseG1GC) {
749    return;
750  }
751
752  debug_only(mark_scavenge_root_nmethods());
753
754  nmethod* last = NULL;
755  nmethod* cur = scavenge_root_nmethods();
756  while (cur != NULL) {
757    nmethod* next = cur->scavenge_root_link();
758    debug_only(cur->clear_scavenge_root_marked());
759    assert(cur->scavenge_root_not_marked(), "");
760    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
761
762    if (!cur->is_zombie() && !cur->is_unloaded()
763        && cur->detect_scavenge_root_oops()) {
764      // Keep it.  Advance 'last' to prevent deletion.
765      last = cur;
766    } else {
767      // Prune it from the list, so we don't have to look at it any more.
768      print_trace("prune_scavenge_root", cur);
769      unlink_scavenge_root_nmethod(cur, last);
770    }
771    cur = next;
772  }
773
774  // Check for stray marks.
775  debug_only(verify_perm_nmethods(NULL));
776}
777
778#ifndef PRODUCT
779void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
780  if (UseG1GC) {
781    return;
782  }
783
784  // While we are here, verify the integrity of the list.
785  mark_scavenge_root_nmethods();
786  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
787    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
788    cur->clear_scavenge_root_marked();
789  }
790  verify_perm_nmethods(f);
791}
792
793// Temporarily mark nmethods that are claimed to be on the non-perm list.
794void CodeCache::mark_scavenge_root_nmethods() {
795  NMethodIterator iter;
796  while(iter.next_alive()) {
797    nmethod* nm = iter.method();
798    assert(nm->scavenge_root_not_marked(), "clean state");
799    if (nm->on_scavenge_root_list())
800      nm->set_scavenge_root_marked();
801  }
802}
803
804// If the closure is given, run it on the unlisted nmethods.
805// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
806void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
807  NMethodIterator iter;
808  while(iter.next_alive()) {
809    nmethod* nm = iter.method();
810    bool call_f = (f_or_null != NULL);
811    assert(nm->scavenge_root_not_marked(), "must be already processed");
812    if (nm->on_scavenge_root_list())
813      call_f = false;  // don't show this one to the client
814    nm->verify_scavenge_root_oops();
815    if (call_f)  f_or_null->do_code_blob(nm);
816  }
817}
818#endif //PRODUCT
819
820void CodeCache::verify_clean_inline_caches() {
821#ifdef ASSERT
822  NMethodIterator iter;
823  while(iter.next_alive()) {
824    nmethod* nm = iter.method();
825    assert(!nm->is_unloaded(), "Tautology");
826    nm->verify_clean_inline_caches();
827    nm->verify();
828  }
829#endif
830}
831
832void CodeCache::verify_icholder_relocations() {
833#ifdef ASSERT
834  // make sure that we aren't leaking icholders
835  int count = 0;
836  FOR_ALL_HEAPS(heap) {
837    FOR_ALL_BLOBS(cb, *heap) {
838      if (cb->is_nmethod()) {
839        nmethod* nm = (nmethod*)cb;
840        count += nm->verify_icholder_relocations();
841      }
842    }
843  }
844
845  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
846         CompiledICHolder::live_count(), "must agree");
847#endif
848}
849
850void CodeCache::gc_prologue() {
851}
852
853void CodeCache::gc_epilogue() {
854  assert_locked_or_safepoint(CodeCache_lock);
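  // In product builds the NOT_DEBUG if() below guards the whole loop, so inline
  // caches are cleaned only when needs_cache_clean() is set. In debug builds the
  // loop always runs (so every alive method is also verified) and the DEBUG_ONLY
  // if() inside guards just the cleaning step.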
855  NOT_DEBUG(if (needs_cache_clean())) {
856    CompiledMethodIterator iter;
857    while(iter.next_alive()) {
858      CompiledMethod* cm = iter.method();
859      assert(!cm->is_unloaded(), "Tautology");
860      DEBUG_ONLY(if (needs_cache_clean())) {
861        cm->cleanup_inline_caches();
862      }
863      DEBUG_ONLY(cm->verify());
864      DEBUG_ONLY(cm->verify_oop_relocations());
865    }
866  }
867
868  set_needs_cache_clean(false);
869  prune_scavenge_root_nmethods();
870
871  verify_icholder_relocations();
872}
873
874void CodeCache::verify_oops() {
875  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
876  VerifyOopClosure voc;
877  NMethodIterator iter;
878  while(iter.next_alive()) {
879    nmethod* nm = iter.method();
880    nm->oops_do(&voc);
881    nm->verify_oop_relocations();
882  }
883}
884
885int CodeCache::blob_count(int code_blob_type) {
886  CodeHeap* heap = get_code_heap(code_blob_type);
887  return (heap != NULL) ? heap->blob_count() : 0;
888}
889
890int CodeCache::blob_count() {
891  int count = 0;
892  FOR_ALL_HEAPS(heap) {
893    count += (*heap)->blob_count();
894  }
895  return count;
896}
897
898int CodeCache::nmethod_count(int code_blob_type) {
899  CodeHeap* heap = get_code_heap(code_blob_type);
900  return (heap != NULL) ? heap->nmethod_count() : 0;
901}
902
903int CodeCache::nmethod_count() {
904  int count = 0;
905  FOR_ALL_HEAPS(heap) {
906    count += (*heap)->nmethod_count();
907  }
908  return count;
909}
910
911int CodeCache::adapter_count(int code_blob_type) {
912  CodeHeap* heap = get_code_heap(code_blob_type);
913  return (heap != NULL) ? heap->adapter_count() : 0;
914}
915
916int CodeCache::adapter_count() {
917  int count = 0;
918  FOR_ALL_HEAPS(heap) {
919    count += (*heap)->adapter_count();
920  }
921  return count;
922}
923
924address CodeCache::low_bound(int code_blob_type) {
925  CodeHeap* heap = get_code_heap(code_blob_type);
926  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
927}
928
929address CodeCache::high_bound(int code_blob_type) {
930  CodeHeap* heap = get_code_heap(code_blob_type);
931  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
932}
933
934size_t CodeCache::capacity() {
935  size_t cap = 0;
936  FOR_ALL_HEAPS(heap) {
937    cap += (*heap)->capacity();
938  }
939  return cap;
940}
941
942size_t CodeCache::unallocated_capacity(int code_blob_type) {
943  CodeHeap* heap = get_code_heap(code_blob_type);
944  return (heap != NULL) ? heap->unallocated_capacity() : 0;
945}
946
947size_t CodeCache::unallocated_capacity() {
948  size_t unallocated_cap = 0;
949  FOR_ALL_HEAPS(heap) {
950    unallocated_cap += (*heap)->unallocated_capacity();
951  }
952  return unallocated_cap;
953}
954
955size_t CodeCache::max_capacity() {
956  size_t max_cap = 0;
957  FOR_ALL_HEAPS(heap) {
958    max_cap += (*heap)->max_capacity();
959  }
960  return max_cap;
961}
962
963/**
964 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
965 * is free, reverse_free_ratio() returns 4.
966 */
967double CodeCache::reverse_free_ratio(int code_blob_type) {
968  CodeHeap* heap = get_code_heap(code_blob_type);
969  if (heap == NULL) {
970    return 0;
971  }
972
973  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0;
974  double max_capacity = (double)heap->max_capacity();
975  double result = max_capacity / unallocated_capacity;
976  assert (max_capacity >= unallocated_capacity, "Must be");
977  assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
978  return result;
979}
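// Worked example (hypothetical numbers): a code heap with max_capacity() of
// 100M and 10M unallocated yields reverse_free_ratio() == 10.0. The ratio grows
// as the heap fills up; the sweeper uses it to become more aggressive (see the
// note in allocate() about forcing stack scanning below 10% free).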
980
981size_t CodeCache::bytes_allocated_in_freelists() {
982  size_t allocated_bytes = 0;
983  FOR_ALL_HEAPS(heap) {
984    allocated_bytes += (*heap)->allocated_in_freelist();
985  }
986  return allocated_bytes;
987}
988
989int CodeCache::allocated_segments() {
990  int number_of_segments = 0;
991  FOR_ALL_HEAPS(heap) {
992    number_of_segments += (*heap)->allocated_segments();
993  }
994  return number_of_segments;
995}
996
997size_t CodeCache::freelists_length() {
998  size_t length = 0;
999  FOR_ALL_HEAPS(heap) {
1000    length += (*heap)->freelist_length();
1001  }
1002  return length;
1003}
1004
1005void icache_init();
1006
1007void CodeCache::initialize() {
1008  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
1009#ifdef COMPILER2
1010  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
1011#endif
1012  assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
1013  // This was originally just a check of the alignment, which caused failures; instead,
1014  // round the code cache expansion size up to the page size. In particular, Solaris is
1015  // moving to a larger default page size.
1016  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
1017
1018  if (SegmentedCodeCache) {
1019    // Use multiple code heaps
1020    initialize_heaps();
1021  } else {
1022    // Use a single code heap
1023    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
1024    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
1025    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
1026    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
1027    add_heap(rs, "CodeCache", CodeBlobType::All);
1028  }
1029
1030  // Initialize ICache flush mechanism
1031  // This service is needed for os::register_code_area
1032  icache_init();
1033
1034  // Give OS a chance to register generated code area.
1035  // This is used on Windows 64 bit platforms to register
1036  // Structured Exception Handlers for our generated code.
1037  os::register_code_area((char*)low_bound(), (char*)high_bound());
1038}
1039
1040void codeCache_init() {
1041  CodeCache::initialize();
1042}
1043
1044//------------------------------------------------------------------------------------------------
1045
1046int CodeCache::number_of_nmethods_with_dependencies() {
1047  return _number_of_nmethods_with_dependencies;
1048}
1049
1050void CodeCache::clear_inline_caches() {
1051  assert_locked_or_safepoint(CodeCache_lock);
1052  CompiledMethodIterator iter;
1053  while(iter.next_alive()) {
1054    iter.method()->clear_inline_caches();
1055  }
1056}
1057
1058void CodeCache::cleanup_inline_caches() {
1059  assert_locked_or_safepoint(CodeCache_lock);
1060  NMethodIterator iter;
1061  while(iter.next_alive()) {
1062    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
1063  }
1064}
1065
1066// Keeps track of time spent for checking dependencies
1067NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
1068
1069int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
1070  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1071  int number_of_marked_CodeBlobs = 0;
1072
1073  // search the hierarchy looking for nmethods which are affected by the loading of this class
1074
1075  // then search the interfaces this class implements looking for nmethods
1076  // which might be dependent on the fact that an interface only had one
1077  // implementor.
1078  // nmethod::check_all_dependencies works correctly only if no safepoint
1079  // can happen.
1080  NoSafepointVerifier nsv;
1081  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1082    Klass* d = str.klass();
1083    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
1084  }
1085
1086#ifndef PRODUCT
1087  if (VerifyDependencies) {
1088    // Object pointers are used as unique identifiers for dependency arguments. This
1089    // is only possible if no safepoint (i.e., GC) occurs while the verification code runs.
1090    dependentCheckTime.start();
1091    nmethod::check_all_dependencies(changes);
1092    dependentCheckTime.stop();
1093  }
1094#endif
1095
1096  return number_of_marked_CodeBlobs;
1097}
1098
1099CompiledMethod* CodeCache::find_compiled(void* start) {
1100  CodeBlob *cb = find_blob(start);
1101  assert(cb == NULL || cb->is_compiled(), "did not find a compiled method");
1102  return (CompiledMethod*)cb;
1103}
1104
1105#ifdef HOTSWAP
1106int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
1107  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1108  int number_of_marked_CodeBlobs = 0;
1109
1110  // Deoptimize all methods of the evolving class itself
1111  Array<Method*>* old_methods = dependee->methods();
1112  for (int i = 0; i < old_methods->length(); i++) {
1113    ResourceMark rm;
1114    Method* old_method = old_methods->at(i);
1115    CompiledMethod* nm = old_method->code();
1116    if (nm != NULL) {
1117      nm->mark_for_deoptimization();
1118      number_of_marked_CodeBlobs++;
1119    }
1120  }
1121
1122  CompiledMethodIterator iter;
1123  while(iter.next_alive()) {
1124    CompiledMethod* nm = iter.method();
1125    if (nm->is_marked_for_deoptimization()) {
1126      // ...Already marked in the previous pass; don't count it again.
1127    } else if (nm->is_evol_dependent_on(dependee())) {
1128      ResourceMark rm;
1129      nm->mark_for_deoptimization();
1130      number_of_marked_CodeBlobs++;
1131    } else  {
1132      // flush caches in case they refer to a redefined Method*
1133      nm->clear_inline_caches();
1134    }
1135  }
1136
1137  return number_of_marked_CodeBlobs;
1138}
1139#endif // HOTSWAP
1140
1141
1142// Deoptimize all methods
1143void CodeCache::mark_all_nmethods_for_deoptimization() {
1144  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1145  CompiledMethodIterator iter;
1146  while(iter.next_alive()) {
1147    CompiledMethod* nm = iter.method();
1148    if (!nm->method()->is_method_handle_intrinsic()) {
1149      nm->mark_for_deoptimization();
1150    }
1151  }
1152}
1153
1154int CodeCache::mark_for_deoptimization(Method* dependee) {
1155  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1156  int number_of_marked_CodeBlobs = 0;
1157
1158  CompiledMethodIterator iter;
1159  while(iter.next_alive()) {
1160    CompiledMethod* nm = iter.method();
1161    if (nm->is_dependent_on_method(dependee)) {
1162      ResourceMark rm;
1163      nm->mark_for_deoptimization();
1164      number_of_marked_CodeBlobs++;
1165    }
1166  }
1167
1168  return number_of_marked_CodeBlobs;
1169}
1170
1171void CodeCache::make_marked_nmethods_not_entrant() {
1172  assert_locked_or_safepoint(CodeCache_lock);
1173  CompiledMethodIterator iter;
1174  while(iter.next_alive()) {
1175    CompiledMethod* nm = iter.method();
1176    if (nm->is_marked_for_deoptimization()) {
1177      nm->make_not_entrant();
1178    }
1179  }
1180}
1181
1182// Flushes compiled methods dependent on dependee.
1183void CodeCache::flush_dependents_on(instanceKlassHandle dependee) {
1184  assert_lock_strong(Compile_lock);
1185
1186  if (number_of_nmethods_with_dependencies() == 0) return;
1187
1188  // CodeCache can only be updated by a thread_in_VM and they will all be
1189  // stopped during the safepoint so CodeCache will be safe to update without
1190  // holding the CodeCache_lock.
1191
1192  KlassDepChange changes(dependee);
1193
1194  // Compute the dependent nmethods
1195  if (mark_for_deoptimization(changes) > 0) {
1196    // At least one nmethod has been marked for deoptimization
1197    VM_Deoptimize op;
1198    VMThread::execute(&op);
1199  }
1200}
1201
1202#ifdef HOTSWAP
1203// Flushes compiled methods dependent on dependee in the evolutionary sense
1204void CodeCache::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
1205  // --- Compile_lock is not held. However we are at a safepoint.
1206  assert_locked_or_safepoint(Compile_lock);
1207  if (number_of_nmethods_with_dependencies() == 0) return;
1208
1209  // CodeCache can only be updated by a thread_in_VM and they will all be
1210  // stopped during the safepoint so CodeCache will be safe to update without
1211  // holding the CodeCache_lock.
1212
1213  // Compute the dependent nmethods
1214  if (mark_for_evol_deoptimization(ev_k_h) > 0) {
1215    // At least one nmethod has been marked for deoptimization
1216
1217    // All this already happens inside a VM_Operation, so we'll do all the work here.
1218    // Stuff copied from VM_Deoptimize and modified slightly.
1219
1220    // We do not want any GCs to happen while we are in the middle of this VM operation
1221    ResourceMark rm;
1222    DeoptimizationMarker dm;
1223
1224    // Deoptimize all activations depending on marked nmethods
1225    Deoptimization::deoptimize_dependents();
1226
1227    // Make the dependent methods not entrant
1228    make_marked_nmethods_not_entrant();
1229  }
1230}
1231#endif // HOTSWAP
1232
1233
1234// Flushes compiled methods dependent on dependee
1235void CodeCache::flush_dependents_on_method(methodHandle m_h) {
1236  // --- Compile_lock is not held. However we are at a safepoint.
1237  assert_locked_or_safepoint(Compile_lock);
1238
1239  // CodeCache can only be updated by a thread_in_VM and they will all be
1240  // stopped during the safepoint so CodeCache will be safe to update without
1241  // holding the CodeCache_lock.
1242
1243  // Compute the dependent nmethods
1244  if (mark_for_deoptimization(m_h()) > 0) {
1245    // At least one nmethod has been marked for deoptimization
1246
1247    // All this already happens inside a VM_Operation, so we'll do all the work here.
1248    // Stuff copied from VM_Deoptimize and modified slightly.
1249
1250    // We do not want any GCs to happen while we are in the middle of this VM operation
1251    ResourceMark rm;
1252    DeoptimizationMarker dm;
1253
1254    // Deoptimize all activations depending on marked nmethods
1255    Deoptimization::deoptimize_dependents();
1256
1257    // Make the dependent methods not entrant
1258    make_marked_nmethods_not_entrant();
1259  }
1260}
1261
1262void CodeCache::verify() {
1263  assert_locked_or_safepoint(CodeCache_lock);
1264  FOR_ALL_HEAPS(heap) {
1265    (*heap)->verify();
1266    FOR_ALL_BLOBS(cb, *heap) {
1267      if (cb->is_alive()) {
1268        cb->verify();
1269      }
1270    }
1271  }
1272}
1273
1274// A CodeHeap is full. Print out warning and report event.
1275void CodeCache::report_codemem_full(int code_blob_type, bool print) {
1276  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1277  CodeHeap* heap = get_code_heap(code_blob_type);
1278  assert(heap != NULL, "heap is null");
1279
1280  if ((heap->full_count() == 0) || print) {
1281    // Not yet reported for this heap; report it now
1282    if (SegmentedCodeCache) {
1283      warning("%s is full. Compiler has been disabled.", get_code_heap_name(code_blob_type));
1284      warning("Try increasing the code heap size using -XX:%s=", get_code_heap_flag_name(code_blob_type));
1285    } else {
1286      warning("CodeCache is full. Compiler has been disabled.");
1287      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
1288    }
1289    ResourceMark rm;
1290    stringStream s;
1291    // Dump code cache into a buffer before locking the tty.
1292    {
1293      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1294      print_summary(&s);
1295    }
1296    ttyLocker ttyl;
1297    tty->print("%s", s.as_string());
1298  }
1299
1300  heap->report_full();
1301
1302  EventCodeCacheFull event;
1303  if (event.should_commit()) {
1304    event.set_codeBlobType((u1)code_blob_type);
1305    event.set_startAddress((u8)heap->low_boundary());
1306    event.set_commitedTopAddress((u8)heap->high());
1307    event.set_reservedTopAddress((u8)heap->high_boundary());
1308    event.set_entryCount(heap->blob_count());
1309    event.set_methodCount(heap->nmethod_count());
1310    event.set_adaptorCount(heap->adapter_count());
1311    event.set_unallocatedCapacity(heap->unallocated_capacity());
1312    event.set_fullCount(heap->full_count());
1313    event.commit();
1314  }
1315}
1316
1317void CodeCache::print_memory_overhead() {
1318  size_t wasted_bytes = 0;
1319  FOR_ALL_HEAPS(heap) {
1320      CodeHeap* curr_heap = *heap;
1321      for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
1322        HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1323        wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1324      }
1325  }
1326  // Print bytes that are allocated in the freelist
1327  ttyLocker ttl;
1328  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,       freelists_length());
1329  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelists()/K);
1330  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB",  (wasted_bytes/K));
1331  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB",  allocated_segments()/K); // 1 byte per segment
1332}
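// Worked example (hypothetical numbers): with CodeCacheSegmentSize == 128, a
// CodeBlob with size() == 300 that ends up occupying three segments (384 bytes,
// including the preceding HeapBlock header) contributes 384 - 300 = 84 bytes to
// wasted_bytes in the loop above.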
1333
1334//------------------------------------------------------------------------------------------------
1335// Non-product version
1336
1337#ifndef PRODUCT
1338
1339void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
1340  if (PrintCodeCache2) {  // Need to add a new flag
1341    ResourceMark rm;
1342    if (size == 0)  size = cb->size();
1343    tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1344  }
1345}
1346
1347void CodeCache::print_internals() {
1348  int nmethodCount = 0;
1349  int runtimeStubCount = 0;
1350  int adapterCount = 0;
1351  int deoptimizationStubCount = 0;
1352  int uncommonTrapStubCount = 0;
1353  int bufferBlobCount = 0;
1354  int total = 0;
1355  int nmethodAlive = 0;
1356  int nmethodNotEntrant = 0;
1357  int nmethodZombie = 0;
1358  int nmethodUnloaded = 0;
1359  int nmethodJava = 0;
1360  int nmethodNative = 0;
1361  int max_nm_size = 0;
1362  ResourceMark rm;
1363
1364  int i = 0;
1365  FOR_ALL_HEAPS(heap) {
1366    if ((_heaps->length() >= 1) && Verbose) {
1367      tty->print_cr("-- %s --", (*heap)->name());
1368    }
1369    FOR_ALL_BLOBS(cb, *heap) {
1370      total++;
1371      if (cb->is_nmethod()) {
1372        nmethod* nm = (nmethod*)cb;
1373
1374        if (Verbose && nm->method() != NULL) {
1375          ResourceMark rm;
1376          char *method_name = nm->method()->name_and_sig_as_C_string();
1377          tty->print("%s", method_name);
1378          if(nm->is_alive()) { tty->print_cr(" alive"); }
1379          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1380          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
1381        }
1382
1383        nmethodCount++;
1384
1385        if(nm->is_alive()) { nmethodAlive++; }
1386        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1387        if(nm->is_zombie()) { nmethodZombie++; }
1388        if(nm->is_unloaded()) { nmethodUnloaded++; }
1389        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }
1390
1391        if(nm->method() != NULL && nm->is_java_method()) {
1392          nmethodJava++;
1393          max_nm_size = MAX2(max_nm_size, nm->size());
1394        }
1395      } else if (cb->is_runtime_stub()) {
1396        runtimeStubCount++;
1397      } else if (cb->is_deoptimization_stub()) {
1398        deoptimizationStubCount++;
1399      } else if (cb->is_uncommon_trap_stub()) {
1400        uncommonTrapStubCount++;
1401      } else if (cb->is_adapter_blob()) {
1402        adapterCount++;
1403      } else if (cb->is_buffer_blob()) {
1404        bufferBlobCount++;
1405      }
1406    }
1407  }
1408
1409  int bucketSize = 512;
1410  int bucketLimit = max_nm_size / bucketSize + 1;
1411  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1412  memset(buckets, 0, sizeof(int) * bucketLimit);
1413
1414  NMethodIterator iter;
1415  while(iter.next()) {
1416    nmethod* nm = iter.method();
1417    if(nm->method() != NULL && nm->is_java_method()) {
1418      buckets[nm->size() / bucketSize]++;
1419    }
1420  }
1421
1422  tty->print_cr("Code Cache Entries (total of %d)",total);
1423  tty->print_cr("-------------------------------------------------");
1424  tty->print_cr("nmethods: %d",nmethodCount);
1425  tty->print_cr("\talive: %d",nmethodAlive);
1426  tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
1427  tty->print_cr("\tzombie: %d",nmethodZombie);
1428  tty->print_cr("\tunloaded: %d",nmethodUnloaded);
1429  tty->print_cr("\tjava: %d",nmethodJava);
1430  tty->print_cr("\tnative: %d",nmethodNative);
1431  tty->print_cr("runtime_stubs: %d",runtimeStubCount);
1432  tty->print_cr("adapters: %d",adapterCount);
1433  tty->print_cr("buffer blobs: %d",bufferBlobCount);
1434  tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
1435  tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
1436  tty->print_cr("\nnmethod size distribution (non-zombie java)");
1437  tty->print_cr("-------------------------------------------------");
1438
1439  for(int i=0; i<bucketLimit; i++) {
1440    if(buckets[i] != 0) {
1441      tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
1442      tty->fill_to(40);
1443      tty->print_cr("%d",buckets[i]);
1444    }
1445  }
1446
1447  FREE_C_HEAP_ARRAY(int, buckets);
1448  print_memory_overhead();
1449}
1450
1451#endif // !PRODUCT
1452
1453void CodeCache::print() {
1454  print_summary(tty);
1455
1456#ifndef PRODUCT
1457  if (!Verbose) return;
1458
1459  CodeBlob_sizes live;
1460  CodeBlob_sizes dead;
1461
1462  FOR_ALL_HEAPS(heap) {
1463    FOR_ALL_BLOBS(cb, *heap) {
1464      if (!cb->is_alive()) {
1465        dead.add(cb);
1466      } else {
1467        live.add(cb);
1468      }
1469    }
1470  }
1471
1472  tty->print_cr("CodeCache:");
1473  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
1474
1475  if (!live.is_empty()) {
1476    live.print("live");
1477  }
1478  if (!dead.is_empty()) {
1479    dead.print("dead");
1480  }
1481
1482  if (WizardMode) {
1483     // print the oop_map usage
1484    int code_size = 0;
1485    int number_of_blobs = 0;
1486    int number_of_oop_maps = 0;
1487    int map_size = 0;
1488    FOR_ALL_HEAPS(heap) {
1489      FOR_ALL_BLOBS(cb, *heap) {
1490        if (cb->is_alive()) {
1491          number_of_blobs++;
1492          code_size += cb->code_size();
1493          ImmutableOopMapSet* set = cb->oop_maps();
1494          if (set != NULL) {
1495            number_of_oop_maps += set->count();
1496            map_size           += set->nr_of_bytes();
1497          }
1498        }
1499      }
1500    }
1501    tty->print_cr("OopMaps");
1502    tty->print_cr("  #blobs    = %d", number_of_blobs);
1503    tty->print_cr("  code size = %d", code_size);
1504    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1505    tty->print_cr("  map size  = %d", map_size);
1506  }
1507
1508#endif // !PRODUCT
1509}
1510
1511void CodeCache::print_summary(outputStream* st, bool detailed) {
1512  FOR_ALL_HEAPS(heap_iterator) {
1513    CodeHeap* heap = (*heap_iterator);
1514    size_t total = (heap->high_boundary() - heap->low_boundary());
1515    if (_heaps->length() >= 1) {
1516      st->print("%s:", heap->name());
1517    } else {
1518      st->print("CodeCache:");
1519    }
1520    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1521                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1522                 total/K, (total - heap->unallocated_capacity())/K,
1523                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);
1524
1525    if (detailed) {
1526      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1527                   p2i(heap->low_boundary()),
1528                   p2i(heap->high()),
1529                   p2i(heap->high_boundary()));
1530    }
1531  }
1532
1533  if (detailed) {
1534    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
1535                       " adapters=" UINT32_FORMAT,
1536                       blob_count(), nmethod_count(), adapter_count());
1537    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
1538                 "enabled" : Arguments::mode() == Arguments::_int ?
1539                 "disabled (interpreter mode)" :
1540                 "disabled (not enough contiguous free space left)");
1541  }
1542}
1543
1544void CodeCache::print_codelist(outputStream* st) {
1545  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1546
1547  NMethodIterator iter;
1548  while(iter.next_alive()) {
1549    nmethod* nm = iter.method();
1550    ResourceMark rm;
1551    char *method_name = nm->method()->name_and_sig_as_C_string();
1552    st->print_cr("%d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
1553                 nm->compile_id(), nm->comp_level(), method_name, (intptr_t)nm->header_begin(),
1554                 (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
1555  }
1556}
1557
1558void CodeCache::print_layout(outputStream* st) {
1559  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1560  ResourceMark rm;
1561  print_summary(st, true);
1562}
1563
1564void CodeCache::log_state(outputStream* st) {
1565  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1566            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
1567            blob_count(), nmethod_count(), adapter_count(),
1568            unallocated_capacity());
1569}
1570
1571// Initialize iterator to given compiled method
1572void CompiledMethodIterator::initialize(CompiledMethod* cm) {
1573  _code_blob = (CodeBlob*)cm;
1574  if (!SegmentedCodeCache) {
1575    // Iterate over all CodeBlobs
1576    _code_blob_type = CodeBlobType::All;
1577  } else if (cm != NULL) {
1578    _code_blob_type = CodeCache::get_code_blob_type(cm);
1579  } else {
1580    // Only iterate over method code heaps, starting with non-profiled
1581    _code_blob_type = CodeBlobType::MethodNonProfiled;
1582  }
1583}
1584
1585// Advance iterator to the next compiled method in the current code heap
1586bool CompiledMethodIterator::next_compiled_method() {
1587  // Get first method CodeBlob
1588  if (_code_blob == NULL) {
1589    _code_blob = CodeCache::first_blob(_code_blob_type);
1590    if (_code_blob == NULL) {
1591      return false;
1592    } else if (_code_blob->is_nmethod()) {
1593      return true;
1594    }
1595  }
1596  // Search for next method CodeBlob
1597  _code_blob = CodeCache::next_blob(_code_blob);
1598  while (_code_blob != NULL && !_code_blob->is_compiled()) {
1599    _code_blob = CodeCache::next_blob(_code_blob);
1600  }
1601  return _code_blob != NULL;
1602}
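// Example (illustrative): the canonical iteration pattern used throughout this
// file, built on the next_compiled_method() logic above:
//
//   CompiledMethodIterator iter;
//   while (iter.next_alive()) {
//     CompiledMethod* cm = iter.method();
//     // ... process cm ...
//   }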
1603