codeCache.hpp revision 2273:1d1603768966
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_CODECACHE_HPP
#define SHARE_VM_CODE_CODECACHE_HPP

#include "code/codeBlob.hpp"
#include "memory/allocation.hpp"
#include "memory/heap.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oopsHierarchy.hpp"

// The CodeCache implements the code cache for various pieces of generated
// code, e.g., compiled Java methods, runtime stubs, transition frames, etc.
// The entries in the CodeCache are all CodeBlobs.

// Implementation:
//   - Each CodeBlob occupies one chunk of memory.
//   - Like the offset table in oldspace, the zone has a table for
//     locating a method given the address of an instruction.
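//
//   For illustration only (not part of the original header; the local names
//   below are hypothetical): a minimal sketch of mapping an instruction
//   address back to its owning blob with the lookup entry points declared
//   in this class.
//
//     address pc = ...;                            // some code address
//     if (CodeCache::contains(pc)) {
//       CodeBlob* cb = CodeCache::find_blob(pc);   // blob whose range covers pc
//       if (cb != NULL && cb->is_nmethod()) {
//         nmethod* nm = (nmethod*) cb;             // a compiled Java method
//       }
//     }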

class OopClosure;
class DepChange;

class CodeCache : AllStatic {
  friend class VMStructs;
 private:
  // CodeHeap is malloc()'ed at startup and never deleted during shutdown,
  // so that the generated assembly code is always there when it's needed.
  // This may cause a memory leak, but it is necessary, for now. See 4423824,
  // 4422213 or 4436291 for details.
  static CodeHeap * _heap;
  static int _number_of_blobs;
  static int _number_of_adapters;
  static int _number_of_nmethods;
  static int _number_of_nmethods_with_dependencies;
  static bool _needs_cache_clean;
  static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
  static nmethod* _saved_nmethods;          // linked via nm->saved_nmethod_link()

  static void verify_if_often() PRODUCT_RETURN;

  static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
  static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;

 public:

  // Initialization
  static void initialize();

  // Allocation/administration
  static CodeBlob* allocate(int size);              // allocates a new CodeBlob
  static void commit(CodeBlob* cb);                 // called when the allocated CodeBlob has been filled
  static int alignment_unit();                      // guaranteed alignment of all CodeBlobs
  static int alignment_offset();                    // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
  static void free(CodeBlob* cb);                   // frees a CodeBlob
  static void flush();                              // flushes all CodeBlobs
  static bool contains(void *p);                    // returns whether p is included
  static void blobs_do(void f(CodeBlob* cb));       // iterates over all CodeBlobs
  static void blobs_do(CodeBlobClosure* f);         // iterates over all CodeBlobs
  static void nmethods_do(void f(nmethod* nm));     // iterates over all nmethods

  // Lookup
  static CodeBlob* find_blob(void* start);
  static nmethod*  find_nmethod(void* start);

  // Lookup that does not fail if you look up a zombie method (if you call this, be sure to know
  // what you are doing)
  static CodeBlob* find_blob_unsafe(void* start) {
    CodeBlob* result = (CodeBlob*)_heap->find_start(start);
    // this assert is too strong because the heap code will return the
    // heapblock containing start. That block can often be larger than
    // the codeBlob itself. If you look up an address that is within
    // the heapblock but not in the codeBlob you will assert.
    //
    // Most things will not look up such bad addresses. However,
    // AsyncGetCallTrace can see intermediate frames and get that kind
    // of invalid address and so can a developer using hsfind.
    //
    // The more correct answer is to return NULL if blob_contains() returns
    // false.
    // assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");

    if (result != NULL && !result->blob_contains((address)start)) {
      result = NULL;
    }
    return result;
  }
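
  // Illustrative only (hypothetical caller, not part of the original header):
  // a profiler such as AsyncGetCallTrace, which may see addresses from
  // intermediate frames, would prefer the unsafe lookup above, since it
  // returns NULL for an address that lands in a heap block but outside any
  // CodeBlob instead of tripping an assert:
  //
  //   CodeBlob* cb = CodeCache::find_blob_unsafe(suspect_pc);
  //   if (cb == NULL) {
  //     // not an address we can attribute to generated code
  //   }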

  // Iteration
  static CodeBlob* first();
  static CodeBlob* next (CodeBlob* cb);
  static CodeBlob* alive(CodeBlob *cb);
  static nmethod* alive_nmethod(CodeBlob *cb);
  static nmethod* first_nmethod();
  static nmethod* next_nmethod (CodeBlob* cb);
  static int       nof_blobs()                 { return _number_of_blobs; }
  static int       nof_adapters()              { return _number_of_adapters; }
  static int       nof_nmethods()              { return _number_of_nmethods; }

  // GC support
  static void gc_epilogue();
  static void gc_prologue();
  static void verify_oops();
  // If "unloading_occurred" is true, then unloads (i.e., breaks root links
  // to) any unmarked codeBlobs in the cache.  Sets "marked_for_unloading"
  // to "true" iff some code got unloaded.
  static void do_unloading(BoolObjectClosure* is_alive,
                           OopClosure* keep_alive,
                           bool unloading_occurred);
  static void oops_do(OopClosure* f) {
    CodeBlobToOopClosure oopc(f, /*do_marking=*/ false);
    blobs_do(&oopc);
  }
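
  // Illustrative only (hypothetical closure type, not part of the original
  // header): a root-scanning phase could visit every oop embedded in
  // generated code via
  //
  //   SomeOopClosure roots;
  //   CodeCache::oops_do(&roots);   // wraps roots in a CodeBlobToOopClosure
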
  static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
  static void scavenge_root_nmethods_do(CodeBlobClosure* f);

  static nmethod* scavenge_root_nmethods()          { return _scavenge_root_nmethods; }
  static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
  static void add_scavenge_root_nmethod(nmethod* nm);
  static void drop_scavenge_root_nmethod(nmethod* nm);
  static void prune_scavenge_root_nmethods();

  // Printing/debugging
  static void print()   PRODUCT_RETURN;          // prints summary
  static void print_internals();
  static void verify();                          // verifies the code cache
  static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
  static void print_bounds(outputStream* st);    // Prints a summary of the bounds of the code cache
  static void log_state(outputStream* st);

  // The full limits of the codeCache
  static address  low_bound()                    { return (address) _heap->low_boundary(); }
  static address  high_bound()                   { return (address) _heap->high_boundary(); }

  // Profiling
  static address first_address();                // first address used for CodeBlobs
  static address last_address();                 // last  address used for CodeBlobs
  static size_t  capacity()                      { return _heap->capacity(); }
  static size_t  max_capacity()                  { return _heap->max_capacity(); }
  static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
  static size_t  largest_free_block()            { return _heap->largest_free_block(); }
  static bool    needs_flushing()                { return largest_free_block() < CodeCacheFlushingMinimumFreeSpace; }

  static bool needs_cache_clean()                { return _needs_cache_clean; }
  static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
  static void clear_inline_caches();             // clear all inline caches

  static nmethod* find_and_remove_saved_code(methodOop m);
  static void remove_saved_code(nmethod* nm);
  static void speculatively_disconnect(nmethod* nm);

  // Deoptimization
  static int  mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP
  static int  mark_for_evol_deoptimization(instanceKlassHandle dependee);
#endif // HOTSWAP

  static void mark_all_nmethods_for_deoptimization();
  static int  mark_for_deoptimization(methodOop dependee);
  static void make_marked_nmethods_zombies();
  static void make_marked_nmethods_not_entrant();

  // tells how many nmethods have dependencies
  static int number_of_nmethods_with_dependencies();
};

#endif // SHARE_VM_CODE_CODECACHE_HPP