compiledIC.hpp revision 6761:739468857ffb
1/* 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25#ifndef SHARE_VM_CODE_COMPILEDIC_HPP 26#define SHARE_VM_CODE_COMPILEDIC_HPP 27 28#include "code/nativeInst.hpp" 29#include "interpreter/linkResolver.hpp" 30#include "oops/compiledICHolder.hpp" 31 32//----------------------------------------------------------------------------- 33// The CompiledIC represents a compiled inline cache. 
//
// In order to make patching of the inline cache MT-safe, we only allow the following
// transitions (when not at a safepoint):
//
//
//         [1] --<--  Clean  -->---  [1]
//        /       (null)      \
//       /                     \     /-<-\
//      /          [2]          \   /     \
//   Interpreted  --------->  Monomorphic | [3]
//  (CompiledICHolder*)        (Klass*)   |
//      \                        /   \    /
//       [4] \                  / [4] \->-/
//        \->-  Megamorphic -<-/
//               (Method*)
//
// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
//
// The numbers in square brackets refer to the kind of transition:
// [1]: Initial fixup. Receiver is found from debug information
// [2]: Compilation of a method
// [3]: Recompilation of a method (note: only entry is changed. The Klass* must stay the same)
// [4]: Inline cache miss. We go directly to megamorphic call.
//
// The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
// transition is made to a stub.
//
class CompiledIC;
class ICStub;

// Holds the precomputed parameters for a pending inline-cache transition,
// so the actual patching of the call site can be done MT-safely later.
class CompiledICInfo : public StackObj {
 private:
  address _entry;              // entry point for call
  void*   _cached_value;       // Value of cached_value (either in stub or inline cache)
  bool    _is_icholder;        // Is the cached value a CompiledICHolder*
  bool    _is_optimized;       // it is an optimized virtual call (i.e., can be statically bound)
  bool    _to_interpreter;     // Call it to interpreter
  bool    _release_icholder;   // Should the destructor delete the CompiledICHolder*
 public:
  address entry() const { return _entry; }
  Metadata* cached_metadata() const { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
  // Transfers ownership of the CompiledICHolder* to the caller; the
  // destructor will then no longer release it.
  CompiledICHolder* claim_cached_icholder() {
    assert(_is_icholder, "");
    assert(_cached_value != NULL, "must be non-NULL");
    _release_icholder = false;
    CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
    icholder->claim();
    return icholder;
  }
  bool is_optimized() const    { return _is_optimized; }
  bool to_interpreter() const  { return _to_interpreter; }

  void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
    _entry            = entry;
    _cached_value     = (void*)klass;
    _to_interpreter   = false;
    _is_icholder      = false;
    _is_optimized     = is_optimized;
    _release_icholder = false;
  }

  void set_interpreter_entry(address entry, Method* method) {
    _entry            = entry;
    _cached_value     = (void*)method;
    _to_interpreter   = true;
    _is_icholder      = false;
    _is_optimized     = true;
    _release_icholder = false;
  }

  void set_icholder_entry(address entry, CompiledICHolder* icholder) {
    _entry            = entry;
    _cached_value     = (void*)icholder;
    _to_interpreter   = true;
    _is_icholder      = true;
    _is_optimized     = false;
    _release_icholder = true;
  }

  CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
                    _to_interpreter(false), _is_optimized(false), _release_icholder(false) {
  }
  ~CompiledICInfo() {
    // In rare cases the info is computed but not used, so release any
    // CompiledICHolder* that was created
    if (_release_icholder) {
      assert(_is_icholder, "must be");
      CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
      icholder->claim();
      delete icholder;
    }
  }
};

class CompiledIC: public ResourceObj {
  friend class InlineCacheBuffer;
  friend class ICStub;


 private:
  NativeCall*        _ic_call;       // the call instruction
  NativeMovConstReg* _value;         // patchable value cell for this IC
  bool               _is_optimized;  // an optimized virtual call (i.e., no compiled IC)

  CompiledIC(nmethod* nm, NativeCall* ic_call);
  CompiledIC(RelocIterator* iter);

  void initialize_from_iter(RelocIterator* iter);

  static bool is_icholder_entry(address entry);

  // low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
  // to change an inline-cache. These change the underlying inline-cache directly. They *never* make
  // changes to a transition stub.
  void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder);
  void set_ic_destination(ICStub* stub);
  void set_ic_destination(address entry_point) {
    assert(_is_optimized, "use set_ic_destination_and_value instead");
    internal_set_ic_destination(entry_point, false, NULL, false);
  }
  // This only for use by ICStubs where the type of the value isn't known
  void set_ic_destination_and_value(address entry_point, void* value) {
    internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point));
  }
  void set_ic_destination_and_value(address entry_point, Metadata* value) {
    internal_set_ic_destination(entry_point, false, value, false);
  }
  void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) {
    internal_set_ic_destination(entry_point, false, value, true);
  }

  // Reads the location of the transition stub. This will fail with an assertion, if no transition stub is
  // associated with the inline cache.
  address stub_address() const;
  bool is_in_transition_state() const;  // Use InlineCacheBuffer

 public:
  // conversion (machine PC to CompiledIC*)
  friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
  friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
  friend CompiledIC* CompiledIC_at(Relocation* call_site);
  friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);

  // This is used to release CompiledICHolder*s from nmethods that
  // are about to be freed. The callsite might contain other stale
  // values of other kinds so it must be careful.
  static void cleanup_call_site(virtual_call_Relocation* call_site);
  static bool is_icholder_call_site(virtual_call_Relocation* call_site);

  // Return the cached_metadata/destination associated with this inline cache. If the cache currently points
  // to a transition stub, it will read the values from the transition stub.
  void* cached_value() const;
  CompiledICHolder* cached_icholder() const {
    assert(is_icholder_call(), "must be");
    return (CompiledICHolder*) cached_value();
  }
  Metadata* cached_metadata() const {
    assert(!is_icholder_call(), "must be");
    return (Metadata*) cached_value();
  }

  address ic_destination() const;

  bool is_optimized() const   { return _is_optimized; }

  // State
  bool is_clean() const;
  bool is_megamorphic() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  bool is_icholder_call() const;

  address end_of_call() { return _ic_call->return_address(); }

  // MT-safe patching of inline caches. Note: Only safe to call is_xxx when holding the CompiledIC_lock
  // so you are guaranteed that no patching takes place. The same goes for verify.
  //
  // Note: We do not provide any direct access to the stub code, to prevent parts of the code
  // to manipulate the inline cache in MT-unsafe ways.
  //
  // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
  //
  void set_to_clean();  // Can only be called during a safepoint operation
  void set_to_monomorphic(CompiledICInfo& info);

  // Returns true if successful and false otherwise. The call can fail if memory
  // allocation in the code cache fails.
  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);

  static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
                                        bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);

  // Location
  address instruction_address() const { return _ic_call->instruction_address(); }

  // Misc
  void print()             PRODUCT_RETURN;
  void print_compiled_ic() PRODUCT_RETURN;
  void verify()            PRODUCT_RETURN;
};

inline CompiledIC* CompiledIC_before(nmethod* nm, address return_addr) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(nmethod* nm, address call_site) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(Relocation* call_site) {
  assert(call_site->type() == relocInfo::virtual_call_type ||
         call_site->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(call_site->code(), nativeCall_at(call_site->addr()));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
  assert(reloc_iter->type() == relocInfo::virtual_call_type ||
         reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(reloc_iter);
  c_ic->verify();
  return c_ic;
}

//-----------------------------------------------------------------------------
// The CompiledStaticCall represents a call to a static method in the compiled code
//
// Transition diagram of a static call site is somewhat simpler than for an inlined cache:
//
//
//           -----<----- Clean ----->-----
//          /                             \
//         /                               \
//    compiled code <------------> interpreted code
//
//  Clean:            Calls directly to runtime method for fixup
//  Compiled code:    Calls directly to compiled code
//  Interpreted code: Calls to stub that sets Method* reference
//
//
class CompiledStaticCall;

// Holds the precomputed entry point (and callee, when going to the
// interpreter) for a pending static call-site transition.
class StaticCallInfo {
 private:
  address      _entry;          // Entrypoint
  methodHandle _callee;         // Callee (used when calling interpreter)
  bool         _to_interpreter; // call to interpreted method (otherwise compiled)

  friend class CompiledStaticCall;
 public:
  address      entry() const  { return _entry;  }
  methodHandle callee() const { return _callee; }
};


class CompiledStaticCall: public NativeCall {
  friend class CompiledIC;

  // Also used by CompiledIC
  void set_to_interpreted(methodHandle callee, address entry);
  bool is_optimized_virtual();

 public:
  friend CompiledStaticCall* compiledStaticCall_before(address return_addr);
  friend CompiledStaticCall* compiledStaticCall_at(address native_call);
  friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);

  // Code
  static void emit_to_interp_stub(CodeBuffer &cbuf);
  static int to_interp_stub_size();
  static int reloc_to_interp_stub();

  // State
  bool is_clean() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  // Clean static call (will force resolving on next use)
  void set_to_clean();

  // Set state. The entry must be the same, as computed by compute_entry.
  // Computation and setting is split up, since the actions are separate during
  // a OptoRuntime::resolve_xxx.
  void set(const StaticCallInfo& info);

  // Compute entry point given a method
  static void compute_entry(methodHandle m, StaticCallInfo& info);

  // Stub support
  address find_stub();
  static void set_stub_to_clean(static_stub_Relocation* static_stub);

  // Misc.
  void print()  PRODUCT_RETURN;
  void verify() PRODUCT_RETURN;
};


inline CompiledStaticCall* compiledStaticCall_before(address return_addr) {
  CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr);
  st->verify();
  return st;
}

inline CompiledStaticCall* compiledStaticCall_at(address native_call) {
  CompiledStaticCall* st = (CompiledStaticCall*)native_call;
  st->verify();
  return st;
}

inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) {
  return compiledStaticCall_at(call_site->addr());
}

#endif // SHARE_VM_CODE_COMPILEDIC_HPP