compiledIC.cpp revision 3602:da91efe96a93
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"


// Every time a compiled IC is changed or its type is accessed, either the
// CompiledIC_lock must be held or we must be at a safepoint.


// Release the CompiledICHolder* associated with this call site if there is one.
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
  // This call site might have become stale, so inspect it carefully.
  NativeCall* call = nativeCall_at(call_site->addr());
  if (is_icholder_entry(call->destination())) {
    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
  }
}


bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
  // This call site might have become stale, so inspect it carefully.
  NativeCall* call = nativeCall_at(call_site->addr());
  return is_icholder_entry(call->destination());
}


//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.

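// Read the cached value (a Klass* or CompiledICHolder*) for this site. While
// the site is mid-transition the eventual value still lives in the ICStub, so
// it is fetched from the InlineCacheBuffer rather than from the instruction.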
void* CompiledIC::cached_value() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized virtual call does not have a cached metadata");

  if (!is_in_transition_state()) {
    void* data = (void*)_value->data();
    // If we let the metadata value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC metadatas, because of patching races");
    return (data == (void*)Universe::non_oop_word()) ? NULL : data;
  } else {
    return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
  }
}


void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized() || cache == NULL, "an optimized virtual call does not have a cached metadata");
  assert(cache == NULL || cache != (Metadata*)badOopVal, "invalid metadata");

  assert(!is_icholder || is_icholder_entry(entry_point), "must be");

  // Don't use ic_destination for this test, since it forwards through the
  // ICBuffer instead of returning the actual current state of the CompiledIC.
  if (is_icholder_entry(_ic_call->destination())) {
    // When patching for the ICStub case, the cached value isn't overwritten
    // until the ICStub is copied into the CompiledIC during the next
    // safepoint. Make sure that the CompiledICHolder* is marked for release
    // at this point, since it won't be identifiable once the entry point
    // is overwritten.
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)_value->data());
  }

  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print(" changing destination to " INTPTR_FORMAT, entry_point);
    if (!is_optimized()) {
      tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", (address)cache);
    }
    if (is_icstub) {
      tty->print(" (icstub)");
    }
    tty->cr();
  }

  {
    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
    assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
    _ic_call->set_destination_mt_safe(entry_point);
  }

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value, and ICStub call sites
    // only change the entry point. Changing the value in that case could
    // lead to MT safety issues.
    assert(cache == NULL, "must be null");
    return;
  }

  if (cache == NULL)  cache = (void*)Universe::non_oop_word();

  _value->set_data((intptr_t)cache);
}


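// Point the call at an ICStub. The stub carries the new destination and
// cached value; they are installed into this CompiledIC at the next safepoint.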
void CompiledIC::set_ic_destination(ICStub* stub) {
  internal_set_ic_destination(stub->code_begin(), true, NULL, false);
}


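// Returns the destination this IC will eventually call, looking through any
// pending ICStub in the InlineCacheBuffer while a transition is in flight.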
address CompiledIC::ic_destination() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (!is_in_transition_state()) {
    return _ic_call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}


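// An IC is in a transition state while its destination points into the
// InlineCacheBuffer, i.e. while an ICStub is waiting to be installed.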
bool CompiledIC::is_in_transition_state() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return InlineCacheBuffer::contains(_ic_call->destination());
}


bool CompiledIC::is_icholder_call() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return !_is_optimized && is_icholder_entry(ic_destination());
}

// Returns the native address of the 'call' instruction in this inline-cache.
// Used by the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _ic_call->destination();
}


//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.


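// Transition this IC to the megamorphic state: redirect the call to a vtable
// (for invokevirtual) or itable (for invokeinterface) stub that dispatches on
// the receiver's klass at runtime.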
void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  methodHandle method = call_info->selected_method();
  bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (is_invoke_interface) {
    int index = klassItable::compute_itable_index(call_info->resolved_method()());
    entry = VtableStubs::create_stub(false, index, method());
    assert(entry != NULL, "entry not computed");
    Klass* k = call_info->resolved_method()->method_holder();
    assert(Klass::cast(k)->is_interface(), "sanity check");
    InlineCacheBuffer::create_transition_stub(this, k, entry);
  } else {
    // Can differ from method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    entry = VtableStubs::create_stub(true, vtable_index, method());
    InlineCacheBuffer::create_transition_stub(this, method(), entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                  instruction_address(), method->print_value_string(), entry);
  }

  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed, so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
}


// Returns true if the destination is a megamorphic stub.
bool CompiledIC::is_megamorphic() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_value. It is either an interface or a method.
  return VtableStubs::is_entry_point(ic_destination());
}

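// True if this IC currently calls straight into compiled code (an nmethod),
// i.e. the site is monomorphic to a compiled target.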
bool CompiledIC::is_call_to_compiled() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up.
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_value is a klass for non-optimized monomorphic calls.
  // This assertion is invalid for compiler1: a call that does not look optimized
  // (no static stub) can be used for calling directly to the vep without using
  // the inline cache (i.e., cached_value == NULL).
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_method = caller->is_compiled_by_c1();
  assert(is_c1_method ||
         !is_monomorphic ||
         is_optimized() ||
         (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}


bool CompiledIC::is_call_to_interpreted() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Call to interpreter if destination is either calling to a stub (if it
  // is optimized), or calling to an I2C blob.
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // Must use unsafe because the destination can be a zombie (and we're cleaning),
    // and the print_compiled_ic code wants to know if the site (in the non-zombie
    // case) calls into the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    address dest = ic_destination();
#ifdef ASSERT
    {
      CodeBlob* db = CodeCache::find_blob_unsafe(dest);
      assert(!db->is_adapter_blob(), "must use stub!");
    }
#endif /* ASSERT */
    is_call_to_interpreted = cb->contains(dest);
  }
  return is_call_to_interpreted;
}


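// Reset this inline cache to the clean state: point the call back at the
// appropriate SharedRuntime resolve stub, so the next invocation re-resolves
// the callee.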
void CompiledIC::set_to_clean() {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked(), "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", instruction_address());
    print();
  }

  address entry;
  if (is_optimized()) {
    entry = SharedRuntime::get_resolve_opt_virtual_call_stub();
  } else {
    entry = SharedRuntime::get_resolve_virtual_call_stub();
  }

  // A zombie transition will always be safe, since the metadata has already been set to NULL, so
  // we only need to patch the destination.
  bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
    if (is_in_transition_state()) {
      ICStub* old_stub = ICStub_from_destination_address(stub_address());
      old_stub->clear();
    }
    if (is_optimized()) {
      set_ic_destination(entry);
    } else {
      set_ic_destination_and_value(entry, (void*)NULL);
    }
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed, so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}


bool CompiledIC::is_clean() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == SharedRuntime::get_resolve_opt_virtual_call_stub() ||
             dest == SharedRuntime::get_resolve_virtual_call_stub();
  assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
  return is_clean;
}


void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid we simply clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition, ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target, and these
  // transitions are mt_safe.

  Thread *thread = Thread::current();
  if (info.to_interpreter()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // The call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or because the static target is final).
      // At code generation time, this call has been emitted as a static call.
      // Call via stub.
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
      methodHandle method(thread, (Method*)info.cached_metadata());
      csc->set_to_interpreted(method, info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
                      instruction_address(),
                      method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", instruction_address());
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert(cb->is_nmethod(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point.
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
                    instruction_address(),
                    ((Klass*)info.cached_metadata())->print_value_string(),
                    (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed, so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}


// is_optimized:  the compiler has generated an optimized call (i.e., no inline cache)
// static_bound:  the call can be statically bound (i.e., no need to use an inline cache)
void CompiledIC::compute_monomorphic_entry(methodHandle method,
                                           KlassHandle receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           CompiledICInfo& info,
                                           TRAPS) {
  nmethod* method_code = method->code();
  address entry = NULL;
  if (method_code != NULL) {
    // Call to compiled code
    if (static_bound || is_optimized) {
      entry      = method_code->verified_entry_point();
    } else {
      entry      = method_code->entry_point();
    }
  }
  if (entry != NULL) {
    // Call to compiled code
    info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass(), is_optimized);
  } else {
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final, we will emit an optimized
    //     virtual call (no inline cache), and need a Method* to make a call
    //     to the interpreter
    //   - if we do not know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //     However, in that case we will now notice that it is static_bound
    //     and convert the call into what looks to be an optimized virtual
    //     call. This causes problems in verifying the IC because it looks
    //     vanilla but is optimized. Code in is_call_to_interpreted is aware
    //     of this and weakens its asserts.

    // static_bound should imply is_optimized -- otherwise we have a
    // performance bug (a statically-bindable method is called via a
    // dynamically-dispatched call). Note: the reverse implication isn't
    // necessarily true -- the call may have been optimized based on compiler
    // analysis (static_bound is only based on "final" etc.)
#ifdef COMPILER2
#ifdef TIERED
#if defined(ASSERT)
    // Can't check the assert because we don't have the CompiledIC with which
    // to find the address of the call instruction.
    //
    // CodeBlob* cb = find_blob_unsafe(instruction_address());
    // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // ASSERT
#else
    assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // TIERED
#endif // COMPILER2
    if (is_optimized) {
      // Use stub entry
      info.set_interpreter_entry(method()->get_c2i_entry(), method());
    } else {
      // Use icholder entry
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass());
      info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}


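// An entry that targets an adapter blob (the c2i path into the interpreter)
// identifies a call site whose cached value is a CompiledICHolder* rather
// than a Klass*.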
bool CompiledIC::is_icholder_entry(address entry) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
  return (cb != NULL && cb->is_adapter_blob());
}


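// Wrap the NativeCall at an inline-cache site. The relocation info at the
// call classifies the site: virtual_call_type sites carry a cached value,
// opt_virtual_call_type sites are optimized and carry none.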
CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
  : _ic_call(call)
{
  address ic_call = call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass nmethod");
  assert(nm->contains(ic_call), "must be in nmethod");

  // Search for the ic_call at the given address.
  RelocIterator iter(nm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");
  if (iter.type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter.virtual_call_reloc();
    _is_optimized = false;
    _value = nativeMovConstReg_at(r->cached_value());
  } else {
    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}


// ----------------------------------------------------------------------------

void CompiledStaticCall::set_to_clean() {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(this);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());

  // Do not reset stub here:  It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}


bool CompiledStaticCall::is_clean() const {
  return destination() == SharedRuntime::get_resolve_static_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}


bool CompiledStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call.
  nmethod* nm = CodeCache::find_nmethod(instruction_address());
  return nm->stub_contains(destination());
}


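// Redirect this static call to interpreted code: the stub (Method* load plus
// a jump to the c2i entry) is filled in first, and only then is the call
// pointed at the stub, so the call never targets a half-initialized stub.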
void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
  address stub = find_stub();
  assert(stub != NULL, "stub not found");

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                  instruction_address(),
                  callee->name_and_sig_as_C_string());
  }

  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  assert(method_holder->data()    == 0           || method_holder->data()    == (intptr_t)callee(), "a) MT-unsafe modification of inline cache");
  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, "b) MT-unsafe modification of inline cache");

  // Update stub
  method_holder->set_data((intptr_t)callee());
  jump->set_jump_destination(entry);

  // Update jump to call
  set_destination_mt_safe(stub);
}


void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid we simply clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
  } else {
    if (TraceICs) {
      ResourceMark rm;
      tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                    instruction_address(),
                    info.entry());
    }
    // Call to compiled code
    assert(CodeCache::contains(info.entry()), "wrong entry point");
    set_destination_mt_safe(info.entry());
  }
}


// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
  nmethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL) {
    info._to_interpreter = false;
    info._entry  = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code.  In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    info._to_interpreter = true;
    info._entry      = m()->get_c2i_entry();
  }
}


void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset stub
  address stub = static_stub->addr();
  assert(stub != NULL, "stub not found");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
  method_holder->set_data(0);
  jump->set_jump_destination((address)-1);
}


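// Locate the static-call stub for this call site via its relocation entry.
// Returns NULL if no matching relocation is found.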
address CompiledStaticCall::find_stub() {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction_address());
  while (iter.next()) {
    if (iter.addr() == instruction_address()) {
      switch (iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub();
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub();
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}


//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  // Make sure the code pattern is actually a call imm32 instruction.
  _ic_call->verify();
  if (os::is_MP()) {
    _ic_call->verify_alignment();
  }
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted() ||
         is_optimized() || is_megamorphic(), "sanity check");
}


void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}


void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
             instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination(), is_optimized() ? NULL : cached_value());
}


void CompiledStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", instruction_address());
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

void CompiledStaticCall::verify() {
  // Verify call
  NativeCall::verify();
  if (os::is_MP()) {
    verify_alignment();
  }

  // Verify stub
  address stub = find_stub();
  assert(stub != NULL, "no stub found for static call");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  // Verify state
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}

#endif