/*
 * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/advancedThresholdPolicy.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/frame.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/rframe.hpp"
#include "runtime/simpleThresholdPolicy.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vm_operations.hpp"
#include "utilities/events.hpp"
#include "utilities/globalDefinitions.hpp"

CompilationPolicy* CompilationPolicy::_policy;
elapsedTimer       CompilationPolicy::_accumulated_time;
bool               CompilationPolicy::_in_vm_startup;

// Determine compilation policy based on command line argument
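// The mapping is:
//   0 - SimpleCompPolicy            (always available)
//   1 - StackWalkCompPolicy         (COMPILER2 builds only)
//   2 - SimpleThresholdPolicy       (TIERED builds only)
//   3 - AdvancedThresholdPolicy     (TIERED builds only)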
void compilationPolicy_init() {
  CompilationPolicy::set_in_vm_startup(DelayCompilationDuringStartup);

  switch(CompilationPolicyChoice) {
  case 0:
    CompilationPolicy::set_policy(new SimpleCompPolicy());
    break;

  case 1:
#ifdef COMPILER2
    CompilationPolicy::set_policy(new StackWalkCompPolicy());
#else
    Unimplemented();
#endif
    break;
  case 2:
#ifdef TIERED
    CompilationPolicy::set_policy(new SimpleThresholdPolicy());
#else
    Unimplemented();
#endif
    break;
  case 3:
#ifdef TIERED
    CompilationPolicy::set_policy(new AdvancedThresholdPolicy());
#else
    Unimplemented();
#endif
    break;
  default:
    fatal("CompilationPolicyChoice must be in the range: [0-3]");
  }
  CompilationPolicy::policy()->initialize();
}

void CompilationPolicy::completed_vm_startup() {
  if (TraceCompilationPolicy) {
    tty->print("CompilationPolicy: completed vm startup.\n");
  }
  _in_vm_startup = false;
}

// Returns true if m must be compiled before executing it
// This is intended to force compiles for methods (usually for
// debugging) that would otherwise be interpreted for some reason.
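// Typical triggers are -Xcomp (which turns off UseInterpreter) and
// -XX:+AlwaysCompileLoopMethods for methods that contain loops.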
bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) {
  // Don't allow Xcomp to cause compiles in replay mode
  if (ReplayCompiles) return false;

  if (m->has_compiled_code()) return false;       // already compiled
  if (!can_be_compiled(m, comp_level)) return false;

  return !UseInterpreter ||                                              // must compile all methods
         (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
}

void CompilationPolicy::compile_if_required(const methodHandle& selected_method, TRAPS) {
  if (must_be_compiled(selected_method)) {
    // This path is unusual, mostly used by the '-Xcomp' stress test mode.

    // Note: with several active threads, the must_be_compiled may be true
    //       while can_be_compiled is false; remove assert
    // assert(CompilationPolicy::can_be_compiled(selected_method), "cannot compile");
    if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
      // don't force compilation, resolve was on behalf of compiler
      return;
    }
    if (selected_method->method_holder()->is_not_initialized()) {
      // 'is_not_initialized' means not only '!is_initialized', but also that
      // initialization has not been started yet ('!being_initialized')
      // Do not force compilation of methods in uninitialized classes.
      // Note that doing this would throw an assert later,
      // in CompileBroker::compile_method.
      // We sometimes use the link resolver to do reflective lookups
      // even before classes are initialized.
      return;
    }
    CompileBroker::compile_method(selected_method, InvocationEntryBci,
        CompilationPolicy::policy()->initial_compile_level(),
        methodHandle(), 0, CompileTask::Reason_MustBeCompiled, CHECK);
  }
}

// Returns true if m is allowed to be compiled
bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) {
  // allow any levels for WhiteBox
  assert(WhiteBoxAPI || comp_level == CompLevel_all || is_compile(comp_level), "illegal compilation level");

  if (m->is_abstract()) return false;
  if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;

  // Math intrinsics should never be compiled, as this can lead to
  // monotonicity problems because the interpreter will prefer the
  // compiled code to the intrinsic version.  This can't happen in
  // production because the invocation counter can't be incremented,
  // but we shouldn't expose the system to this problem in testing
  // modes.
  if (!AbstractInterpreter::can_be_compiled(m)) {
    return false;
  }
  if (comp_level == CompLevel_all) {
    if (TieredCompilation) {
      // enough to be compilable at any level for tiered
      return !m->is_not_compilable(CompLevel_simple) || !m->is_not_compilable(CompLevel_full_optimization);
    } else {
      // must be compilable at available level for non-tiered
      return !m->is_not_compilable(CompLevel_highest_tier);
    }
  } else if (is_compile(comp_level)) {
    return !m->is_not_compilable(comp_level);
  }
  return false;
}

// Returns true if m is allowed to be osr compiled
bool CompilationPolicy::can_be_osr_compiled(const methodHandle& m, int comp_level) {
  bool result = false;
  if (comp_level == CompLevel_all) {
    if (TieredCompilation) {
      // enough to be osr compilable at any level for tiered
      result = !m->is_not_osr_compilable(CompLevel_simple) || !m->is_not_osr_compilable(CompLevel_full_optimization);
    } else {
      // must be osr compilable at available level for non-tiered
      result = !m->is_not_osr_compilable(CompLevel_highest_tier);
    }
  } else if (is_compile(comp_level)) {
    result = !m->is_not_osr_compilable(comp_level);
  }
  return (result && can_be_compiled(m, comp_level));
}

bool CompilationPolicy::is_compilation_enabled() {
  // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
  return !delay_compilation_during_startup() && CompileBroker::should_compile_new_jobs();
}

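// By default the next task is simply the head of the compile queue (FIFO);
// the JVMCI special case below prefers blocking tasks so that non-compiler
// related requests are serviced first.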
CompileTask* CompilationPolicy::select_task_helper(CompileQueue* compile_queue) {
#if INCLUDE_JVMCI
  if (UseJVMCICompiler && !BackgroundCompilation) {
    /*
     * In blocking compilation mode, the CompileBroker will make
     * compilations submitted by a JVMCI compiler thread non-blocking. These
     * compilations should be scheduled after all blocking compilations
     * to service non-compiler related compilations sooner and reduce the
     * chance of such compilations timing out.
     */
    for (CompileTask* task = compile_queue->first(); task != NULL; task = task->next()) {
      if (task->is_blocking()) {
        return task;
      }
    }
  }
#endif
  return compile_queue->first();
}

#ifndef PRODUCT
void CompilationPolicy::print_time() {
  tty->print_cr ("Accumulated compilationPolicy times:");
  tty->print_cr ("---------------------------");
  tty->print_cr ("  Total: %3.3f sec.", _accumulated_time.seconds());
}

void NonTieredCompPolicy::trace_osr_completion(nmethod* osr_nm) {
  if (TraceOnStackReplacement) {
    if (osr_nm == NULL) tty->print_cr("compilation failed");
    else tty->print_cr("nmethod " INTPTR_FORMAT, p2i(osr_nm));
  }
}
#endif // !PRODUCT

void NonTieredCompPolicy::initialize() {
  // Setup the compiler thread numbers
  if (CICompilerCountPerCPU) {
    // Example: if CICompilerCountPerCPU is true, then we get
    // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
    // May help big-app startup time.
    _compiler_count = MAX2(log2_intptr(os::active_processor_count())-1,1);
    FLAG_SET_ERGO(intx, CICompilerCount, _compiler_count);
  } else {
    _compiler_count = CICompilerCount;
  }
}

// Note: this policy is used ONLY if TieredCompilation is off.
// compiler_count() behaves the following way:
// - with TIERED build (with both COMPILER1 and COMPILER2 defined) it should return
//   zero for the c1 compilation levels in server compilation mode runs
//   and c2 compilation levels in client compilation mode runs.
// - with COMPILER2 not defined it should return zero for c2 compilation levels.
// - with COMPILER1 not defined it should return zero for c1 compilation levels.
// - if neither is defined - always return zero.
int NonTieredCompPolicy::compiler_count(CompLevel comp_level) {
  assert(!TieredCompilation, "This policy should not be used with TieredCompilation");
  if (COMPILER2_PRESENT(is_server_compilation_mode_vm() && is_c2_compile(comp_level) ||)
      is_client_compilation_mode_vm() && is_c1_compile(comp_level)) {
    return _compiler_count;
  }
  return 0;
}

void NonTieredCompPolicy::reset_counter_for_invocation_event(const methodHandle& m) {
  // Make sure the invocation and backedge counters don't overflow again right
  // away, as would be the case for native methods.

  // BUT also make sure the method doesn't look like it was never executed.
  // Set carry bit and reduce counter's value to min(count, CompileThreshold/2).
  MethodCounters* mcs = m->method_counters();
  assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
  mcs->invocation_counter()->set_carry();
  mcs->backedge_counter()->set_carry();

  assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed");
}

void NonTieredCompPolicy::reset_counter_for_back_branch_event(const methodHandle& m) {
  // Delay the next back-branch event but pump up the invocation counter to
  // trigger whole-method compilation.
  MethodCounters* mcs = m->method_counters();
  assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
  InvocationCounter* i = mcs->invocation_counter();
  InvocationCounter* b = mcs->backedge_counter();

  // Don't set the invocation counter's value too low, otherwise the method
  // will look immature (ic < ~5300), which prevents type-profile-based
  // inlining.
  i->set(i->state(), CompileThreshold);
  // Don't reset the backedge counter too low - it is used to check if the OSR
  // method is ready.
  b->set(b->state(), CompileThreshold / 2);
}

//
// CounterDecay
//
// Iterates through invocation counters and decays them. This is done at the
// end of each safepoint, but only when the decay interval has elapsed.
//
class CounterDecay : public AllStatic {
  static jlong _last_timestamp;
  static void do_method(Method* m) {
    MethodCounters* mcs = m->method_counters();
    if (mcs != NULL) {
      mcs->invocation_counter()->decay();
    }
  }
public:
  static void decay();
  static bool is_decay_needed() {
    return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength;
  }
};

jlong CounterDecay::_last_timestamp = 0;

void CounterDecay::decay() {
  _last_timestamp = os::javaTimeMillis();

  // This operation is performed only at the end of a safepoint, so no GCs
  // are in progress and all Java mutators are suspended; hence the
  // SystemDictionary_lock is not needed either.
  assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
  int nclasses = InstanceKlass::number_of_instance_classes();
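  // Decay just enough classes per tick so that, over one half-life, every
  // loaded class is visited once: nclasses scaled by (decay interval /
  // half-life). The interval is in milliseconds, hence the 1e-3 factor.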
  int classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
                                        CounterHalfLifeTime);
  for (int i = 0; i < classes_per_tick; i++) {
    InstanceKlass* k = ClassLoaderDataGraph::try_get_next_class();
    if (k != NULL) {
      k->methods_do(do_method);
    }
  }
}

// Called at the end of the safepoint
void NonTieredCompPolicy::do_safepoint_work() {
  if (UseCounterDecay && CounterDecay::is_decay_needed()) {
    CounterDecay::decay();
  }
}

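// After a deoptimization trap, reset the counters of the trapping method and
// of every scope inlined into it, so that the code is re-profiled before it
// is compiled again.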
void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
  ScopeDesc* sd = trap_scope;
  MethodCounters* mcs;
  InvocationCounter* c;
  for (; !sd->is_top(); sd = sd->sender()) {
    mcs = sd->method()->method_counters();
    if (mcs != NULL) {
      // Reset ICs of inlined methods, since they can also trigger compilations.
      mcs->invocation_counter()->reset();
    }
  }
  mcs = sd->method()->method_counters();
  if (mcs != NULL) {
    c = mcs->invocation_counter();
    if (is_osr) {
      // It was an OSR method, so bump the count higher.
      c->set(c->state(), CompileThreshold);
    } else {
      c->reset();
    }
    mcs->backedge_counter()->reset();
  }
}

// This method can be called by any component of the runtime to notify the policy
// that it's recommended to delay the compilation of this method.
void NonTieredCompPolicy::delay_compilation(Method* method) {
  MethodCounters* mcs = method->method_counters();
  if (mcs != NULL) {
    mcs->invocation_counter()->decay();
    mcs->backedge_counter()->decay();
  }
}

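// Permanently park both counters in a state that never triggers compilation,
// effectively turning off counter-based compilation for this method.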
void NonTieredCompPolicy::disable_compilation(Method* method) {
  MethodCounters* mcs = method->method_counters();
  if (mcs != NULL) {
    mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
    mcs->backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
  }
}

CompileTask* NonTieredCompPolicy::select_task(CompileQueue* compile_queue) {
  return select_task_helper(compile_queue);
}

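// A profile is considered mature once the method has accumulated
// ProfileMaturityPercentage percent of CompileThreshold invocations since its
// MethodData was created; a non-positive percentage is interpreted as an
// absolute invocation count.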
bool NonTieredCompPolicy::is_mature(Method* method) {
  MethodData* mdo = method->method_data();
  assert(mdo != NULL, "Should be");
  uint current = mdo->mileage_of(method);
  uint initial = mdo->creation_mileage();
  if (current < initial)
    return true;  // some sort of overflow
  uint target;
  if (ProfileMaturityPercentage <= 0)
    target = (uint) -ProfileMaturityPercentage;  // absolute value
  else
    target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
  return (current >= initial + target);
}

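// Called by the interpreter when an invocation or backedge counter overflows.
// Returns an OSR nmethod to continue execution in, or NULL to stay in the
// interpreter.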
nmethod* NonTieredCompPolicy::event(const methodHandle& method, const methodHandle& inlinee, int branch_bci,
                                    int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) {
  assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
  if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) {
    // If certain JVMTI events (e.g. frame pop event) are requested then the
    // thread is forced to remain in interpreted code. This is
    // implemented partly by a check in the run_compiled_code
    // section of the interpreter whether we should skip running
    // compiled code, and partly by skipping OSR compiles for
    // interpreted-only threads.
    if (bci != InvocationEntryBci) {
      reset_counter_for_back_branch_event(method);
      return NULL;
    }
  }
  if (CompileTheWorld || ReplayCompiles) {
    // Don't trigger other compiles in testing mode
    if (bci == InvocationEntryBci) {
      reset_counter_for_invocation_event(method);
    } else {
      reset_counter_for_back_branch_event(method);
    }
    return NULL;
  }

  if (bci == InvocationEntryBci) {
    // When the code cache is full, compilation is switched off and
    // UseCompiler is set to false.
    if (!method->has_compiled_code() && UseCompiler) {
      method_invocation_event(method, thread);
    } else {
      // Force counter overflow on method entry, even if no compilation
      // happened.  (The method_invocation_event call does this also.)
      reset_counter_for_invocation_event(method);
    }
    // Compilation at an invocation overflow no longer goes back and retries
    // the test for a compiled method. We always run the loser of the race as
    // interpreted, so return NULL.
    return NULL;
  } else {
    // counter overflow in a loop => try to do on-stack-replacement
    nmethod* osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
    NOT_PRODUCT(trace_osr_request(method, osr_nm, bci));
    // When the code cache is full, we should not compile any more.
    if (osr_nm == NULL && UseCompiler) {
      method_back_branch_event(method, bci, thread);
      osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
    }
    if (osr_nm == NULL) {
      reset_counter_for_back_branch_event(method);
      return NULL;
    }
    return osr_nm;
  }
  return NULL;
}

#ifndef PRODUCT
void NonTieredCompPolicy::trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci) {
  if (TraceInvocationCounterOverflow) {
    MethodCounters* mcs = m->method_counters();
    assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
    InvocationCounter* ic = mcs->invocation_counter();
    InvocationCounter* bc = mcs->backedge_counter();
    ResourceMark rm;
    if (bci == InvocationEntryBci) {
      tty->print("comp-policy cntr ovfl @ %d in entry of ", bci);
    } else {
      tty->print("comp-policy cntr ovfl @ %d in loop of ", bci);
    }
    m->print_value();
    tty->cr();
    ic->print();
    bc->print();
    if (ProfileInterpreter) {
      if (bci != InvocationEntryBci) {
        MethodData* mdo = m->method_data();
        if (mdo != NULL) {
          int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
          tty->print_cr("back branch count = %d", count);
        }
      }
    }
  }
}

void NonTieredCompPolicy::trace_osr_request(const methodHandle& method, nmethod* osr, int bci) {
  if (TraceOnStackReplacement) {
    ResourceMark rm;
    tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
    method->print_short_name(tty);
    tty->print_cr(" at bci %d", bci);
  }
}
#endif // !PRODUCT

// SimpleCompPolicy - compile current method

void SimpleCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) {
  const int comp_level = CompLevel_highest_tier;
  const int hot_count = m->invocation_count();
  reset_counter_for_invocation_event(m);

  if (is_compilation_enabled() && can_be_compiled(m, comp_level)) {
    CompiledMethod* nm = m->code();
    if (nm == NULL) {
      CompileBroker::compile_method(m, InvocationEntryBci, comp_level, m, hot_count, CompileTask::Reason_InvocationCount, thread);
    }
  }
}

void SimpleCompPolicy::method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) {
  const int comp_level = CompLevel_highest_tier;
  const int hot_count = m->backedge_count();

  if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
    CompileBroker::compile_method(m, bci, comp_level, m, hot_count, CompileTask::Reason_BackedgeCount, thread);
    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
  }
}

// StackWalkCompPolicy - walk up stack to find a suitable method to compile

#ifdef COMPILER2
const char* StackWalkCompPolicy::_msg = NULL;


// Consider m for compilation
void StackWalkCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) {
  const int comp_level = CompLevel_highest_tier;
  const int hot_count = m->invocation_count();
  reset_counter_for_invocation_event(m);

  if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m, comp_level)) {
    ResourceMark rm(thread);
    frame       fr     = thread->last_frame();
    assert(fr.is_interpreted_frame(), "must be interpreted");
    assert(fr.interpreter_frame_method() == m(), "bad method");

    if (TraceCompilationPolicy) {
      tty->print("method invocation trigger: ");
      m->print_short_name(tty);
      tty->print(" ( interpreted " INTPTR_FORMAT ", size=%d ) ", p2i((address)m()), m->code_size());
    }
    RegisterMap reg_map(thread, false);
    javaVFrame* triggerVF = thread->last_java_vframe(&reg_map);
    // triggerVF is the frame that triggered its counter
    RFrame* first = new InterpretedRFrame(triggerVF->fr(), thread, m());

    if (first->top_method()->code() != NULL) {
      // called obsolete method/nmethod -- no need to recompile
      if (TraceCompilationPolicy) tty->print_cr(" --> " INTPTR_FORMAT, p2i(first->top_method()->code()));
    } else {
      if (TimeCompilationPolicy) accumulated_time()->start();
      GrowableArray<RFrame*>* stack = new GrowableArray<RFrame*>(50);
      stack->push(first);
      RFrame* top = findTopInlinableFrame(stack);
      if (TimeCompilationPolicy) accumulated_time()->stop();
      assert(top != NULL, "findTopInlinableFrame returned null");
      if (TraceCompilationPolicy) top->print();
      CompileBroker::compile_method(top->top_method(), InvocationEntryBci, comp_level,
                                    m, hot_count, CompileTask::Reason_InvocationCount, thread);
    }
  }
}

void StackWalkCompPolicy::method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) {
  const int comp_level = CompLevel_highest_tier;
  const int hot_count = m->backedge_count();

  if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
    CompileBroker::compile_method(m, bci, comp_level, m, hot_count, CompileTask::Reason_BackedgeCount, thread);
    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
  }
}

RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack) {
  // go up the stack until finding a frame that (probably) won't be inlined
  // into its caller
  RFrame* current = stack->at(0); // current choice for stopping
  assert( current && !current->is_compiled(), "" );
  const char* msg = NULL;

  while (1) {

    // before going up the stack further, check if doing so would get us into
    // compiled code
    RFrame* next = senderOf(current, stack);
    if( !next )               // No next frame up the stack?
      break;                  // Then compile with current frame

    Method* m = current->top_method();
    Method* next_m = next->top_method();

    if (TraceCompilationPolicy && Verbose) {
      tty->print("[caller: ");
      next_m->print_short_name(tty);
      tty->print("] ");
    }

    if( !Inline ) {           // Inlining turned off
      msg = "Inlining turned off";
      break;
    }
    if (next_m->is_not_compilable()) { // Did this fail to compile before?
      msg = "caller not compilable";
      break;
    }
    if (next->num() > MaxRecompilationSearchLength) {
      // don't go up too high when searching for recompilees
      msg = "don't go up any further: > MaxRecompilationSearchLength";
      break;
    }
    if (next->distance() > MaxInterpretedSearchLength) {
      // don't go up too high when searching for recompilees
      msg = "don't go up any further: next > MaxInterpretedSearchLength";
      break;
    }
    // Compiled frame above already decided not to inline;
    // do not recompile it.
    if (next->is_compiled()) {
      msg = "not going up into optimized code";
      break;
    }

    // Interpreted frame above us was already compiled.  Do not force
    // a recompile, although if the frame above us runs long enough an
    // OSR might still happen.
    if( current->is_interpreted() && next_m->has_compiled_code() ) {
      msg = "not going up -- already compiled caller";
      break;
    }

    // Compute how frequent this call site is.  We have current method 'm'.
    // We know next method 'next_m' is interpreted.  Find the call site and
    // check the various invocation counts.
    int invcnt = 0;             // Caller counts
    if (ProfileInterpreter) {
      invcnt = next_m->interpreter_invocation_count();
    }
    int cnt = 0;                // Call site counts
    if (ProfileInterpreter && next_m->method_data() != NULL) {
      ResourceMark rm;
      int bci = next->top_vframe()->bci();
      ProfileData* data = next_m->method_data()->bci_to_data(bci);
      if (data != NULL && data->is_CounterData())
        cnt = data->as_CounterData()->count();
    }

    // Call-site count per caller invocation; i.e. is this call site
    // a hot call site for method next_m?
    int freq = (invcnt) ? cnt/invcnt : cnt;

    // Check size and frequency limits
    if ((msg = shouldInline(m, freq, cnt)) != NULL) {
      break;
    }
    // Check inlining negative tests
    if ((msg = shouldNotInline(m)) != NULL) {
      break;
    }


    // If the caller method is too big or something then we do not want to
    // compile it just to inline a method
    if (!can_be_compiled(next_m, CompLevel_any)) {
      msg = "caller cannot be compiled";
      break;
    }

    if( next_m->name() == vmSymbols::class_initializer_name() ) {
      msg = "do not compile class initializer (OSR ok)";
      break;
    }

    if (TraceCompilationPolicy && Verbose) {
      tty->print("\n\t     check caller: ");
      next_m->print_short_name(tty);
      tty->print(" ( interpreted " INTPTR_FORMAT ", size=%d ) ", p2i((address)next_m), next_m->code_size());
    }

    current = next;
  }

  assert( !current || !current->is_compiled(), "" );

  if (TraceCompilationPolicy && msg) tty->print("(%s)\n", msg);

  return current;
}

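// Return the caller of rf, pushing it onto the search stack the first time it
// is seen (i.e. when its frame number equals the current stack depth).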
RFrame* StackWalkCompPolicy::senderOf(RFrame* rf, GrowableArray<RFrame*>* stack) {
  RFrame* sender = rf->caller();
  if (sender && sender->num() == stack->length()) stack->push(sender);
  return sender;
}


const char* StackWalkCompPolicy::shouldInline(const methodHandle& m, float freq, int cnt) {
  // Allows targeted inlining
  // positive filter: should the send be inlined? Returns NULL (--> yes)
  // or a rejection msg.
  int max_size = MaxInlineSize;
  int cost = m->code_size();

  // Check for too many throws (and not too huge)
  if (m->interpreter_throwout_count() > InlineThrowCount && cost < InlineThrowMaxSize) {
    return NULL;
  }

  // bump the max size if the call is frequent
  if ((freq >= InlineFrequencyRatio) || (cnt >= InlineFrequencyCount)) {
    if (TraceFrequencyInlining) {
      tty->print("(Inlined frequent method)\n");
      m->print();
    }
    max_size = FreqInlineSize;
  }
  if (cost > max_size) {
    return (_msg = "too big");
  }
  return NULL;
}


const char* StackWalkCompPolicy::shouldNotInline(const methodHandle& m) {
  // negative filter: should the send NOT be inlined? Returns NULL (--> inline)
  // or a rejection msg.
  if (m->is_abstract()) return (_msg = "abstract method");
  // note: we allow ik->is_abstract()
  if (!m->method_holder()->is_initialized()) return (_msg = "method holder not initialized");
  if (m->is_native()) return (_msg = "native method");
  CompiledMethod* m_code = m->code();
  if (m_code != NULL && m_code->code_size() > InlineSmallCode)
    return (_msg = "already compiled into a big method");

  // use frequency-based objections only for non-trivial methods
  if (m->code_size() <= MaxTrivialSize) return NULL;
  if (UseInterpreter) {     // don't use counts with -Xcomp
    if ((m->code() == NULL) && m->was_never_executed()) return (_msg = "never executed");
    if (!m->was_executed_more_than(MIN2(MinInliningThreshold, CompileThreshold >> 1))) return (_msg = "executed < MinInliningThreshold times");
  }
  if (Method::has_unloaded_classes_in_signature(m, JavaThread::current())) return (_msg = "unloaded signature classes");

  return NULL;
}



#endif // COMPILER2
