advancedThresholdPolicy.cpp revision 9248:6ab7e19c9220
/*
 * Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileTask.hpp"
#include "runtime/advancedThresholdPolicy.hpp"
#include "runtime/simpleThresholdPolicy.inline.hpp"

#ifdef TIERED
// Print an event.
void AdvancedThresholdPolicy::print_specific(EventType type, methodHandle mh, methodHandle imh,
                                             int bci, CompLevel level) {
  tty->print(" rate=");
  if (mh->prev_time() == 0) tty->print("n/a");
  else tty->print("%f", mh->rate());

  tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
                               threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
}

void AdvancedThresholdPolicy::initialize() {
  // Turn on ergonomic compiler count selection
  if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
  }
  int count = CICompilerCount;
  if (CICompilerCountPerCPU) {
    // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
    int log_cpu = log2_intptr(os::active_processor_count());
    int loglog_cpu = log2_intptr(MAX2(log_cpu, 1));
    count = MAX2(log_cpu * loglog_cpu, 1) * 3 / 2;
  }
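  // Illustrative arithmetic, assuming the ergonomic path above is taken: on an
  // 8-way machine log_cpu = log2(8) = 3 and loglog_cpu = log2(3) = 1, so
  // count = MAX2(3 * 1, 1) * 3 / 2 = 4, which the code below splits into
  // c1_count = 1 and c2_count = 3.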

  set_c1_count(MAX2(count / 3, 1));
  set_c2_count(MAX2(count - c1_count(), 1));
  FLAG_SET_ERGO(intx, CICompilerCount, c1_count() + c2_count());

  // Some inlining tuning
#ifdef X86
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2000);
  }
#endif

#if defined SPARC || defined AARCH64
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2500);
  }
#endif

  set_increase_threshold_at_ratio();
  set_start_time(os::javaTimeMillis());
}

// update_rate() is called from select_task() while holding a compile queue lock.
void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) {
  // Skip the update if the counters are absent.
  // We can't allocate them here since we are holding the compile queue lock.
  if (m->method_counters() == NULL) return;

  if (is_old(m)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    m->set_rate(0);
    return;
  }

  // We don't update the rate if we've just come out of a safepoint.
  // delta_s is the time since the last safepoint in milliseconds.
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events were there since the last time?
  int event_count = m->invocation_count() + m->backedge_count();
  int delta_e = event_count - m->prev_event_count();

  // We should have been running for at least TieredRateUpdateMinTime (1ms by default).
  if (delta_s >= TieredRateUpdateMinTime) {
    // And the previous measurement must have been taken at least that long ago.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      m->set_prev_time(t);
      m->set_prev_event_count(event_count);
      m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
    } else {
      if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
        // If nothing happened for TieredRateUpdateMaxTime (25ms by default),
        // zero the rate. Don't modify the prev values.
        m->set_rate(0);
      }
    }
  }
}
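
// Worked example of the rate computation above (illustrative numbers): a
// method that accumulated delta_e = 2000 invocations plus backedges over a
// delta_t = 100ms window gets rate = 2000 / 100 = 20 events per millisecond.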

// Check if this method has been stale for a given number of milliseconds.
// See select_task().
bool AdvancedThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - m->prev_time();
  if (delta_t > timeout && delta_s > timeout) {
    int event_count = m->invocation_count() + m->backedge_count();
    int delta_e = event_count - m->prev_event_count();
    // Return true if there were no events.
    return delta_e == 0;
  }
  return false;
}

// We don't remove old methods from the compile queue even if they have
// very low activity. See select_task().
bool AdvancedThresholdPolicy::is_old(Method* method) {
  return method->invocation_count() > 50000 || method->backedge_count() > 500000;
}

double AdvancedThresholdPolicy::weight(Method* method) {
  return (method->rate() + 1) * ((method->invocation_count() + 1) * (method->backedge_count() + 1));
}
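
// Illustrative: with rate = 10 events/ms, 5000 invocations, and no backedges,
// weight = (10 + 1) * ((5000 + 1) * (0 + 1)) = 55011. The +1 terms keep a
// zeroed rate or zero counts from collapsing the whole product to 0.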

// Apply heuristics and return true if x should be compiled before y.
bool AdvancedThresholdPolicy::compare_methods(Method* x, Method* y) {
  if (x->highest_comp_level() > y->highest_comp_level()) {
    // recompilation after deopt
    return true;
  } else if (x->highest_comp_level() == y->highest_comp_level()) {
    if (weight(x) > weight(y)) {
      return true;
    }
  }
  return false;
}

// Is method profiled enough?
bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
  }
  return false;
}
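
// Note: call_predicate_helper<CompLevel_full_profile> applies the level 4 (C2)
// thresholds, so "profiled enough" means the MDO counter deltas already meet
// the C2 promotion thresholds at scale 1.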

// Called with the queue locked and with at least one element.
CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
#if INCLUDE_JVMCI
  CompileTask *max_blocking_task = NULL;
#endif
  CompileTask *max_task = NULL;
  Method* max_method = NULL;
  jlong t = os::javaTimeMillis();
  // Iterate through the queue and find a method with a maximum rate.
  for (CompileTask* task = compile_queue->first(); task != NULL;) {
    CompileTask* next_task = task->next();
    Method* method = task->method();
    update_rate(t, method);
    if (max_task == NULL) {
      max_task = task;
      max_method = method;
    } else {
      // If a method has been stale for some time, remove it from the queue.
      if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
        if (PrintTieredEvents) {
          print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
        }
        task->log_task_dequeued("stale");
        compile_queue->remove_and_mark_stale(task);
        method->clear_queued_for_compilation();
        task = next_task;
        continue;
      }

      // Select a method with a higher rate.
      if (compare_methods(method, max_method)) {
        max_task = task;
        max_method = method;
      }
    }
#if INCLUDE_JVMCI
    // Track the hottest blocking task so that it can be preferred below.
    if (UseJVMCICompiler && task->is_blocking()) {
      if (max_blocking_task == NULL || compare_methods(method, max_blocking_task->method())) {
        max_blocking_task = task;
      }
    }
#endif
    task = next_task;
  }

#if INCLUDE_JVMCI
  if (UseJVMCICompiler) {
    if (max_blocking_task != NULL) {
      // Blocking compilations are always preferred.
      max_task = max_blocking_task;
      max_method = max_task->method();
    }
  }
#endif

  if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
      && is_method_profiled(max_method)) {
    max_task->set_comp_level(CompLevel_limited_profile);
    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }

  return max_task;
}

double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
  double queue_size = CompileBroker::queue_size(level);
  int comp_count = compiler_count(level);
  double k = queue_size / (feedback_k * comp_count) + 1;

  // Increase C1 compile threshold when the code cache is filled more
  // than specified by IncreaseFirstTierCompileThresholdAt percentage.
  // The main intention is to keep enough free space for C2 compiled code
  // to achieve peak performance if the code cache is under stress.
  if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) {
    double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
    if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
      k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
    }
  }
  return k;
}
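
// Worked example for the queue-length scaling above (illustrative; assumes the
// default Tier3LoadFeedback = 5): with 2 compiler threads at the given level
// and 20 queued tasks, k = 20 / (5 * 2) + 1 = 3, i.e. the effective thresholds
// triple under that load.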

// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common()).
// Tier?LoadFeedback is basically a coefficient that determines how many
// methods per compiler thread can be in the queue before the threshold
// values double.
bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return loop_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
  }
  default:
    return true;
  }
}

bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return call_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return call_predicate_helper<CompLevel_full_profile>(i, b, k, method);
  }
  default:
    return true;
  }
}

// If a method is old enough and is still in the interpreter we would want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on compilers into account.
bool AdvancedThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
  if (cur_level == CompLevel_none &&
      CompileBroker::queue_size(CompLevel_full_optimization) <=
      Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
    int i = method->invocation_count();
    int b = method->backedge_count();
    double k = Tier0ProfilingStartPercentage / 100.0;
    return call_predicate_helper<CompLevel_none>(i, b, k, method) || loop_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  return false;
}
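
// For scale: with the default Tier0ProfilingStartPercentage = 200, k = 2.0, so
// interpreter profiling starts once a method reaches roughly double the normal
// level 3 thresholds, provided the C2 queue is short enough.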

// Inlining control: if we're compiling a profiled method with C1 and the callee
// is known to have OSRed in a C2 version, don't inline it.
bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
  CompLevel comp_level = (CompLevel)env->comp_level();
  if (comp_level == CompLevel_full_profile ||
      comp_level == CompLevel_limited_profile) {
    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
  }
  return false;
}

// Create MDO if necessary.
void AdvancedThresholdPolicy::create_mdo(methodHandle mh, JavaThread* THREAD) {
  if (mh->is_native() ||
      mh->is_abstract() ||
      mh->is_accessor() ||
      mh->is_constant_getter()) {
    return;
  }
  if (mh->method_data() == NULL) {
    Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
  }
}


/*
 * Method states:
 *   0 - interpreter (CompLevel_none)
 *   1 - pure C1 (CompLevel_simple)
 *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
 *   3 - C1 with full profiling (CompLevel_full_profile)
 *   4 - C2 (CompLevel_full_optimization)
 *
 * Common state transition patterns:
 * a. 0 -> 3 -> 4.
 *    The most common path. But note that even in this straightforward case
 *    profiling can start at level 0 and finish at level 3.
 *
 * b. 0 -> 2 -> 3 -> 4.
 *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
 *    into state 3 directly and over-profiling while a method is in the C2 queue, we transition
 *    to level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
 *
 * c. 0 -> (3->2) -> 4.
 *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
 *    for the profiling to fully occur at level 0. We then change the compilation level of the
 *    method to 2 while the request is still in the queue, because level 2 code runs much faster
 *    than fully profiled code while C2 is compiling the method.
 *
 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
 *    Once a method has been compiled with C1 it can be identified as trivial and compiled at
 *    level 1. These transitions can also occur if a method can't be compiled with C2 but can
 *    be with C1.
 *
 * e. 0 -> 4.
 *    This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
 *    or because of a deopt that didn't require reprofiling (compilation won't happen in this case
 *    because the compiled version already exists).
 *
 * Note that since state 0 can be reached from any other state via deoptimization,
 * different loops are possible.
 *
 */

// Common transition function. Given a predicate, determines if a method should
// transition to another level.
CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  if (is_trivial(method)) {
    next_level = CompLevel_simple;
  } else {
    switch(cur_level) {
    case CompLevel_none:
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level, method)) {
#if INCLUDE_JVMCI
        if (UseJVMCICompiler) {
          // Since JVMCI takes a while to warm up, its queue inevitably backs up during
          // early VM execution.
          next_level = CompLevel_full_profile;
          break;
        }
#endif
        // C1-generated fully profiled code is about 30% slower than the limited profile
        // code that has only invocation and backedge counters. The observation is that
        // if the C2 queue is large enough we can spend too much time in the fully profiled code
        // while waiting for C2 to pick the method from the queue. To alleviate this problem
        // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
        // we choose to compile a limited profiled version and then recompile with full profiling
        // when the load on C2 goes down.
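        // For scale (illustrative; assumes the default Tier3DelayOn = 5): with
        // two C2 compiler threads, the feedback below triggers once more than
        // 5 * 2 = 10 methods are waiting in the C2 queue.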
        if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
            Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
          next_level = CompLevel_limited_profile;
        } else {
          next_level = CompLevel_full_profile;
        }
      }
      break;
    case CompLevel_limited_profile:
      if (is_method_profiled(method)) {
        // Special case: we got here because this method was fully profiled in the interpreter.
        next_level = CompLevel_full_optimization;
      } else {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                     Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                     (this->*p)(i, b, cur_level, method))) {
              next_level = CompLevel_full_profile;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    case CompLevel_full_profile:
      {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            int mdo_i = mdo->invocation_count_delta();
            int mdo_b = mdo->backedge_count_delta();
            if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
              next_level = CompLevel_full_optimization;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    }
  }
  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
}

// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel AdvancedThresholdPolicy::call_event(Method* method, CompLevel cur_level) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
                             common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true));
  CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level);

  // If the OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != NULL, "MDO should not be NULL");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }
  return next_level;
}

// Determine if we should do an OSR compilation of a given method.
CompLevel AdvancedThresholdPolicy::loop_event(Method* method, CompLevel cur_level) {
  CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true);
  if (cur_level == CompLevel_none) {
    // If there is a live OSR method, it means that we deopted to the interpreter
    // for the transition.
    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
    if (osr_level > CompLevel_none) {
      return osr_level;
    }
  }
  return next_level;
}

// Update the rate and submit the compile.
void AdvancedThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
  int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
  update_rate(os::javaTimeMillis(), mh());
  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
}

// Handle the invocation event.
void AdvancedThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
                                                      CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
    CompLevel next_level = call_event(mh(), level);
    if (next_level != level) {
      compile(mh, InvocationEntryBci, next_level, thread);
    }
  }
}

// Handle the back branch event. Notice that we can compile the method
// with a regular entry from here.
void AdvancedThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
                                                       int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  // Check if an MDO should be created for the inlined method.
  if (should_create_mdo(imh(), level)) {
    create_mdo(imh, thread);
  }

  if (is_compilation_enabled()) {
    CompLevel next_osr_level = loop_event(imh(), level);
    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
    // At the very least compile the OSR version.
    if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
      compile(imh, bci, next_osr_level, thread);
    }

    // Use the loop event as an opportunity to also check if there have been
    // enough calls.
    CompLevel cur_level, next_level;
    if (mh() != imh()) { // If there is an enclosing method
      guarantee(nm != NULL, "Should have nmethod here");
      cur_level = comp_level(mh());
      next_level = call_event(mh(), cur_level);

      if (max_osr_level == CompLevel_full_optimization) {
        // The inlinee OSRed to full opt, so we need to modify the enclosing method to avoid deopts.
        bool make_not_entrant = false;
        if (nm->is_osr_method()) {
          // This is an OSR method, just make it not entrant and recompile later if needed.
          make_not_entrant = true;
        } else {
          if (next_level != CompLevel_full_optimization) {
            // next_level is not full opt, so we need to recompile the
            // enclosing method without the inlinee.
            cur_level = CompLevel_none;
            make_not_entrant = true;
          }
        }
        if (make_not_entrant) {
          if (PrintTieredEvents) {
            int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
            print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
          }
          nm->make_not_entrant();
        }
      }
      if (!CompileBroker::compilation_is_in_queue(mh)) {
        // Fix up next_level if necessary to avoid deopts.
        if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
          next_level = CompLevel_full_profile;
        }
        if (cur_level != next_level) {
          compile(mh, InvocationEntryBci, next_level, thread);
        }
      }
    } else {
      cur_level = comp_level(imh());
      next_level = call_event(imh(), cur_level);
      if (!CompileBroker::compilation_is_in_queue(imh) && (next_level != cur_level)) {
        compile(imh, InvocationEntryBci, next_level, thread);
      }
    }
  }
}

#endif // TIERED