advancedThresholdPolicy.cpp revision 6760:22b98ab2a69f
/*
 * Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "runtime/advancedThresholdPolicy.hpp"
#include "runtime/simpleThresholdPolicy.inline.hpp"

#ifdef TIERED
// Print an event.
void AdvancedThresholdPolicy::print_specific(EventType type, methodHandle mh, methodHandle imh,
                                             int bci, CompLevel level) {
  tty->print(" rate=");
  if (mh->prev_time() == 0) tty->print("n/a");
  else tty->print("%f", mh->rate());

  tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
                               threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
}

void AdvancedThresholdPolicy::initialize() {
  // Turn on ergonomic compiler count selection
  if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
  }
  int count = CICompilerCount;
  if (CICompilerCountPerCPU) {
    // A simple log n seems to grow too slowly for tiered compilation, so try something faster: log n * log log n.
    int log_cpu = log2_intptr(os::active_processor_count());
    int loglog_cpu = log2_intptr(MAX2(log_cpu, 1));
    count = MAX2(log_cpu * loglog_cpu, 1) * 3 / 2;
  }

  set_c1_count(MAX2(count / 3, 1));
  set_c2_count(MAX2(count - c1_count(), 1));
  FLAG_SET_ERGO(intx, CICompilerCount, c1_count() + c2_count());
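  // Illustrative example (added commentary, not in the original source): with 16 active
  // processors, log_cpu = 4 and loglog_cpu = 2, so count = MAX2(4 * 2, 1) * 3 / 2 = 12,
  // which the lines above split into c1_count = 4 and c2_count = 8 compiler threads.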

  // Some inlining tuning
#ifdef X86
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2000);
  }
#endif

#ifdef SPARC
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2500);
  }
#endif

  set_increase_threshold_at_ratio();
  set_start_time(os::javaTimeMillis());
}

// update_rate() is called from select_task() while holding a compile queue lock.
void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) {
  // Skip the update if the counters are absent.
  // We can't allocate them here because we are holding the compile queue lock.
  if (m->method_counters() == NULL)  return;

  if (is_old(m)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    m->set_rate(0);
    return;
  }

  // We don't update the rate if we've just come out of a safepoint.
  // delta_s is the time since the last safepoint in milliseconds.
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events have there been since the last measurement?
  int event_count = m->invocation_count() + m->backedge_count();
  int delta_e = event_count - m->prev_event_count();

  // We should have been running for at least 1ms.
  if (delta_s >= TieredRateUpdateMinTime) {
    // And the previous measurement must have been taken at least 1ms ago.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      m->set_prev_time(t);
      m->set_prev_event_count(event_count);
      m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
    } else {
      if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
        // If nothing has happened for 25ms, zero the rate. Don't modify the prev values.
        m->set_rate(0);
      }
    }
  }
}
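// Illustrative example (added commentary): if a method has accumulated delta_e = 3000
// invocation + backedge events during delta_t = 200ms since the previous measurement,
// update_rate() above sets its rate to 3000 / 200 = 15 events per millisecond.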

// Check if this method has been stale for a given number of milliseconds.
// See select_task().
bool AdvancedThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - m->prev_time();
  if (delta_t > timeout && delta_s > timeout) {
    int event_count = m->invocation_count() + m->backedge_count();
    int delta_e = event_count - m->prev_event_count();
    // Return true if there were no events.
    return delta_e == 0;
  }
  return false;
}

// We don't remove old methods from the compile queue even if they have
// very low activity. See select_task().
bool AdvancedThresholdPolicy::is_old(Method* method) {
  return method->invocation_count() > 50000 || method->backedge_count() > 500000;
}

double AdvancedThresholdPolicy::weight(Method* method) {
  return (method->rate() + 1) * ((method->invocation_count() + 1) * (method->backedge_count() + 1));
}
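// Illustrative example (added commentary): a method with rate 15, 5000 invocations and
// 0 backedges gets a weight of (15 + 1) * ((5000 + 1) * (0 + 1)) = 80016, so hotter and
// more heavily used methods win the comparison in compare_methods() below.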

// Apply heuristics and return true if x should be compiled before y
bool AdvancedThresholdPolicy::compare_methods(Method* x, Method* y) {
  if (x->highest_comp_level() > y->highest_comp_level()) {
    // recompilation after deopt
    return true;
  } else if (x->highest_comp_level() == y->highest_comp_level()) {
    if (weight(x) > weight(y)) {
      return true;
    }
  }
  return false;
}

// Is method profiled enough?
bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1);
  }
  return false;
}
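// Note (added commentary): call_predicate_helper<CompLevel_full_profile> comes from
// simpleThresholdPolicy.inline.hpp; with a scale of 1 it checks the MDO count deltas
// against the unscaled Tier4 thresholds, so "profiled enough" means the accumulated
// profile already satisfies the normal level 4 promotion thresholds.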

// Called with the queue locked and with at least one element
CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
  CompileTask* max_task = NULL;
  Method* max_method = NULL;
  jlong t = os::javaTimeMillis();
  // Iterate through the queue and find the method with the maximum rate.
  for (CompileTask* task = compile_queue->first(); task != NULL;) {
    CompileTask* next_task = task->next();
    Method* method = task->method();
    update_rate(t, method);
    if (max_task == NULL) {
      max_task = task;
      max_method = method;
    } else {
      // If a method has been stale for some time, remove it from the queue.
      if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
        if (PrintTieredEvents) {
          print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
        }
        compile_queue->remove_and_mark_stale(task);
        method->clear_queued_for_compilation();
        task = next_task;
        continue;
      }

      // Select the method with the higher priority (see compare_methods()).
      if (compare_methods(method, max_method)) {
        max_task = task;
        max_method = method;
      }
    }
    task = next_task;
  }

  if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
      && is_method_profiled(max_method)) {
    max_task->set_comp_level(CompLevel_limited_profile);
    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }

  return max_task;
}
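// Summary (added commentary): select_task() prunes tasks whose methods have gone stale
// (unless they are "old"), picks the task that compare_methods() ranks highest, and may
// downgrade a level 3 task to level 2 when the method is already profiled enough and
// level 4 is enabled, since further full profiling would only slow the method down.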

double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
  double queue_size = CompileBroker::queue_size(level);
  int comp_count = compiler_count(level);
  double k = queue_size / (feedback_k * comp_count) + 1;

  // Increase the C1 compile threshold when the code cache is filled to more than
  // the percentage specified by IncreaseFirstTierCompileThresholdAt.
  // The main intention is to keep enough free space for C2 compiled code
  // to achieve peak performance if the code cache is under stress.
  if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) {
    double current_reverse_free_ratio = CodeCache::reverse_free_ratio();
    if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
      k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
    }
  }
  return k;
}
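// Illustrative example (added commentary): with, say, 20 level 3 tasks queued,
// Tier3LoadFeedback = 5 and 2 C1 compiler threads, k = 20 / (5 * 2) + 1 = 3,
// i.e. the level 3 compile thresholds are effectively tripled under that load.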

// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common()).
// Tier?LoadFeedback is basically a coefficient that determines how
// many methods per compiler thread can be in the queue before
// the threshold values double.
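// (Added note: the call_predicate_helper/loop_predicate_helper templates used below come
// from simpleThresholdPolicy.inline.hpp and scale the corresponding Tier3*/Tier4* threshold
// flags by k, so a longer queue per compiler thread raises the effective thresholds.)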
bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return loop_predicate_helper<CompLevel_none>(i, b, k);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return loop_predicate_helper<CompLevel_full_profile>(i, b, k);
  }
  default:
    return true;
  }
}

bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return call_predicate_helper<CompLevel_none>(i, b, k);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return call_predicate_helper<CompLevel_full_profile>(i, b, k);
  }
  default:
    return true;
  }
}

// If a method is old enough and is still running in the interpreter, we want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on the compilers into account.
bool AdvancedThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
  if (cur_level == CompLevel_none &&
      CompileBroker::queue_size(CompLevel_full_optimization) <=
      Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
    int i = method->invocation_count();
    int b = method->backedge_count();
    double k = Tier0ProfilingStartPercentage / 100.0;
    return call_predicate_helper<CompLevel_none>(i, b, k) || loop_predicate_helper<CompLevel_none>(i, b, k);
  }
  return false;
}
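// Illustrative example (added commentary): if Tier0ProfilingStartPercentage is 200, then
// k = 2.0 and an interpreted method starts profiling once its counters reach twice the
// level 3 thresholds, provided the C2 queue is short enough (the Tier3DelayOn check above).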

// Inlining control: if we're compiling a profiled method with C1 and the callee
// is known to have OSRed in a C2 version, don't inline it.
bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
  CompLevel comp_level = (CompLevel)env->comp_level();
  if (comp_level == CompLevel_full_profile ||
      comp_level == CompLevel_limited_profile) {
    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
  }
  return false;
}

// Create MDO if necessary.
void AdvancedThresholdPolicy::create_mdo(methodHandle mh, JavaThread* THREAD) {
  if (mh->is_native() || mh->is_abstract() || mh->is_accessor()) return;
  if (mh->method_data() == NULL) {
    Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
  }
}


/*
 * Method states:
 *   0 - interpreter (CompLevel_none)
 *   1 - pure C1 (CompLevel_simple)
 *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
 *   3 - C1 with full profiling (CompLevel_full_profile)
 *   4 - C2 (CompLevel_full_optimization)
 *
 * Common state transition patterns:
 * a. 0 -> 3 -> 4.
 *    The most common path. Note that even in this straightforward case
 *    profiling can start at level 0 and finish at level 3.
 *
 * b. 0 -> 2 -> 3 -> 4.
 *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
 *    into state 3 directly and over-profiling while the method sits in the C2 queue, we
 *    transition to level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
 *
 * c. 0 -> (3->2) -> 4.
 *    Here we enqueue a method for compilation at level 3, but the C1 queue is long enough
 *    for the profiling to complete at level 0. We then change the compilation level of the
 *    method to 2, because that lets it run much faster without full profiling while C2
 *    is compiling.
 *
 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
 *    After a method has been compiled with C1 once, it can be identified as trivial and
 *    compiled at level 1. These transitions can also occur if a method can't be compiled
 *    with C2 but can with C1.
 *
 * e. 0 -> 4.
 *    This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
 *    or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
 *    the compiled version already exists).
 *
 * Note that since state 0 can be reached from any other state via deoptimization,
 * different loops are possible.
 *
 */

// Common transition function. Given a predicate, determines whether a method should transition to another level.
CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  if (is_trivial(method)) {
    next_level = CompLevel_simple;
  } else {
    switch(cur_level) {
    case CompLevel_none:
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level)) {
        // C1-generated fully profiled code is about 30% slower than the limited-profile
        // code that has only invocation and backedge counters. The observation is that
        // if the C2 queue is large enough we can spend too much time in the fully profiled code
        // while waiting for C2 to pick the method from the queue. To alleviate this problem
        // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
        // we choose to compile a limited-profile version and then recompile with full profiling
        // when the load on C2 goes down.
        if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
                                 Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
          next_level = CompLevel_limited_profile;
        } else {
          next_level = CompLevel_full_profile;
        }
      }
      break;
    case CompLevel_limited_profile:
      if (is_method_profiled(method)) {
        // Special case: we got here because this method was fully profiled in the interpreter.
        next_level = CompLevel_full_optimization;
      } else {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                     Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                     (this->*p)(i, b, cur_level))) {
              next_level = CompLevel_full_profile;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    case CompLevel_full_profile:
      {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            int mdo_i = mdo->invocation_count_delta();
            int mdo_b = mdo->backedge_count_delta();
            if ((this->*p)(mdo_i, mdo_b, cur_level)) {
              next_level = CompLevel_full_optimization;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    }
  }
  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
}

// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel AdvancedThresholdPolicy::call_event(Method* method, CompLevel cur_level) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
                             common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true));
  CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level);

  // If the OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != NULL, "MDO should not be NULL");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }
  return next_level;
}

// Determine if we should do an OSR compilation of a given method.
CompLevel AdvancedThresholdPolicy::loop_event(Method* method, CompLevel cur_level) {
  CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true);
  if (cur_level == CompLevel_none) {
    // If there is a live OSR method, that means we deopted to the interpreter
    // for the transition.
    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
    if (osr_level > CompLevel_none) {
      return osr_level;
    }
  }
  return next_level;
}

// Update the rate and submit a compile request.
void AdvancedThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
  int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
  update_rate(os::javaTimeMillis(), mh());
  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
}

// Handle the invocation event.
void AdvancedThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
                                                      CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
    CompLevel next_level = call_event(mh(), level);
    if (next_level != level) {
      compile(mh, InvocationEntryBci, next_level, thread);
    }
  }
}

// Handle the back branch event. Notice that we can compile the method
// with a regular entry from here.
void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
                                                       int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  // Check if an MDO should be created for the inlined method.
  if (should_create_mdo(imh(), level)) {
    create_mdo(imh, thread);
  }

  if (is_compilation_enabled()) {
    CompLevel next_osr_level = loop_event(imh(), level);
    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
    // At the very least compile the OSR version.
    if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
      compile(imh, bci, next_osr_level, thread);
    }

    // Use the loop event as an opportunity to also check whether there have been
    // enough calls.
    CompLevel cur_level, next_level;
    if (mh() != imh()) { // If there is an enclosing method
      guarantee(nm != NULL, "Should have nmethod here");
      cur_level = comp_level(mh());
      next_level = call_event(mh(), cur_level);

      if (max_osr_level == CompLevel_full_optimization) {
        // The inlinee OSRed to full opt; we need to modify the enclosing method to avoid deopts.
        bool make_not_entrant = false;
        if (nm->is_osr_method()) {
          // This is an OSR method; just make it not entrant and recompile later if needed.
          make_not_entrant = true;
        } else {
          if (next_level != CompLevel_full_optimization) {
            // next_level is not full opt, so we need to recompile the
            // enclosing method without the inlinee.
            cur_level = CompLevel_none;
            make_not_entrant = true;
          }
        }
        if (make_not_entrant) {
          if (PrintTieredEvents) {
            int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
            print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
          }
          nm->make_not_entrant();
        }
      }
      if (!CompileBroker::compilation_is_in_queue(mh)) {
        // Fix up next_level if necessary to avoid deopts.
        if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
          next_level = CompLevel_full_profile;
        }
        if (cur_level != next_level) {
          compile(mh, InvocationEntryBci, next_level, thread);
        }
      }
    } else {
      cur_level = comp_level(imh());
      next_level = call_event(imh(), cur_level);
      if (!CompileBroker::compilation_is_in_queue(imh) && (next_level != cur_level)) {
        compile(imh, InvocationEntryBci, next_level, thread);
      }
    }
  }
}
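// Summary (added commentary): the back branch handler compiles the OSR version at
// next_osr_level when needed, possibly makes the enclosing nmethod not entrant if the
// inlinee OSRed to full opt (see the make_not_entrant logic above), and then uses the
// event as an opportunity to recompile the method with a regular entry point at a level
// chosen by call_event().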

#endif // TIERED