callGenerator.cpp revision 5776:de6a9e811145
1/*
2 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "ci/bcEscapeAnalyzer.hpp"
27#include "ci/ciCallSite.hpp"
28#include "ci/ciObjArray.hpp"
29#include "ci/ciMemberName.hpp"
30#include "ci/ciMethodHandle.hpp"
31#include "classfile/javaClasses.hpp"
32#include "compiler/compileLog.hpp"
33#include "opto/addnode.hpp"
34#include "opto/callGenerator.hpp"
35#include "opto/callnode.hpp"
36#include "opto/cfgnode.hpp"
37#include "opto/connode.hpp"
38#include "opto/parse.hpp"
39#include "opto/rootnode.hpp"
40#include "opto/runtime.hpp"
41#include "opto/subnode.hpp"
42
43
44// Utility function.
45const TypeFunc* CallGenerator::tf() const {
46  return TypeFunc::make(method());
47}
48
49//-----------------------------ParseGenerator---------------------------------
50// Internal class which handles all direct bytecode traversal.
51class ParseGenerator : public InlineCallGenerator {
52private:
53  bool  _is_osr;
54  float _expected_uses;
55
56public:
57  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
58    : InlineCallGenerator(method)
59  {
60    _is_osr        = is_osr;
61    _expected_uses = expected_uses;
62    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
63  }
64
65  virtual bool      is_parse() const           { return true; }
66  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
67  int is_osr() { return _is_osr; }
68
69};
70
71JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) {
72  Compile* C = Compile::current();
73
74  if (is_osr()) {
75    // The JVMS for an OSR has a single argument (see its TypeFunc).
76    assert(jvms->depth() == 1, "no inline OSR");
77  }
78
79  if (C->failing()) {
80    return NULL;  // bailing out of the compile; do not try to parse
81  }
82
83  Parse parser(jvms, method(), _expected_uses, parent_parser);
84  // Grab signature for matching/allocation
85#ifdef ASSERT
86  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
87    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
88    assert(C->env()->system_dictionary_modification_counter_changed(),
89           "Must invalidate if TypeFuncs differ");
90  }
91#endif
92
93  GraphKit& exits = parser.exits();
94
95  if (C->failing()) {
96    while (exits.pop_exception_state() != NULL) ;
97    return NULL;
98  }
99
100  assert(exits.jvms()->same_calls_as(jvms), "sanity");
101
102  // Simply return the exit state of the parser,
103  // augmented by any exceptional states.
104  return exits.transfer_exceptions_into_jvms();
105}
106
107//---------------------------DirectCallGenerator------------------------------
108// Internal class which handles all out-of-line calls w/o receiver type checks.
109class DirectCallGenerator : public CallGenerator {
110 private:
111  CallStaticJavaNode* _call_node;
112  // Force separate memory and I/O projections for the exceptional
113  // paths to facilitate late inlining.
114  bool                _separate_io_proj;
115
116 public:
117  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
118    : CallGenerator(method),
119      _separate_io_proj(separate_io_proj)
120  {
121  }
122  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
123
124  CallStaticJavaNode* call_node() const { return _call_node; }
125};
126
127JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
128  GraphKit kit(jvms);
129  bool is_static = method()->is_static();
130  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
131                             : SharedRuntime::get_resolve_opt_virtual_call_stub();
132
133  if (kit.C->log() != NULL) {
134    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
135  }
136
137  CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
138  _call_node = call;  // Save the call node in case we need it later
139  if (!is_static) {
140    // Make an explicit receiver null_check as part of this call.
141    // Since we share a map with the caller, his JVMS gets adjusted.
142    kit.null_check_receiver_before_call(method());
143    if (kit.stopped()) {
144      // And dump it back to the caller, decorated with any exceptions:
145      return kit.transfer_exceptions_into_jvms();
146    }
147    // Mark the call node as virtual, sort of:
148    call->set_optimized_virtual(true);
149    if (method()->is_method_handle_intrinsic() ||
150        method()->is_compiled_lambda_form()) {
151      call->set_method_handle_invoke(true);
152    }
153  }
154  kit.set_arguments_for_java_call(call);
155  kit.set_edges_for_java_call(call, false, _separate_io_proj);
156  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
157  kit.push_node(method()->return_type()->basic_type(), ret);
158  return kit.transfer_exceptions_into_jvms();
159}
160
161//--------------------------VirtualCallGenerator------------------------------
162// Internal class which handles all out-of-line calls checking receiver type.
163class VirtualCallGenerator : public CallGenerator {
164private:
165  int _vtable_index;
166public:
167  VirtualCallGenerator(ciMethod* method, int vtable_index)
168    : CallGenerator(method), _vtable_index(vtable_index)
169  {
170    assert(vtable_index == Method::invalid_vtable_index ||
171           vtable_index >= 0, "either invalid or usable");
172  }
173  virtual bool      is_virtual() const          { return true; }
174  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
175};
176
177JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
178  GraphKit kit(jvms);
179  Node* receiver = kit.argument(0);
180
181  if (kit.C->log() != NULL) {
182    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
183  }
184
185  // If the receiver is a constant null, do not torture the system
186  // by attempting to call through it.  The compile will proceed
187  // correctly, but may bail out in final_graph_reshaping, because
188  // the call instruction will have a seemingly deficient out-count.
189  // (The bailout says something misleading about an "infinite loop".)
190  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
191    kit.inc_sp(method()->arg_size());  // restore arguments
192    kit.uncommon_trap(Deoptimization::Reason_null_check,
193                      Deoptimization::Action_none,
194                      NULL, "null receiver");
195    return kit.transfer_exceptions_into_jvms();
196  }
197
198  // Ideally we would unconditionally do a null check here and let it
199  // be converted to an implicit check based on profile information.
200  // However currently the conversion to implicit null checks in
201  // Block::implicit_null_check() only looks for loads and stores, not calls.
202  ciMethod *caller = kit.method();
203  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
204  if (!UseInlineCaches || !ImplicitNullChecks ||
205       ((ImplicitNullCheckThreshold > 0) && caller_md &&
206       (caller_md->trap_count(Deoptimization::Reason_null_check)
207       >= (uint)ImplicitNullCheckThreshold))) {
208    // Make an explicit receiver null_check as part of this call.
209    // Since we share a map with the caller, his JVMS gets adjusted.
210    receiver = kit.null_check_receiver_before_call(method());
211    if (kit.stopped()) {
212      // And dump it back to the caller, decorated with any exceptions:
213      return kit.transfer_exceptions_into_jvms();
214    }
215  }
216
217  assert(!method()->is_static(), "virtual call must not be to static");
218  assert(!method()->is_final(), "virtual call should not be to final");
219  assert(!method()->is_private(), "virtual call should not be to private");
220  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
221         "no vtable calls if +UseInlineCaches ");
222  address target = SharedRuntime::get_resolve_virtual_call_stub();
223  // Normal inline cache used for call
224  CallDynamicJavaNode *call = new (kit.C) CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
225  kit.set_arguments_for_java_call(call);
226  kit.set_edges_for_java_call(call);
227  Node* ret = kit.set_results_for_java_call(call);
228  kit.push_node(method()->return_type()->basic_type(), ret);
229
230  // Represent the effect of an implicit receiver null_check
231  // as part of this call.  Since we share a map with the caller,
232  // his JVMS gets adjusted.
233  kit.cast_not_null(receiver);
234  return kit.transfer_exceptions_into_jvms();
235}
236
237CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
238  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
239  return new ParseGenerator(m, expected_uses);
240}
241
242// As a special case, the JVMS passed to this CallGenerator is
243// for the method execution already in progress, not just the JVMS
244// of the caller.  Thus, this CallGenerator cannot be mixed with others!
245CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
246  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
247  float past_uses = m->interpreter_invocation_count();
248  float expected_uses = past_uses;
249  return new ParseGenerator(m, expected_uses, true);
250}
251
252CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
253  assert(!m->is_abstract(), "for_direct_call mismatch");
254  return new DirectCallGenerator(m, separate_io_proj);
255}
256
257CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
258  assert(!m->is_static(), "for_virtual_call mismatch");
259  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
260  return new VirtualCallGenerator(m, vtable_index);
261}
262
263// Allow inlining decisions to be delayed
264class LateInlineCallGenerator : public DirectCallGenerator {
265 protected:
266  CallGenerator* _inline_cg;
267
268  virtual bool do_late_inline_check(JVMState* jvms) { return true; }
269
270 public:
271  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
272    DirectCallGenerator(method, true), _inline_cg(inline_cg) {}
273
274  virtual bool      is_late_inline() const { return true; }
275
276  // Convert the CallStaticJava into an inline
277  virtual void do_late_inline();
278
279  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
280    Compile *C = Compile::current();
281    C->print_inlining_skip(this);
282
283    // Record that this call site should be revisited once the main
284    // parse is finished.
285    if (!is_mh_late_inline()) {
286      C->add_late_inline(this);
287    }
288
289    // Emit the CallStaticJava and request separate projections so
290    // that the late inlining logic can distinguish between fall
291    // through and exceptional uses of the memory and io projections
292    // as is done for allocations and macro expansion.
293    return DirectCallGenerator::generate(jvms, parent_parser);
294  }
295
296  virtual void print_inlining_late(const char* msg) {
297    CallNode* call = call_node();
298    Compile* C = Compile::current();
299    C->print_inlining_insert(this);
300    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
301  }
302
303};
304
305void LateInlineCallGenerator::do_late_inline() {
306  // Can't inline it if the call site has gone dead (no uses or dead control).
307  CallStaticJavaNode* call = call_node();
308  if (call == NULL || call->outcnt() == 0 ||
309      call->in(0) == NULL || call->in(0)->is_top()) {
310    return;
311  }
312
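  // During incremental inlining the graph around the call may have changed;
  // if any argument or the incoming memory state has gone dead (top), the
  // call site is no longer reachable and is left untouched.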
313  const TypeTuple *r = call->tf()->domain();
314  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
315    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
316      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
317      return;
318    }
319  }
320
321  if (call->in(TypeFunc::Memory)->is_top()) {
322    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
323    return;
324  }
325
326  Compile* C = Compile::current();
327  // Remove inlined methods from Compiler's lists.
328  if (call->is_macro()) {
329    C->remove_macro_node(call);
330  }
331
332  // Make a clone of the JVMState that is appropriate to use for driving a parse
333  JVMState* old_jvms = call->jvms();
334  JVMState* jvms = old_jvms->clone_shallow(C);
335  uint size = call->req();
336  SafePointNode* map = new (C) SafePointNode(size, jvms);
337  for (uint i1 = 0; i1 < size; i1++) {
338    map->init_req(i1, call->in(i1));
339  }
340
341  // Make sure the state is a MergeMem for parsing.
342  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
343    Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
344    C->initial_gvn()->set_type_bottom(mem);
345    map->set_req(TypeFunc::Memory, mem);
346  }
347
348  uint nargs = method()->arg_size();
349  // blow away old call arguments
350  Node* top = C->top();
351  for (uint i1 = 0; i1 < nargs; i1++) {
352    map->set_req(TypeFunc::Parms + i1, top);
353  }
354  jvms->set_map(map);
355
356  // Make enough space in the expression stack to transfer
357  // the incoming arguments and return value.
358  map->ensure_stack(jvms, jvms->method()->max_stack());
359  for (uint i1 = 0; i1 < nargs; i1++) {
360    map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
361  }
362
363  // This check is done here because the for_method_handle_inline() method
364  // needs the jvms for the inlined state.
365  if (!do_late_inline_check(jvms)) {
366    map->disconnect_inputs(NULL, C);
367    return;
368  }
369
370  C->print_inlining_insert(this);
371
372  CompileLog* log = C->log();
373  if (log != NULL) {
374    log->head("late_inline method='%d'", log->identify(method()));
375    JVMState* p = jvms;
376    while (p != NULL) {
377      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
378      p = p->caller();
379    }
380    log->tail("late_inline");
381  }
382
383  // Set up default node notes to be picked up by the inlining
384  Node_Notes* old_nn = C->default_node_notes();
385  if (old_nn != NULL) {
386    Node_Notes* entry_nn = old_nn->clone(C);
387    entry_nn->set_jvms(jvms);
388    C->set_default_node_notes(entry_nn);
389  }
390
391  // Now perform the inlining using the synthesized JVMState
392  JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
393  if (new_jvms == NULL)  return;  // no change
394  if (C->failing())      return;
395
396  // Capture any exceptional control flow
397  GraphKit kit(new_jvms);
398
399  // Find the result object
400  Node* result = C->top();
401  int   result_size = method()->return_type()->size();
402  if (result_size != 0 && !kit.stopped()) {
403    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
404  }
405
406  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
407  C->env()->notice_inlined_method(_inline_cg->method());
408  C->set_inlining_progress(true);
409
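  // Splice the inlined subgraph in place of the original CallStaticJava by
  // rewiring the call's control, i/o, memory and result projections to the
  // corresponding exits of the parsed body.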
410  kit.replace_call(call, result);
411}
412
413
414CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
415  return new LateInlineCallGenerator(method, inline_cg);
416}
417
418class LateInlineMHCallGenerator : public LateInlineCallGenerator {
419  ciMethod* _caller;
420  int _attempt;
421  bool _input_not_const;
422
423  virtual bool do_late_inline_check(JVMState* jvms);
424  virtual bool already_attempted() const { return _attempt > 0; }
425
426 public:
427  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
428    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}
429
430  virtual bool is_mh_late_inline() const { return true; }
431
432  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
433    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
434    if (_input_not_const) {
435      // inlining won't be possible so no need to enqueue right now.
436      call_node()->set_generator(this);
437    } else {
438      Compile::current()->add_late_inline(this);
439    }
440    return new_jvms;
441  }
442
443  virtual void print_inlining_late(const char* msg) {
444    if (!_input_not_const) return;
445    LateInlineCallGenerator::print_inlining_late(msg);
446  }
447};
448
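// Re-attempt to build an inline CallGenerator now that more of the graph is
// known; if the MethodHandle/MemberName argument has become a constant, the
// resulting generator is recorded and used for the actual late inlining.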
449bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
450
451  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);
452
453  if (!_input_not_const) {
454    _attempt++;
455  }
456
457  if (cg != NULL) {
458    assert(!cg->is_late_inline() && cg->is_inline(), "we're doing late inlining");
459    _inline_cg = cg;
460    Compile::current()->dec_number_of_mh_late_inlines();
461    return true;
462  }
463
464  call_node()->set_generator(this);
465  return false;
466}
467
468CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
469  Compile::current()->inc_number_of_mh_late_inlines();
470  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
471  return cg;
472}
473
474class LateInlineStringCallGenerator : public LateInlineCallGenerator {
475
476 public:
477  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
478    LateInlineCallGenerator(method, inline_cg) {}
479
480  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
481    Compile *C = Compile::current();
482    C->print_inlining_skip(this);
483
484    C->add_string_late_inline(this);
485
486    JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
487    return new_jvms;
488  }
489
490  virtual bool is_string_late_inline() const { return true; }
491};
492
493CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
494  return new LateInlineStringCallGenerator(method, inline_cg);
495}
496
497class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
498
499 public:
500  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
501    LateInlineCallGenerator(method, inline_cg) {}
502
503  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
504    Compile *C = Compile::current();
505    C->print_inlining_skip(this);
506
507    C->add_boxing_late_inline(this);
508
509    JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
510    return new_jvms;
511  }
512};
513
514CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
515  return new LateInlineBoxingCallGenerator(method, inline_cg);
516}
517
518//---------------------------WarmCallGenerator--------------------------------
519// Internal class which handles initial deferral of inlining decisions.
520class WarmCallGenerator : public CallGenerator {
521  WarmCallInfo*   _call_info;
522  CallGenerator*  _if_cold;
523  CallGenerator*  _if_hot;
524  bool            _is_virtual;   // caches virtuality of if_cold
525  bool            _is_inline;    // caches inline-ness of if_hot
526
527public:
528  WarmCallGenerator(WarmCallInfo* ci,
529                    CallGenerator* if_cold,
530                    CallGenerator* if_hot)
531    : CallGenerator(if_cold->method())
532  {
533    assert(method() == if_hot->method(), "consistent choices");
534    _call_info  = ci;
535    _if_cold    = if_cold;
536    _if_hot     = if_hot;
537    _is_virtual = if_cold->is_virtual();
538    _is_inline  = if_hot->is_inline();
539  }
540
541  virtual bool      is_inline() const           { return _is_inline; }
542  virtual bool      is_virtual() const          { return _is_virtual; }
543  virtual bool      is_deferred() const         { return true; }
544
545  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
546};
547
548
549CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
550                                            CallGenerator* if_cold,
551                                            CallGenerator* if_hot) {
552  return new WarmCallGenerator(ci, if_cold, if_hot);
553}
554
555JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
556  Compile* C = Compile::current();
557  if (C->log() != NULL) {
558    C->log()->elem("warm_call bci='%d'", jvms->bci());
559  }
560  jvms = _if_cold->generate(jvms, parent_parser);
561  if (jvms != NULL) {
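    // Walk back from the exit control (CatchProj -> Catch -> Proj) to the
    // CallJava node that was just emitted so the call site can be queued
    // for warm-call inlining once its heat has been computed.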
562    Node* m = jvms->map()->control();
563    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
564    if (m->is_Catch())     m = m->in(0);  else m = C->top();
565    if (m->is_Proj())      m = m->in(0);  else m = C->top();
566    if (m->is_CallJava()) {
567      _call_info->set_call(m->as_Call());
568      _call_info->set_hot_cg(_if_hot);
569#ifndef PRODUCT
570      if (PrintOpto || PrintOptoInlining) {
571        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
572        tty->print("WCI: ");
573        _call_info->print();
574      }
575#endif
576      _call_info->set_heat(_call_info->compute_heat());
577      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
578    }
579  }
580  return jvms;
581}
582
583void WarmCallInfo::make_hot() {
584  Unimplemented();
585}
586
587void WarmCallInfo::make_cold() {
588  // No action:  Just dequeue.
589}
590
591
592//------------------------PredictedCallGenerator------------------------------
593// Internal class which handles calls guarded by a predicted receiver type check:
594// hits take the fast (typically inlined) path, misses fall back to a slower generator.
594class PredictedCallGenerator : public CallGenerator {
595  ciKlass*       _predicted_receiver;
596  CallGenerator* _if_missed;
597  CallGenerator* _if_hit;
598  float          _hit_prob;
599
600public:
601  PredictedCallGenerator(ciKlass* predicted_receiver,
602                         CallGenerator* if_missed,
603                         CallGenerator* if_hit, float hit_prob)
604    : CallGenerator(if_missed->method())
605  {
606    // The call profile data may predict the hit_prob as extreme as 0 or 1.
607    // Remove the extreme values from the range.
608    if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
609    if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;
610
611    _predicted_receiver = predicted_receiver;
612    _if_missed          = if_missed;
613    _if_hit             = if_hit;
614    _hit_prob           = hit_prob;
615  }
616
617  virtual bool      is_virtual()   const    { return true; }
618  virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
619  virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }
620
621  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
622};
623
624
625CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
626                                                 CallGenerator* if_missed,
627                                                 CallGenerator* if_hit,
628                                                 float hit_prob) {
629  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
630}
631
632
633JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
634  GraphKit kit(jvms);
635  PhaseGVN& gvn = kit.gvn();
636  // We need an explicit receiver null_check before checking its type.
637  // We share a map with the caller, so his JVMS gets adjusted.
638  Node* receiver = kit.argument(0);
639
640  CompileLog* log = kit.C->log();
641  if (log != NULL) {
642    log->elem("predicted_call bci='%d' klass='%d'",
643              jvms->bci(), log->identify(_predicted_receiver));
644  }
645
646  receiver = kit.null_check_receiver_before_call(method());
647  if (kit.stopped()) {
648    return kit.transfer_exceptions_into_jvms();
649  }
650
651  Node* exact_receiver = receiver;  // will get updated in place...
652  Node* slow_ctl = kit.type_check_receiver(receiver,
653                                           _predicted_receiver, _hit_prob,
654                                           &exact_receiver);
655
656  SafePointNode* slow_map = NULL;
657  JVMState* slow_jvms;
658  { PreserveJVMState pjvms(&kit);
659    kit.set_control(slow_ctl);
660    if (!kit.stopped()) {
661      slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser);
662      if (kit.failing())
663        return NULL;  // might happen because of NodeCountInliningCutoff
664      assert(slow_jvms != NULL, "must be");
665      kit.add_exception_states_from(slow_jvms);
666      kit.set_map(slow_jvms->map());
667      if (!kit.stopped())
668        slow_map = kit.stop();
669    }
670  }
671
672  if (kit.stopped()) {
673    // The instance does not exactly match the desired type.
674    kit.set_jvms(slow_jvms);
675    return kit.transfer_exceptions_into_jvms();
676  }
677
678  // fall through if the instance exactly matches the desired type
679  kit.replace_in_map(receiver, exact_receiver);
680
681  // Make the hot call:
682  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser);
683  if (new_jvms == NULL) {
684    // Inline failed, so make a direct call.
685    assert(_if_hit->is_inline(), "must have been a failed inline");
686    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
687    new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
688  }
689  kit.add_exception_states_from(new_jvms);
690  kit.set_jvms(new_jvms);
691
692  // Need to merge slow and fast?
693  if (slow_map == NULL) {
694    // The fast path is the only path remaining.
695    return kit.transfer_exceptions_into_jvms();
696  }
697
698  if (kit.stopped()) {
699    // Inlined method threw an exception, so it's just the slow path after all.
700    kit.set_jvms(slow_jvms);
701    return kit.transfer_exceptions_into_jvms();
702  }
703
704  // Finish the diamond.
705  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
706  RegionNode* region = new (kit.C) RegionNode(3);
707  region->init_req(1, kit.control());
708  region->init_req(2, slow_map->control());
709  kit.set_control(gvn.transform(region));
710  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
711  iophi->set_req(2, slow_map->i_o());
712  kit.set_i_o(gvn.transform(iophi));
713  kit.merge_memory(slow_map->merged_memory(), region, 2);
714  uint tos = kit.jvms()->stkoff() + kit.sp();
715  uint limit = slow_map->req();
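  // Merge every JVM state slot (locals, expression stack, monitors) that
  // differs between the fast and slow maps by inserting a phi on the region.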
716  for (uint i = TypeFunc::Parms; i < limit; i++) {
717    // Skip unused stack slots; fast forward to monoff();
718    if (i == tos) {
719      i = kit.jvms()->monoff();
720      if (i >= limit) break;
721    }
722    Node* m = kit.map()->in(i);
723    Node* n = slow_map->in(i);
724    if (m != n) {
725      const Type* t = gvn.type(m)->meet(gvn.type(n));
726      Node* phi = PhiNode::make(region, m, t);
727      phi->set_req(2, n);
728      kit.map()->set_req(i, gvn.transform(phi));
729    }
730  }
731  return kit.transfer_exceptions_into_jvms();
732}
733
734
735CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
736  assert(callee->is_method_handle_intrinsic() ||
737         callee->is_compiled_lambda_form(), "for_method_handle_call mismatch");
738  bool input_not_const;
739  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
740  Compile* C = Compile::current();
741  if (cg != NULL) {
742    if (!delayed_forbidden && AlwaysIncrementalInline) {
743      return CallGenerator::for_late_inline(callee, cg);
744    } else {
745      return cg;
746    }
747  }
748  int bci = jvms->bci();
749  ciCallProfile profile = caller->call_profile_at_bci(bci);
750  int call_site_count = caller->scale_count(profile.count());
751
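  // Defer via a MethodHandle late-inline generator when IncrementalInline is
  // enabled, the site has a nonzero profiled count, and either the input is
  // not yet constant, we are not already inlining incrementally, or we are
  // over the inlining cutoff; otherwise emit an out-of-line direct call.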
752  if (IncrementalInline && call_site_count > 0 &&
753      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
754    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
755  } else {
756    // Out-of-line call.
757    return CallGenerator::for_direct_call(callee);
758  }
759}
760
761CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
762  GraphKit kit(jvms);
763  PhaseGVN& gvn = kit.gvn();
764  Compile* C = kit.C;
765  vmIntrinsics::ID iid = callee->intrinsic_id();
766  input_not_const = true;
767  switch (iid) {
768  case vmIntrinsics::_invokeBasic:
769    {
770      // Get MethodHandle receiver:
771      Node* receiver = kit.argument(0);
772      if (receiver->Opcode() == Op_ConP) {
773        input_not_const = false;
774        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
775        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
776        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
777        const int vtable_index = Method::invalid_vtable_index;
778        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
779        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
780        if (cg != NULL && cg->is_inline())
781          return cg;
782      }
783    }
784    break;
785
786  case vmIntrinsics::_linkToVirtual:
787  case vmIntrinsics::_linkToStatic:
788  case vmIntrinsics::_linkToSpecial:
789  case vmIntrinsics::_linkToInterface:
790    {
791      // Get MemberName argument:
792      Node* member_name = kit.argument(callee->arg_size() - 1);
793      if (member_name->Opcode() == Op_ConP) {
794        input_not_const = false;
795        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
796        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
797
798        // In lambda forms we erase signature types to avoid resolution issues
799        // involving class loaders.  When we optimize a method handle invoke
800        // to a direct call we must cast the receiver and arguments to their
801        // actual types.
802        ciSignature* signature = target->signature();
803        const int receiver_skip = target->is_static() ? 0 : 1;
804        // Cast receiver to its type.
805        if (!target->is_static()) {
806          Node* arg = kit.argument(0);
807          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
808          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
809          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
810            Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
811            kit.set_argument(0, cast_obj);
812          }
813        }
814        // Cast reference arguments to their types.
815        for (int i = 0; i < signature->count(); i++) {
816          ciType* t = signature->type_at(i);
817          if (t->is_klass()) {
818            Node* arg = kit.argument(receiver_skip + i);
819            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
820            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
821            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
822              Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
823              kit.set_argument(receiver_skip + i, cast_obj);
824            }
825          }
826        }
827
828        // Try to get the most accurate receiver type
829        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
830        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
831        int  vtable_index       = Method::invalid_vtable_index;
832        bool call_does_dispatch = false;
833
834        ciKlass* speculative_receiver_type = NULL;
835        if (is_virtual_or_interface) {
836          ciInstanceKlass* klass = target->holder();
837          Node*             receiver_node = kit.argument(0);
838          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
839          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
840          target = C->optimize_virtual_call(caller, jvms->bci(), klass, target, receiver_type,
841                                            is_virtual,
842                                            call_does_dispatch, vtable_index);  // out-parameters
843          // We lack profiling at this call but type speculation may
844          // provide us with a type
845          speculative_receiver_type = receiver_type->speculative_type();
846        }
847
848        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
849        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
850        if (cg != NULL && cg->is_inline())
851          return cg;
852      }
853    }
854    break;
855
856  default:
857    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
858    break;
859  }
860  return NULL;
861}
862
863
864//------------------------PredictedIntrinsicGenerator------------------------------
865// Internal class which handles all predicted Intrinsic calls.
866class PredictedIntrinsicGenerator : public CallGenerator {
867  CallGenerator* _intrinsic;
868  CallGenerator* _cg;
869
870public:
871  PredictedIntrinsicGenerator(CallGenerator* intrinsic,
872                              CallGenerator* cg)
873    : CallGenerator(cg->method())
874  {
875    _intrinsic = intrinsic;
876    _cg        = cg;
877  }
878
879  virtual bool      is_virtual()   const    { return true; }
880  virtual bool      is_inlined()   const    { return true; }
881  virtual bool      is_intrinsic() const    { return true; }
882
883  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
884};
885
886
887CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic,
888                                                      CallGenerator* cg) {
889  return new PredictedIntrinsicGenerator(intrinsic, cg);
890}
891
892
893JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) {
894  GraphKit kit(jvms);
895  PhaseGVN& gvn = kit.gvn();
896
897  CompileLog* log = kit.C->log();
898  if (log != NULL) {
899    log->elem("predicted_intrinsic bci='%d' method='%d'",
900              jvms->bci(), log->identify(method()));
901  }
902
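  // Emit the intrinsic's guard test; slow_ctl is the control path taken when
  // the predicate fails and the non-intrinsic fallback must be used.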
903  Node* slow_ctl = _intrinsic->generate_predicate(kit.sync_jvms());
904  if (kit.failing())
905    return NULL;  // might happen because of NodeCountInliningCutoff
906
907  SafePointNode* slow_map = NULL;
908  JVMState* slow_jvms;
909  if (slow_ctl != NULL) {
910    PreserveJVMState pjvms(&kit);
911    kit.set_control(slow_ctl);
912    if (!kit.stopped()) {
913      slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser);
914      if (kit.failing())
915        return NULL;  // might happen because of NodeCountInliningCutoff
916      assert(slow_jvms != NULL, "must be");
917      kit.add_exception_states_from(slow_jvms);
918      kit.set_map(slow_jvms->map());
919      if (!kit.stopped())
920        slow_map = kit.stop();
921    }
922  }
923
924  if (kit.stopped()) {
925    // Predicate is always false.
926    kit.set_jvms(slow_jvms);
927    return kit.transfer_exceptions_into_jvms();
928  }
929
930  // Generate intrinsic code:
931  JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser);
932  if (new_jvms == NULL) {
933    // Intrinsic failed, so use slow code or make a direct call.
934    if (slow_map == NULL) {
935      CallGenerator* cg = CallGenerator::for_direct_call(method());
936      new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
937    } else {
938      kit.set_jvms(slow_jvms);
939      return kit.transfer_exceptions_into_jvms();
940    }
941  }
942  kit.add_exception_states_from(new_jvms);
943  kit.set_jvms(new_jvms);
944
945  // Need to merge slow and fast?
946  if (slow_map == NULL) {
947    // The fast path is the only path remaining.
948    return kit.transfer_exceptions_into_jvms();
949  }
950
951  if (kit.stopped()) {
952    // Intrinsic method threw an exception, so it's just the slow path after all.
953    kit.set_jvms(slow_jvms);
954    return kit.transfer_exceptions_into_jvms();
955  }
956
957  // Finish the diamond.
958  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
959  RegionNode* region = new (kit.C) RegionNode(3);
960  region->init_req(1, kit.control());
961  region->init_req(2, slow_map->control());
962  kit.set_control(gvn.transform(region));
963  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
964  iophi->set_req(2, slow_map->i_o());
965  kit.set_i_o(gvn.transform(iophi));
966  kit.merge_memory(slow_map->merged_memory(), region, 2);
967  uint tos = kit.jvms()->stkoff() + kit.sp();
968  uint limit = slow_map->req();
969  for (uint i = TypeFunc::Parms; i < limit; i++) {
970    // Skip unused stack slots; fast forward to monoff();
971    if (i == tos) {
972      i = kit.jvms()->monoff();
973      if (i >= limit) break;
974    }
975    Node* m = kit.map()->in(i);
976    Node* n = slow_map->in(i);
977    if (m != n) {
978      const Type* t = gvn.type(m)->meet(gvn.type(n));
979      Node* phi = PhiNode::make(region, m, t);
980      phi->set_req(2, n);
981      kit.map()->set_req(i, gvn.transform(phi));
982    }
983  }
984  return kit.transfer_exceptions_into_jvms();
985}
986
987//-------------------------UncommonTrapCallGenerator-----------------------------
988// Internal class which replaces the call with an unconditional uncommon trap.
989class UncommonTrapCallGenerator : public CallGenerator {
990  Deoptimization::DeoptReason _reason;
991  Deoptimization::DeoptAction _action;
992
993public:
994  UncommonTrapCallGenerator(ciMethod* m,
995                            Deoptimization::DeoptReason reason,
996                            Deoptimization::DeoptAction action)
997    : CallGenerator(m)
998  {
999    _reason = reason;
1000    _action = action;
1001  }
1002
1003  virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
1004  virtual bool      is_trap() const             { return true; }
1005
1006  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
1007};
1008
1009
1010CallGenerator*
1011CallGenerator::for_uncommon_trap(ciMethod* m,
1012                                 Deoptimization::DeoptReason reason,
1013                                 Deoptimization::DeoptAction action) {
1014  return new UncommonTrapCallGenerator(m, reason, action);
1015}
1016
1017
1018JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
1019  GraphKit kit(jvms);
1020  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
1021  int nargs = method()->arg_size();
1022  kit.inc_sp(nargs);
1023  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
1024  if (_reason == Deoptimization::Reason_class_check &&
1025      _action == Deoptimization::Action_maybe_recompile) {
1026    // Temp fix for 6529811
1027    // Don't allow uncommon_trap to override our decision to recompile in the event
1028    // of a class cast failure for a monomorphic call as it will never let us convert
1029    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
1030    bool keep_exact_action = true;
1031    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
1032  } else {
1033    kit.uncommon_trap(_reason, _action);
1034  }
1035  return kit.transfer_exceptions_into_jvms();
1036}
1037
1038// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)
1039
1040// (Note:  Merged hook_up_exits into ParseGenerator::generate.)
1041
1042#define NODES_OVERHEAD_PER_METHOD (30.0)
1043#define NODES_PER_BYTECODE (9.5)
1044
1045void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
1046  int call_count = profile.count();
1047  int code_size = call_method->code_size();
1048
1049  // Expected execution count is based on the historical count:
1050  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);
1051
1052  // Expected profit from inlining, in units of simple call-overheads.
1053  _profit = 1.0;
1054
1055  // Expected work performed by the call in units of call-overheads.
1056  // %%% need an empirical curve fit for "work" (time in call)
1057  float bytecodes_per_call = 3;
1058  _work = 1.0 + code_size / bytecodes_per_call;
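  // For example, a method with 30 bytecodes is estimated at
  // _work = 1.0 + 30 / 3 = 11 call-overheads of work.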
1059
1060  // Expected size of compilation graph:
1061  // -XX:+PrintParseStatistics once reported:
1062  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
1063  //  Histogram of 144298 parsed bytecodes:
1064  // %%% Need a better predictor for graph size.
1065  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
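  // For example, a 40-bytecode method is estimated at 30 + 9.5 * 40 = 410 nodes.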
1066}
1067
1068// is_cold:  Return true if the node should never be inlined.
1069// This is true if any of the key metrics are extreme.
1070bool WarmCallInfo::is_cold() const {
1071  if (count()  <  WarmCallMinCount)        return true;
1072  if (profit() <  WarmCallMinProfit)       return true;
1073  if (work()   >  WarmCallMaxWork)         return true;
1074  if (size()   >  WarmCallMaxSize)         return true;
1075  return false;
1076}
1077
1078// is_hot:  Return true if the node should be inlined immediately.
1079// This is true if any of the key metrics are extreme.
1080bool WarmCallInfo::is_hot() const {
1081  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
1082  if (count()  >= HotCallCountThreshold)   return true;
1083  if (profit() >= HotCallProfitThreshold)  return true;
1084  if (work()   <= HotCallTrivialWork)      return true;
1085  if (size()   <= HotCallTrivialSize)      return true;
1086  return false;
1087}
1088
1089// compute_heat:
1090float WarmCallInfo::compute_heat() const {
1091  assert(!is_cold(), "compute heat only on warm nodes");
1092  assert(!is_hot(),  "compute heat only on warm nodes");
1093  int min_size = MAX2(0,   (int)HotCallTrivialSize);
1094  int max_size = MIN2(500, (int)WarmCallMaxSize);
1095  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
1096  float size_factor;
1097  if      (method_size < 0.05)  size_factor = 4;   // 2 sigmas better than avg.
1098  else if (method_size < 0.15)  size_factor = 2;   // 1 sigma better than avg.
1099  else if (method_size < 0.5)   size_factor = 1;   // better than avg.
1100  else                          size_factor = 0.5; // worse than avg.
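  // Example: count() == 100, profit() == 1.0, and a size in the smallest 5%
  // of the warm range yields heat = 100 * 1.0 * 4 = 400.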
1101  return (count() * profit() * size_factor);
1102}
1103
1104bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
1105  assert(this != that, "compare only different WCIs");
1106  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
1107  if (this->heat() > that->heat())   return true;
1108  if (this->heat() < that->heat())   return false;
1109  assert(this->heat() == that->heat(), "no NaN heat allowed");
1110  // Equal heat.  Break the tie some other way.
1111  if (!this->call() || !that->call())  return (address)this > (address)that;
1112  return this->call()->_idx > that->call()->_idx;
1113}
1114
1115//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
1116#define UNINIT_NEXT ((WarmCallInfo*)NULL)
1117
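// Insert this WarmCallInfo into the list headed by 'head', which is kept
// sorted by decreasing heat; returns the (possibly updated) head.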
1118WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
1119  assert(next() == UNINIT_NEXT, "not yet on any list");
1120  WarmCallInfo* prev_p = NULL;
1121  WarmCallInfo* next_p = head;
1122  while (next_p != NULL && next_p->warmer_than(this)) {
1123    prev_p = next_p;
1124    next_p = prev_p->next();
1125  }
1126  // Install this between prev_p and next_p.
1127  this->set_next(next_p);
1128  if (prev_p == NULL)
1129    head = this;
1130  else
1131    prev_p->set_next(this);
1132  return head;
1133}
1134
1135WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
1136  WarmCallInfo* prev_p = NULL;
1137  WarmCallInfo* next_p = head;
1138  while (next_p != this) {
1139    assert(next_p != NULL, "this must be in the list somewhere");
1140    prev_p = next_p;
1141    next_p = prev_p->next();
1142  }
1143  next_p = this->next();
1144  debug_only(this->set_next(UNINIT_NEXT));
1145  // Remove this from between prev_p and next_p.
1146  if (prev_p == NULL)
1147    head = next_p;
1148  else
1149    prev_p->set_next(next_p);
1150  return head;
1151}
1152
1153WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
1154                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
1155WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
1156                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());
1157
1158WarmCallInfo* WarmCallInfo::always_hot() {
1159  assert(_always_hot.is_hot(), "must always be hot");
1160  return &_always_hot;
1161}
1162
1163WarmCallInfo* WarmCallInfo::always_cold() {
1164  assert(_always_cold.is_cold(), "must always be cold");
1165  return &_always_cold;
1166}
1167
1168
1169#ifndef PRODUCT
1170
1171void WarmCallInfo::print() const {
1172  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
1173             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
1174             count(), profit(), work(), size(), compute_heat(), next());
1175  tty->cr();
1176  if (call() != NULL)  call()->dump();
1177}
1178
1179void print_wci(WarmCallInfo* ci) {
1180  ci->print();
1181}
1182
1183void WarmCallInfo::print_all() const {
1184  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
1185    p->print();
1186}
1187
1188int WarmCallInfo::count_all() const {
1189  int cnt = 0;
1190  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
1191    cnt++;
1192  return cnt;
1193}
1194
1195#endif //PRODUCT
1196