// macro.cpp, revision 628:7bb995fbd3c0
/*
 * Copyright 2005-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_macro.cpp.incl"


//
// Replace any references to "oldref" in inputs to "use" with "newref".
// Returns the number of replacements made.
//
int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
  int nreplacements = 0;
  uint req = use->req();
  for (uint j = 0; j < use->len(); j++) {
    Node *uin = use->in(j);
    if (uin == oldref) {
      if (j < req)
        use->set_req(j, newref);
      else
        use->set_prec(j, newref);
      nreplacements++;
    } else if (j >= req && uin == NULL) {
      break;
    }
  }
  return nreplacements;
}
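
// Note: slots below use->req() are required edges and are rewired with
// set_req(); slots at req() and above are precedence edges (terminated by
// the first NULL) and are rewired with set_prec().  For example (a sketch),
// replace_input(store, old_mem, new_mem) rewires a Store's memory input in
// place without disturbing the node's other edges.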

void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcall) {
  // Copy debug information and adjust JVMState information
  uint old_dbg_start = oldcall->tf()->domain()->cnt();
  uint new_dbg_start = newcall->tf()->domain()->cnt();
  int jvms_adj  = new_dbg_start - old_dbg_start;
  assert(new_dbg_start == newcall->req(), "argument count mismatch");

  Dict* sosn_map = new Dict(cmpkey, hashkey);
  for (uint i = old_dbg_start; i < oldcall->req(); i++) {
    Node* old_in = oldcall->in(i);
    // Clone old SafePointScalarObjectNodes, adjusting their field contents.
    if (old_in != NULL && old_in->is_SafePointScalarObject()) {
      SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
      uint old_unique = C->unique();
      Node* new_in = old_sosn->clone(jvms_adj, sosn_map);
      if (old_unique != C->unique()) {
        new_in->set_req(0, newcall->in(0)); // reset control edge
        new_in = transform_later(new_in); // Register new node.
      }
      old_in = new_in;
    }
    newcall->add_req(old_in);
  }

  newcall->set_jvms(oldcall->jvms());
  for (JVMState *jvms = newcall->jvms(); jvms != NULL; jvms = jvms->caller()) {
    jvms->set_map(newcall);
    jvms->set_locoff(jvms->locoff()+jvms_adj);
    jvms->set_stkoff(jvms->stkoff()+jvms_adj);
    jvms->set_monoff(jvms->monoff()+jvms_adj);
    jvms->set_scloff(jvms->scloff()+jvms_adj);
    jvms->set_endoff(jvms->endoff()+jvms_adj);
  }
}

Node* PhaseMacroExpand::opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path) {
  Node* cmp;
  if (mask != 0) {
    Node* and_node = transform_later(new (C, 3) AndXNode(word, MakeConX(mask)));
    cmp = transform_later(new (C, 3) CmpXNode(and_node, MakeConX(bits)));
  } else {
    cmp = word;
  }
  Node* bol = transform_later(new (C, 2) BoolNode(cmp, BoolTest::ne));
  IfNode* iff = new (C, 2) IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN );
  transform_later(iff);

  // Fast path taken.
  Node *fast_taken = transform_later( new (C, 1) IfFalseNode(iff) );

  // Fast path not taken, i.e. slow path.
  Node *slow_taken = transform_later( new (C, 1) IfTrueNode(iff) );

  if (return_fast_path) {
    region->init_req(edge, slow_taken); // Capture slow-control
    return fast_taken;
  } else {
    region->init_req(edge, fast_taken); // Capture fast-control
    return slow_taken;
  }
}
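
// For illustration, the graph built above for a non-zero mask looks roughly
// like this (a sketch, not emitted verbatim):
//
//        word   MakeConX(mask)
//           \   /
//           AndX    MakeConX(bits)
//              \    /
//               CmpX
//                |
//             Bool(ne)
//                |
//   ctrl --> If --+--> IfTrue  ((word & mask) != bits: slow path)
//                 +--> IfFalse ((word & mask) == bits: fast path)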

//--------------------copy_predefined_input_for_runtime_call--------------------
void PhaseMacroExpand::copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call) {
  // Set fixed predefined input arguments
  call->init_req( TypeFunc::Control, ctrl );
  call->init_req( TypeFunc::I_O    , oldcall->in( TypeFunc::I_O) );
  call->init_req( TypeFunc::Memory , oldcall->in( TypeFunc::Memory ) ); // ?????
  call->init_req( TypeFunc::ReturnAdr, oldcall->in( TypeFunc::ReturnAdr ) );
  call->init_req( TypeFunc::FramePtr, oldcall->in( TypeFunc::FramePtr ) );
}

//------------------------------make_slow_call---------------------------------
CallNode* PhaseMacroExpand::make_slow_call(CallNode *oldcall, const TypeFunc* slow_call_type, address slow_call, const char* leaf_name, Node* slow_path, Node* parm0, Node* parm1) {

  // Slow-path call
  int size = slow_call_type->domain()->cnt();
  CallNode *call = leaf_name
    ? (CallNode*)new (C, size) CallLeafNode      ( slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM )
    : (CallNode*)new (C, size) CallStaticJavaNode( slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), oldcall->jvms()->bci(), TypeRawPtr::BOTTOM );

  // Slow path call has no side-effects, uses few values
  copy_predefined_input_for_runtime_call(slow_path, oldcall, call);
  if (parm0 != NULL)  call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != NULL)  call->init_req(TypeFunc::Parms+1, parm1);
  copy_call_debug_info(oldcall, call);
  call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
  _igvn.hash_delete(oldcall);
  _igvn.subsume_node(oldcall, call);
  transform_later(call);

  return call;
}

void PhaseMacroExpand::extract_call_projections(CallNode *call) {
  _fallthroughproj = NULL;
  _fallthroughcatchproj = NULL;
  _ioproj_fallthrough = NULL;
  _ioproj_catchall = NULL;
  _catchallcatchproj = NULL;
  _memproj_fallthrough = NULL;
  _memproj_catchall = NULL;
  _resproj = NULL;
  for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
    ProjNode *pn = call->fast_out(i)->as_Proj();
    switch (pn->_con) {
      case TypeFunc::Control:
      {
        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
        _fallthroughproj = pn;
        DUIterator_Fast jmax, j = pn->fast_outs(jmax);
        const Node *cn = pn->fast_out(j);
        if (cn->is_Catch()) {
          ProjNode *cpn = NULL;
          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
            cpn = cn->fast_out(k)->as_Proj();
            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
            if (cpn->_con == CatchProjNode::fall_through_index)
              _fallthroughcatchproj = cpn;
            else {
              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
              _catchallcatchproj = cpn;
            }
          }
        }
        break;
      }
      case TypeFunc::I_O:
        if (pn->_is_io_use)
          _ioproj_catchall = pn;
        else
          _ioproj_fallthrough = pn;
        break;
      case TypeFunc::Memory:
        if (pn->_is_io_use)
          _memproj_catchall = pn;
        else
          _memproj_fallthrough = pn;
        break;
      case TypeFunc::Parms:
        _resproj = pn;
        break;
      default:
        assert(false, "unexpected projection from allocation node.");
    }
  }
}

// Eliminate a card mark sequence.  p2x is a ConvP2XNode
void PhaseMacroExpand::eliminate_card_mark(Node *p2x) {
  assert(p2x->Opcode() == Op_CastP2X, "ConvP2XNode required");
  Node *shift = p2x->unique_out();
  Node *addp = shift->unique_out();
  for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
    Node *st = addp->last_out(j);
    assert(st->is_Store(), "store required");
    _igvn.replace_node(st, st->in(MemNode::Memory));
  }
}
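
// The card-mark shape removed above is, schematically (a sketch of the
// usual pattern; the stores update the card table, which is dead once the
// allocation is eliminated):
//
//   CastP2X(obj) --> URShiftX(card shift) --> AddP(card table base) --> Store(s)
//
// Each card-table store is spliced out by wiring its users to the store's
// incoming memory state.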

// Search for a memory operation for the specified memory slice.
static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc, PhaseGVN *phase) {
  Node *orig_mem = mem;
  Node *alloc_mem = alloc->in(TypeFunc::Memory);
  const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr();
  while (true) {
    if (mem == alloc_mem || mem == start_mem ) {
      return mem;  // hit one of our sentinels
    } else if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(alias_idx);
    } else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) {
      Node *in = mem->in(0);
      // we can safely skip over safepoints, calls, locks and membars because we
      // already know that the object is safe to eliminate.
      if (in->is_Initialize() && in->as_Initialize()->allocation() == alloc) {
        return in;
      } else if (in->is_Call()) {
        // The object does not escape, so it is safe to step over the call
        // to its incoming memory state.
        mem = in->in(TypeFunc::Memory);
      } else if (in->is_MemBar()) {
        mem = in->in(TypeFunc::Memory);
      } else {
        assert(false, "unexpected projection");
      }
    } else if (mem->is_Store()) {
      const TypePtr* atype = mem->as_Store()->adr_type();
      int adr_idx = Compile::current()->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        assert(atype->isa_oopptr(), "address type must be oopptr");
        int adr_offset = atype->offset();
        uint adr_iid = atype->is_oopptr()->instance_id();
        // References to array elements have the same alias_idx
        // but different offsets and different instance_ids.
        if (adr_offset == offset && adr_iid == alloc->_idx)
          return mem;
      } else {
        assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
      }
      mem = mem->in(MemNode::Memory);
    } else if (mem->Opcode() == Op_SCMemProj) {
      assert(mem->in(0)->is_LoadStore(), "sanity");
      const TypePtr* atype = mem->in(0)->in(MemNode::Address)->bottom_type()->is_ptr();
      int adr_idx = Compile::current()->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
        return NULL;
      }
      mem = mem->in(0)->in(MemNode::Memory);
    } else {
      return mem;
    }
    assert(mem != orig_mem, "dead memory loop");
  }
}
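
// A sketch of the chains this walk steps through (assuming the usual IR
// shapes; each arrow goes from a memory state to its memory input):
//
//   sfpt_mem --> MergeMem --> Proj(Memory) of {Call | MemBar}
//            --> Store (other slice or other instance) --> ...
//            --> Initialize of "alloc", or the alloc_mem/start_mem sentinels
//
// A Store on the right slice with this instance's id, the allocation's own
// Initialize, or one of the two sentinels terminates the search.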

//
// Given a memory Phi, compute a value Phi containing the values from stores
// on the input paths.
// Note: this function is recursive; its depth is limited by the "level" argument.
// Returns the computed Phi, or NULL if it cannot compute it.
Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, Node *alloc, Node_Stack *value_phis, int level) {
  assert(mem->is_Phi(), "sanity");
  int alias_idx = C->get_alias_index(adr_t);
  int offset = adr_t->offset();
  int instance_id = adr_t->instance_id();

  // Check if an appropriate value phi already exists.
  Node* region = mem->in(0);
  for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
    Node* phi = region->fast_out(k);
    if (phi->is_Phi() && phi != mem &&
        phi->as_Phi()->is_same_inst_field(phi_type, instance_id, alias_idx, offset)) {
      return phi;
    }
  }
  // Check if an appropriate new value phi already exists.
  Node* new_phi = NULL;
  uint size = value_phis->size();
  for (uint i = 0; i < size; i++) {
    if ( mem->_idx == value_phis->index_at(i) ) {
      return value_phis->node_at(i);
    }
  }

  if (level <= 0) {
    return NULL; // Give up: phi tree too deep
  }
  Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
  Node *alloc_mem = alloc->in(TypeFunc::Memory);

  uint length = mem->req();
  GrowableArray <Node *> values(length, length, NULL);

  // create a new Phi for the value
  PhiNode *phi = new (C, length) PhiNode(mem->in(0), phi_type, NULL, instance_id, alias_idx, offset);
  transform_later(phi);
  value_phis->push(phi, mem->_idx);

  for (uint j = 1; j < length; j++) {
    Node *in = mem->in(j);
    if (in == NULL || in->is_top()) {
      values.at_put(j, in);
    } else {
      Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
      if (val == start_mem || val == alloc_mem) {
        // hit a sentinel, return appropriate 0 value
        values.at_put(j, _igvn.zerocon(ft));
        continue;
      }
      if (val->is_Initialize()) {
        val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
      }
      if (val == NULL) {
        return NULL;  // can't find a value on this path
      }
      if (val == mem) {
        values.at_put(j, mem);
      } else if (val->is_Store()) {
        values.at_put(j, val->in(MemNode::ValueIn));
      } else if (val->is_Proj() && val->in(0) == alloc) {
        values.at_put(j, _igvn.zerocon(ft));
      } else if (val->is_Phi()) {
        val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
        if (val == NULL) {
          return NULL;
        }
        values.at_put(j, val);
      } else if (val->Opcode() == Op_SCMemProj) {
        assert(val->in(0)->is_LoadStore(), "sanity");
        assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
        return NULL;
      } else {
#ifdef ASSERT
        val->dump();
        assert(false, "unknown node on this path");
#endif
        return NULL;  // unknown node on this path
      }
    }
  }
  // Set the Phi's inputs
  for (uint j = 1; j < length; j++) {
    if (values.at(j) == mem) {
      phi->init_req(j, phi);
    } else {
      phi->init_req(j, values.at(j));
    }
  }
  return phi;
}

// Search for the last value stored into the object's field.
Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc) {
  assert(adr_t->is_known_instance_field(), "instance required");
  int instance_id = adr_t->instance_id();
  assert((uint)instance_id == alloc->_idx, "wrong allocation");

  int alias_idx = C->get_alias_index(adr_t);
  int offset = adr_t->offset();
  Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
  Node *alloc_ctrl = alloc->in(TypeFunc::Control);
  Node *alloc_mem = alloc->in(TypeFunc::Memory);
  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);

  bool done = sfpt_mem == alloc_mem;
  Node *mem = sfpt_mem;
  while (!done) {
    if (visited.test_set(mem->_idx)) {
      return NULL;  // found a loop, give up
    }
    mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
    if (mem == start_mem || mem == alloc_mem) {
      done = true;  // hit a sentinel, return appropriate 0 value
    } else if (mem->is_Initialize()) {
      mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
      if (mem == NULL) {
        done = true; // Something went wrong.
      } else if (mem->is_Store()) {
        const TypePtr* atype = mem->as_Store()->adr_type();
        assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice");
        done = true;
      }
    } else if (mem->is_Store()) {
      const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr();
      assert(atype != NULL, "address type must be oopptr");
      assert(C->get_alias_index(atype) == alias_idx &&
             atype->is_known_instance_field() && atype->offset() == offset &&
             atype->instance_id() == instance_id, "store is correct memory slice");
      done = true;
    } else if (mem->is_Phi()) {
      // try to find the phi's unique input
      Node *unique_input = NULL;
      Node *top = C->top();
      for (uint i = 1; i < mem->req(); i++) {
        Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn);
        if (n == NULL || n == top || n == mem) {
          continue;
        } else if (unique_input == NULL) {
          unique_input = n;
        } else if (unique_input != n) {
          unique_input = top;
          break;
        }
      }
      if (unique_input != NULL && unique_input != top) {
        mem = unique_input;
      } else {
        done = true;
      }
    } else {
      assert(false, "unexpected node");
    }
  }
  if (mem != NULL) {
    if (mem == start_mem || mem == alloc_mem) {
      // hit a sentinel, return appropriate 0 value
      return _igvn.zerocon(ft);
    } else if (mem->is_Store()) {
      return mem->in(MemNode::ValueIn);
    } else if (mem->is_Phi()) {
      // attempt to produce a Phi reflecting the values on the input paths of the Phi
      Node_Stack value_phis(a, 8);
      Node *phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
      if (phi != NULL) {
        return phi;
      } else {
        // Kill all new Phis
        while (value_phis.is_nonempty()) {
          Node* n = value_phis.node();
          _igvn.hash_delete(n);
          _igvn.subsume_node(n, C->top());
          value_phis.pop();
        }
      }
    }
  }
  // Something went wrong.
  return NULL;
}
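
// For example (a sketch): given "p.f = 5; safepoint();" where p does not
// escape, the walk from the safepoint's memory state reaches the StoreI of 5
// on p.f's slice and returns its stored value; if the walk reaches alloc_mem
// first, the field still holds its default and zerocon(ft) is returned.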

// Check the possibility of scalar replacement.
bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
  //  Scan the uses of the allocation to check for anything that would
  //  prevent us from eliminating it.
  NOT_PRODUCT( const char* fail_eliminate = NULL; )
  DEBUG_ONLY( Node* disq_node = NULL; )
  bool  can_eliminate = true;

  Node* res = alloc->result_cast();
  const TypeOopPtr* res_type = NULL;
  if (res == NULL) {
    // All users were eliminated.
  } else if (!res->is_CheckCastPP()) {
    alloc->_is_scalar_replaceable = false;  // don't try again
    NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)
    can_eliminate = false;
  } else {
    res_type = _igvn.type(res)->isa_oopptr();
    if (res_type == NULL) {
      NOT_PRODUCT(fail_eliminate = "Neither instance nor array allocation";)
      can_eliminate = false;
    } else if (res_type->isa_aryptr()) {
      int length = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      if (length < 0) {
        NOT_PRODUCT(fail_eliminate = "Array's size is not constant";)
        can_eliminate = false;
      }
    }
  }

  if (can_eliminate && res != NULL) {
    for (DUIterator_Fast jmax, j = res->fast_outs(jmax);
                               j < jmax && can_eliminate; j++) {
      Node* use = res->fast_out(j);

      if (use->is_AddP()) {
        const TypePtr* addp_type = _igvn.type(use)->is_ptr();
        int offset = addp_type->offset();

        if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
          NOT_PRODUCT(fail_eliminate = "Undefined field reference";)
          can_eliminate = false;
          break;
        }
        for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
                                   k < kmax && can_eliminate; k++) {
          Node* n = use->fast_out(k);
          if (!n->is_Store() && n->Opcode() != Op_CastP2X) {
            DEBUG_ONLY(disq_node = n;)
            if (n->is_Load() || n->is_LoadStore()) {
              NOT_PRODUCT(fail_eliminate = "Field load";)
            } else {
              NOT_PRODUCT(fail_eliminate = "Non-store field reference";)
            }
            can_eliminate = false;
          }
        }
      } else if (use->is_SafePoint()) {
        SafePointNode* sfpt = use->as_SafePoint();
        if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) {
          // Object is passed as argument.
          DEBUG_ONLY(disq_node = use;)
          NOT_PRODUCT(fail_eliminate = "Object is passed as argument";)
          can_eliminate = false;
        }
        Node* sfptMem = sfpt->memory();
        if (sfptMem == NULL || sfptMem->is_top()) {
          DEBUG_ONLY(disq_node = use;)
          NOT_PRODUCT(fail_eliminate = "NULL or TOP memory";)
          can_eliminate = false;
        } else {
          safepoints.append_if_missing(sfpt);
        }
      } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark
        if (use->is_Phi()) {
          if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) {
            NOT_PRODUCT(fail_eliminate = "Object is return value";)
          } else {
            NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";)
          }
          DEBUG_ONLY(disq_node = use;)
        } else {
          if (use->Opcode() == Op_Return) {
            NOT_PRODUCT(fail_eliminate = "Object is return value";)
          } else {
            NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
          }
          DEBUG_ONLY(disq_node = use;)
        }
        can_eliminate = false;
      }
    }
  }

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    if (can_eliminate) {
      tty->print("Scalar ");
      if (res == NULL)
        alloc->dump();
      else
        res->dump();
    } else {
      tty->print("NotScalar (%s)", fail_eliminate);
      if (res == NULL)
        alloc->dump();
      else
        res->dump();
#ifdef ASSERT
      if (disq_node != NULL) {
          tty->print("  >>>> ");
          disq_node->dump();
      }
#endif /*ASSERT*/
    }
  }
#endif
  return can_eliminate;
}

// Do scalar replacement.
bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
  GrowableArray <SafePointNode *> safepoints_done;

  ciKlass* klass = NULL;
  ciInstanceKlass* iklass = NULL;
  int nfields = 0;
  int array_base;
  int element_size;
  BasicType basic_elem_type;
  ciType* elem_type;

  Node* res = alloc->result_cast();
  const TypeOopPtr* res_type = NULL;
  if (res != NULL) { // Could be NULL when there are no users
    res_type = _igvn.type(res)->isa_oopptr();
  }

  if (res != NULL) {
    klass = res_type->klass();
    if (res_type->isa_instptr()) {
      // find the fields of the class which will be needed for safepoint debug information
      assert(klass->is_instance_klass(), "must be an instance klass.");
      iklass = klass->as_instance_klass();
      nfields = iklass->nof_nonstatic_fields();
    } else {
      // find the array's elements which will be needed for safepoint debug information
      nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      assert(klass->is_array_klass() && nfields >= 0, "must be an array klass.");
      elem_type = klass->as_array_klass()->element_type();
      basic_elem_type = elem_type->basic_type();
      array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
      element_size = type2aelembytes(basic_elem_type);
    }
  }
  //
  // Process the safepoint uses
  //
  while (safepoints.length() > 0) {
    SafePointNode* sfpt = safepoints.pop();
    Node* mem = sfpt->memory();
    uint first_ind = sfpt->req();
    SafePointScalarObjectNode* sobj = new (C, 1) SafePointScalarObjectNode(res_type,
#ifdef ASSERT
                                                 alloc,
#endif
                                                 first_ind, nfields);
    sobj->init_req(0, sfpt->in(TypeFunc::Control));
    transform_later(sobj);

    // Scan the object's fields, adding an input to the safepoint for each field.
    for (int j = 0; j < nfields; j++) {
      intptr_t offset;
      ciField* field = NULL;
      if (iklass != NULL) {
        field = iklass->nonstatic_field_at(j);
        offset = field->offset();
        elem_type = field->type();
        basic_elem_type = field->layout_type();
      } else {
        offset = array_base + j * (intptr_t)element_size;
      }

      const Type *field_type;
      // The following code is taken from Parse::do_get_xxx().
      if (basic_elem_type == T_OBJECT || basic_elem_type == T_ARRAY) {
        if (!elem_type->is_loaded()) {
          field_type = TypeInstPtr::BOTTOM;
        } else if (field != NULL && field->is_constant()) {
          // This can happen if the constant oop is non-perm.
          ciObject* con = field->constant_value().as_object();
          // Do not "join" in the previous type; it doesn't add value,
          // and may yield a vacuous result if the field is of interface type.
          field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
          assert(field_type != NULL, "field singleton type must be consistent");
        } else {
          field_type = TypeOopPtr::make_from_klass(elem_type->as_klass());
        }
        if (UseCompressedOops) {
          field_type = field_type->make_narrowoop();
          basic_elem_type = T_NARROWOOP;
        }
      } else {
        field_type = Type::get_const_basic_type(basic_elem_type);
      }

      const TypeOopPtr *field_addr_type = res_type->add_offset(offset)->isa_oopptr();

      Node *field_val = value_from_mem(mem, basic_elem_type, field_type, field_addr_type, alloc);
      if (field_val == NULL) {
        // We weren't able to find a value for this field,
        // so give up on eliminating this allocation.
        alloc->_is_scalar_replaceable = false;  // don't try again
        // remove any extra entries we added to the safepoint
        uint last = sfpt->req() - 1;
        for (int k = 0; k < j; k++) {
          sfpt->del_req(last--);
        }
        // rollback processed safepoints
        while (safepoints_done.length() > 0) {
          SafePointNode* sfpt_done = safepoints_done.pop();
          // remove any extra entries we added to the safepoint
          last = sfpt_done->req() - 1;
          for (int k = 0; k < nfields; k++) {
            sfpt_done->del_req(last--);
          }
          JVMState *jvms = sfpt_done->jvms();
          jvms->set_endoff(sfpt_done->req());
          // Now make a pass over the debug information replacing any references
          // to SafePointScalarObjectNode with the allocated object.
          int start = jvms->debug_start();
          int end   = jvms->debug_end();
          for (int i = start; i < end; i++) {
            if (sfpt_done->in(i)->is_SafePointScalarObject()) {
              SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
              if (scobj->first_index() == sfpt_done->req() &&
                  scobj->n_fields() == (uint)nfields) {
                assert(scobj->alloc() == alloc, "sanity");
                sfpt_done->set_req(i, res);
              }
            }
          }
        }
#ifndef PRODUCT
        if (PrintEliminateAllocations) {
          if (field != NULL) {
            tty->print("=== At SafePoint node %d can't find value of Field: ",
                       sfpt->_idx);
            field->print();
            int field_idx = C->get_alias_index(field_addr_type);
            tty->print(" (alias_idx=%d)", field_idx);
          } else { // Array's element
            tty->print("=== At SafePoint node %d can't find value of array element [%d]",
                       sfpt->_idx, j);
          }
          tty->print(", which prevents elimination of: ");
          if (res == NULL)
            alloc->dump();
          else
            res->dump();
        }
#endif
        return false;
      }
      if (UseCompressedOops && field_type->isa_narrowoop()) {
        // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
        // to be able to scalar replace the allocation.
        if (field_val->is_EncodeP()) {
          field_val = field_val->in(1);
        } else {
          field_val = transform_later(new (C, 2) DecodeNNode(field_val, field_val->bottom_type()->make_ptr()));
        }
      }
      sfpt->add_req(field_val);
    }
    JVMState *jvms = sfpt->jvms();
    jvms->set_endoff(sfpt->req());
    // Now make a pass over the debug information replacing any references
    // to the allocated object with "sobj"
    int start = jvms->debug_start();
    int end   = jvms->debug_end();
    for (int i = start; i < end; i++) {
      if (sfpt->in(i) == res) {
        sfpt->set_req(i, sobj);
      }
    }
    safepoints_done.append_if_missing(sfpt); // keep it for rollback
  }
  return true;
}
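
// A sketch of the rewrite performed above: a safepoint that referred to the
// allocated oop
//
//   SafePoint(..., res, ...)
//
// becomes
//
//   SafePoint(..., sobj, ..., field_val_0, ..., field_val_{nfields-1})
//
// where sobj is a SafePointScalarObjectNode whose first_index locates the
// appended field values, so deoptimization can rematerialize the object.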

// Process users of eliminated allocation.
void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) {
  Node* res = alloc->result_cast();
  if (res != NULL) {
    for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
      Node *use = res->last_out(j);
      uint oc1 = res->outcnt();

      if (use->is_AddP()) {
        for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) {
          Node *n = use->last_out(k);
          uint oc2 = use->outcnt();
          if (n->is_Store()) {
            _igvn.replace_node(n, n->in(MemNode::Memory));
          } else {
            assert( n->Opcode() == Op_CastP2X, "CastP2X required");
            eliminate_card_mark(n);
          }
          k -= (oc2 - use->outcnt());
        }
      } else {
        assert( !use->is_SafePoint(), "safepoint uses must have been already eliminated");
        assert( use->Opcode() == Op_CastP2X, "CastP2X required");
        eliminate_card_mark(use);
      }
      j -= (oc1 - res->outcnt());
    }
    assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
    _igvn.remove_dead_node(res);
  }

  //
  // Process other users of the allocation's projections
  //
  if (_resproj != NULL && _resproj->outcnt() != 0) {
    for (DUIterator_Last jmin, j = _resproj->last_outs(jmin); j >= jmin; ) {
      Node *use = _resproj->last_out(j);
      uint oc1 = _resproj->outcnt();
      if (use->is_Initialize()) {
        // Eliminate Initialize node.
        InitializeNode *init = use->as_Initialize();
        assert(init->outcnt() <= 2, "only a control and memory projection expected");
        Node *ctrl_proj = init->proj_out(TypeFunc::Control);
        if (ctrl_proj != NULL) {
          assert(init->in(TypeFunc::Control) == _fallthroughcatchproj, "allocation control projection");
          _igvn.replace_node(ctrl_proj, _fallthroughcatchproj);
        }
        Node *mem_proj = init->proj_out(TypeFunc::Memory);
        if (mem_proj != NULL) {
          Node *mem = init->in(TypeFunc::Memory);
#ifdef ASSERT
          if (mem->is_MergeMem()) {
            assert(mem->in(TypeFunc::Memory) == _memproj_fallthrough, "allocation memory projection");
          } else {
            assert(mem == _memproj_fallthrough, "allocation memory projection");
          }
#endif
          _igvn.replace_node(mem_proj, mem);
        }
      } else if (use->is_AddP()) {
        // raw memory addresses used only by the initialization
        _igvn.hash_delete(use);
        _igvn.subsume_node(use, C->top());
      } else {
        assert(false, "only Initialize or AddP expected");
      }
      j -= (oc1 - _resproj->outcnt());
    }
  }
  if (_fallthroughcatchproj != NULL) {
    _igvn.replace_node(_fallthroughcatchproj, alloc->in(TypeFunc::Control));
  }
  if (_memproj_fallthrough != NULL) {
    _igvn.replace_node(_memproj_fallthrough, alloc->in(TypeFunc::Memory));
  }
  if (_memproj_catchall != NULL) {
    _igvn.replace_node(_memproj_catchall, C->top());
  }
  if (_ioproj_fallthrough != NULL) {
    _igvn.replace_node(_ioproj_fallthrough, alloc->in(TypeFunc::I_O));
  }
  if (_ioproj_catchall != NULL) {
    _igvn.replace_node(_ioproj_catchall, C->top());
  }
  if (_catchallcatchproj != NULL) {
    _igvn.replace_node(_catchallcatchproj, C->top());
  }
}

bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {

  if (!EliminateAllocations || !alloc->_is_scalar_replaceable) {
    return false;
  }

  extract_call_projections(alloc);

  GrowableArray <SafePointNode *> safepoints;
  if (!can_eliminate_allocation(alloc, safepoints)) {
    return false;
  }

  if (!scalar_replacement(alloc, safepoints)) {
    return false;
  }

  process_users_of_allocation(alloc);

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    if (alloc->is_AllocateArray())
      tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
    else
      tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
  }
#endif

  return true;
}


//---------------------------set_eden_pointers-------------------------
void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) {
  if (UseTLAB) {                // Private allocation: load from TLS
    Node* thread = transform_later(new (C, 1) ThreadLocalNode());
    int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset());
    int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset());
    eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset);
    eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_end_offset);
  } else {                      // Shared allocation: load from globals
    CollectedHeap* ch = Universe::heap();
    address top_adr = (address)ch->top_addr();
    address end_adr = (address)ch->end_addr();
    eden_top_adr = makecon(TypeRawPtr::make(top_adr));
    eden_end_adr = basic_plus_adr(eden_top_adr, end_adr - top_adr);
  }
}


Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
  Node* adr = basic_plus_adr(base, offset);
  const TypePtr* adr_type = adr->bottom_type()->is_ptr();
  Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt);
  transform_later(value);
  return value;
}


Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
  Node* adr = basic_plus_adr(base, offset);
  mem = StoreNode::make(_igvn, ctl, mem, adr, NULL, value, bt);
  transform_later(mem);
  return mem;
}

//=============================================================================
//
//                              A L L O C A T I O N
//
// Allocation attempts to be fast in the case of frequent small objects.
// It breaks down like this:
//
// 1) Size in doublewords is computed.  This is a constant for objects and
// variable for most arrays.  Doubleword units are used to avoid size
// overflow of huge doubleword arrays.  We need doublewords in the end for
// rounding.
//
// 2) Size is checked for being 'too large'.  Too-large allocations will go
// the slow path into the VM.  The slow path can throw any required
// exceptions, and does all the special checks for very large arrays.  The
// size test can constant-fold away for objects.  For objects with
// finalizers it constant-folds the other way: you always go slow with
// finalizers.
//
// 3) If NOT using TLABs, this is the contended loop-back point.
// Load-Locked the heap top.  If using TLABs normal-load the heap top.
//
// 4) Check that heap top + size*8 < max.  If we fail go the slow route.
// NOTE: "top+size*8" cannot wrap the 4Gig line!  Here's why: for largish
// "size*8" we always enter the VM, where "largish" is a constant picked small
// enough that there's always space between the eden max and 4Gig (old space is
// there so it's quite large) and large enough that the cost of entering the VM
// is dwarfed by the cost to initialize the space.
//
// 5) If NOT using TLABs, Store-Conditional the adjusted heap top back
// down.  If contended, repeat at step 3.  If using TLABs normal-store
// adjusted heap top back down; there is no contention.
//
// 6) If !ZeroTLAB then Bulk-clear the object/array.  Fill in klass & mark
// fields.
//
// 7) Merge with the slow-path; cast the raw memory pointer to the correct
// oop flavor.
//
//=============================================================================
// FastAllocateSizeLimit value is in DOUBLEWORDS.
// Allocations bigger than this always go the slow route.
// This value must be small enough that allocation attempts that need to
// trigger exceptions go the slow route.  Also, it must be small enough so
// that heap_top + size_in_bytes does not wrap around the 4Gig limit.
//=============================================================================
//
// %%% Here is an old comment from parseHelper.cpp; is it outdated?
// The allocator will coalesce int->oop copies away.  See comment in
// coalesce.cpp about how this works.  It depends critically on the exact
// code shape produced here, so if you are changing this code shape
// make sure the GC info for the heap-top is correct in and around the
// slow-path call.
//
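// In pseudocode, the expansion below is roughly (a sketch; the real code
// also threads control, memory and i_o edges, and may insert prefetches):
//
//   if (always_slow || initial_slow_test) goto slow_call;
//  retry:
//   old_top = UseTLAB ? load(tlab_top) : load_locked(eden_top);
//   new_top = old_top + size_in_bytes;
//   if (new_top >= eden_end) goto slow_call;                   // needs GC
//   if (UseTLAB) store(tlab_top, new_top);
//   else if (!store_conditional(eden_top, new_top)) goto retry;
//   initialize header (and zero body) at old_top;
//   result = merge(fast-path oop, slow_call result);
//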

void PhaseMacroExpand::expand_allocate_common(
            AllocateNode* alloc, // allocation node to be expanded
            Node* length,  // array length for an array allocation
            const TypeFunc* slow_call_type, // Type of slow call
            address slow_call_address  // Address of slow call
    )
{

  Node* ctrl = alloc->in(TypeFunc::Control);
  Node* mem  = alloc->in(TypeFunc::Memory);
  Node* i_o  = alloc->in(TypeFunc::I_O);
  Node* size_in_bytes     = alloc->in(AllocateNode::AllocSize);
  Node* klass_node        = alloc->in(AllocateNode::KlassNode);
  Node* initial_slow_test = alloc->in(AllocateNode::InitialTest);

  assert(ctrl != NULL, "must have control");
  // We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
  // They will not be used if "always_slow" is set.
  enum { slow_result_path = 1, fast_result_path = 2 };
  Node *result_region;
  Node *result_phi_rawmem;
  Node *result_phi_rawoop;
  Node *result_phi_i_o;

  // The initial slow comparison is a size check; the comparison
  // we want to do is a BoolTest::gt.
  bool always_slow = false;
  int tv = _igvn.find_int_con(initial_slow_test, -1);
  if (tv >= 0) {
    always_slow = (tv == 1);
    initial_slow_test = NULL;
  } else {
    initial_slow_test = BoolNode::make_predicate(initial_slow_test, &_igvn);
  }

  if (DTraceAllocProbes ||
      (!UseTLAB && (!Universe::heap()->supports_inline_contig_alloc() ||
                    (UseConcMarkSweepGC && CMSIncrementalMode)))) {
    // Force slow-path allocation
    always_slow = true;
    initial_slow_test = NULL;
  }


  enum { too_big_or_final_path = 1, need_gc_path = 2 };
  Node *slow_region = NULL;
  Node *toobig_false = ctrl;

  assert(initial_slow_test == NULL || !always_slow, "arguments must be consistent");
  // generate the initial test if necessary
  if (initial_slow_test != NULL ) {
    slow_region = new (C, 3) RegionNode(3);

    // Now make the initial failure test.  Usually a too-big test but
    // might be a TRUE for finalizers or a fancy class check for
    // newInstance0.
    IfNode *toobig_iff = new (C, 2) IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
    transform_later(toobig_iff);
    // Plug the failing-too-big test into the slow-path region
    Node *toobig_true = new (C, 1) IfTrueNode( toobig_iff );
    transform_later(toobig_true);
    slow_region    ->init_req( too_big_or_final_path, toobig_true );
    toobig_false = new (C, 1) IfFalseNode( toobig_iff );
    transform_later(toobig_false);
  } else {         // No initial test, just fall into next case
    toobig_false = ctrl;
    debug_only(slow_region = NodeSentinel);
  }

  Node *slow_mem = mem;  // save the current memory state for slow path
  // generate the fast allocation code unless we know that the initial test will always go slow
  if (!always_slow) {
    // Fast path modifies only raw memory.
    if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
    }

    Node* eden_top_adr;
    Node* eden_end_adr;

    set_eden_pointers(eden_top_adr, eden_end_adr);

    // Load Eden::end.  Loop invariant and hoisted.
    //
    // Note: We set the control input on "eden_end" and "old_eden_top" when using
    //       a TLAB to work around a bug where these values were being moved across
    //       a safepoint.  These are not oops, so they cannot be included in the oop
    //       map, but they can be changed by a GC.  The proper way to fix this would
    //       be to set the raw memory state when generating a SafepointNode.  However,
    //       this will require extensive changes to the loop optimization in order to
    //       prevent a degradation of the optimization.
    //       See comment in memnode.hpp, around line 227 in class LoadPNode.
    Node *eden_end = make_load(ctrl, mem, eden_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);

    // allocate the Region and Phi nodes for the result
    result_region = new (C, 3) RegionNode(3);
    result_phi_rawmem = new (C, 3) PhiNode( result_region, Type::MEMORY, TypeRawPtr::BOTTOM );
    result_phi_rawoop = new (C, 3) PhiNode( result_region, TypeRawPtr::BOTTOM );
    result_phi_i_o    = new (C, 3) PhiNode( result_region, Type::ABIO ); // I/O is used for Prefetch

    // We need a Region for the loop-back contended case.
    enum { fall_in_path = 1, contended_loopback_path = 2 };
    Node *contended_region;
    Node *contended_phi_rawmem;
    if( UseTLAB ) {
      contended_region = toobig_false;
      contended_phi_rawmem = mem;
    } else {
      contended_region = new (C, 3) RegionNode(3);
      contended_phi_rawmem = new (C, 3) PhiNode( contended_region, Type::MEMORY, TypeRawPtr::BOTTOM);
      // Now handle the passing-too-big test.  We fall into the contended
      // loop-back merge point.
      contended_region    ->init_req( fall_in_path, toobig_false );
      contended_phi_rawmem->init_req( fall_in_path, mem );
      transform_later(contended_region);
      transform_later(contended_phi_rawmem);
    }

    // Load(-locked) the heap top.
    // See note above concerning the control input when using a TLAB
    Node *old_eden_top = UseTLAB
      ? new (C, 3) LoadPNode      ( ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM )
      : new (C, 3) LoadPLockedNode( contended_region, contended_phi_rawmem, eden_top_adr );

    transform_later(old_eden_top);
    // Add to heap top to get a new heap top
    Node *new_eden_top = new (C, 4) AddPNode( top(), old_eden_top, size_in_bytes );
    transform_later(new_eden_top);
    // Check for needing a GC; compare against heap end
    Node *needgc_cmp = new (C, 3) CmpPNode( new_eden_top, eden_end );
    transform_later(needgc_cmp);
    Node *needgc_bol = new (C, 2) BoolNode( needgc_cmp, BoolTest::ge );
    transform_later(needgc_bol);
    IfNode *needgc_iff = new (C, 2) IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN );
    transform_later(needgc_iff);

    // Plug the failing-heap-space-need-gc test into the slow-path region
    Node *needgc_true = new (C, 1) IfTrueNode( needgc_iff );
    transform_later(needgc_true);
    if( initial_slow_test ) {
      slow_region    ->init_req( need_gc_path, needgc_true );
      // This completes all paths into the slow merge point
      transform_later(slow_region);
    } else {                      // No initial slow path needed!
      // Just fall from the need-GC path straight into the VM call.
      slow_region    = needgc_true;
    }
    // No need for a GC.  Setup for the Store-Conditional
    Node *needgc_false = new (C, 1) IfFalseNode( needgc_iff );
    transform_later(needgc_false);

    // Grab regular I/O before optional prefetch may change it.
    // Slow-path does no I/O so just set it to the original I/O.
    result_phi_i_o->init_req( slow_result_path, i_o );

    i_o = prefetch_allocation(i_o, needgc_false, contended_phi_rawmem,
                              old_eden_top, new_eden_top, length);

    // Store (-conditional) the modified eden top back down.
    // StorePConditional produces flags for a test PLUS a modified raw
    // memory state.
    Node *store_eden_top;
    Node *fast_oop_ctrl;
    if( UseTLAB ) {
      store_eden_top = new (C, 4) StorePNode( needgc_false, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, new_eden_top );
      transform_later(store_eden_top);
      fast_oop_ctrl = needgc_false; // No contention, so this is the fast path
    } else {
      store_eden_top = new (C, 5) StorePConditionalNode( needgc_false, contended_phi_rawmem, eden_top_adr, new_eden_top, old_eden_top );
      transform_later(store_eden_top);
      Node *contention_check = new (C, 2) BoolNode( store_eden_top, BoolTest::ne );
      transform_later(contention_check);
      store_eden_top = new (C, 1) SCMemProjNode(store_eden_top);
      transform_later(store_eden_top);

      // If not using TLABs, check to see if there was contention.
      IfNode *contention_iff = new (C, 2) IfNode( needgc_false, contention_check, PROB_MIN, COUNT_UNKNOWN );
      transform_later(contention_iff);
      Node *contention_true = new (C, 1) IfTrueNode( contention_iff );
      transform_later(contention_true);
      // If contention, loop back and try again.
      contended_region->init_req( contended_loopback_path, contention_true );
      contended_phi_rawmem->init_req( contended_loopback_path, store_eden_top );

      // Fast-path succeeded with no contention!
      Node *contention_false = new (C, 1) IfFalseNode( contention_iff );
      transform_later(contention_false);
      fast_oop_ctrl = contention_false;
    }

    // Rename successful fast-path variables to make meaning more obvious
    Node* fast_oop        = old_eden_top;
    Node* fast_oop_rawmem = store_eden_top;
    fast_oop_rawmem = initialize_object(alloc,
                                        fast_oop_ctrl, fast_oop_rawmem, fast_oop,
                                        klass_node, length, size_in_bytes);

    if (ExtendedDTraceProbes) {
      // Slow-path call
      int size = TypeFunc::Parms + 2;
      CallLeafNode *call = new (C, size) CallLeafNode(OptoRuntime::dtrace_object_alloc_Type(),
                                                      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc_base),
                                                      "dtrace_object_alloc",
                                                      TypeRawPtr::BOTTOM);

      // Get base of thread-local storage area
      Node* thread = new (C, 1) ThreadLocalNode();
      transform_later(thread);

      call->init_req(TypeFunc::Parms+0, thread);
      call->init_req(TypeFunc::Parms+1, fast_oop);
      call->init_req( TypeFunc::Control, fast_oop_ctrl );
      call->init_req( TypeFunc::I_O    , top() );           // does no i/o
      call->init_req( TypeFunc::Memory , fast_oop_rawmem );
      call->init_req( TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr) );
      call->init_req( TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr) );
      transform_later(call);
      fast_oop_ctrl = new (C, 1) ProjNode(call, TypeFunc::Control);
      transform_later(fast_oop_ctrl);
      fast_oop_rawmem = new (C, 1) ProjNode(call, TypeFunc::Memory);
      transform_later(fast_oop_rawmem);
    }

    // Plug in the successful fast-path into the result merge point
    result_region    ->init_req( fast_result_path, fast_oop_ctrl );
    result_phi_rawoop->init_req( fast_result_path, fast_oop );
    result_phi_i_o   ->init_req( fast_result_path, i_o );
    result_phi_rawmem->init_req( fast_result_path, fast_oop_rawmem );
  } else {
    slow_region = ctrl;
  }

  // Generate slow-path call
  CallNode *call = new (C, slow_call_type->domain()->cnt())
    CallStaticJavaNode(slow_call_type, slow_call_address,
                       OptoRuntime::stub_name(slow_call_address),
                       alloc->jvms()->bci(),
                       TypePtr::BOTTOM);
  call->init_req( TypeFunc::Control, slow_region );
  call->init_req( TypeFunc::I_O    , top() );    // does no i/o
  call->init_req( TypeFunc::Memory , slow_mem ); // may gc ptrs
  call->init_req( TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr) );
  call->init_req( TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr) );

  call->init_req(TypeFunc::Parms+0, klass_node);
  if (length != NULL) {
    call->init_req(TypeFunc::Parms+1, length);
  }

  // Copy debug information and adjust JVMState information, then replace the
  // allocate node with the call
  copy_call_debug_info((CallNode *) alloc, call);
  if (!always_slow) {
    call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
  }
  _igvn.hash_delete(alloc);
  _igvn.subsume_node(alloc, call);
  transform_later(call);

  // Identify the output projections from the allocate node and
  // adjust any references to them.
  // The control and io projections look like:
  //
  //        v---Proj(ctrl) <-----+   v---CatchProj(ctrl)
  //  Allocate                   Catch
  //        ^---Proj(io) <-------+   ^---CatchProj(io)
  //
  //  We are interested in the CatchProj nodes.
  //
  extract_call_projections(call);

  // An allocate node has separate memory projections for the uses on the control and i_o paths.
  // Replace uses of the control memory projection with result_phi_rawmem (unless we are only generating a slow call).
  if (!always_slow && _memproj_fallthrough != NULL) {
    for (DUIterator_Fast imax, i = _memproj_fallthrough->fast_outs(imax); i < imax; i++) {
      Node *use = _memproj_fallthrough->fast_out(i);
      _igvn.hash_delete(use);
      imax -= replace_input(use, _memproj_fallthrough, result_phi_rawmem);
      _igvn._worklist.push(use);
      // back up iterator
      --i;
    }
  }
  // Now change uses of _memproj_catchall to use _memproj_fallthrough and delete _memproj_catchall so
  // we end up with a call that has only 1 memory projection.
  if (_memproj_catchall != NULL ) {
    if (_memproj_fallthrough == NULL) {
      _memproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::Memory);
      transform_later(_memproj_fallthrough);
    }
    for (DUIterator_Fast imax, i = _memproj_catchall->fast_outs(imax); i < imax; i++) {
      Node *use = _memproj_catchall->fast_out(i);
      _igvn.hash_delete(use);
      imax -= replace_input(use, _memproj_catchall, _memproj_fallthrough);
      _igvn._worklist.push(use);
      // back up iterator
      --i;
    }
  }

  // An allocate node has separate i_o projections for the uses on the control and i_o paths.
  // Replace uses of the control i_o projection with result_phi_i_o (unless we are only generating a slow call).
  if (_ioproj_fallthrough == NULL) {
    _ioproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::I_O);
    transform_later(_ioproj_fallthrough);
  } else if (!always_slow) {
    for (DUIterator_Fast imax, i = _ioproj_fallthrough->fast_outs(imax); i < imax; i++) {
      Node *use = _ioproj_fallthrough->fast_out(i);

      _igvn.hash_delete(use);
      imax -= replace_input(use, _ioproj_fallthrough, result_phi_i_o);
      _igvn._worklist.push(use);
      // back up iterator
      --i;
    }
  }
  // Now change uses of _ioproj_catchall to use _ioproj_fallthrough and delete _ioproj_catchall so
  // we end up with a call that has only 1 i_o projection.
  if (_ioproj_catchall != NULL ) {
    for (DUIterator_Fast imax, i = _ioproj_catchall->fast_outs(imax); i < imax; i++) {
      Node *use = _ioproj_catchall->fast_out(i);
      _igvn.hash_delete(use);
      imax -= replace_input(use, _ioproj_catchall, _ioproj_fallthrough);
      _igvn._worklist.push(use);
      // back up iterator
      --i;
    }
  }

  // if we generated only a slow call, we are done
  if (always_slow)
    return;


  if (_fallthroughcatchproj != NULL) {
    ctrl = _fallthroughcatchproj->clone();
    transform_later(ctrl);
    _igvn.hash_delete(_fallthroughcatchproj);
    _igvn.subsume_node(_fallthroughcatchproj, result_region);
  } else {
    ctrl = top();
  }
  Node *slow_result;
  if (_resproj == NULL) {
    // no uses of the allocation result
    slow_result = top();
  } else {
    slow_result = _resproj->clone();
    transform_later(slow_result);
    _igvn.hash_delete(_resproj);
    _igvn.subsume_node(_resproj, result_phi_rawoop);
  }

  // Plug the slow path into the result merge point
  result_region    ->init_req( slow_result_path, ctrl );
  result_phi_rawoop->init_req( slow_result_path, slow_result);
  result_phi_rawmem->init_req( slow_result_path, _memproj_fallthrough );
  transform_later(result_region);
  transform_later(result_phi_rawoop);
  transform_later(result_phi_rawmem);
  transform_later(result_phi_i_o);
  // This completes all paths into the result merge point
}


// Helper for PhaseMacroExpand::expand_allocate_common.
// Initializes the newly-allocated storage.
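//
// Schematically, the storage laid out below is (a sketch; exact offsets
// come from oopDesc/arrayOopDesc and the klass layout helper):
//
//   [ mark word | klass | length (arrays only) | body, zeroed unless the
//     Initialize node's stores cover it or ZeroTLAB pre-zeroed the TLAB ]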
1324Node*
1325PhaseMacroExpand::initialize_object(AllocateNode* alloc,
1326                                    Node* control, Node* rawmem, Node* object,
1327                                    Node* klass_node, Node* length,
1328                                    Node* size_in_bytes) {
1329  InitializeNode* init = alloc->initialization();
1330  // Store the klass & mark bits
1331  Node* mark_node = NULL;
1332  // For now only enable fast locking for non-array types
1333  if (UseBiasedLocking && (length == NULL)) {
1334    mark_node = make_load(NULL, rawmem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeRawPtr::BOTTOM, T_ADDRESS);
1335  } else {
1336    mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
1337  }
1338  rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
1339
1340  rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_OBJECT);
1341  int header_size = alloc->minimum_header_size();  // conservatively small
1342
1343  // Array length
1344  if (length != NULL) {         // Arrays need length field
1345    rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
1346    // conservatively small header size:
1347    header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1348    ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass();
1349    if (k->is_array_klass())    // we know the exact header size in most cases:
1350      header_size = Klass::layout_helper_header_size(k->layout_helper());
1351  }
1352
1353  // Clear the object body, if necessary.
1354  if (init == NULL) {
1355    // The init has somehow disappeared; be cautious and clear everything.
1356    //
1357    // This can happen if a node is allocated but an uncommon trap occurs
1358    // immediately.  In this case, the Initialize gets associated with the
1359    // trap, and may be placed in a different (outer) loop, if the Allocate
1360    // is in a loop.  If the inner loop gets unrolled (this is rare), then
1361    // there can be two Allocates to one Initialize.  The answer in all these
1362    // edge cases is safety first.  It is always safe to clear immediately
1363    // within an Allocate, and then (maybe or maybe not) clear some more later.
1364    if (!ZeroTLAB)
1365      rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
1366                                            header_size, size_in_bytes,
1367                                            &_igvn);
1368  } else {
1369    if (!init->is_complete()) {
1370      // Try to win by zeroing only what the init does not store.
1371      // We can also try to do some peephole optimizations,
1372      // such as combining some adjacent subword stores.
1373      rawmem = init->complete_stores(control, rawmem, object,
1374                                     header_size, size_in_bytes, &_igvn);
1375    }
1376    // We have no more use for this link, since the AllocateNode goes away:
1377    init->set_req(InitializeNode::RawAddress, top());
1378    // (If we keep the link, it just confuses the register allocator,
1379    // who thinks he sees a real use of the address by the membar.)
1380  }
1381
1382  return rawmem;
1383}
1384
1385// Generate prefetch instructions for next allocations.
1386Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
1387                                        Node*& contended_phi_rawmem,
1388                                        Node* old_eden_top, Node* new_eden_top,
1389                                        Node* length) {
1390   if( UseTLAB && AllocatePrefetchStyle == 2 ) {
1391      // Generate prefetch allocation with watermark check.
1392      // As an allocation hits the watermark, we will prefetch starting
1393      // at a "distance" away from watermark.
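      // Roughly:
      //   if (new_eden_top >= tlab_pf_top) {        // crossed the watermark
      //     new_wm = tlab_pf_top + AllocatePrefetchDistance;
      //     tlab_pf_top = new_wm;                   // publish the new watermark
      //     for (i = 0; i < lines; i++)             // lines = distance / step
      //       prefetch(new_wm + i * AllocatePrefetchStepSize);
      //   }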
1394      enum { fall_in_path = 1, pf_path = 2 };
1395
1396      Node *pf_region = new (C, 3) RegionNode(3);
1397      Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY,
1398                                                TypeRawPtr::BOTTOM );
1399      // I/O is used for Prefetch
1400      Node *pf_phi_abio = new (C, 3) PhiNode( pf_region, Type::ABIO );
1401
1402      Node *thread = new (C, 1) ThreadLocalNode();
1403      transform_later(thread);
1404
1405      Node *eden_pf_adr = new (C, 4) AddPNode( top()/*not oop*/, thread,
1406                   _igvn.MakeConX(in_bytes(JavaThread::tlab_pf_top_offset())) );
1407      transform_later(eden_pf_adr);
1408
1409      Node *old_pf_wm = new (C, 3) LoadPNode( needgc_false,
1410                                   contended_phi_rawmem, eden_pf_adr,
1411                                   TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM );
1412      transform_later(old_pf_wm);
1413
1414      // check against new_eden_top
1415      Node *need_pf_cmp = new (C, 3) CmpPNode( new_eden_top, old_pf_wm );
1416      transform_later(need_pf_cmp);
1417      Node *need_pf_bol = new (C, 2) BoolNode( need_pf_cmp, BoolTest::ge );
1418      transform_later(need_pf_bol);
1419      IfNode *need_pf_iff = new (C, 2) IfNode( needgc_false, need_pf_bol,
1420                                       PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN );
1421      transform_later(need_pf_iff);
1422
1423      // true node: add the prefetch distance to the old watermark
1424      Node *need_pf_true = new (C, 1) IfTrueNode( need_pf_iff );
1425      transform_later(need_pf_true);
1426
1427      Node *need_pf_false = new (C, 1) IfFalseNode( need_pf_iff );
1428      transform_later(need_pf_false);
1429
1430      Node *new_pf_wmt = new (C, 4) AddPNode( top(), old_pf_wm,
1431                                    _igvn.MakeConX(AllocatePrefetchDistance) );
1432      transform_later(new_pf_wmt );
1433      new_pf_wmt->set_req(0, need_pf_true);
1434
1435      Node *store_new_wmt = new (C, 4) StorePNode( need_pf_true,
1436                                       contended_phi_rawmem, eden_pf_adr,
1437                                       TypeRawPtr::BOTTOM, new_pf_wmt );
1438      transform_later(store_new_wmt);
1439
1440      // adding prefetches
1441      pf_phi_abio->init_req( fall_in_path, i_o );
1442
1443      Node *prefetch_adr;
1444      Node *prefetch;
1445      uint lines = AllocatePrefetchDistance / AllocatePrefetchStepSize;
1446      uint step_size = AllocatePrefetchStepSize;
1447      uint distance = 0;
1448
1449      for ( uint i = 0; i < lines; i++ ) {
1450        prefetch_adr = new (C, 4) AddPNode( old_pf_wm, new_pf_wmt,
1451                                            _igvn.MakeConX(distance) );
1452        transform_later(prefetch_adr);
1453        prefetch = new (C, 3) PrefetchWriteNode( i_o, prefetch_adr );
1454        transform_later(prefetch);
1455        distance += step_size;
1456        i_o = prefetch;
1457      }
1458      pf_phi_abio->set_req( pf_path, i_o );
1459
1460      pf_region->init_req( fall_in_path, need_pf_false );
1461      pf_region->init_req( pf_path, need_pf_true );
1462
1463      pf_phi_rawmem->init_req( fall_in_path, contended_phi_rawmem );
1464      pf_phi_rawmem->init_req( pf_path, store_new_wmt );
1465
1466      transform_later(pf_region);
1467      transform_later(pf_phi_rawmem);
1468      transform_later(pf_phi_abio);
1469
1470      needgc_false = pf_region;
1471      contended_phi_rawmem = pf_phi_rawmem;
1472      i_o = pf_phi_abio;
1473   } else if( AllocatePrefetchStyle > 0 ) {
1474      // Insert a prefetch for each allocation only on the fast-path
1475      Node *prefetch_adr;
1476      Node *prefetch;
1477      // Generate several prefetch instructions only for arrays.
1478      uint lines = (length != NULL) ? AllocatePrefetchLines : 1;
1479      uint step_size = AllocatePrefetchStepSize;
1480      uint distance = AllocatePrefetchDistance;
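      // The prefetched addresses are new_eden_top + AllocatePrefetchDistance,
      // advancing by AllocatePrefetchStepSize bytes per line, i.e. 'lines'
      // prefetches starting AllocatePrefetchDistance bytes past the new top.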
1481      for ( uint i = 0; i < lines; i++ ) {
1482        prefetch_adr = new (C, 4) AddPNode( old_eden_top, new_eden_top,
1483                                            _igvn.MakeConX(distance) );
1484        transform_later(prefetch_adr);
1485        prefetch = new (C, 3) PrefetchWriteNode( i_o, prefetch_adr );
1486        // Do not let it float too high, since if eden_top == eden_end,
1487        // both might be null.
1488        if( i == 0 ) { // Set control for first prefetch, next follows it
1489          prefetch->init_req(0, needgc_false);
1490        }
1491        transform_later(prefetch);
1492        distance += step_size;
1493        i_o = prefetch;
1494      }
1495   }
1496   return i_o;
1497}
1498
1499
1500void PhaseMacroExpand::expand_allocate(AllocateNode *alloc) {
1501  expand_allocate_common(alloc, NULL,
1502                         OptoRuntime::new_instance_Type(),
1503                         OptoRuntime::new_instance_Java());
1504}
1505
1506void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode *alloc) {
1507  Node* length = alloc->in(AllocateNode::ALength);
1508  expand_allocate_common(alloc, length,
1509                         OptoRuntime::new_array_Type(),
1510                         OptoRuntime::new_array_Java());
1511}
1512
1513
1514// We have determined that this lock/unlock can be eliminated; we simply
1515// eliminate the node without expanding it.
1516//
1517// Note:  The membars associated with the lock/unlock are currently not
1518//        eliminated.  This should be investigated as a future enhancement.
1519//
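// Schematically, elimination just rewires the projections so the node
// drops out of the graph:
//   ctrl-proj(alock) is subsumed by alock's control input
//   mem-proj(alock)  is subsumed by alock's memory input
// after also removing the adjacent MemBarAcquire/MemBarRelease (and the
// FastLock node, if this Lock is its only user).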
1520bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
1521
1522  if (!alock->is_eliminated()) {
1523    return false;
1524  }
1525  if (alock->is_Lock() && !alock->is_coarsened()) {
1526      // Create new "eliminated" BoxLock node and use it
1527      // in monitor debug info for the same object.
1528      BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
1529      Node* obj = alock->obj_node();
1530      if (!oldbox->is_eliminated()) {
1531        BoxLockNode* newbox = oldbox->clone()->as_BoxLock();
1532        newbox->set_eliminated();
1533        transform_later(newbox);
1534        // Replace old box node with new box for all users
1535        // of the same object.
1536        for (uint i = 0; i < oldbox->outcnt();) {
1537
1538          bool next_edge = true;
1539          Node* u = oldbox->raw_out(i);
1540          if (u == alock) {
1541            i++;
1542            continue; // It will be removed below
1543          }
1544          if (u->is_Lock() &&
1545              u->as_Lock()->obj_node() == obj &&
1546              // oldbox could be referenced in debug info also
1547              u->as_Lock()->box_node() == oldbox) {
1548            assert(u->as_Lock()->is_eliminated(), "sanity");
1549            _igvn.hash_delete(u);
1550            u->set_req(TypeFunc::Parms + 1, newbox);
1551            next_edge = false;
1552#ifdef ASSERT
1553          } else if (u->is_Unlock() && u->as_Unlock()->obj_node() == obj) {
1554            assert(u->as_Unlock()->is_eliminated(), "sanity");
1555#endif
1556          }
1557          // Replace old box in monitor debug info.
1558          if (u->is_SafePoint() && u->as_SafePoint()->jvms()) {
1559            SafePointNode* sfn = u->as_SafePoint();
1560            JVMState* youngest_jvms = sfn->jvms();
1561            int max_depth = youngest_jvms->depth();
1562            for (int depth = 1; depth <= max_depth; depth++) {
1563              JVMState* jvms = youngest_jvms->of_depth(depth);
1564              int num_mon  = jvms->nof_monitors();
1565              // Loop over monitors
1566              for (int idx = 0; idx < num_mon; idx++) {
1567                Node* obj_node = sfn->monitor_obj(jvms, idx);
1568                Node* box_node = sfn->monitor_box(jvms, idx);
1569                if (box_node == oldbox && obj_node == obj) {
1570                  int j = jvms->monitor_box_offset(idx);
1571                  _igvn.hash_delete(u);
1572                  u->set_req(j, newbox);
1573                  next_edge = false;
1574                }
1575              } // for (int idx = 0;
1576            } // for (int depth = 1;
1577          } // if (u->is_SafePoint()
1578          if (next_edge) i++;
1579        } // for (uint i = 0; i < oldbox->outcnt();)
1580      } // if (!oldbox->is_eliminated())
1581  } // if (alock->is_Lock() && !alock->is_coarsened())
1582
1583  #ifndef PRODUCT
1584  if (PrintEliminateLocks) {
1585    if (alock->is_Lock()) {
1586      tty->print_cr("++++ Eliminating: %d Lock", alock->_idx);
1587    } else {
1588      tty->print_cr("++++ Eliminating: %d Unlock", alock->_idx);
1589    }
1590  }
1591  #endif
1592
1593  Node* mem  = alock->in(TypeFunc::Memory);
1594  Node* ctrl = alock->in(TypeFunc::Control);
1595
1596  extract_call_projections(alock);
1597  // There are 2 projections from the lock.  The lock node will
1598  // be deleted when its last use is subsumed below.
1599  assert(alock->outcnt() == 2 &&
1600         _fallthroughproj != NULL &&
1601         _memproj_fallthrough != NULL,
1602         "Unexpected projections from Lock/Unlock");
1603
1604  Node* fallthroughproj = _fallthroughproj;
1605  Node* memproj_fallthrough = _memproj_fallthrough;
1606
1607  // The memory projection from a lock/unlock is RawMem
1608  // The input to a Lock is merged memory, so extract its RawMem input
1609  // (unless the MergeMem has been optimized away.)
1610  if (alock->is_Lock()) {
1611    // Search for MemBarAcquire node and delete it also.
1612    MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar();
1613    assert(membar != NULL && membar->Opcode() == Op_MemBarAcquire, "");
1614    Node* ctrlproj = membar->proj_out(TypeFunc::Control);
1615    Node* memproj = membar->proj_out(TypeFunc::Memory);
1616    _igvn.hash_delete(ctrlproj);
1617    _igvn.subsume_node(ctrlproj, fallthroughproj);
1618    _igvn.hash_delete(memproj);
1619    _igvn.subsume_node(memproj, memproj_fallthrough);
1620
1621    // Also delete the FastLock node if this Lock node is its unique user
1622    // (loop peeling may clone a Lock node).
1623    Node* flock = alock->as_Lock()->fastlock_node();
1624    if (flock->outcnt() == 1) {
1625      assert(flock->unique_out() == alock, "sanity");
1626      _igvn.hash_delete(flock);
1627      _igvn.subsume_node(flock, top());
1628    }
1629  }
1630
1631  // Search for MemBarRelease node and delete it also.
1632  if (alock->is_Unlock() && ctrl != NULL && ctrl->is_Proj() &&
1633      ctrl->in(0)->is_MemBar()) {
1634    MemBarNode* membar = ctrl->in(0)->as_MemBar();
1635    assert(membar->Opcode() == Op_MemBarRelease &&
1636           mem->is_Proj() && membar == mem->in(0), "");
1637    _igvn.hash_delete(fallthroughproj);
1638    _igvn.subsume_node(fallthroughproj, ctrl);
1639    _igvn.hash_delete(memproj_fallthrough);
1640    _igvn.subsume_node(memproj_fallthrough, mem);
1641    fallthroughproj = ctrl;
1642    memproj_fallthrough = mem;
1643    ctrl = membar->in(TypeFunc::Control);
1644    mem  = membar->in(TypeFunc::Memory);
1645  }
1646
1647  _igvn.hash_delete(fallthroughproj);
1648  _igvn.subsume_node(fallthroughproj, ctrl);
1649  _igvn.hash_delete(memproj_fallthrough);
1650  _igvn.subsume_node(memproj_fallthrough, mem);
1651  return true;
1652}
1653
1654
1655//------------------------------expand_lock_node----------------------
1656void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
1657
1658  Node* ctrl = lock->in(TypeFunc::Control);
1659  Node* mem = lock->in(TypeFunc::Memory);
1660  Node* obj = lock->obj_node();
1661  Node* box = lock->box_node();
1662  Node* flock = lock->fastlock_node();
1663
1664  // Make the merge point
1665  Node *region;
1666  Node *mem_phi;
1667  Node *slow_path;
1668
1669  if (UseOptoBiasInlining) {
1670    /*
1671     *  See the full description in MacroAssembler::biased_locking_enter().
1672     *
1673     *  if( (mark_word & biased_lock_mask) == biased_lock_pattern ) {
1674     *    // The object is biased.
1675     *    proto_node = klass->prototype_header;
1676     *    o_node = thread | proto_node;
1677     *    x_node = o_node ^ mark_word;
1678     *    if( (x_node & ~age_mask) == 0 ) { // Biased to the current thread ?
1679     *      // Done.
1680     *    } else {
1681     *      if( (x_node & biased_lock_mask) != 0 ) {
1682     *        // The klass's prototype header is no longer biased.
1683     *        cas(&mark_word, mark_word, proto_node)
1684     *        goto cas_lock;
1685     *      } else {
1686     *        // The klass's prototype header is still biased.
1687     *        if( (x_node & epoch_mask) != 0 ) { // Expired epoch?
1688     *          old = mark_word;
1689     *          new = o_node;
1690     *        } else {
1691     *          // Different thread or anonymous biased.
1692     *          old = mark_word & (epoch_mask | age_mask | biased_lock_mask);
1693     *          new = thread | old;
1694     *        }
1695     *        // Try to rebias.
1696     *        if( cas(&mark_word, old, new) == 0 ) {
1697     *          // Done.
1698     *        } else {
1699     *          goto slow_path; // Failed.
1700     *        }
1701     *      }
1702     *    }
1703     *  } else {
1704     *    // The object is not biased.
1705     *    cas_lock:
1706     *    if( FastLock(obj) == 0 ) {
1707     *      // Done.
1708     *    } else {
1709     *      slow_path:
1710     *      OptoRuntime::complete_monitor_locking_Java(obj);
1711     *    }
1712     *  }
1713     */
1714
1715    region  = new (C, 5) RegionNode(5);
1716    // create a Phi for the memory state
1717    mem_phi = new (C, 5) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
1718
1719    Node* fast_lock_region  = new (C, 3) RegionNode(3);
1720    Node* fast_lock_mem_phi = new (C, 3) PhiNode( fast_lock_region, Type::MEMORY, TypeRawPtr::BOTTOM);
1721
1722    // First, check mark word for the biased lock pattern.
1723    Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
1724
1725    // Get fast path - mark word has the biased lock pattern.
1726    ctrl = opt_bits_test(ctrl, fast_lock_region, 1, mark_node,
1727                         markOopDesc::biased_lock_mask_in_place,
1728                         markOopDesc::biased_lock_pattern, true);
1729    // fast_lock_region->in(1) is set to slow path.
1730    fast_lock_mem_phi->init_req(1, mem);
1731
1732    // Now check that the lock is biased to the current thread and has
1733    // the same epoch and bias as Klass::_prototype_header.
1734
1735    // Special-case a fresh allocation to avoid building nodes:
1736    Node* klass_node = AllocateNode::Ideal_klass(obj, &_igvn);
1737    if (klass_node == NULL) {
1738      Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1739      klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
1740#ifdef _LP64
1741      if (UseCompressedOops && klass_node->is_DecodeN()) {
1742        assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
1743        klass_node->in(1)->init_req(0, ctrl);
1744      } else
1745#endif
1746      klass_node->init_req(0, ctrl);
1747    }
1748    Node *proto_node = make_load(ctrl, mem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeX_X, TypeX_X->basic_type());
1749
1750    Node* thread = transform_later(new (C, 1) ThreadLocalNode());
1751    Node* cast_thread = transform_later(new (C, 2) CastP2XNode(ctrl, thread));
1752    Node* o_node = transform_later(new (C, 3) OrXNode(cast_thread, proto_node));
1753    Node* x_node = transform_later(new (C, 3) XorXNode(o_node, mark_node));
1754
1755    // Get slow path - mark word does NOT match the value.
1756    Node* not_biased_ctrl =  opt_bits_test(ctrl, region, 3, x_node,
1757                                      (~markOopDesc::age_mask_in_place), 0);
1758    // region->in(3) is set to fast path - the object is biased to the current thread.
1759    mem_phi->init_req(3, mem);
1760
1761
1762    // Mark word does NOT match the value (thread | Klass::_prototype_header).
1763
1764
1765    // First, check biased pattern.
1766    // Get fast path - _prototype_header has the same biased lock pattern.
1767    ctrl =  opt_bits_test(not_biased_ctrl, fast_lock_region, 2, x_node,
1768                          markOopDesc::biased_lock_mask_in_place, 0, true);
1769
1770    not_biased_ctrl = fast_lock_region->in(2); // Slow path
1771    // fast_lock_region->in(2) - the prototype header is no longer biased
1772    // and we have to revoke the bias on this object.
1773    // We are going to try to reset the mark of this object to the prototype
1774    // value and fall through to the CAS-based locking scheme.
1775    Node* adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
1776    Node* cas = new (C, 5) StoreXConditionalNode(not_biased_ctrl, mem, adr,
1777                                                 proto_node, mark_node);
1778    transform_later(cas);
1779    Node* proj = transform_later( new (C, 1) SCMemProjNode(cas));
1780    fast_lock_mem_phi->init_req(2, proj);
1781
1782
1783    // Second, check epoch bits.
1784    Node* rebiased_region  = new (C, 3) RegionNode(3);
1785    Node* old_phi = new (C, 3) PhiNode( rebiased_region, TypeX_X);
1786    Node* new_phi = new (C, 3) PhiNode( rebiased_region, TypeX_X);
1787
1788    // Get slow path - mark word does NOT match epoch bits.
1789    Node* epoch_ctrl =  opt_bits_test(ctrl, rebiased_region, 1, x_node,
1790                                      markOopDesc::epoch_mask_in_place, 0);
1791    // The epoch of the current bias is not valid; attempt to rebias the object
1792    // toward the current thread.
1793    rebiased_region->init_req(2, epoch_ctrl);
1794    old_phi->init_req(2, mark_node);
1795    new_phi->init_req(2, o_node);
1796
1797    // rebiased_region->in(1) is set to fast path.
1798    // The epoch of the current bias is still valid but we know
1799    // nothing about the owner; it might be set or it might be clear.
1800    Node* cmask   = MakeConX(markOopDesc::biased_lock_mask_in_place |
1801                             markOopDesc::age_mask_in_place |
1802                             markOopDesc::epoch_mask_in_place);
1803    Node* old = transform_later(new (C, 3) AndXNode(mark_node, cmask));
1804    cast_thread = transform_later(new (C, 2) CastP2XNode(ctrl, thread));
1805    Node* new_mark = transform_later(new (C, 3) OrXNode(cast_thread, old));
1806    old_phi->init_req(1, old);
1807    new_phi->init_req(1, new_mark);
1808
1809    transform_later(rebiased_region);
1810    transform_later(old_phi);
1811    transform_later(new_phi);
1812
1813    // Try to acquire the bias of the object using an atomic operation.
1814    // If this fails we will go in to the runtime to revoke the object's bias.
1815    cas = new (C, 5) StoreXConditionalNode(rebiased_region, mem, adr,
1816                                           new_phi, old_phi);
1817    transform_later(cas);
1818    proj = transform_later( new (C, 1) SCMemProjNode(cas));
1819
1820    // Get slow path - Failed to CAS.
1821    not_biased_ctrl = opt_bits_test(rebiased_region, region, 4, cas, 0, 0);
1822    mem_phi->init_req(4, proj);
1823    // region->in(4) is set to fast path - the object is rebiased to the current thread.
1824
1825    // Failed to CAS.
1826    slow_path  = new (C, 3) RegionNode(3);
1827    Node *slow_mem = new (C, 3) PhiNode( slow_path, Type::MEMORY, TypeRawPtr::BOTTOM);
1828
1829    slow_path->init_req(1, not_biased_ctrl); // Capture slow-control
1830    slow_mem->init_req(1, proj);
1831
1832    // Call CAS-based locking scheme (FastLock node).
1833
1834    transform_later(fast_lock_region);
1835    transform_later(fast_lock_mem_phi);
1836
1837    // Get slow path - FastLock failed to lock the object.
1838    ctrl = opt_bits_test(fast_lock_region, region, 2, flock, 0, 0);
1839    mem_phi->init_req(2, fast_lock_mem_phi);
1840    // region->in(2) is set to fast path - the object is locked to the current thread.
1841
1842    slow_path->init_req(2, ctrl); // Capture slow-control
1843    slow_mem->init_req(2, fast_lock_mem_phi);
1844
1845    transform_later(slow_path);
1846    transform_later(slow_mem);
1847    // Reset lock's memory edge.
1848    lock->set_req(TypeFunc::Memory, slow_mem);
1849
1850  } else {
1851    region  = new (C, 3) RegionNode(3);
1852    // create a Phi for the memory state
1853    mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
1854
1855    // Optimize test; set region slot 2
1856    slow_path = opt_bits_test(ctrl, region, 2, flock, 0, 0);
1857    mem_phi->init_req(2, mem);
1858  }
1859
1860  // Make slow path call
1861  CallNode *call = make_slow_call( (CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(), OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path, obj, box );
1862
1863  extract_call_projections(call);
1864
1865  // Slow path can only throw asynchronous exceptions, which are always
1866  // de-opted.  So the compiler thinks the slow-call can never throw an
1867  // exception.  If it DOES throw an exception we would need the debug
1868  // info removed first (since if it throws there is no monitor).
1869  assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
1870           _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock");
1871
1872  // Capture slow path
1873  // disconnect fall-through projection from call and create a new one
1874  // hook up users of fall-through projection to region
1875  Node *slow_ctrl = _fallthroughproj->clone();
1876  transform_later(slow_ctrl);
1877  _igvn.hash_delete(_fallthroughproj);
1878  _fallthroughproj->disconnect_inputs(NULL);
1879  region->init_req(1, slow_ctrl);
1880  // region inputs are now complete
1881  transform_later(region);
1882  _igvn.subsume_node(_fallthroughproj, region);
1883
1884  Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) );
1885  mem_phi->init_req(1, memproj );
1886  transform_later(mem_phi);
1887  _igvn.hash_delete(_memproj_fallthrough);
1888  _igvn.subsume_node(_memproj_fallthrough, mem_phi);
1889}
1890
1891//------------------------------expand_unlock_node----------------------
1892void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
1893
1894  Node* ctrl = unlock->in(TypeFunc::Control);
1895  Node* mem = unlock->in(TypeFunc::Memory);
1896  Node* obj = unlock->obj_node();
1897  Node* box = unlock->box_node();
1898
1899  // No need for a null check on unlock
1900
1901  // Make the merge point
1902  Node *region;
1903  Node *mem_phi;
1904
1905  if (UseOptoBiasInlining) {
1906    // Check for biased locking unlock case, which is a no-op.
1907    // See the full description in MacroAssembler::biased_locking_exit().
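    // Schematically:
    //   if ((mark_word & biased_lock_mask) == biased_lock_pattern) {
    //     // The object is biased; unlocking is a no-op.
    //   } else if (FastUnlock(obj, box) != 0) {
    //     SharedRuntime::complete_monitor_unlocking_C(obj);  // slow path
    //   }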
1908    region  = new (C, 4) RegionNode(4);
1909    // create a Phi for the memory state
1910    mem_phi = new (C, 4) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
1911    mem_phi->init_req(3, mem);
1912
1913    Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
1914    ctrl = opt_bits_test(ctrl, region, 3, mark_node,
1915                         markOopDesc::biased_lock_mask_in_place,
1916                         markOopDesc::biased_lock_pattern);
1917  } else {
1918    region  = new (C, 3) RegionNode(3);
1919    // create a Phi for the memory state
1920    mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
1921  }
1922
1923  FastUnlockNode *funlock = new (C, 3) FastUnlockNode( ctrl, obj, box );
1924  funlock = transform_later( funlock )->as_FastUnlock();
1925  // Optimize test; set region slot 2
1926  Node *slow_path = opt_bits_test(ctrl, region, 2, funlock, 0, 0);
1927
1928  CallNode *call = make_slow_call( (CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), "complete_monitor_unlocking_C", slow_path, obj, box );
1929
1930  extract_call_projections(call);
1931
1932  assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
1933           _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Unlock");
1934
1935  // No exceptions for unlocking
1936  // Capture slow path
1937  // disconnect fall-through projection from call and create a new one
1938  // hook up users of fall-through projection to region
1939  Node *slow_ctrl = _fallthroughproj->clone();
1940  transform_later(slow_ctrl);
1941  _igvn.hash_delete(_fallthroughproj);
1942  _fallthroughproj->disconnect_inputs(NULL);
1943  region->init_req(1, slow_ctrl);
1944  // region inputs are now complete
1945  transform_later(region);
1946  _igvn.subsume_node(_fallthroughproj, region);
1947
1948  Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) );
1949  mem_phi->init_req(1, memproj );
1950  mem_phi->init_req(2, mem);
1951  transform_later(mem_phi);
1952  _igvn.hash_delete(_memproj_fallthrough);
1953  _igvn.subsume_node(_memproj_fallthrough, mem_phi);
1954}
1955
1956//------------------------------expand_macro_nodes----------------------
1957//  Returns true if a failure occurred.
1958bool PhaseMacroExpand::expand_macro_nodes() {
1959  if (C->macro_count() == 0)
1960    return false;
1961  // First, attempt to eliminate locks
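  // (iterate to a fixed point, since eliminating one node may enable
  //  the elimination of others)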
1962  bool progress = true;
1963  while (progress) {
1964    progress = false;
1965    for (int i = C->macro_count(); i > 0; i--) {
1966      Node * n = C->macro_node(i-1);
1967      bool success = false;
1968      debug_only(int old_macro_count = C->macro_count(););
1969      if (n->is_AbstractLock()) {
1970        success = eliminate_locking_node(n->as_AbstractLock());
1971      } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
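        // Opaque nodes exist only to hide their input from earlier
        // optimizations; at this point they are replaced by that input.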
1972        _igvn.add_users_to_worklist(n);
1973        _igvn.hash_delete(n);
1974        _igvn.subsume_node(n, n->in(1));
1975        success = true;
1976      }
1977      assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
1978      progress = progress || success;
1979    }
1980  }
1981  // Next, attempt to eliminate allocations
1982  progress = true;
1983  while (progress) {
1984    progress = false;
1985    for (int i = C->macro_count(); i > 0; i--) {
1986      Node * n = C->macro_node(i-1);
1987      bool success = false;
1988      debug_only(int old_macro_count = C->macro_count(););
1989      switch (n->class_id()) {
1990      case Node::Class_Allocate:
1991      case Node::Class_AllocateArray:
1992        success = eliminate_allocate_node(n->as_Allocate());
1993        break;
1994      case Node::Class_Lock:
1995      case Node::Class_Unlock:
1996        assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
1997        break;
1998      default:
1999        assert(false, "unknown node type in macro list");
2000      }
2001      assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2002      progress = progress || success;
2003    }
2004  }
2005  // Make sure expansion will not cause node limit to be exceeded.
2006  // Worst case is a macro node gets expanded into about 50 nodes.
2007  // Allow 50% more for optimization.
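  // (50 nodes * 1.5 == 75 nodes budgeted per remaining macro node)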
2008  if (C->check_node_count(C->macro_count() * 75, "out of nodes before macro expansion" ) )
2009    return true;
2010
2011  // expand "macro" nodes
2012  // nodes are removed from the macro list as they are processed
2013  while (C->macro_count() > 0) {
2014    int macro_count = C->macro_count();
2015    Node * n = C->macro_node(macro_count-1);
2016    assert(n->is_macro(), "only macro nodes expected here");
2017    if (_igvn.type(n) == Type::TOP || n->in(0)->is_top() ) {
2018      // node is unreachable, so don't try to expand it
2019      C->remove_macro_node(n);
2020      continue;
2021    }
2022    switch (n->class_id()) {
2023    case Node::Class_Allocate:
2024      expand_allocate(n->as_Allocate());
2025      break;
2026    case Node::Class_AllocateArray:
2027      expand_allocate_array(n->as_AllocateArray());
2028      break;
2029    case Node::Class_Lock:
2030      expand_lock_node(n->as_Lock());
2031      break;
2032    case Node::Class_Unlock:
2033      expand_unlock_node(n->as_Unlock());
2034      break;
2035    default:
2036      assert(false, "unknown node type in macro list");
2037    }
2038    assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
2039    if (C->failing())  return true;
2040  }
2041
2042  _igvn.set_delay_transform(false);
2043  _igvn.optimize();
2044  return false;
2045}
2046