matcher.cpp revision 6646:b596a1063e90
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_arm
# include "adfiles/ad_arm.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc_32
# include "adfiles/ad_ppc_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif

OptoReg::Name OptoReg::c_frame_pointer;

const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
RegMask Matcher::mreg2regmask[_last_Mach_Reg];
RegMask Matcher::STACK_ONLY_mask;
RegMask Matcher::c_frame_ptr_mask;
const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;

//---------------------------Matcher-------------------------------------------
Matcher::Matcher()
: PhaseTransform( Phase::Ins_Select ),
#ifdef ASSERT
  _old2new_map(C->comp_arena()),
  _new2old_map(C->comp_arena()),
#endif
  _shared_nodes(C->comp_arena()),
  _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  _swallowed(swallowed),
  _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
  _end_inst_chain_rule(_END_INST_CHAIN_RULE),
  _must_clone(must_clone),
  _register_save_policy(register_save_policy),
  _c_reg_save_policy(c_reg_save_policy),
  _register_save_type(register_save_type),
  _ruleName(ruleName),
  _allocation_started(false),
  _states_arena(Chunk::medium_size),
  _visited(&_states_arena),
  _shared(&_states_arena),
  _dontcare(&_states_arena) {
  C->set_matcher(this);

  idealreg2spillmask  [Op_RegI] = NULL;
  idealreg2spillmask  [Op_RegN] = NULL;
  idealreg2spillmask  [Op_RegL] = NULL;
  idealreg2spillmask  [Op_RegF] = NULL;
  idealreg2spillmask  [Op_RegD] = NULL;
  idealreg2spillmask  [Op_RegP] = NULL;
  idealreg2spillmask  [Op_VecS] = NULL;
  idealreg2spillmask  [Op_VecD] = NULL;
  idealreg2spillmask  [Op_VecX] = NULL;
  idealreg2spillmask  [Op_VecY] = NULL;

  idealreg2debugmask  [Op_RegI] = NULL;
  idealreg2debugmask  [Op_RegN] = NULL;
  idealreg2debugmask  [Op_RegL] = NULL;
  idealreg2debugmask  [Op_RegF] = NULL;
  idealreg2debugmask  [Op_RegD] = NULL;
  idealreg2debugmask  [Op_RegP] = NULL;
  idealreg2debugmask  [Op_VecS] = NULL;
  idealreg2debugmask  [Op_VecD] = NULL;
  idealreg2debugmask  [Op_VecX] = NULL;
  idealreg2debugmask  [Op_VecY] = NULL;

  idealreg2mhdebugmask[Op_RegI] = NULL;
  idealreg2mhdebugmask[Op_RegN] = NULL;
  idealreg2mhdebugmask[Op_RegL] = NULL;
  idealreg2mhdebugmask[Op_RegF] = NULL;
  idealreg2mhdebugmask[Op_RegD] = NULL;
  idealreg2mhdebugmask[Op_RegP] = NULL;
  idealreg2mhdebugmask[Op_VecS] = NULL;
  idealreg2mhdebugmask[Op_VecD] = NULL;
  idealreg2mhdebugmask[Op_VecX] = NULL;
  idealreg2mhdebugmask[Op_VecY] = NULL;

  debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
}

//------------------------------warp_incoming_stk_arg------------------------
// This warps a VMReg into an OptoReg::Name
OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
  OptoReg::Name warped;
  if( reg->is_stack() ) {  // Stack slot argument?
    warped = OptoReg::add(_old_SP, reg->reg2stack() );
    warped = OptoReg::add(warped, C->out_preserve_stack_slots());
    if( warped >= _in_arg_limit )
      _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
    if (!RegMask::can_represent_arg(warped)) {
      // the compiler cannot represent this method's calling sequence
      C->record_method_not_compilable_all_tiers("unsupported incoming calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}
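// Illustrative sketch (not part of the build): with hypothetical values
// _old_SP == 16, reg->reg2stack() == 2 and out_preserve_stack_slots() == 0,
// a stack argument warps to OptoReg::add(16, 2) == 18, and _in_arg_limit is
// bumped to 19 when slot 18 is the highest biased slot seen so far.
// Register arguments pass through unchanged via OptoReg::as_OptoReg(reg).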

//---------------------------compute_old_SP------------------------------------
OptoReg::Name Compile::compute_old_SP() {
  int fixed    = fixed_slots();
  int preserve = in_preserve_stack_slots();
  return OptoReg::stack2reg(round_to(fixed + preserve, Matcher::stack_alignment_in_slots()));
}
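// Worked example (hypothetical numbers): fixed_slots() == 3,
// in_preserve_stack_slots() == 4 and stack_alignment_in_slots() == 2 give
// round_to(3 + 4, 2) == 8, so the old SP is OptoReg::stack2reg(8).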



#ifdef ASSERT
void Matcher::verify_new_nodes_only(Node* xroot) {
  // Make sure that the new graph only references new nodes
  ResourceMark rm;
  Unique_Node_List worklist;
  VectorSet visited(Thread::current()->resource_area());
  worklist.push(xroot);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    visited <<= n->_idx;
    assert(C->node_arena()->contains(n), "dead node");
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != NULL) {
        assert(C->node_arena()->contains(in), "dead node");
        if (!visited.test(in->_idx)) {
          worklist.push(in);
        }
      }
    }
  }
}
#endif


//---------------------------match---------------------------------------------
void Matcher::match( ) {
  if( MaxLabelRootDepth < 100 ) { // Too small?
    assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
    MaxLabelRootDepth = 100;
  }
  // One-time initialization of some register masks.
  init_spill_mask( C->root()->in(1) );
  _return_addr_mask = return_addr();
#ifdef _LP64
  // Pointers take 2 slots in 64-bit land
  _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
#endif

  // Map a Java-signature return type into return register-value
  // machine registers for 0, 1 and 2 returned values.
  const TypeTuple *range = C->tf()->range();
  if( range->cnt() > TypeFunc::Parms ) { // If not a void function
    // Get ideal-register return type
    int ireg = range->field_at(TypeFunc::Parms)->ideal_reg();
    // Get machine return register
    uint sop = C->start()->Opcode();
    OptoRegPair regs = return_value(ireg, false);

    // And mask for same
    _return_value_mask = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      _return_value_mask.Insert(regs.second());
  }

  // ---------------
  // Frame Layout

  // Need the method signature to determine the incoming argument types,
  // because the types determine which registers the incoming arguments are
  // in, and this affects the matched code.
  const TypeTuple *domain = C->tf()->domain();
  uint             argcnt = domain->cnt() - TypeFunc::Parms;
  BasicType *sig_bt        = NEW_RESOURCE_ARRAY( BasicType, argcnt );
  VMRegPair *vm_parm_regs  = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
  _parm_regs               = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
  _calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
  uint i;
  for( i = 0; i<argcnt; i++ ) {
    sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
  }

  // Pass array of ideal registers and length to USER code (from the AD file)
  // that will convert this to an array of register numbers.
  const StartNode *start = C->start();
  start->calling_convention( sig_bt, vm_parm_regs, argcnt );
#ifdef ASSERT
  // Sanity check users' calling convention.  Real handy while trying to
  // get the initial port correct.
  { for (uint i = 0; i<argcnt; i++) {
      if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
        assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halves" );
        _parm_regs[i].set_bad();
        continue;
      }
      VMReg parm_reg = vm_parm_regs[i].first();
      assert(parm_reg->is_valid(), "invalid arg?");
      if (parm_reg->is_reg()) {
        OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
        assert(can_be_java_arg(opto_parm_reg) ||
               C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
               opto_parm_reg == inline_cache_reg(),
               "parameters in register must be preserved by runtime stubs");
      }
      for (uint j = 0; j < i; j++) {
        assert(parm_reg != vm_parm_regs[j].first(),
               "calling conv. must produce distinct regs");
      }
    }
  }
#endif

  // Do some initial frame layout.

  // Compute the old incoming SP (may be called FP) as
  //   OptoReg::stack0() + locks + in_preserve_stack_slots + pad2.
  _old_SP = C->compute_old_SP();
  assert( is_even(_old_SP), "must be even" );

  // Compute highest incoming stack argument as
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  _in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  assert( is_even(_in_arg_limit), "out_preserve must be even" );
  for( i = 0; i < argcnt; i++ ) {
    // Permit args to have no register
    _calling_convention_mask[i].Clear();
    if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
      continue;
    }
    // calling_convention returns stack arguments as a count of
    // slots beyond OptoReg::stack0()/VMRegImpl::stack0.  We need to convert this to
    // the allocator's point of view, taking into account all the
    // preserve area, locks & pad2.

    OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
    if( OptoReg::is_valid(reg1))
      _calling_convention_mask[i].Insert(reg1);

    OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
    if( OptoReg::is_valid(reg2))
      _calling_convention_mask[i].Insert(reg2);

    // Saved biased stack-slot register number
    _parm_regs[i].set_pair(reg2, reg1);
  }

  // Finally, make sure the incoming arguments take up an even number of
  // words, in case the arguments or locals need to contain doubleword stack
  // slots.  The rest of the system assumes that stack slot pairs (in
  // particular, in the spill area) which look aligned will in fact be
  // aligned relative to the stack pointer in the target machine.  Double
  // stack slots will always be allocated aligned.
  _new_SP = OptoReg::Name(round_to(_in_arg_limit, RegMask::SlotsPerLong));

  // Compute highest outgoing stack argument as
  //   _new_SP + out_preserve_stack_slots + max(outgoing argument size).
  _out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(_out_arg_limit), "out_preserve must be even" );

  if (!RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1))) {
    // the compiler cannot represent this method's calling sequence
    C->record_method_not_compilable("must be able to represent all call arguments in reg mask");
  }

  if (C->failing())  return;  // bailed out on incoming arg failure

  // ---------------
  // Collect roots of matcher trees.  Every node for which
  // _shared[_idx] is cleared is guaranteed to not be shared, and thus
  // can be a valid interior of some tree.
  find_shared( C->root() );
  find_shared( C->top() );

  C->print_method(PHASE_BEFORE_MATCHING);

  // Create new ideal node ConP #NULL even if it does exist in old space
  // to avoid false sharing if the corresponding mach node is not used.
  // The corresponding mach node is only used in rare cases for derived
  // pointers.
  Node* new_ideal_null = ConNode::make(C, TypePtr::NULL_PTR);

  // Swap out to old-space; emptying new-space
  Arena *old = C->node_arena()->move_contents(C->old_arena());

  // Save debug and profile information for nodes in old space:
  _old_node_note_array = C->node_note_array();
  if (_old_node_note_array != NULL) {
    C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
                           (C->comp_arena(), _old_node_note_array->length(),
                            0, NULL));
  }

  // Pre-size the new_node table to avoid the need for range checks.
  grow_new_node_array(C->unique());

  // Reset node counter so MachNodes start with _idx at 0
  int nodes = C->unique(); // save value
  C->set_unique(0);
  C->reset_dead_node_list();

  // Recursively match trees from old space into new space.
  // Correct leaves of new-space Nodes; they point to old-space.
  _visited.Clear();             // Clear visit bits for xform call
  C->set_cached_top_node(xform( C->top(), nodes ));
  if (!C->failing()) {
    Node* xroot =        xform( C->root(), 1 );
    if (xroot == NULL) {
      Matcher::soft_match_failure();  // recursive matching process failed
      C->record_method_not_compilable("instruction match failed");
    } else {
      // During matching shared constants were attached to C->root()
      // because xroot wasn't available yet, so transfer the uses to
      // the xroot.
      for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
        Node* n = C->root()->fast_out(j);
        if (C->node_arena()->contains(n)) {
          assert(n->in(0) == C->root(), "should be control user");
          n->set_req(0, xroot);
          --j;
          --jmax;
        }
      }

      // Generate new mach node for ConP #NULL
      assert(new_ideal_null != NULL, "sanity");
      _mach_null = match_tree(new_ideal_null);
      // Don't set control, it will confuse GCM since there are no uses.
      // The control will be set when this node is used first time
      // in find_base_for_derived().
      assert(_mach_null != NULL, "");

      C->set_root(xroot->is_Root() ? xroot->as_Root() : NULL);

#ifdef ASSERT
      verify_new_nodes_only(xroot);
#endif
    }
  }
  if (C->top() == NULL || C->root() == NULL) {
    C->record_method_not_compilable("graph lost"); // %%% cannot happen?
  }
  if (C->failing()) {
    // delete old;
    old->destruct_contents();
    return;
  }
  assert( C->top(), "" );
  assert( C->root(), "" );
  validate_null_checks();

  // Now smoke old-space
  NOT_DEBUG( old->destruct_contents() );

  // ------------------------
  // Set up save-on-entry registers
  Fixup_Save_On_Entry( );
}


//------------------------------Fixup_Save_On_Entry----------------------------
// The stated purpose of this routine is to take care of save-on-entry
// registers.  However, the overall goal of the Match phase is to convert into
// machine-specific instructions which have RegMasks to guide allocation.
// So what this procedure really does is put a valid RegMask on each input
// to the machine-specific variations of all Return, TailCall and Halt
// instructions.  It also adds edges to define the save-on-entry values (and of
// course gives them a mask).

static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
  RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
  // Do all the pre-defined register masks
  rms[TypeFunc::Control  ] = RegMask::Empty;
  rms[TypeFunc::I_O      ] = RegMask::Empty;
  rms[TypeFunc::Memory   ] = RegMask::Empty;
  rms[TypeFunc::ReturnAdr] = ret_adr;
  rms[TypeFunc::FramePtr ] = fp;
  return rms;
}
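// Usage sketch (descriptive only): Fixup_Save_On_Entry() below calls this
// once per exit flavor, e.g.
//   RegMask *ret_rms = init_input_masks(ret_edge_cnt + soe_cnt,
//                                       _return_addr_mask, c_frame_ptr_mask);
// and then fills in the slots from TypeFunc::Parms upward with the masks for
// returned values and save-on-entry registers.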

//---------------------------init_first_stack_mask-----------------------------
// Create the initial stack mask used by values spilling to the stack.
// Disallow any debug info in outgoing argument areas by setting the
// initial mask accordingly.
void Matcher::init_first_stack_mask() {

  // Allocate storage for spill masks as masks for the appropriate load type.
  RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * (3*6+4));

  idealreg2spillmask  [Op_RegN] = &rms[0];
  idealreg2spillmask  [Op_RegI] = &rms[1];
  idealreg2spillmask  [Op_RegL] = &rms[2];
  idealreg2spillmask  [Op_RegF] = &rms[3];
  idealreg2spillmask  [Op_RegD] = &rms[4];
  idealreg2spillmask  [Op_RegP] = &rms[5];

  idealreg2debugmask  [Op_RegN] = &rms[6];
  idealreg2debugmask  [Op_RegI] = &rms[7];
  idealreg2debugmask  [Op_RegL] = &rms[8];
  idealreg2debugmask  [Op_RegF] = &rms[9];
  idealreg2debugmask  [Op_RegD] = &rms[10];
  idealreg2debugmask  [Op_RegP] = &rms[11];

  idealreg2mhdebugmask[Op_RegN] = &rms[12];
  idealreg2mhdebugmask[Op_RegI] = &rms[13];
  idealreg2mhdebugmask[Op_RegL] = &rms[14];
  idealreg2mhdebugmask[Op_RegF] = &rms[15];
  idealreg2mhdebugmask[Op_RegD] = &rms[16];
  idealreg2mhdebugmask[Op_RegP] = &rms[17];

  idealreg2spillmask  [Op_VecS] = &rms[18];
  idealreg2spillmask  [Op_VecD] = &rms[19];
  idealreg2spillmask  [Op_VecX] = &rms[20];
  idealreg2spillmask  [Op_VecY] = &rms[21];

  OptoReg::Name i;

  // At first, start with the empty mask
  C->FIRST_STACK_mask().Clear();

  // Add in the incoming argument area
  OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Add in all bits past the outgoing argument area
  guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
            "must be able to represent all call arguments in reg mask");
  OptoReg::Name init = _out_arg_limit;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Finally, set the "infinite stack" bit.
  C->FIRST_STACK_mask().set_AllStack();

  // Make spill masks.  Registers for their class, plus FIRST_STACK_mask.
  RegMask aligned_stack_mask = C->FIRST_STACK_mask();
  // Keep spill masks aligned.
  aligned_stack_mask.clear_to_pairs();
  assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");

  *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
#ifdef _LP64
  *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
   idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
   idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
#else
   idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
#endif
  *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
   idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
   idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
  *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
   idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
   idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);

  if (Matcher::vector_size_supported(T_BYTE,4)) {
    *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
     idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
  }
  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
    // RA guarantees such alignment since it is needed for Double and Long values.
    *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
     idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    // For VecX we need quadro alignment and 16 bytes (4 slots) for spills.
    //
    // RA can use input arguments stack slots for spills but until RA
    // we don't know frame size and offset of input arg stack slots.
    //
    // Exclude last input arg stack slots to avoid spilling vectors there
    // otherwise vector spills could stomp over stack slots in caller frame.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
     idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    // For VecY we need octo alignment and 32 bytes (8 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
     idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
  }
   if (UseFPUForSpilling) {
     // This mask logic assumes that the spill operations are
     // symmetric and that the registers involved are the same size.
     // On sparc, for instance, we may have to use 64-bit moves that will
     // kill 2 registers when used with F0-F31.
     idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
     idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
#ifdef _LP64
     idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
     idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
     idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
     idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
#else
     idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
#ifdef ARM
     // ARM has support for moving 64bit values between a pair of
     // integer registers and a double register
     idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
     idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
#endif
#endif
   }

  // Make up debug masks.  Any spill slot plus callee-save registers.
  // Caller-save registers are assumed to be trashable by the various
  // inline-cache fixup routines.
  *idealreg2debugmask  [Op_RegN]= *idealreg2spillmask[Op_RegN];
  *idealreg2debugmask  [Op_RegI]= *idealreg2spillmask[Op_RegI];
  *idealreg2debugmask  [Op_RegL]= *idealreg2spillmask[Op_RegL];
  *idealreg2debugmask  [Op_RegF]= *idealreg2spillmask[Op_RegF];
  *idealreg2debugmask  [Op_RegD]= *idealreg2spillmask[Op_RegD];
  *idealreg2debugmask  [Op_RegP]= *idealreg2spillmask[Op_RegP];

  *idealreg2mhdebugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
  *idealreg2mhdebugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
  *idealreg2mhdebugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
  *idealreg2mhdebugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
  *idealreg2mhdebugmask[Op_RegD]= *idealreg2spillmask[Op_RegD];
  *idealreg2mhdebugmask[Op_RegP]= *idealreg2spillmask[Op_RegP];

  // Prevent stub compilations from attempting to reference
  // callee-saved registers from debug info
  bool exclude_soe = !Compile::current()->is_method_compilation();

  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    // registers the caller has to save do not work
    if( _register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A' ||
        (_register_save_policy[i] == 'E' && exclude_soe) ) {
      idealreg2debugmask  [Op_RegN]->Remove(i);
      idealreg2debugmask  [Op_RegI]->Remove(i); // Exclude save-on-call
      idealreg2debugmask  [Op_RegL]->Remove(i); // registers from debug
      idealreg2debugmask  [Op_RegF]->Remove(i); // masks
      idealreg2debugmask  [Op_RegD]->Remove(i);
      idealreg2debugmask  [Op_RegP]->Remove(i);

      idealreg2mhdebugmask[Op_RegN]->Remove(i);
      idealreg2mhdebugmask[Op_RegI]->Remove(i);
      idealreg2mhdebugmask[Op_RegL]->Remove(i);
      idealreg2mhdebugmask[Op_RegF]->Remove(i);
      idealreg2mhdebugmask[Op_RegD]->Remove(i);
      idealreg2mhdebugmask[Op_RegP]->Remove(i);
    }
  }

  // Subtract the register we use to save the SP for MethodHandle
  // invokes from the debug mask.
  const RegMask save_mask = method_handle_invoke_SP_save_mask();
  idealreg2mhdebugmask[Op_RegN]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegI]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegL]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegF]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegD]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegP]->SUBTRACT(save_mask);
}
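// Summary of the mask layering built above (descriptive only):
//   idealreg2spillmask  [op] = idealreg2regmask[op] OR (aligned) stack slots
//   idealreg2debugmask  [op] = spillmask minus caller-save/excluded SOE regs
//   idealreg2mhdebugmask[op] = debugmask minus method_handle_invoke_SP_save_mask()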

//---------------------------is_save_on_entry----------------------------------
bool Matcher::is_save_on_entry( int reg ) {
  return
    _register_save_policy[reg] == 'E' ||
    _register_save_policy[reg] == 'A' || // Save-on-entry register?
    // Also save argument registers in the trampolining stubs
    (C->save_argument_registers() && is_spillable_arg(reg));
}

//---------------------------Fixup_Save_On_Entry-------------------------------
void Matcher::Fixup_Save_On_Entry( ) {
  init_first_stack_mask();

  Node *root = C->root();       // Short name for root
  // Count number of save-on-entry registers.
  uint soe_cnt = number_of_saved_registers();
  uint i;

  // Find the procedure Start Node
  StartNode *start = C->start();
  assert( start, "Expect a start node" );

  // Save argument registers in the trampolining stubs
  if( C->save_argument_registers() )
    for( i = 0; i < _last_Mach_Reg; i++ )
      if( is_spillable_arg(i) )
        soe_cnt++;

  // Input RegMask array shared by all Returns.
  // The type for doubles and longs has a count of 2, but
  // there is only 1 returned value
  uint ret_edge_cnt = TypeFunc::Parms + ((C->tf()->range()->cnt() == TypeFunc::Parms) ? 0 : 1);
  RegMask *ret_rms  = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Returns have 0 or 1 returned values depending on call signature.
  // Return register is specified by return_value in the AD file.
  if (ret_edge_cnt > TypeFunc::Parms)
    ret_rms[TypeFunc::Parms+0] = _return_value_mask;

  // Input RegMask array shared by all Rethrows.
  uint reth_edge_cnt = TypeFunc::Parms+1;
  RegMask *reth_rms  = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Rethrow takes exception oop only, but in the argument 0 slot.
  reth_rms[TypeFunc::Parms] = mreg2regmask[find_receiver(false)];
#ifdef _LP64
  // Need two slots for ptrs in 64-bit land
  reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(find_receiver(false)),1));
#endif

  // Input RegMask array shared by all TailCalls
  uint tail_call_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all TailJumps
  uint tail_jump_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // TailCalls have 2 returned values (target & moop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailCall to extract these masks and put the correct masks into
  // the tail_call_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailCall ) {
      tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // TailJumps have 2 returned values (target & ex_oop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailJump to extract these masks and put the correct masks into
  // the tail_jump_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailJump ) {
      tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // Input RegMask array shared by all Halts
  uint halt_edge_cnt = TypeFunc::Parms;
  RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Capture the return input masks into each exit flavor
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *exit = root->in(i)->as_MachReturn();
    switch( exit->ideal_Opcode() ) {
      case Op_Return   : exit->_in_rms = ret_rms;  break;
      case Op_Rethrow  : exit->_in_rms = reth_rms; break;
      case Op_TailCall : exit->_in_rms = tail_call_rms; break;
      case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
      case Op_Halt     : exit->_in_rms = halt_rms; break;
      default          : ShouldNotReachHere();
    }
  }

  // Next unused projection number from Start.
  int proj_cnt = C->tf()->domain()->cnt();

  // Do all the save-on-entry registers.  Make projections from Start for
  // them, and give them a use at the exit points.  To the allocator, they
  // look like incoming register arguments.
  for( i = 0; i < _last_Mach_Reg; i++ ) {
    if( is_save_on_entry(i) ) {

      // Add the save-on-entry to the mask array
      ret_rms      [      ret_edge_cnt] = mreg2regmask[i];
      reth_rms     [     reth_edge_cnt] = mreg2regmask[i];
      tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
      tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
      // Halts need the SOE registers, but only in the stack as debug info.
      // A just-prior uncommon-trap or deoptimization will use the SOE regs.
      halt_rms     [     halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];

      Node *mproj;

      // Is this a RegF low half of a RegD?  Double up 2 adjacent RegF's
      // into a single RegD.
      if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegF &&
          _register_save_type[i+1] == Op_RegF &&
          is_save_on_entry(i+1) ) {
        // Add other bit for double
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
        proj_cnt += 2;          // Skip 2 for doubles
      }
      else if( (i&1) == 1 &&    // Else check for high half of double
               _register_save_type[i-1] == Op_RegF &&
               _register_save_type[i  ] == Op_RegF &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      }
      // Is this a RegI low half of a RegL?  Double up 2 adjacent RegI's
      // into a single RegL.
      else if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegI &&
          _register_save_type[i+1] == Op_RegI &&
        is_save_on_entry(i+1) ) {
        // Add other bit for long
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
        proj_cnt += 2;          // Skip 2 for longs
      }
      else if( (i&1) == 1 &&    // Else check for high half of long
               _register_save_type[i-1] == Op_RegI &&
               _register_save_type[i  ] == Op_RegI &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      } else {
        // Make a projection for it off the Start
        mproj = new MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] );
      }

      ret_edge_cnt ++;
      reth_edge_cnt ++;
      tail_call_edge_cnt ++;
      tail_jump_edge_cnt ++;
      halt_edge_cnt ++;

      // Add a use of the SOE register to all exit paths
      for( uint j=1; j < root->req(); j++ )
        root->in(j)->add_req(mproj);
    } // End of if a save-on-entry register
  } // End of for all machine registers
}
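// Illustrative effect (hypothetical register R with save policy 'E'): the
// loop above makes a MachProjNode of R off Start and appends it as an extra
// input to every Return/Rethrow/TailCall/TailJump/Halt, so to the allocator
// R looks like an incoming argument that must live unchanged to each exit.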

//------------------------------init_spill_mask--------------------------------
void Matcher::init_spill_mask( Node *ret ) {
  if( idealreg2regmask[Op_RegI] ) return; // One time only init

  OptoReg::c_frame_pointer = c_frame_pointer();
  c_frame_ptr_mask = c_frame_pointer();
#ifdef _LP64
  // pointers are twice as big
  c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
#endif

  // Start at OptoReg::stack0()
  STACK_ONLY_mask.Clear();
  OptoReg::Name init = OptoReg::stack2reg(0);
  // STACK_ONLY_mask is all stack bits
  OptoReg::Name i;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
    STACK_ONLY_mask.Insert(i);
  // Also set the "infinite stack" bit.
  STACK_ONLY_mask.set_AllStack();

  // Copy the register names over into the shared world
  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    // SharedInfo::regName[i] = regName[i];
    // Handy RegMasks per machine register
    mreg2regmask[i].Insert(i);
  }

  // Grab the Frame Pointer
  Node *fp  = ret->in(TypeFunc::FramePtr);
  Node *mem = ret->in(TypeFunc::Memory);
  const TypePtr* atp = TypePtr::BOTTOM;
  // Share frame pointer while making spill ops
  set_shared(fp);

  // Compute generic short-offset Loads
#ifdef _LP64
  MachNode *spillCP = match_tree(new LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
#endif
  MachNode *spillI  = match_tree(new LoadINode(NULL,mem,fp,atp,TypeInt::INT,MemNode::unordered));
  MachNode *spillL  = match_tree(new LoadLNode(NULL,mem,fp,atp,TypeLong::LONG,MemNode::unordered,false));
  MachNode *spillF  = match_tree(new LoadFNode(NULL,mem,fp,atp,Type::FLOAT,MemNode::unordered));
  MachNode *spillD  = match_tree(new LoadDNode(NULL,mem,fp,atp,Type::DOUBLE,MemNode::unordered));
  MachNode *spillP  = match_tree(new LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
  assert(spillI != NULL && spillL != NULL && spillF != NULL &&
         spillD != NULL && spillP != NULL, "");
  // Get the ADLC notion of the right regmask, for each basic type.
#ifdef _LP64
  idealreg2regmask[Op_RegN] = &spillCP->out_RegMask();
#endif
  idealreg2regmask[Op_RegI] = &spillI->out_RegMask();
  idealreg2regmask[Op_RegL] = &spillL->out_RegMask();
  idealreg2regmask[Op_RegF] = &spillF->out_RegMask();
  idealreg2regmask[Op_RegD] = &spillD->out_RegMask();
  idealreg2regmask[Op_RegP] = &spillP->out_RegMask();

  // Vector regmasks.
  if (Matcher::vector_size_supported(T_BYTE,4)) {
    TypeVect::VECTS = TypeVect::make(T_BYTE, 4);
    MachNode *spillVectS = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTS));
    idealreg2regmask[Op_VecS] = &spillVectS->out_RegMask();
  }
  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    MachNode *spillVectD = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTD));
    idealreg2regmask[Op_VecD] = &spillVectD->out_RegMask();
  }
  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    MachNode *spillVectX = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTX));
    idealreg2regmask[Op_VecX] = &spillVectX->out_RegMask();
  }
  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    MachNode *spillVectY = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTY));
    idealreg2regmask[Op_VecY] = &spillVectY->out_RegMask();
  }
}
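// Note on the trick above: the short-offset loads (spillI, spillL, ...) exist
// only so their matched MachNodes can report the ADLC's out_RegMask() for
// each ideal register class; they never appear in the final graph.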

#ifdef ASSERT
static void match_alias_type(Compile* C, Node* n, Node* m) {
  if (!VerifyAliases)  return;  // do not go looking for trouble by default
  const TypePtr* nat = n->adr_type();
  const TypePtr* mat = m->adr_type();
  int nidx = C->get_alias_index(nat);
  int midx = C->get_alias_index(mat);
  // Detune the assert for cases like (AndI 0xFF (LoadB p)).
  if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
    for (uint i = 1; i < n->req(); i++) {
      Node* n1 = n->in(i);
      const TypePtr* n1at = n1->adr_type();
      if (n1at != NULL) {
        nat = n1at;
        nidx = C->get_alias_index(n1at);
      }
    }
  }
  // %%% Kludgery.  Instead, fix ideal adr_type methods for all these cases:
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
    switch (n->Opcode()) {
    case Op_PrefetchRead:
    case Op_PrefetchWrite:
    case Op_PrefetchAllocation:
      nidx = Compile::AliasIdxRaw;
      nat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_ClearArray:
      midx = Compile::AliasIdxRaw;
      mat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
    switch (n->Opcode()) {
    case Op_Return:
    case Op_Rethrow:
    case Op_Halt:
    case Op_TailCall:
    case Op_TailJump:
      nidx = Compile::AliasIdxBot;
      nat = TypePtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_AryEq:
    case Op_MemBarVolatile:
    case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
    case Op_EncodeISOArray:
      nidx = Compile::AliasIdxTop;
      nat = NULL;
      break;
    }
  }
  if (nidx != midx) {
    if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) {
      tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx);
      n->dump();
      m->dump();
    }
    assert(C->subsume_loads() && C->must_alias(nat, midx),
           "must not lose alias info when matching");
  }
}
#endif


//------------------------------MStack-----------------------------------------
// State and MStack class used in xform() and find_shared() iterative methods.
enum Node_State { Pre_Visit,  // node has to be pre-visited
                      Visit,  // visit node
                 Post_Visit,  // post-visit node
             Alt_Post_Visit   // alternative post-visit path
                };

class MStack: public Node_Stack {
  public:
    MStack(int size) : Node_Stack(size) { }

    void push(Node *n, Node_State ns) {
      Node_Stack::push(n, (uint)ns);
    }
    void push(Node *n, Node_State ns, Node *parent, int indx) {
      ++_inode_top;
      if ((_inode_top + 1) >= _inode_max) grow();
      _inode_top->node = parent;
      _inode_top->indx = (uint)indx;
      ++_inode_top;
      _inode_top->node = n;
      _inode_top->indx = (uint)ns;
    }
    Node *parent() {
      pop();
      return node();
    }
    Node_State state() const {
      return (Node_State)index();
    }
    void set_state(Node_State ns) {
      set_index((uint)ns);
    }
};
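// Usage sketch (descriptive only, mirrors xform() below): the two-entry push
// records both the child and its parent/input index, so a later Post_Visit
// can reconnect the edge without recursion:
//   mstack.push(child, Visit, parent, input_index);
//   ...
//   Node *p = mstack.parent();      // pops the child, exposes parent/index
//   p->set_req(mstack.index(), n);  // index >= 0 means a required input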


//------------------------------xform------------------------------------------
// Given a Node in old-space, Match him (Label/Reduce) to produce a machine
// Node in new-space.  Given a new-space Node, recursively walk his children.
Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
Node *Matcher::xform( Node *n, int max_stack ) {
  // Use one stack to keep both: child's node/state and parent's node/index
  MStack mstack(max_stack * 2 * 2); // C->unique() * 2 * 2
  mstack.push(n, Visit, NULL, -1);  // set NULL as parent to indicate root

  while (mstack.is_nonempty()) {
    C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
    if (C->failing()) return NULL;
    n = mstack.node();          // Leave node on stack
    Node_State nstate = mstack.state();
    if (nstate == Visit) {
      mstack.set_state(Post_Visit);
      Node *oldn = n;
      // Old-space or new-space check
      if (!C->node_arena()->contains(n)) {
        // Old space!
        Node* m;
        if (has_new_node(n)) {  // Not yet Label/Reduced
          m = new_node(n);
        } else {
          if (!is_dontcare(n)) { // Matcher can match this guy
            // Calls match special.  They match alone with no children.
            // Their children, the incoming arguments, match normally.
            m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
            if (C->failing())  return NULL;
            if (m == NULL) { Matcher::soft_match_failure(); return NULL; }
          } else {                  // Nothing the matcher cares about
            if( n->is_Proj() && n->in(0)->is_Multi()) {       // Projections?
              // Convert to machine-dependent projection
              m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
#ifdef ASSERT
              _new2old_map.map(m->_idx, n);
#endif
              if (m->in(0) != NULL) // m might be top
                collect_null_checks(m, n);
            } else {                // Else just a regular 'ol guy
              m = n->clone();       // So just clone into new-space
#ifdef ASSERT
              _new2old_map.map(m->_idx, n);
#endif
              // Def-Use edges will be added incrementally as Uses
              // of this node are matched.
              assert(m->outcnt() == 0, "no Uses of this clone yet");
            }
          }

          set_new_node(n, m);       // Map old to new
          if (_old_node_note_array != NULL) {
            Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
                                                  n->_idx);
            C->set_node_notes_at(m->_idx, nn);
          }
          debug_only(match_alias_type(C, n, m));
        }
        n = m;    // n is now a new-space node
        mstack.set_node(n);
      }

      // New space!
      if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())

      int i;
      // Put precedence edges on stack first (match them last).
      for (i = oldn->req(); (uint)i < oldn->len(); i++) {
        Node *m = oldn->in(i);
        if (m == NULL) break;
        // set -1 to call add_prec() instead of set_req() during Step1
        mstack.push(m, Visit, n, -1);
      }

      // For constant debug info, I'd rather have unmatched constants.
      int cnt = n->req();
      JVMState* jvms = n->jvms();
      int debug_cnt = jvms ? jvms->debug_start() : cnt;

      // Now do only debug info.  Clone constants rather than matching.
      // Constants are represented directly in the debug info without
      // the need for executable machine instructions.
      // Monitor boxes are also represented directly.
      for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
        Node *m = n->in(i);          // Get input
        int op = m->Opcode();
        assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
        if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
            op == Op_ConF || op == Op_ConD || op == Op_ConL
            // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
            ) {
          m = m->clone();
#ifdef ASSERT
          _new2old_map.map(m->_idx, n);
#endif
          mstack.push(m, Post_Visit, n, i); // Don't need to visit
          mstack.push(m->in(0), Visit, m, 0);
        } else {
          mstack.push(m, Visit, n, i);
        }
      }

      // And now walk his children, and convert his inputs to new-space.
      for( ; i >= 0; --i ) { // For all normal inputs do
        Node *m = n->in(i);  // Get input
        if(m != NULL)
          mstack.push(m, Visit, n, i);
      }

    }
    else if (nstate == Post_Visit) {
      // Set xformed input
      Node *p = mstack.parent();
      if (p != NULL) { // root doesn't have parent
        int i = (int)mstack.index();
        if (i >= 0)
          p->set_req(i, n); // required input
        else if (i == -1)
          p->add_prec(n);   // precedence input
        else
          ShouldNotReachHere();
      }
      mstack.pop(); // remove processed node from stack
    }
    else {
      ShouldNotReachHere();
    }
  } // while (mstack.is_nonempty())
  return n; // Return new-space Node
}
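// Note: xform() is the iterative replacement for the recursive transform()
// stub above; the explicit MStack (sized max_stack * 2 * 2) avoids native
// stack overflow on very deep ideal graphs.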

//------------------------------warp_outgoing_stk_arg------------------------
OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) {
  // Convert outgoing argument location to a pre-biased stack offset
  if (reg->is_stack()) {
    OptoReg::Name warped = reg->reg2stack();
    // Adjust the stack slot offset to be the register number used
    // by the allocator.
    warped = OptoReg::add(begin_out_arg_area, warped);
    // Keep track of the largest numbered stack slot used for an arg.
    // Largest used slot per call-site indicates the amount of stack
    // that is killed by the call.
    if( warped >= out_arg_limit_per_call )
      out_arg_limit_per_call = OptoReg::add(warped,1);
    if (!RegMask::can_represent_arg(warped)) {
      C->record_method_not_compilable_all_tiers("unsupported calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}
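// Worked example (hypothetical values): begin_out_arg_area == 20 and
// reg->reg2stack() == 3 warp to OptoReg::add(20, 3) == 23, raising
// out_arg_limit_per_call to 24 if this is the largest slot this call site
// has used so far.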


//------------------------------match_sfpt-------------------------------------
// Helper function to match call instructions.  Calls match special.
// They match alone with no children.  Their children, the incoming
// arguments, match normally.
MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
  MachSafePointNode *msfpt = NULL;
  MachCallNode      *mcall = NULL;
  uint               cnt;
  // Split out case for SafePoint vs Call
  CallNode *call;
  const TypeTuple *domain;
  ciMethod*        method = NULL;
  bool             is_method_handle_invoke = false;  // for special kill effects
  if( sfpt->is_Call() ) {
    call = sfpt->as_Call();
    domain = call->tf()->domain();
    cnt = domain->cnt();

    // Match just the call, nothing else
    MachNode *m = match_tree(call);
    if (C->failing())  return NULL;
    if( m == NULL ) { Matcher::soft_match_failure(); return NULL; }

    // Copy data from the Ideal SafePoint to the machine version
    mcall = m->as_MachCall();

    mcall->set_tf(         call->tf());
    mcall->set_entry_point(call->entry_point());
    mcall->set_cnt(        call->cnt());

    if( mcall->is_MachCallJava() ) {
      MachCallJavaNode *mcall_java  = mcall->as_MachCallJava();
      const CallJavaNode *call_java =  call->as_CallJava();
      method = call_java->method();
      mcall_java->_method = method;
      mcall_java->_bci = call_java->_bci;
      mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
      is_method_handle_invoke = call_java->is_method_handle_invoke();
      mcall_java->_method_handle_invoke = is_method_handle_invoke;
      if (is_method_handle_invoke) {
        C->set_has_method_handle_invokes(true);
      }
      if( mcall_java->is_MachCallStaticJava() )
        mcall_java->as_MachCallStaticJava()->_name =
         call_java->as_CallStaticJava()->_name;
      if( mcall_java->is_MachCallDynamicJava() )
        mcall_java->as_MachCallDynamicJava()->_vtable_index =
         call_java->as_CallDynamicJava()->_vtable_index;
    }
    else if( mcall->is_MachCallRuntime() ) {
      mcall->as_MachCallRuntime()->_name = call->as_CallRuntime()->_name;
    }
    msfpt = mcall;
  }
  // This is a non-call safepoint
  else {
    call = NULL;
    domain = NULL;
    MachNode *mn = match_tree(sfpt);
    if (C->failing())  return NULL;
    msfpt = mn->as_MachSafePoint();
    cnt = TypeFunc::Parms;
  }

  // Advertise the correct memory effects (for anti-dependence computation).
  msfpt->set_adr_type(sfpt->adr_type());

  // Allocate a private array of RegMasks.  These RegMasks are not shared.
  msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt );
  // Empty them all.
  memset( msfpt->_in_rms, 0, sizeof(RegMask)*cnt );

  // Do all the pre-defined non-Empty register masks
  msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask;
  msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask;

  // Place where the first outgoing argument can possibly be put.
  OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(begin_out_arg_area), "" );
  // Compute max outgoing register number per call site.
  OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
  // Calls to C may hammer extra stack slots above and beyond any arguments.
  // These are usually backing store for register arguments for varargs.
  if( call != NULL && call->is_CallRuntime() )
    out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());


  // Do the normal argument list (parameters) register masks
  int argcnt = cnt - TypeFunc::Parms;
  if( argcnt > 0 ) {          // Skip it all if we have no args
    BasicType *sig_bt  = NEW_RESOURCE_ARRAY( BasicType, argcnt );
    VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
    int i;
    for( i = 0; i < argcnt; i++ ) {
      sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
    }
    // V-call to pick proper calling convention
    call->calling_convention( sig_bt, parm_regs, argcnt );

#ifdef ASSERT
    // Sanity check users' calling convention.  Really handy during
    // the initial porting effort.  Fairly expensive otherwise.
    { for (int i = 0; i<argcnt; i++) {
      if( !parm_regs[i].first()->is_valid() &&
          !parm_regs[i].second()->is_valid() ) continue;
      VMReg reg1 = parm_regs[i].first();
      VMReg reg2 = parm_regs[i].second();
      for (int j = 0; j < i; j++) {
        if( !parm_regs[j].first()->is_valid() &&
            !parm_regs[j].second()->is_valid() ) continue;
        VMReg reg3 = parm_regs[j].first();
        VMReg reg4 = parm_regs[j].second();
        if( !reg1->is_valid() ) {
          assert( !reg2->is_valid(), "valid halvsies" );
        } else if( !reg3->is_valid() ) {
          assert( !reg4->is_valid(), "valid halvsies" );
        } else {
          assert( reg1 != reg2, "calling conv. must produce distinct regs");
          assert( reg1 != reg3, "calling conv. must produce distinct regs");
          assert( reg1 != reg4, "calling conv. must produce distinct regs");
          assert( reg2 != reg3, "calling conv. must produce distinct regs");
          assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs");
          assert( reg3 != reg4, "calling conv. must produce distinct regs");
        }
      }
    }
    }
#endif

    // Visit each argument.  Compute its outgoing register mask.
    // Returned results can now have 2 bits set in the mask.
1284    // Compute max over all outgoing arguments both per call-site
1285    // and over the entire method.
1286    for( i = 0; i < argcnt; i++ ) {
1287      // Address of incoming argument mask to fill in
1288      RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms];
1289      if( !parm_regs[i].first()->is_valid() &&
1290          !parm_regs[i].second()->is_valid() ) {
1291        continue;               // Avoid Halves
1292      }
1293      // Grab first register, adjust stack slots and insert in mask.
1294      OptoReg::Name reg1 = warp_outgoing_stk_arg(parm_regs[i].first(), begin_out_arg_area, out_arg_limit_per_call );
1295      if (OptoReg::is_valid(reg1))
1296        rm->Insert( reg1 );
1297      // Grab second register (if any), adjust stack slots and insert in mask.
1298      OptoReg::Name reg2 = warp_outgoing_stk_arg(parm_regs[i].second(), begin_out_arg_area, out_arg_limit_per_call );
1299      if (OptoReg::is_valid(reg2))
1300        rm->Insert( reg2 );
1301    } // End of for all arguments
1302
1303    // Compute number of stack slots needed to restore stack in case of
1304    // Pascal-style argument popping.
1305    mcall->_argsize = out_arg_limit_per_call - begin_out_arg_area;
1306  }
1307
1308  // Compute the max stack slot killed by any call.  These will not be
1309  // available for debug info, and will be used to adjust FIRST_STACK_mask
1310  // after all call sites have been visited.
1311  if( _out_arg_limit < out_arg_limit_per_call)
1312    _out_arg_limit = out_arg_limit_per_call;
1313
1314  if (mcall) {
1315    // Kill the outgoing argument area, including any non-argument holes and
1316    // any legacy C-killed slots.  Use Fat-Projections to do the killing.
1317    // Since the max-per-method covers the max-per-call-site and debug info
1318    // is excluded on the max-per-method basis, debug info cannot land in
1319    // this killed area.
1320    uint r_cnt = mcall->tf()->range()->cnt();
1321    MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
1322    if (!RegMask::can_represent_arg(OptoReg::Name(out_arg_limit_per_call-1))) {
1323      C->record_method_not_compilable_all_tiers("unsupported outgoing calling sequence");
1324    } else {
1325      for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
1326        proj->_rout.Insert(OptoReg::Name(i));
1327    }
1328    if (proj->_rout.is_NotEmpty()) {
1329      push_projection(proj);
1330    }
1331  }
1332  // Transfer the safepoint information from the call to the mcall
1333  // Move the JVMState list
1334  msfpt->set_jvms(sfpt->jvms());
1335  for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
1336    jvms->set_map(sfpt);
1337  }
1338
1339  // Debug inputs begin just after the last incoming parameter
1340  assert((mcall == NULL) || (mcall->jvms() == NULL) ||
1341         (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "");
1342
1343  // Move the OopMap
1344  msfpt->_oop_map = sfpt->_oop_map;
1345
1346  // Add additional edges.
1347  if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
1348    // For these calls we cannot add MachConstantBase in expand(), as the
1349    // ins are not complete then.
1350    msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
1351    if (msfpt->jvms() &&
1352        msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
1353      // We added an edge before jvms, so we must adapt the position of the ins.
1354      msfpt->jvms()->adapt_position(+1);
1355    }
1356  }
1357
1358  // Registers killed by the call are set in the local scheduling pass
1359  // of Global Code Motion.
1360  return msfpt;
1361}
1362
1363//---------------------------match_tree----------------------------------------
1364// Match an Ideal Node DAG - turn it into a tree; Label & Reduce.  Used as part
1365// of the wholesale conversion from Ideal to Mach Nodes.  Also used for
1366// making GotoNodes while building the CFG and in init_spill_mask() to identify
1367// a Load's result RegMask for memoization in idealreg2regmask[].
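// A rough sketch of the flow (a summary of the code below, not new behavior):
//   1. Label_Root walks the subtree bottom-up, running the ADLC-generated
//      DFA at each node to record (rule, cost) candidates in State objects.
//   2. The cheapest instruction rule at the root State is selected, and
//      ReduceInst rebuilds the tree as MachNodes/MachOpers from the labels.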
1368MachNode *Matcher::match_tree( const Node *n ) {
1369  assert( n->Opcode() != Op_Phi, "cannot match" );
1370  assert( !n->is_block_start(), "cannot match" );
1371  // Set the mark for all locally allocated State objects.
1372  // When this call returns, the _states_arena arena will be reset
1373  // freeing all State objects.
1374  ResourceMark rm( &_states_arena );
1375
1376  LabelRootDepth = 0;
1377
1378  // StoreNodes require their Memory input to match any LoadNodes
1379  Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
1380#ifdef ASSERT
1381  Node* save_mem_node = _mem_node;
1382  _mem_node = n->is_Store() ? (Node*)n : NULL;
1383#endif
1384  // State object for root node of match tree
1385  // Allocate it on _states_arena - stack allocation can cause stack overflow.
1386  State *s = new (&_states_arena) State;
1387  s->_kids[0] = NULL;
1388  s->_kids[1] = NULL;
1389  s->_leaf = (Node*)n;
1390  // Label the input tree, allocating labels from top-level arena
1391  Label_Root( n, s, n->in(0), mem );
1392  if (C->failing())  return NULL;
1393
1394  // The minimum cost match for the whole tree is found at the root State
1395  uint mincost = max_juint;
1396  uint cost = max_juint;
1397  uint i;
1398  for( i = 0; i < NUM_OPERANDS; i++ ) {
1399    if( s->valid(i) &&                // valid entry and
1400        s->_cost[i] < cost &&         // low cost and
1401        s->_rule[i] >= NUM_OPERANDS ) // not an operand
1402      cost = s->_cost[mincost=i];
1403  }
1404  if (mincost == max_juint) {
1405#ifndef PRODUCT
1406    tty->print("No matching rule for:");
1407    s->dump();
1408#endif
1409    Matcher::soft_match_failure();
1410    return NULL;
1411  }
1412  // Reduce input tree based upon the state labels to machine Nodes
1413  MachNode *m = ReduceInst( s, s->_rule[mincost], mem );
1414#ifdef ASSERT
1415  _old2new_map.map(n->_idx, m);
1416  _new2old_map.map(m->_idx, (Node*)n);
1417#endif
1418
1419  // Add any Matcher-ignored edges
1420  uint cnt = n->req();
1421  uint start = 1;
1422  if( mem != (Node*)1 ) start = MemNode::Memory+1;
1423  if( n->is_AddP() ) {
1424    assert( mem == (Node*)1, "" );
1425    start = AddPNode::Base+1;
1426  }
1427  for( i = start; i < cnt; i++ ) {
1428    if( !n->match_edge(i) ) {
1429      if( i < m->req() )
1430        m->ins_req( i, n->in(i) );
1431      else
1432        m->add_req( n->in(i) );
1433    }
1434  }
1435
1436  debug_only( _mem_node = save_mem_node; )
1437  return m;
1438}
1439
1440
1441//------------------------------match_into_reg---------------------------------
1442// Choose to either match this Node in a register or part of the current
1443// match tree.  Return true for requiring a register and false for matching
1444// as part of the current match tree.
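// Illustrative cases (a sketch of the checks below):
//  - A constant (singleton type) is never forced into a register here; it
//    may match in place, and copies of the same value share one MachNode.
//  - A node whose control differs from the tree's control, with no
//    post-domination found within the short scan, breaks the match tree.
//  - Otherwise a shared node is matched into a register of its own.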
1445static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) {
1446
1447  const Type *t = m->bottom_type();
1448
1449  if (t->singleton()) {
1450    // Never force constants into registers.  Allow them to match as
1451    // constants or registers.  Copies of the same value will share
1452    // the same register.  See find_shared_node.
1453    return false;
1454  } else {                      // Not a constant
1455    // Stop recursion if they have different Controls.
1456    Node* m_control = m->in(0);
1457    // The control of the load's memory can post-dominate the load's control.
1458    // So use it, since the load can't float above its memory.
1459    Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : NULL;
1460    if (control && m_control && control != m_control && control != mem_control) {
1461
1462      // Actually, we can live with the most conservative control we
1463      // find, if it post-dominates the others.  This allows us to
1464      // pick up load/op/store trees where the load can float a little
1465      // above the store.
1466      Node *x = control;
1467      const uint max_scan = 6;  // Arbitrary scan cutoff
1468      uint j;
1469      for (j=0; j<max_scan; j++) {
1470        if (x->is_Region())     // Bail out at merge points
1471          return true;
1472        x = x->in(0);
1473        if (x == m_control)     // Does 'control' post-dominate
1474          break;                // m->in(0)?  If so, we can use it
1475        if (x == mem_control)   // Does 'control' post-dominate
1476          break;                // mem_control?  If so, we can use it
1477      }
1478      if (j == max_scan)        // No post-domination before scan end?
1479        return true;            // Then break the match tree up
1480    }
1481    if ((m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) ||
1482        (m->is_DecodeNKlass() && Matcher::narrow_klass_use_complex_address())) {
1483      // These are commonly used in address expressions and can
1484      // efficiently fold into them on X64 in some cases.
1485      return false;
1486    }
1487  }
1488
1489  // Not forceable cloning.  If shared, put it into a register.
1490  return shared;
1491}
1492
1493
1494//------------------------------Instruction Selection--------------------------
1495// Label method walks a "tree" of nodes, using the ADLC generated DFA to match
1496// ideal nodes to machine instructions.  Trees are delimited by shared Nodes,
1497// things the Matcher does not match (e.g., Memory), and things with different
1498// Controls (hence forced into different blocks).  We pass in the Control
1499// selected for this entire State tree.
1500
1501// The Matcher works on Trees, but an Intel add-to-memory requires a DAG: the
1502// Store and the Load must have identical Memories (as well as identical
1503// pointers).  Since the Matcher does not have anything for Memory (and
1504// does not handle DAGs), I have to match the Memory input myself.  If the
1505// Tree root is a Store, I require all Loads to have the identical memory.
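// For example (an illustrative sketch): when matching the add-to-memory tree
//   (StoreI mem addr (AddI (LoadI mem' addr) v))
// the LoadI may only be swallowed into the Store's match tree if mem' == mem;
// a Load with any other Memory input becomes a separate match tree (see the
// input_mem/NodeSentinel checks below).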
1506Node *Matcher::Label_Root( const Node *n, State *svec, Node *control, const Node *mem){
1507  // Since Label_Root is a recursive function, it's possible that we might run
1508  // out of stack space.  See bugs 6272980 & 6227033 for more info.
1509  LabelRootDepth++;
1510  if (LabelRootDepth > MaxLabelRootDepth) {
1511    C->record_method_not_compilable_all_tiers("Out of stack space, increase MaxLabelRootDepth");
1512    return NULL;
1513  }
1514  uint care = 0;                // Edges matcher cares about
1515  uint cnt = n->req();
1516  uint i = 0;
1517
1518  // Examine children for memory state
1519  // Can only subsume a child into your match-tree if that child's memory state
1520  // is not modified along the path to another input.
1521  // It is unsafe even if the other inputs are separate roots.
1522  Node *input_mem = NULL;
1523  for( i = 1; i < cnt; i++ ) {
1524    if( !n->match_edge(i) ) continue;
1525    Node *m = n->in(i);         // Get ith input
1526    assert( m, "expect non-null children" );
1527    if( m->is_Load() ) {
1528      if( input_mem == NULL ) {
1529        input_mem = m->in(MemNode::Memory);
1530      } else if( input_mem != m->in(MemNode::Memory) ) {
1531        input_mem = NodeSentinel;
1532      }
1533    }
1534  }
1535
1536  for( i = 1; i < cnt; i++ ){// For my children
1537    if( !n->match_edge(i) ) continue;
1538    Node *m = n->in(i);         // Get ith input
1539    // Allocate states out of a private arena
1540    State *s = new (&_states_arena) State;
1541    svec->_kids[care++] = s;
1542    assert( care <= 2, "binary only for now" );
1543
1544    // Recursively label the State tree.
1545    s->_kids[0] = NULL;
1546    s->_kids[1] = NULL;
1547    s->_leaf = m;
1548
1549    // Check for leaves of the State Tree; things that cannot be a part of
1550    // the current tree.  If it finds any, that value is matched as a
1551    // register operand.  If not, then the normal matching is used.
1552    if( match_into_reg(n, m, control, i, is_shared(m)) ||
1553        //
1554        // Stop recursion if this is a LoadNode and the root of this tree is a
1555        // StoreNode and the load & store have different memories.
1556        ((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ||
1557        // Can NOT include the match of a subtree when its memory state
1558        // is used by any of the other subtrees
1559        (input_mem == NodeSentinel) ) {
1560#ifndef PRODUCT
1561      // Print when we exclude matching due to different memory states at input-loads
1562      if( PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
1563        && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ) {
1564        tty->print_cr("invalid input_mem");
1565      }
1566#endif
1567      // Switch to a register-only opcode; this value must be in a register
1568      // and cannot be subsumed as part of a larger instruction.
1569      s->DFA( m->ideal_reg(), m );
1570
1571    } else {
1572      // If match tree has no control and we do, adopt it for entire tree
1573      if( control == NULL && m->in(0) != NULL && m->req() > 1 )
1574        control = m->in(0);         // Pick up control
1575      // Else match as a normal part of the match tree.
1576      control = Label_Root(m,s,control,mem);
1577      if (C->failing()) return NULL;
1578    }
1579  }
1580
1581
1582  // Call DFA to match this node, and return
1583  svec->DFA( n->Opcode(), n );
1584
1585#ifdef ASSERT
1586  uint x;
1587  for( x = 0; x < _LAST_MACH_OPER; x++ )
1588    if( svec->valid(x) )
1589      break;
1590
1591  if (x >= _LAST_MACH_OPER) {
1592    n->dump();
1593    svec->dump();
1594    assert( false, "bad AD file" );
1595  }
1596#endif
1597  return control;
1598}
1599
1600
1601// Con nodes reduced using the same rule can share their MachNode
1602// which reduces the number of copies of a constant in the final
1603// program.  The register allocator is free to split uses later to
1604// split live ranges.
1605MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
1606  if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return NULL;
1607
1608  // See if this Con has already been reduced using this rule.
1609  if (_shared_nodes.Size() <= leaf->_idx) return NULL;
1610  MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
1611  if (last != NULL && rule == last->rule()) {
1612    // Don't expect control change for DecodeN
1613    if (leaf->is_DecodeNarrowPtr())
1614      return last;
1615    // Get the new space root.
1616    Node* xroot = new_node(C->root());
1617    if (xroot == NULL) {
1618      // This shouldn't happen given the order of matching.
1619      return NULL;
1620    }
1621
1622    // Shared constants need to have their control be root so they
1623    // can be scheduled properly.
1624    Node* control = last->in(0);
1625    if (control != xroot) {
1626      if (control == NULL || control == C->root()) {
1627        last->set_req(0, xroot);
1628      } else {
1629        assert(false, "unexpected control");
1630        return NULL;
1631      }
1632    }
1633    return last;
1634  }
1635  return NULL;
1636}
1637
1638
1639//------------------------------ReduceInst-------------------------------------
1640// Reduce a State tree (with given Control) into a tree of MachNodes.
1641// This routine (and its cohort ReduceOper) converts Ideal Nodes into
1642// complicated machine Nodes.  Each MachNode covers some tree of Ideal Nodes.
1643// Each MachNode has a number of complicated MachOper operands; each
1644// MachOper also covers a further tree of Ideal Nodes.
1645
1646// The root of the Ideal match tree is always an instruction, so we enter
1647// the recursion here.  After building the MachNode, we need to recurse
1648// the tree checking for these cases:
1649// (1) Child is an instruction -
1650//     Build the instruction (recursively), add it as an edge.
1651//     Build a simple operand (register) to hold the result of the instruction.
1652// (2) Child is an interior part of an instruction -
1653//     Skip over it (do nothing)
1654// (3) Child is the start of an operand -
1655//     Build the operand, place it inside the instruction
1656//     Call ReduceOper.
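// A sketch of the cases for a hypothetical tree (AddI (LoadI mem addr) (ConI 8)):
//  (1) if the LoadI matched as its own instruction, it is built recursively
//      and a register operand is created to catch its result;
//  (2) interior nodes covered by the add's own rule are skipped;
//  (3) the (ConI 8) that begins an immediate operand is built as a MachOper
//      and handed to ReduceOper.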
1657MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
1658  assert( rule >= NUM_OPERANDS, "called with operand rule" );
1659
1660  MachNode* shared_node = find_shared_node(s->_leaf, rule);
1661  if (shared_node != NULL) {
1662    return shared_node;
1663  }
1664
1665  // Build the object to represent this state & prepare for recursive calls
1666  MachNode *mach = s->MachNodeGenerator( rule, C );
1667  mach->_opnds[0] = s->MachOperGenerator( _reduceOp[rule], C );
1668  assert( mach->_opnds[0] != NULL, "Missing result operand" );
1669  Node *leaf = s->_leaf;
1670  // Check for instruction or instruction chain rule
1671  if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
1672    assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
1673           "duplicating node that's already been matched");
1674    // Instruction
1675    mach->add_req( leaf->in(0) ); // Set initial control
1676    // Reduce interior of complex instruction
1677    ReduceInst_Interior( s, rule, mem, mach, 1 );
1678  } else {
1679    // Instruction chain rules are data-dependent on their inputs
1680    mach->add_req(0);             // Set initial control to none
1681    ReduceInst_Chain_Rule( s, rule, mem, mach );
1682  }
1683
1684  // If a Memory was used, insert a Memory edge
1685  if( mem != (Node*)1 ) {
1686    mach->ins_req(MemNode::Memory,mem);
1687#ifdef ASSERT
1688    // Verify adr type after matching memory operation
1689    const MachOper* oper = mach->memory_operand();
1690    if (oper != NULL && oper != (MachOper*)-1) {
1691      // It has a unique memory operand.  Find corresponding ideal mem node.
1692      Node* m = NULL;
1693      if (leaf->is_Mem()) {
1694        m = leaf;
1695      } else {
1696        m = _mem_node;
1697        assert(m != NULL && m->is_Mem(), "expecting memory node");
1698      }
1699      const Type* mach_at = mach->adr_type();
1700      // A DecodeN node consumed by an address may have a different type
1701      // than its input.  Don't compare types in that case.
1702      if (m->adr_type() != mach_at &&
1703          (m->in(MemNode::Address)->is_DecodeNarrowPtr() ||
1704           (m->in(MemNode::Address)->is_AddP() &&
1705            m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()) ||
1706           (m->in(MemNode::Address)->is_AddP() &&
1707            m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
1708            m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()))) {
1709        mach_at = m->adr_type();
1710      }
1711      if (m->adr_type() != mach_at) {
1712        m->dump();
1713        tty->print_cr("mach:");
1714        mach->dump(1);
1715      }
1716      assert(m->adr_type() == mach_at, "matcher should not change adr type");
1717    }
1718#endif
1719  }
1720
1721  // If the _leaf is an AddP, insert the base edge
1722  if (leaf->is_AddP()) {
1723    mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
1724  }
1725
1726  uint number_of_projections_prior = number_of_projections();
1727
1728  // Perform any 1-to-many expansions required
1729  MachNode *ex = mach->Expand(s, _projection_list, mem);
1730  if (ex != mach) {
1731    assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
1732    if( ex->in(1)->is_Con() )
1733      ex->in(1)->set_req(0, C->root());
1734    // Remove old node from the graph
1735    for( uint i=0; i<mach->req(); i++ ) {
1736      mach->set_req(i,NULL);
1737    }
1738#ifdef ASSERT
1739    _new2old_map.map(ex->_idx, s->_leaf);
1740#endif
1741  }
1742
1743  // PhaseChaitin::fixup_spills will sometimes generate spill code
1744  // via the matcher.  By that time, nodes have been wired into the CFG,
1745  // and any further nodes generated by expand rules will be left hanging
1746  // in space, and will not get emitted as output code.  Catch this.
1747  // Also, catch any new register allocation constraints ("projections")
1748  // generated belatedly during spill code generation.
1749  if (_allocation_started) {
1750    guarantee(ex == mach, "no expand rules during spill generation");
1751    guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
1752  }
1753
1754  if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
1755    // Record the con for sharing
1756    _shared_nodes.map(leaf->_idx, ex);
1757  }
1758
1759  return ex;
1760}
1761
1762void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach ) {
1763  // 'op' is what I am expecting to receive
1764  int op = _leftOp[rule];
1765  // Operand type to catch child's result
1766  // This is what my child will give me.
1767  int opnd_class_instance = s->_rule[op];
1768  // Choose between operand class or not.
1769  // This is what I will receive.
1770  int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1771  // New rule for child.  Chase operand classes to get the actual rule.
1772  int newrule = s->_rule[catch_op];
1773
1774  if( newrule < NUM_OPERANDS ) {
1775    // Chain from operand or operand class, may be output of shared node
1776    assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS,
1777            "Bad AD file: Instruction chain rule must chain from operand");
1778    // Insert operand into array of operands for this instruction
1779    mach->_opnds[1] = s->MachOperGenerator( opnd_class_instance, C );
1780
1781    ReduceOper( s, newrule, mem, mach );
1782  } else {
1783    // Chain from the result of an instruction
1784    assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1785    mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
1786    Node *mem1 = (Node*)1;
1787    debug_only(Node *save_mem_node = _mem_node;)
1788    mach->add_req( ReduceInst(s, newrule, mem1) );
1789    debug_only(_mem_node = save_mem_node;)
1790  }
1791  return;
1792}
1793
1794
1795uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
1796  if( s->_leaf->is_Load() ) {
1797    Node *mem2 = s->_leaf->in(MemNode::Memory);
1798    assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1799    debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
1800    mem = mem2;
1801  }
1802  if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
1803    if( mach->in(0) == NULL )
1804      mach->set_req(0, s->_leaf->in(0));
1805  }
1806
1807  // Now recursively walk the state tree & add operand list.
1808  for( uint i=0; i<2; i++ ) {   // binary tree
1809    State *newstate = s->_kids[i];
1810    if( newstate == NULL ) break;      // Might only have 1 child
1811    // 'op' is what I am expecting to receive
1812    int op;
1813    if( i == 0 ) {
1814      op = _leftOp[rule];
1815    } else {
1816      op = _rightOp[rule];
1817    }
1818    // Operand type to catch child's result
1819    // This is what my child will give me.
1820    int opnd_class_instance = newstate->_rule[op];
1821    // Choose between operand class or not.
1822    // This is what I will receive.
1823    int catch_op = (op >= FIRST_OPERAND_CLASS && op < NUM_OPERANDS) ? opnd_class_instance : op;
1824    // New rule for child.  Chase operand classes to get the actual rule.
1825    int newrule = newstate->_rule[catch_op];
1826
1827    if( newrule < NUM_OPERANDS ) { // Operand/operandClass or internalOp/instruction?
1828      // Operand/operandClass
1829      // Insert operand into array of operands for this instruction
1830      mach->_opnds[num_opnds++] = newstate->MachOperGenerator( opnd_class_instance, C );
1831      ReduceOper( newstate, newrule, mem, mach );
1832
1833    } else {                    // Child is internal operand or new instruction
1834      if( newrule < _LAST_MACH_OPER ) { // internal operand or instruction?
1835        // internal operand --> call ReduceInst_Interior
1836        // Interior of complex instruction.  Do nothing but recurse.
1837        num_opnds = ReduceInst_Interior( newstate, newrule, mem, mach, num_opnds );
1838      } else {
1839        // instruction --> call build operand(  ) to catch result
1840        //             --> ReduceInst( newrule )
1841        mach->_opnds[num_opnds++] = s->MachOperGenerator( _reduceOp[catch_op], C );
1842        Node *mem1 = (Node*)1;
1843        debug_only(Node *save_mem_node = _mem_node;)
1844        mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
1845        debug_only(_mem_node = save_mem_node;)
1846      }
1847    }
1848    assert( mach->_opnds[num_opnds-1], "" );
1849  }
1850  return num_opnds;
1851}
1852
1853// This routine walks the interior of possible complex operands.
1854// At each point we check our children in the match tree:
1855// (1) No children -
1856//     We are a leaf; add _leaf field as an input to the MachNode
1857// (2) Child is an internal operand -
1858//     Skip over it ( do nothing )
1859// (3) Child is an instruction -
1860//     Call ReduceInst recursively and
1861//     add the instruction as an input to the MachNode
1862void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
1863  assert( rule < _LAST_MACH_OPER, "called with operand rule" );
1864  State *kid = s->_kids[0];
1865  assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
1866
1867  // Leaf?  And not subsumed?
1868  if( kid == NULL && !_swallowed[rule] ) {
1869    mach->add_req( s->_leaf );  // Add leaf pointer
1870    return;                     // Bail out
1871  }
1872
1873  if( s->_leaf->is_Load() ) {
1874    assert( mem == (Node*)1, "multiple Memories being matched at once?" );
1875    mem = s->_leaf->in(MemNode::Memory);
1876    debug_only(_mem_node = s->_leaf;)
1877  }
1878  if( s->_leaf->in(0) && s->_leaf->req() > 1) {
1879    if( !mach->in(0) )
1880      mach->set_req(0,s->_leaf->in(0));
1881    else {
1882      assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
1883    }
1884  }
1885
1886  for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) {   // binary tree
1887    int newrule;
1888    if( i == 0)
1889      newrule = kid->_rule[_leftOp[rule]];
1890    else
1891      newrule = kid->_rule[_rightOp[rule]];
1892
1893    if( newrule < _LAST_MACH_OPER ) { // Operand or instruction?
1894      // Internal operand; recurse but do nothing else
1895      ReduceOper( kid, newrule, mem, mach );
1896
1897    } else {                    // Child is a new instruction
1898      // Reduce the instruction, and add a direct pointer from this
1899      // machine instruction to the newly reduced one.
1900      Node *mem1 = (Node*)1;
1901      debug_only(Node *save_mem_node = _mem_node;)
1902      mach->add_req( ReduceInst( kid, newrule, mem1 ) );
1903      debug_only(_mem_node = save_mem_node;)
1904    }
1905  }
1906}
1907
1908
1909// -------------------------------------------------------------------------
1910// Java-Java calling convention
1911// (what you use when Java calls Java)
1912
1913//------------------------------find_receiver----------------------------------
1914// For a given signature, return the OptoReg for parameter 0.
1915OptoReg::Name Matcher::find_receiver( bool is_outgoing ) {
1916  VMRegPair regs;
1917  BasicType sig_bt = T_OBJECT;
1918  calling_convention(&sig_bt, &regs, 1, is_outgoing);
1919  // Return argument 0 register.  In the LP64 build pointers
1920  // take 2 registers, but the VM wants only the 'main' name.
1921  return OptoReg::as_OptoReg(regs.first());
1922}
1923
1924// This function identifies sub-graphs in which a 'load' node is
1925// an input to two different nodes, such that the sub-graph can be matched
1926// with BMI instructions like blsi, blsr, etc.
1927// Example: b = -a[i] & a[i] can be matched to blsi r32, m32.
1928// The graph is (AndL (SubL Con0 LoadL*) LoadL*), where LoadL*
1929// refers to the same node.
1930#ifdef X86
1931// Match the generic fused operations pattern (op1 (op2 Con{ConType} mop) mop)
1932// This is a temporary solution until we make DAGs expressible in ADL.
1933template<typename ConType>
1934class FusedPatternMatcher {
1935  Node* _op1_node;
1936  Node* _mop_node;
1937  int _con_op;
1938
1939  static int match_next(Node* n, int next_op, int next_op_idx) {
1940    if (n->in(1) == NULL || n->in(2) == NULL) {
1941      return -1;
1942    }
1943
1944    if (next_op_idx == -1) { // n is commutative, try rotations
1945      if (n->in(1)->Opcode() == next_op) {
1946        return 1;
1947      } else if (n->in(2)->Opcode() == next_op) {
1948        return 2;
1949      }
1950    } else {
1951      assert(next_op_idx > 0 && next_op_idx <= 2, "Bad argument index");
1952      if (n->in(next_op_idx)->Opcode() == next_op) {
1953        return next_op_idx;
1954      }
1955    }
1956    return -1;
1957  }
1958public:
1959  FusedPatternMatcher(Node* op1_node, Node *mop_node, int con_op) :
1960    _op1_node(op1_node), _mop_node(mop_node), _con_op(con_op) { }
1961
1962  bool match(int op1, int op1_op2_idx,  // op1 and the index of the op1->op2 edge, -1 if op1 is commutative
1963             int op2, int op2_con_idx,  // op2 and the index of the op2->con edge, -1 if op2 is commutative
1964             typename ConType::NativeType con_value) {
1965    if (_op1_node->Opcode() != op1) {
1966      return false;
1967    }
1968    if (_mop_node->outcnt() > 2) {
1969      return false;
1970    }
1971    op1_op2_idx = match_next(_op1_node, op2, op1_op2_idx);
1972    if (op1_op2_idx == -1) {
1973      return false;
1974    }
1975    // Memory operation must be the other edge
1976    int op1_mop_idx = (op1_op2_idx & 1) + 1;
1977
1978    // Check that the mop node is really what we want
1979    if (_op1_node->in(op1_mop_idx) == _mop_node) {
1980      Node *op2_node = _op1_node->in(op1_op2_idx);
1981      if (op2_node->outcnt() > 1) {
1982        return false;
1983      }
1984      assert(op2_node->Opcode() == op2, "Should be");
1985      op2_con_idx = match_next(op2_node, _con_op, op2_con_idx);
1986      if (op2_con_idx == -1) {
1987        return false;
1988      }
1989      // Memory operation must be the other edge
1990      int op2_mop_idx = (op2_con_idx & 1) + 1;
1991      // Check that the memory operation is the same node
1992      if (op2_node->in(op2_mop_idx) == _mop_node) {
1993        // Now check the constant
1994        const Type* con_type = op2_node->in(op2_con_idx)->bottom_type();
1995        if (con_type != Type::TOP && ConType::as_self(con_type)->get_con() == con_value) {
1996          return true;
1997        }
1998      }
1999    }
2000    return false;
2001  }
2002};
2003
2004
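// Reading of the match() calls in is_bmi_pattern() below, shown for the
// 32-bit blsi form: bmii.match(Op_AndI, -1, Op_SubI, 1, 0) asks for
// (AndI (SubI 0 LoadI*) LoadI*), where AndI is commutative (index -1),
// the SubI->constant edge must be input 1, and the constant must equal 0.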
2005bool Matcher::is_bmi_pattern(Node *n, Node *m) {
2006  if (n != NULL && m != NULL) {
2007    if (m->Opcode() == Op_LoadI) {
2008      FusedPatternMatcher<TypeInt> bmii(n, m, Op_ConI);
2009      return bmii.match(Op_AndI, -1, Op_SubI,  1,  0)  ||
2010             bmii.match(Op_AndI, -1, Op_AddI, -1, -1)  ||
2011             bmii.match(Op_XorI, -1, Op_AddI, -1, -1);
2012    } else if (m->Opcode() == Op_LoadL) {
2013      FusedPatternMatcher<TypeLong> bmil(n, m, Op_ConL);
2014      return bmil.match(Op_AndL, -1, Op_SubL,  1,  0) ||
2015             bmil.match(Op_AndL, -1, Op_AddL, -1, -1) ||
2016             bmil.match(Op_XorL, -1, Op_AddL, -1, -1);
2017    }
2018  }
2019  return false;
2020}
2021#endif // X86
2022
2023// A method-klass-holder may be passed in the inline_cache_reg
2024// and then expanded into the inline_cache_reg and a method_oop register,
2025// as defined in ad_<arch>.cpp.
2026
2027
2028//------------------------------find_shared------------------------------------
2029// Set bits if Node is shared or otherwise a root
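// A rough summary of the walk below: an explicit work stack visits the graph
// in DFS order; a node reached a second time during Pre_Visit is flagged
// shared (it will match into a register), address expressions are cloned per
// use instead, and Post_Visit restructures a few trinary nodes (CMove,
// CompareAndSwap*, ...) into binary trees for the matcher.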
2030void Matcher::find_shared( Node *n ) {
2031  // Allocate stack of size C->unique() * 2 to avoid frequent realloc
2032  MStack mstack(C->unique() * 2);
2033  // Mark nodes as address_visited if they are inputs to an address expression
2034  VectorSet address_visited(Thread::current()->resource_area());
2035  mstack.push(n, Visit);     // Don't need to pre-visit root node
2036  while (mstack.is_nonempty()) {
2037    n = mstack.node();       // Leave node on stack
2038    Node_State nstate = mstack.state();
2039    uint nop = n->Opcode();
2040    if (nstate == Pre_Visit) {
2041      if (address_visited.test(n->_idx)) { // Visited in address already?
2042        // Flag as visited and shared now.
2043        set_visited(n);
2044      }
2045      if (is_visited(n)) {   // Visited already?
2046        // Node is shared and has no reason to clone.  Flag it as shared.
2047        // This causes it to match into a register for the sharing.
2048        set_shared(n);       // Flag as shared and
2049        mstack.pop();        // remove node from stack
2050        continue;
2051      }
2052      nstate = Visit; // Not already visited; so visit now
2053    }
2054    if (nstate == Visit) {
2055      mstack.set_state(Post_Visit);
2056      set_visited(n);   // Flag as visited now
2057      bool mem_op = false;
2058
2059      switch( nop ) {  // Handle some opcodes special
2060      case Op_Phi:             // Treat Phis as shared roots
2061      case Op_Parm:
2062      case Op_Proj:            // All handled specially during matching
2063      case Op_SafePointScalarObject:
2064        set_shared(n);
2065        set_dontcare(n);
2066        break;
2067      case Op_If:
2068      case Op_CountedLoopEnd:
2069        mstack.set_state(Alt_Post_Visit); // Alternative way
2070        // Convert (If (Bool (CmpX A B))) into (If (Bool) (CmpX A B)).  Helps
2071        // with matching cmp/branch in 1 instruction.  The Matcher needs the
2072        // Bool and CmpX side-by-side, because it can only get at constants
2073        // that are at the leaves of Match trees, and the Bool's condition acts
2074        // as a constant here.
2075        mstack.push(n->in(1), Visit);         // Clone the Bool
2076        mstack.push(n->in(0), Pre_Visit);     // Visit control input
2077        continue; // while (mstack.is_nonempty())
2078      case Op_ConvI2D:         // These forms efficiently match with a prior
2079      case Op_ConvI2F:         //   Load but not a following Store
2080        if( n->in(1)->is_Load() &&        // Prior load
2081            n->outcnt() == 1 &&           // Not already shared
2082            n->unique_out()->is_Store() ) // Following store
2083          set_shared(n);       // Force it to be a root
2084        break;
2085      case Op_ReverseBytesI:
2086      case Op_ReverseBytesL:
2087        if( n->in(1)->is_Load() &&        // Prior load
2088            n->outcnt() == 1 )            // Not already shared
2089          set_shared(n);                  // Force it to be a root
2090        break;
2091      case Op_BoxLock:         // Can't match until we get stack-regs in ADLC
2092      case Op_IfFalse:
2093      case Op_IfTrue:
2094      case Op_MachProj:
2095      case Op_MergeMem:
2096      case Op_Catch:
2097      case Op_CatchProj:
2098      case Op_CProj:
2099      case Op_JumpProj:
2100      case Op_JProj:
2101      case Op_NeverBranch:
2102        set_dontcare(n);
2103        break;
2104      case Op_Jump:
2105        mstack.push(n->in(1), Pre_Visit);     // Switch Value (could be shared)
2106        mstack.push(n->in(0), Pre_Visit);     // Visit Control input
2107        continue;                             // while (mstack.is_nonempty())
2108      case Op_StrComp:
2109      case Op_StrEquals:
2110      case Op_StrIndexOf:
2111      case Op_AryEq:
2112      case Op_EncodeISOArray:
2113        set_shared(n); // Force result into register (it will be anyways)
2114        break;
2115      case Op_ConP: {  // Convert pointers above the centerline to NULL
2116        TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2117        const TypePtr* tp = tn->type()->is_ptr();
2118        if (tp->_ptr == TypePtr::AnyNull) {
2119          tn->set_type(TypePtr::NULL_PTR);
2120        }
2121        break;
2122      }
2123      case Op_ConN: {  // Convert narrow pointers above the centerline to NULL
2124        TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2125        const TypePtr* tp = tn->type()->make_ptr();
2126        if (tp && tp->_ptr == TypePtr::AnyNull) {
2127          tn->set_type(TypeNarrowOop::NULL_PTR);
2128        }
2129        break;
2130      }
2131      case Op_Binary:         // These are introduced in the Post_Visit state.
2132        ShouldNotReachHere();
2133        break;
2134      case Op_ClearArray:
2135      case Op_SafePoint:
2136        mem_op = true;
2137        break;
2138      default:
2139        if( n->is_Store() ) {
2140          // Do match stores, despite no ideal reg
2141          mem_op = true;
2142          break;
2143        }
2144        if( n->is_Mem() ) { // Loads and LoadStores
2145          mem_op = true;
2146          // Loads must be root of match tree due to prior load conflict
2147          if( C->subsume_loads() == false )
2148            set_shared(n);
2149        }
2150        // Fall into default case
2151        if( !n->ideal_reg() )
2152          set_dontcare(n);  // Unmatchable Nodes
2153      } // end_switch
2154
2155      for(int i = n->req() - 1; i >= 0; --i) { // For my children
2156        Node *m = n->in(i); // Get ith input
2157        if (m == NULL) continue;  // Ignore NULLs
2158        uint mop = m->Opcode();
2159
2160        // Must clone all producers of flags, or we will not match correctly.
2161        // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
2162        // then it will match into an ideal Op_RegFlags.  Alas, the fp-flags
2163        // are also there, so we may match a float-branch to int-flags and
2164        // expect the allocator to haul the flags from the int-side to the
2165        // fp-side.  No can do.
2166        if( _must_clone[mop] ) {
2167          mstack.push(m, Visit);
2168          continue; // for(int i = ...)
2169        }
2170
2171        if( mop == Op_AddP && m->in(AddPNode::Base)->is_DecodeNarrowPtr()) {
2172          // Bases used in addresses must be shared but since
2173          // they are shared through a DecodeN they may appear
2174          // to have a single use so force sharing here.
2175          set_shared(m->in(AddPNode::Base)->in(1));
2176        }
2177
2178        // if 'n' and 'm' are part of a graph for BMI instruction, clone this node.
2179#ifdef X86
2180        if (UseBMI1Instructions && is_bmi_pattern(n, m)) {
2181          mstack.push(m, Visit);
2182          continue;
2183        }
2184#endif
2185
2186        // Clone addressing expressions as they are "free" in memory access instructions
2187        if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
2188          // Some inputs for address expression are not put on stack
2189          // to avoid marking them as shared and forcing them into register
2190          // if they are used only in address expressions.
2191          // But they should be marked as shared if there are other uses
2192          // besides address expressions.
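          // For example (a sketch): an x86 address of the form
          //   base + (index << scale) + disp
          // arrives as nested AddP nodes with an LShiftX offset; cloning the
          // pieces per memory use lets each access fold the whole expression
          // into its addressing mode instead of keeping it in a register.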
2193
2194          Node *off = m->in(AddPNode::Offset);
2195          if( off->is_Con() &&
2196              // When there are other uses besides address expressions
2197              // put it on stack and mark as shared.
2198              !is_visited(m) ) {
2199            address_visited.test_set(m->_idx); // Flag as address_visited
2200            Node *adr = m->in(AddPNode::Address);
2201
2202            // Intel, ARM and friends can handle 2 adds in addressing mode
2203            if( clone_shift_expressions && adr->is_AddP() &&
2204                // AtomicAdd is not an addressing expression.
2205                // Cheap to find it by looking for screwy base.
2206                !adr->in(AddPNode::Base)->is_top() &&
2207                // Are there other uses besides address expressions?
2208                !is_visited(adr) ) {
2209              address_visited.set(adr->_idx); // Flag as address_visited
2210              Node *shift = adr->in(AddPNode::Offset);
2211              // Check for shift by small constant as well
2212              if( shift->Opcode() == Op_LShiftX && shift->in(2)->is_Con() &&
2213                  shift->in(2)->get_int() <= 3 &&
2214                  // Are there other uses besides address expressions?
2215                  !is_visited(shift) ) {
2216                address_visited.set(shift->_idx); // Flag as address_visited
2217                mstack.push(shift->in(2), Visit);
2218                Node *conv = shift->in(1);
2219#ifdef _LP64
2220                // Allow the Matcher to match the rule which bypasses
2221                // the ConvI2L operation for an array index on LP64
2222                // if the index value is positive.
2223                if( conv->Opcode() == Op_ConvI2L &&
2224                    conv->as_Type()->type()->is_long()->_lo >= 0 &&
2225                    // Are there other uses besides address expressions?
2226                    !is_visited(conv) ) {
2227                  address_visited.set(conv->_idx); // Flag as address_visited
2228                  mstack.push(conv->in(1), Pre_Visit);
2229                } else
2230#endif
2231                mstack.push(conv, Pre_Visit);
2232              } else {
2233                mstack.push(shift, Pre_Visit);
2234              }
2235              mstack.push(adr->in(AddPNode::Address), Pre_Visit);
2236              mstack.push(adr->in(AddPNode::Base), Pre_Visit);
2237            } else {  // Sparc, Alpha, PPC and friends
2238              mstack.push(adr, Pre_Visit);
2239            }
2240
2241            // Clone X+offset as it also folds into most addressing expressions
2242            mstack.push(off, Visit);
2243            mstack.push(m->in(AddPNode::Base), Pre_Visit);
2244            continue; // for(int i = ...)
2245          } // if( off->is_Con() )
2246        }   // if( mem_op &&
2247        mstack.push(m, Pre_Visit);
2248      }     // for(int i = ...)
2249    }
2250    else if (nstate == Alt_Post_Visit) {
2251      mstack.pop(); // Remove node from stack
2252      // We cannot remove the Cmp input from the Bool here, as the Bool may be
2253      // shared and all users of the Bool need to move the Cmp in parallel.
2254      // This leaves both the Bool and the If pointing at the Cmp.  To
2255      // prevent the Matcher from trying to Match the Cmp along both paths
2256      // BoolNode::match_edge always returns zero.
2257
2258      // We reorder the Op_If in a pre-order manner, so we can visit without
2259      // accidentally sharing the Cmp (the Bool and the If make 2 users).
2260      n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
2261    }
2262    else if (nstate == Post_Visit) {
2263      mstack.pop(); // Remove node from stack
2264
2265      // Now hack a few special opcodes
2266      switch( n->Opcode() ) {       // Handle some opcodes special
2267      case Op_StorePConditional:
2268      case Op_StoreIConditional:
2269      case Op_StoreLConditional:
2270      case Op_CompareAndSwapI:
2271      case Op_CompareAndSwapL:
2272      case Op_CompareAndSwapP:
2273      case Op_CompareAndSwapN: {   // Convert trinary to binary-tree
2274        Node *newval = n->in(MemNode::ValueIn );
2275        Node *oldval  = n->in(LoadStoreConditionalNode::ExpectedIn);
2276        Node *pair = new BinaryNode( oldval, newval );
2277        n->set_req(MemNode::ValueIn,pair);
2278        n->del_req(LoadStoreConditionalNode::ExpectedIn);
2279        break;
2280      }
2281      case Op_CMoveD:              // Convert trinary to binary-tree
2282      case Op_CMoveF:
2283      case Op_CMoveI:
2284      case Op_CMoveL:
2285      case Op_CMoveN:
2286      case Op_CMoveP: {
2287        // Restructure into a binary tree for Matching.  It's possible that
2288        // we could move this code up next to the graph reshaping for IfNodes
2289        // or vice-versa, but I do not want to debug this for Ladybird.
2290        // 10/2/2000 CNC.
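        // Shape of the rewrite (a sketch of what the code below does):
        //   before: CMove(ctl, Bool(Cmp(a,b)), tval, fval)
        //   after:  CMove(ctl, Binary(Bool, Cmp), Binary(tval, fval))
        // so every match tree the DFA sees stays binary.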
2291        Node *pair1 = new BinaryNode(n->in(1),n->in(1)->in(1));
2292        n->set_req(1,pair1);
2293        Node *pair2 = new BinaryNode(n->in(2),n->in(3));
2294        n->set_req(2,pair2);
2295        n->del_req(3);
2296        break;
2297      }
2298      case Op_LoopLimit: {
2299        Node *pair1 = new BinaryNode(n->in(1),n->in(2));
2300        n->set_req(1,pair1);
2301        n->set_req(2,n->in(3));
2302        n->del_req(3);
2303        break;
2304      }
2305      case Op_StrEquals: {
2306        Node *pair1 = new BinaryNode(n->in(2),n->in(3));
2307        n->set_req(2,pair1);
2308        n->set_req(3,n->in(4));
2309        n->del_req(4);
2310        break;
2311      }
2312      case Op_StrComp:
2313      case Op_StrIndexOf: {
2314        Node *pair1 = new BinaryNode(n->in(2),n->in(3));
2315        n->set_req(2,pair1);
2316        Node *pair2 = new BinaryNode(n->in(4),n->in(5));
2317        n->set_req(3,pair2);
2318        n->del_req(5);
2319        n->del_req(4);
2320        break;
2321      }
2322      case Op_EncodeISOArray: {
2323        // Restructure into a binary tree for Matching.
2324        Node* pair = new BinaryNode(n->in(3), n->in(4));
2325        n->set_req(3, pair);
2326        n->del_req(4);
2327        break;
2328      }
2329      default:
2330        break;
2331      }
2332    }
2333    else {
2334      ShouldNotReachHere();
2335    }
2336  } // end of while (mstack.is_nonempty())
2337}
2338
2339#ifdef ASSERT
2340// machine-independent root to machine-dependent root
2341void Matcher::dump_old2new_map() {
2342  _old2new_map.dump();
2343}
2344#endif
2345
2346//---------------------------collect_null_checks-------------------------------
2347// Find null checks in the ideal graph; write a machine-specific node for
2348// it.  Used by later implicit-null-check handling.  Actually collects
2349// either an IfTrue or IfFalse for the common NOT-null path, AND the ideal
2350// value being tested.
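// For example (an illustrative sketch): for Java code like
//   if (p != null) { ... p.f ... }
// the graph contains IfTrue(If(Bool#ne(CmpP(p, NULL)))); the projection for
// the not-null path and the tested value p are pushed onto _null_check_tests
// as a pair.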
2351void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
2352  Node *iff = proj->in(0);
2353  if( iff->Opcode() == Op_If ) {
2354    // During matching If's have Bool & Cmp side-by-side
2355    BoolNode *b = iff->in(1)->as_Bool();
2356    Node *cmp = iff->in(2);
2357    int opc = cmp->Opcode();
2358    if (opc != Op_CmpP && opc != Op_CmpN) return;
2359
2360    const Type* ct = cmp->in(2)->bottom_type();
2361    if (ct == TypePtr::NULL_PTR ||
2362        (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
2363
2364      bool push_it = false;
2365      if( proj->Opcode() == Op_IfTrue ) {
2366        extern int all_null_checks_found;
2367        all_null_checks_found++;
2368        if( b->_test._test == BoolTest::ne ) {
2369          push_it = true;
2370        }
2371      } else {
2372        assert( proj->Opcode() == Op_IfFalse, "" );
2373        if( b->_test._test == BoolTest::eq ) {
2374          push_it = true;
2375        }
2376      }
2377      if( push_it ) {
2378        _null_check_tests.push(proj);
2379        Node* val = cmp->in(1);
2380#ifdef _LP64
2381        if (val->bottom_type()->isa_narrowoop() &&
2382            !Matcher::narrow_oop_use_complex_address()) {
2383          //
2384          // Look for DecodeN node which should be pinned to orig_proj.
2385          // On platforms (Sparc) which cannot handle 2 adds
2386          // in addressing mode we have to keep a DecodeN node and
2387          // use it to do implicit NULL check in address.
2388          //
2389          // DecodeN node was pinned to non-null path (orig_proj) during
2390          // CastPP transformation in final_graph_reshaping_impl().
2391          //
2392          uint cnt = orig_proj->outcnt();
2393          for (uint i = 0; i < cnt; i++) {
2394            Node* d = orig_proj->raw_out(i);
2395            if (d->is_DecodeN() && d->in(1) == val) {
2396              val = d;
2397              val->set_req(0, NULL); // Unpin now.
2398              // Mark this as special case to distinguish from
2399              // a regular case: CmpP(DecodeN, NULL).
2400              val = (Node*)(((intptr_t)val) | 1);
2401              break;
2402            }
2403          }
2404        }
2405#endif
2406        _null_check_tests.push(val);
2407      }
2408    }
2409  }
2410}
2411
2412//---------------------------validate_null_checks------------------------------
2413// It's possible that the value being NULL checked is not the root of a match
2414// tree.  If so, I cannot use the value in an implicit null check.
2415void Matcher::validate_null_checks( ) {
2416  uint cnt = _null_check_tests.size();
2417  for( uint i=0; i < cnt; i+=2 ) {
2418    Node *test = _null_check_tests[i];
2419    Node *val = _null_check_tests[i+1];
2420    bool is_decoden = ((intptr_t)val) & 1;
2421    val = (Node*)(((intptr_t)val) & ~1);
2422    if (has_new_node(val)) {
2423      Node* new_val = new_node(val);
2424      if (is_decoden) {
2425        assert(val->is_DecodeNarrowPtr() && val->in(0) == NULL, "sanity");
2426        // Note: new_val may have a control edge if
2427        // the original ideal node DecodeN was matched before
2428        // it was unpinned in Matcher::collect_null_checks().
2429        // Unpin the mach node and mark it.
2430        new_val->set_req(0, NULL);
2431        new_val = (Node*)(((intptr_t)new_val) | 1);
2432      }
2433      // Is a match-tree root, so replace with the matched value
2434      _null_check_tests.map(i+1, new_val);
2435    } else {
2436      // Yank from candidate list
2437      _null_check_tests.map(i+1,_null_check_tests[--cnt]);
2438      _null_check_tests.map(i,_null_check_tests[--cnt]);
2439      _null_check_tests.pop();
2440      _null_check_tests.pop();
2441      i-=2;
2442    }
2443  }
2444}
2445
2446// Used by the DFA in dfa_xxx.cpp.  Check for a following barrier or
2447// atomic instruction that acts as a store-load barrier without any
2448// intervening volatile load; if one is found, we don't need a barrier here.
2449// We retain the Node to act as a compiler ordering barrier.
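// For example (an illustrative sketch, semantics per the comment above):
// with two volatile stores back to back,
//   StoreP; MemBarVolatile#1; StoreP; MemBarVolatile#2
// the query for #1 may return true, since #2 already provides the store-load
// fence before any volatile load; #1 then emits no fence instruction and is
// kept only for compiler ordering.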
2450bool Matcher::post_store_load_barrier(const Node* vmb) {
2451  Compile* C = Compile::current();
2452  assert(vmb->is_MemBar(), "");
2453  assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
2454  const MemBarNode* membar = vmb->as_MemBar();
2455
2456  // Get the Ideal Proj node, ctrl, that can be used to iterate forward
2457  Node* ctrl = NULL;
2458  for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
2459    Node* p = membar->fast_out(i);
2460    assert(p->is_Proj(), "only projections here");
2461    if ((p->as_Proj()->_con == TypeFunc::Control) &&
2462        !C->node_arena()->contains(p)) { // Unmatched old-space only
2463      ctrl = p;
2464      break;
2465    }
2466  }
2467  assert((ctrl != NULL), "missing control projection");
2468
2469  for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
2470    Node *x = ctrl->fast_out(j);
2471    int xop = x->Opcode();
2472
2473    // We don't need the current barrier if we see another barrier or a
2474    // lock before seeing a volatile load.
2475    //
2476    // Op_Fastunlock previously appeared in the Op_* list below.
2477    // With the advent of 1-0 lock operations we're no longer guaranteed
2478    // that a monitor exit operation contains a serializing instruction.
2479
2480    if (xop == Op_MemBarVolatile ||
2481        xop == Op_CompareAndSwapL ||
2482        xop == Op_CompareAndSwapP ||
2483        xop == Op_CompareAndSwapN ||
2484        xop == Op_CompareAndSwapI) {
2485      return true;
2486    }
2487
2488    // Op_FastLock previously appeared in the Op_* list above.
2489    // With biased locking we're no longer guaranteed that a monitor
2490    // enter operation contains a serializing instruction.
2491    if ((xop == Op_FastLock) && !UseBiasedLocking) {
2492      return true;
2493    }
2494
2495    if (x->is_MemBar()) {
2496      // We must retain this membar if there is an upcoming volatile
2497      // load, which will be followed by an acquire membar.
2498      if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
2499        return false;
2500      } else {
2501        // For other kinds of barriers, check by pretending we
2502        // are them, and seeing if we can be removed.
2503        return post_store_load_barrier(x->as_MemBar());
2504      }
2505    }
2506
2507    // probably not necessary to check for these
2508    if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
2509      return false;
2510    }
2511  }
2512  return false;
2513}
2514
2515// Check whether node n is a branch to an uncommon trap that we could
2516// optimize as a test with very high branch costs in case of going to
2517// the uncommon trap. The code must be able to be recompiled to use
2518// a cheaper test.
2519bool Matcher::branches_to_uncommon_trap(const Node *n) {
2520  // Don't do it for natives, adapters, or runtime stubs
2521  Compile *C = Compile::current();
2522  if (!C->is_method_compilation()) return false;
2523
2524  assert(n->is_If(), "You should only call this on if nodes.");
2525  IfNode *ifn = n->as_If();
2526
2527  Node *ifFalse = NULL;
2528  for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
2529    if (ifn->fast_out(i)->is_IfFalse()) {
2530      ifFalse = ifn->fast_out(i);
2531      break;
2532    }
2533  }
2534  assert(ifFalse, "An If should have an ifFalse. Graph is broken.");
2535
2536  Node *reg = ifFalse;
2537  int cnt = 4; // We must protect against cycles.  Limit to 4 iterations.
2538               // Alternatively use visited set?  Seems too expensive.
2539  while (reg != NULL && cnt > 0) {
2540    CallNode *call = NULL;
2541    RegionNode *nxt_reg = NULL;
2542    for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
2543      Node *o = reg->fast_out(i);
2544      if (o->is_Call()) {
2545        call = o->as_Call();
2546      }
2547      if (o->is_Region()) {
2548        nxt_reg = o->as_Region();
2549      }
2550    }
2551
2552    if (call &&
2553        call->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
2554      const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
2555      if (trtype->isa_int() && trtype->is_int()->is_con()) {
2556        jint tr_con = trtype->is_int()->get_con();
2557        Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
2558        Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
2559        assert((int)reason < (int)BitsPerInt, "recode bit map");
2560
2561        if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
2562            && action != Deoptimization::Action_none) {
2563          // This uncommon trap is sure to recompile, eventually.
2564          // When that happens, C->too_many_traps will prevent
2565          // this transformation from happening again.
2566          return true;
2567        }
2568      }
2569    }
2570
2571    reg = nxt_reg;
2572    cnt--;
2573  }
2574
2575  return false;
2576}
2577
2578//=============================================================================
2579//---------------------------State---------------------------------------------
2580State::State(void) {
2581#ifdef ASSERT
2582  _id = 0;
2583  _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
2584  _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
2585  //memset(_cost, -1, sizeof(_cost));
2586  //memset(_rule, -1, sizeof(_rule));
2587#endif
2588  memset(_valid, 0, sizeof(_valid));
2589}
2590
2591#ifdef ASSERT
2592State::~State() {
2593  _id = 99;
2594  _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
2595  _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
2596  memset(_cost, -3, sizeof(_cost));
2597  memset(_rule, -3, sizeof(_rule));
2598}
2599#endif
2600
2601#ifndef PRODUCT
2602//---------------------------dump----------------------------------------------
2603void State::dump() {
2604  tty->print("\n");
2605  dump(0);
2606}
2607
2608void State::dump(int depth) {
2609  for( int j = 0; j < depth; j++ )
2610    tty->print("   ");
2611  tty->print("--N: ");
2612  _leaf->dump();
2613  uint i;
2614  for( i = 0; i < _LAST_MACH_OPER; i++ )
2615    // Check for valid entry
2616    if( valid(i) ) {
2617      for( int j = 0; j < depth; j++ )
2618        tty->print("   ");
2619      assert(_cost[i] != max_juint, "cost must be a valid value");
2620      assert(_rule[i] < _last_Mach_Node, "rule[i] must be valid rule");
2621      tty->print_cr("%s  %d  %s",
2622                    ruleName[i], _cost[i], ruleName[_rule[i]] );
2623    }
2624  tty->cr();
2625
2626  for( i=0; i<2; i++ )
2627    if( _kids[i] )
2628      _kids[i]->dump(depth+1);
2629}
2630#endif
2631