1/*
2 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2016 SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26#include "precompiled.hpp"
27#include "c1/c1_Compilation.hpp"
28#include "c1/c1_FrameMap.hpp"
29#include "c1/c1_Instruction.hpp"
30#include "c1/c1_LIRAssembler.hpp"
31#include "c1/c1_LIRGenerator.hpp"
32#include "c1/c1_Runtime1.hpp"
33#include "c1/c1_ValueStack.hpp"
34#include "ci/ciArray.hpp"
35#include "ci/ciObjArrayKlass.hpp"
36#include "ci/ciTypeArrayKlass.hpp"
37#include "runtime/sharedRuntime.hpp"
38#include "runtime/stubRoutines.hpp"
39#include "vmreg_s390.inline.hpp"
40
41#ifdef ASSERT
42#define __ gen()->lir(__FILE__, __LINE__)->
43#else
44#define __ gen()->lir()->
45#endif
46
// Load a byte-sized item. On s390 byte values may live in any integer
// register, so this simply delegates to the generic load.
void LIRItem::load_byte_item() {
  // Byte loads use same registers as other loads.
  load_item();
}
51
52void LIRItem::load_nonconstant(int bits) {
53  LIR_Opr r = value()->operand();
54  if (_gen->can_inline_as_constant(value(), bits)) {
55    if (!r->is_constant()) {
56      r = LIR_OprFact::value_type(value()->type());
57    }
58    _result = r;
59  } else {
60    load_item();
61  }
62}
63
64inline void load_int_as_long(LIR_List *ll, LIRItem &li, LIR_Opr dst) {
65  LIR_Opr r = li.value()->operand();
66  if (r->is_constant()) {
67    // Constants get loaded with sign extend on this platform.
68    ll->move(li.result(), dst);
69  } else {
70    if (!r->is_register()) {
71      li.load_item_force(dst);
72    }
73    LIR_Opr dst_l = FrameMap::as_long_opr(dst->as_register());
74    ll->convert(Bytecodes::_i2l, li.result(), dst_l); // Convert.
75  }
76}
77
78//--------------------------------------------------------------
79//               LIRGenerator
80//--------------------------------------------------------------
81
// Fixed-register accessors queried by the platform-independent parts of
// the LIR generator.
LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::as_oop_opr(Z_EXC_OOP); }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::as_opr(Z_EXC_PC); }
// Integer division uses the even/odd register pair Z_R10/Z_R11
// (see do_ArithmeticOp_Int/_Long): quotient in Z_R11, remainder in Z_R10.
LIR_Opr LIRGenerator::divInOpr()        { return FrameMap::Z_R11_opr; }
LIR_Opr LIRGenerator::divOutOpr()       { return FrameMap::Z_R11_opr; }
LIR_Opr LIRGenerator::remOutOpr()       { return FrameMap::Z_R10_opr; }
LIR_Opr LIRGenerator::ldivInOpr()       { return FrameMap::Z_R11_long_opr; }
LIR_Opr LIRGenerator::ldivOutOpr()      { return FrameMap::Z_R11_long_opr; }
LIR_Opr LIRGenerator::lremOutOpr()      { return FrameMap::Z_R10_long_opr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::Z_R13_opr; }
// No scratch register is reserved for the thread pointer here.
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }
93
94LIR_Opr LIRGenerator::result_register_for (ValueType* type, bool callee) {
95  LIR_Opr opr;
96  switch (type->tag()) {
97    case intTag:    opr = FrameMap::Z_R2_opr;        break;
98    case objectTag: opr = FrameMap::Z_R2_oop_opr;    break;
99    case longTag:   opr = FrameMap::Z_R2_long_opr;   break;
100    case floatTag:  opr = FrameMap::Z_F0_opr;        break;
101    case doubleTag: opr = FrameMap::Z_F0_double_opr; break;
102
103    case addressTag:
104    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
105  }
106
107  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
108  return opr;
109}
110
// Lock a result register for a byte value. On s390 any integer register
// can address its low byte, so a plain T_INT register suffices.
LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}
114
115//--------- Loading items into registers. --------------------------------
116
117// z/Architecture cannot inline all constants.
118bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
119  if (v->type()->as_IntConstant() != NULL) {
120    return Immediate::is_simm16(v->type()->as_IntConstant()->value());
121  } else if (v->type()->as_LongConstant() != NULL) {
122    return Immediate::is_simm16(v->type()->as_LongConstant()->value());
123  } else if (v->type()->as_ObjectConstant() != NULL) {
124    return v->type()->as_ObjectConstant()->value()->is_null_object();
125  } else {
126    return false;
127  }
128}
129
130bool LIRGenerator::can_inline_as_constant(Value i, int bits) const {
131  if (i->type()->as_IntConstant() != NULL) {
132    return Assembler::is_simm(i->type()->as_IntConstant()->value(), bits);
133  } else if (i->type()->as_LongConstant() != NULL) {
134    return Assembler::is_simm(i->type()->as_LongConstant()->value(), bits);
135  } else {
136    return can_store_as_constant(i, as_BasicType(i->type()));
137  }
138}
139
140bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
141  if (c->type() == T_INT) {
142    return Immediate::is_simm20(c->as_jint());
143  } else   if (c->type() == T_LONG) {
144    return Immediate::is_simm20(c->as_jlong());
145  }
146  return false;
147}
148
// Register used to hold the safepoint poll page address; any long
// register chosen by the allocator will do.
LIR_Opr LIRGenerator::safepoint_poll_register() {
  return new_register(longType);
}
152
// Build a LIR_Address for base + (index << shift) + disp, materializing
// parts into registers when they do not fit the s390 addressing mode.
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  if (index->is_constant()) {
    // Fold the scaled constant index into the displacement.
    intptr_t large_disp = ((intx)(index->as_constant_ptr()->as_jint()) << shift) + disp;
    if (Displacement::is_validDisp(large_disp)) {
      return new LIR_Address(base, large_disp, type);
    }
    // Index is illegal so replace it with the displacement loaded into a register.
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    return new LIR_Address(base, index, type);
  } else {
    if (shift > 0) {
      // Pre-scale the index into a fresh register; the address itself
      // is then formed without a scale factor.
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    return new LIR_Address(base, index, disp, type);
  }
}
174
// Compute the address of an array element. If 'needs_card_mark' is set,
// the full element address is materialized in a register so that a
// precise card mark can reuse it.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    // Constant index: fold header offset and scaled index into the displacement.
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
    if (index_opr->type() == T_INT) {
      // Sign-extend the int index to 64 bit for address arithmetic.
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
    if (shift > 0) {
      // Scale the index in place (operates on the freshly converted
      // register for T_INT indices).
      __ shift_left(index_opr, shift, index_opr);
    }
    addr = new LIR_Address(array_opr,
                           index_opr,
                           offset_in_bytes, type);
  }
  if (needs_card_mark) {
    // This store will need a precise card mark, so go ahead and
    // compute the full address instead of computing once for the
    // store and again for the card mark.
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(addr), tmp);
    return new LIR_Address(tmp, type);
  } else {
    return addr;
  }
}
209
210LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
211  LIR_Opr r = LIR_OprFact::illegalOpr;
212  if (type == T_LONG) {
213    r = LIR_OprFact::longConst(x);
214  } else if (type == T_INT) {
215    r = LIR_OprFact::intConst(x);
216  } else {
217    ShouldNotReachHere();
218  }
219  return r;
220}
221
// Increment the counter cell at absolute address 'counter' by 'step'.
// The absolute address is first materialized in a register because it
// cannot be encoded directly in the memory operand.
void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}
228
// Add 'step' to the counter cell addressed by 'addr'.
// NOTE(review): the LIR_Address is passed through an LIR_Opr cast so the
// add is emitted as an in-memory update — verify against the s390
// LIR_Assembler's handling of address operands in lir_add.
void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
}
232
// Compare the int at [base + disp] with constant 'c'. The memory operand
// is loaded into the fixed scratch register Z_R1 first; 'info' attaches
// debug state to the load (e.g. for an implicit null check).
void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr scratch = FrameMap::Z_R1_opr;
  __ load(new LIR_Address(base, disp, T_INT), scratch, info);
  __ cmp(condition, scratch, c);
}
238
// Compare register 'reg' against memory at [base + disp] (constant displacement).
void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}
242
// Compare register 'reg' against memory at [base + disp] (register displacement).
void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, LIR_Opr disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}
246
247bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
248  if (tmp->is_valid()) {
249    if (is_power_of_2(c + 1)) {
250      __ move(left, tmp);
251      __ shift_left(left, log2_intptr(c + 1), left);
252      __ sub(left, tmp, result);
253      return true;
254    } else if (is_power_of_2(c - 1)) {
255      __ move(left, tmp);
256      __ shift_left(left, log2_intptr(c - 1), left);
257      __ add(left, tmp, result);
258      return true;
259    }
260  }
261  return false;
262}
263
// Store an outgoing call parameter at the given offset from the stack pointer.
void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::Z_SP_opr, in_bytes(offset_from_sp), type));
}
268
269//----------------------------------------------------------------------
270//             visitor functions
271//----------------------------------------------------------------------
272
// Emit LIR for an array element store: range check, null check,
// store check (for object arrays) and GC write barriers.
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  // A store check is needed for object stores unless the stored value is
  // statically known to be null and no profiling is requested.
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant(20); // 20-bit signed displacement limit.

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }
  if (needs_store_check) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // The CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different.
  CodeEmitInfo* range_check_info = state_for (x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // Emit array address setup early so it schedules better.
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
  if (value.result()->is_constant() && array_addr->index()->is_valid()) {
    // Constants cannot be stored with index register on ZARCH_64 (see LIR_Assembler::const2mem()).
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(array_addr), tmp);
    array_addr = new LIR_Address(tmp, x->elt_type());
  }

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      // Unsigned compare handles negative indices as well.
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // Range_check also does the null check.
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = LIR_OprFact::illegalOpr;

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Precise card mark: array_addr is the exact element address.
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    __ move(value.result(), array_addr, null_check_info);
  }
}
351
// Emit LIR for a monitorenter: lock the object's monitor slot.
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop.
  LIR_Opr lock = new_register(T_INT);

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for (x);
  }
  // This CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked).
  CodeEmitInfo* info = state_for (x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
                x->monitor_no(), info_for_exception, info);
}
372
// Emit LIR for a monitorexit. The object need not be loaded up front
// (monitor_exit reloads it via obj_temp from the monitor slot).
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  // Both hold non-oop values (stack-slot address / scratch).
  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}
384
385// _ineg, _lneg, _fneg, _dneg
386void LIRGenerator::do_NegateOp(NegateOp* x) {
387  LIRItem value(x->x(), this);
388  value.load_item();
389  LIR_Opr reg = rlock_result(x);
390  __ negate(value.result(), reg);
391}
392
393// for _fadd, _fmul, _fsub, _fdiv, _frem
394//     _dadd, _dmul, _dsub, _ddiv, _drem
395void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
396  LIRItem left(x->x(),  this);
397  LIRItem right(x->y(), this);
398  LIRItem* left_arg  = &left;
399  LIRItem* right_arg = &right;
400  assert(!left.is_stack(), "can't both be memory operands");
401  left.load_item();
402
403  if (right.is_register() || right.is_constant()) {
404    right.load_item();
405  } else {
406    right.dont_load_item();
407  }
408
409  if ((x->op() == Bytecodes::_frem) || (x->op() == Bytecodes::_drem)) {
410    address entry;
411    switch (x->op()) {
412    case Bytecodes::_frem:
413      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
414      break;
415    case Bytecodes::_drem:
416      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
417      break;
418    default:
419      ShouldNotReachHere();
420    }
421    LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
422    set_result(x, result);
423  } else {
424    LIR_Opr reg = rlock(x);
425    LIR_Opr tmp = LIR_OprFact::illegalOpr;
426    arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp(), tmp);
427    set_result(x, reg);
428  }
429}
430
// for _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    // Use shifts if divisor is a power of 2 otherwise use DSGR instruction.
    // Instruction: DSGR R1, R2
    // input : R1+1: dividend   (R1, R1+1 designate a register pair, R1 must be even)
    //         R2:   divisor
    //
    // output: R1+1: quotient
    //         R1:   remainder
    //
    // Register selection: R1:   Z_R10
    //                     R1+1: Z_R11
    //                     R2:   to be chosen by register allocator (linear scan)

    // R1, and R1+1 will be destroyed.

    LIRItem right(x->y(), this);
    LIRItem left(x->x() , this);   // Visit left second, so that the is_register test is valid.

    // Call state_for before load_item_force because state_for may
    // force the evaluation of other instructions that are needed for
    // correct debug info. Otherwise the live range of the fix
    // register might be too long.
    CodeEmitInfo* info = state_for (x);

    LIR_Opr result = rlock_result(x);
    LIR_Opr result_reg = result;
    LIR_Opr tmp = LIR_OprFact::illegalOpr;
    LIR_Opr divisor_opr = right.result();
    if (divisor_opr->is_constant() && is_power_of_2(divisor_opr->as_jlong())) {
      // Power-of-2 divisor: no fixed register pair needed, shifts suffice.
      left.load_item();
      right.dont_load_item();
    } else {
      left.load_item_force(ldivInOpr());
      right.load_item();

      // DSGR instruction needs register pair.
      if (x->op() == Bytecodes::_ldiv) {
        result_reg = ldivOutOpr();
        tmp        = lremOutOpr();
      } else {
        result_reg = lremOutOpr();
        tmp        = ldivOutOpr();
      }
    }

    if (!ImplicitDiv0Checks) {
      // Explicit division-by-zero check when implicit traps are disabled.
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
      // Idiv/irem cannot trap (passing info would generate an assertion).
      info = NULL;
    }

    // The LIR irem/idiv ops are reused for long operands here.
    if (x->op() == Bytecodes::_lrem) {
      __ irem(left.result(), right.result(), result_reg, tmp, info);
    } else if (x->op() == Bytecodes::_ldiv) {
      __ idiv(left.result(), right.result(), result_reg, tmp, info);
    } else {
      ShouldNotReachHere();
    }

    if (result_reg != result) {
      __ move(result_reg, result);
    }
  } else {
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    left.load_item();
    right.load_nonconstant(32); // 32-bit immediates can stay inlined.
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}
506
// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    // Use shifts if divisor is a power of 2 otherwise use DSGFR instruction.
    // Instruction: DSGFR R1, R2
    // input : R1+1: dividend   (R1, R1+1 designate a register pair, R1 must be even)
    //         R2:   divisor
    //
    // output: R1+1: quotient
    //         R1:   remainder
    //
    // Register selection: R1:   Z_R10
    //                     R1+1: Z_R11
    //                     R2:   To be chosen by register allocator (linear scan).

    // R1, and R1+1 will be destroyed.

    LIRItem right(x->y(), this);
    LIRItem left(x->x() , this);   // Visit left second, so that the is_register test is valid.

    // Call state_for before load_item_force because state_for may
    // force the evaluation of other instructions that are needed for
    // correct debug info. Otherwise the live range of the fix
    // register might be too long.
    CodeEmitInfo* info = state_for (x);

    LIR_Opr result = rlock_result(x);
    LIR_Opr result_reg = result;
    LIR_Opr tmp = LIR_OprFact::illegalOpr;
    LIR_Opr divisor_opr = right.result();
    if (divisor_opr->is_constant() && is_power_of_2(divisor_opr->as_jint())) {
      // Power-of-2 divisor: no fixed register pair needed, shifts suffice.
      left.load_item();
      right.dont_load_item();
    } else {
      left.load_item_force(divInOpr());
      right.load_item();

      // DSGFR instruction needs register pair.
      if (x->op() == Bytecodes::_idiv) {
        result_reg = divOutOpr();
        tmp        = remOutOpr();
      } else {
        result_reg = remOutOpr();
        tmp        = divOutOpr();
      }
    }

    if (!ImplicitDiv0Checks) {
      // Explicit division-by-zero check when implicit traps are disabled.
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
      // Idiv/irem cannot trap (passing info would generate an assertion).
      info = NULL;
    }

    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), result_reg, tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), result_reg, tmp, info);
    } else {
      ShouldNotReachHere();
    }

    if (result_reg != result) {
      __ move(result_reg, result);
    }
  } else {
    LIRItem left(x->x(),  this);
    LIRItem right(x->y(), this);
    LIRItem* left_arg = &left;
    LIRItem* right_arg = &right;
    if (x->is_commutative() && left.is_stack() && right.is_register()) {
      // swap them if left is real stack (or cached) and right is real register(not cached)
      left_arg = &right;
      right_arg = &left;
    }

    left_arg->load_item();

    // Do not need to load right, as we can handle stack and constants.
    if (x->op() == Bytecodes::_imul) {
      bool use_tmp = false;
      if (right_arg->is_constant()) {
        int iconst = right_arg->get_jint_constant();
        // Strength reduction of 2**n +/- 1 multipliers needs a temp
        // register (see strength_reduce_multiply).
        if (is_power_of_2(iconst - 1) || is_power_of_2(iconst + 1)) {
          use_tmp = true;
        }
      }
      right_arg->dont_load_item();
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      if (use_tmp) {
        tmp = new_register(T_INT);
      }
      rlock_result(x);

      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    } else {
      right_arg->dont_load_item();
      rlock_result(x);
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    }
  }
}
610
611void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
612  // If an operand with use count 1 is the left operand, then it is
613  // likely that no move for 2-operand-LIR-form is necessary.
614  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
615    x->swap_operands();
616  }
617
618  ValueTag tag = x->type()->tag();
619  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
620  switch (tag) {
621    case floatTag:
622    case doubleTag: do_ArithmeticOp_FPU(x);  return;
623    case longTag:   do_ArithmeticOp_Long(x); return;
624    case intTag:    do_ArithmeticOp_Int(x);  return;
625  }
626  ShouldNotReachHere();
627}
628
629// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
630void LIRGenerator::do_ShiftOp(ShiftOp* x) {
631  // count must always be in rcx
632  LIRItem value(x->x(), this);
633  LIRItem count(x->y(), this);
634
635  ValueTag elemType = x->type()->tag();
636  bool must_load_count = !count.is_constant();
637  if (must_load_count) {
638    count.load_item();
639  } else {
640    count.dont_load_item();
641  }
642  value.load_item();
643  LIR_Opr reg = rlock_result(x);
644
645  shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr);
646}
647
648// _iand, _land, _ior, _lor, _ixor, _lxor
649void LIRGenerator::do_LogicOp(LogicOp* x) {
650  // IF an operand with use count 1 is the left operand, then it is
651  // likely that no move for 2-operand-LIR-form is necessary.
652  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
653    x->swap_operands();
654  }
655
656  LIRItem left(x->x(), this);
657  LIRItem right(x->y(), this);
658
659  left.load_item();
660  right.load_nonconstant(32);
661  LIR_Opr reg = rlock_result(x);
662
663  logic_op(x->op(), reg, left.result(), right.result());
664}
665
// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
// Three-way comparison producing -1/0/+1 in the result register.
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);
  if (x->x()->type()->is_float_kind()) {
    // The final flag selects the unordered result: fcmpl/dcmpl yield -1
    // on unordered, fcmpg/dcmpg yield +1.
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    ShouldNotReachHere();
  }
}
682
// Emit LIR for Unsafe compareAndSwap{Int,Long,Object}. Produces a
// boolean result (1 on success, 0 on failure) and, for object fields,
// the necessary GC barriers around the CAS.
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // Value to compare with field.
  LIRItem val   (x->argument_at(3), this);  // Replace field with val if matches cmp.

  // Get address of field.
  obj.load_item();
  offset.load_nonconstant(20); // 20-bit signed displacement limit.
  cmp.load_item();
  val.load_item();

  LIR_Opr addr = new_pointer_register();
  LIR_Address* a;
  if (offset.result()->is_constant()) {
    assert(Immediate::is_simm20(offset.result()->as_jlong()), "should have been loaded into register");
    a = new LIR_Address(obj.result(),
                        offset.result()->as_jlong(),
                        as_BasicType(type));
  } else {
    a = new LIR_Address(obj.result(),
                        offset.result(),
                        0,
                        as_BasicType(type));
  }
  __ leal(LIR_OprFact::address(a), addr);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }

  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == objectType) {
    // cas_obj needs two temps (e.g. for compressed-oop encoding).
    __ cas_obj(addr, cmp.result(), val.result(), new_register(T_OBJECT), new_register(T_OBJECT));
  } else if (type == intType) {
    __ cas_int(addr, cmp.result(), val.result(), ill, ill);
  } else if (type == longType) {
    __ cas_long(addr, cmp.result(), val.result(), ill, ill);
  } else {
    ShouldNotReachHere();
  }
  // Generate conditional move of boolean result.
  LIR_Opr result = rlock_result(x);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, as_BasicType(type));
  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Precise card mark since could either be object or array
    post_barrier(addr, val.result());
  }
}
735
736
// Emit LIR for math intrinsics: abs/sqrt inline, the transcendental
// functions (sin, cos, tan, log, log10, exp, pow) via runtime calls.
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs:
    case vmIntrinsics::_dsqrt: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      switch (x->id()) {
      case vmIntrinsics::_dsqrt: {
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      case vmIntrinsics::_dabs: {
        __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      }
      break;
    }
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dlog: // fall through
    case vmIntrinsics::_dsin: // fall through
    case vmIntrinsics::_dtan: // fall through
    case vmIntrinsics::_dcos: // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      // One-argument transcendentals: select the SharedRuntime entry.
      address runtime_entry = NULL;
      switch (x->id()) {
      case vmIntrinsics::_dsin:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
        break;
      case vmIntrinsics::_dcos:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
        break;
      case vmIntrinsics::_dtan:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
        break;
      case vmIntrinsics::_dlog:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
        break;
      case vmIntrinsics::_dlog10:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
        break;
      case vmIntrinsics::_dexp:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
        break;
      default:
        ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case vmIntrinsics::_dpow: {
      // pow takes two arguments.
      assert(x->number_of_arguments() == 2, "wrong type");
      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
  }
}
803
// Emit LIR for the System.arraycopy intrinsic. All operands are forced
// into the C calling-convention argument registers because the arraycopy
// stubs are emitted as calls.
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Copy stubs possibly call C code, e.g. G1 barriers, so we need to reserve room
  // for the C ABI (see frame::z_abi_160).
  BasicTypeArray sig; // Empty signature is precise enough.
  frame_map()->c_calling_convention(&sig);

  // Make all state_for calls early since they can emit code.
  CodeEmitInfo* info = state_for (x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // Operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call).

  src.load_item_force     (FrameMap::as_oop_opr(Z_ARG1));
  src_pos.load_item_force (FrameMap::as_opr(Z_ARG2));
  dst.load_item_force     (FrameMap::as_oop_opr(Z_ARG3));
  dst_pos.load_item_force (FrameMap::as_opr(Z_ARG4));
  length.load_item_force  (FrameMap::as_opr(Z_ARG5));

  LIR_Opr tmp =            FrameMap::as_opr(Z_R7);

  set_no_result(x);

  // Determine statically known properties (flags) and element type.
  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
               length.result(), tmp, expected_type, flags, info); // does add_safepoint
}
842
843// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
844// _i2b, _i2c, _i2s
845void LIRGenerator::do_Convert(Convert* x) {
846  LIRItem value(x->value(), this);
847
848  value.load_item();
849  LIR_Opr reg = rlock_result(x);
850  __ convert(x->op(), value.result(), reg);
851}
852
// Allocates a new object instance. The fast path is emitted inline by
// new_instance(); the slow path calls the runtime and can deoptimize,
// hence the fixed result and temp registers.
void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  // This instruction can be deoptimized in the slow path : use
  // Z_R2 as result register.
  const LIR_Opr reg = result_register_for (x->type());

  CodeEmitInfo* info = state_for (x, x->state());
  // Fixed temp and klass registers for the allocation sequence.
  LIR_Opr tmp1 = FrameMap::Z_R12_oop_opr;
  LIR_Opr tmp2 = FrameMap::Z_R13_oop_opr;
  LIR_Opr tmp3 = reg;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;
  LIR_Opr klass_reg = FrameMap::Z_R11_metadata_opr;
  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3, tmp4, klass_reg, info);
  // Copy the fixed register into a virtual register for downstream uses.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
870
// Allocates a primitive (type) array. Fast-path allocation is emitted by
// allocate_array(); NewTypeArrayStub covers the slow path.
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // state_for may emit code, so call it before operand setup.
  CodeEmitInfo* info = state_for (x, x->state());

  LIRItem length(x->length(), this);
  length.load_item();

  // Fixed result, temp, and klass registers for the allocation sequence.
  LIR_Opr reg = result_register_for (x->type());
  LIR_Opr tmp1 = FrameMap::Z_R12_oop_opr;
  LIR_Opr tmp2 = FrameMap::Z_R13_oop_opr;
  LIR_Opr tmp3 = reg;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;
  LIR_Opr klass_reg = FrameMap::Z_R11_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  // Materialize the type-array klass constant for the element type.
  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  // Transfer the fixed result register into a virtual register.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
894
// Allocates an object (reference) array, patching the klass if it is not
// yet loaded. Bails out of the compile if the obj-array klass itself
// could not be created.
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for (x, x->state());
  // In case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for (x, x->state_before());
  }

  LIRItem length(x->length(), this);
  length.load_item();

  // Fixed result, temp, and klass registers for the allocation sequence.
  const LIR_Opr reg = result_register_for (x->type());
  LIR_Opr tmp1 = FrameMap::Z_R12_oop_opr;
  LIR_Opr tmp2 = FrameMap::Z_R13_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;
  LIR_Opr klass_reg = FrameMap::Z_R11_metadata_opr;
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    // Abandon this compilation; the interpreter will raise the OOME.
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  // Transfer the fixed result register into a virtual register.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
927
// Allocates a multi-dimensional array via the Runtime1 new_multi_array
// stub. The dimension sizes are passed through an on-stack varargs area.
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  // Wrap each dimension value in a LIRItem; evaluation happens below.
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for (x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for (x, x->state());

  // Store each dimension (as a jint) into the on-stack varargs buffer.
  i = dims->length();
  while (--i >= 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant(32);
    // FrameMap::_reserved_argument_area_size includes the dimensions varargs, because
    // it's initialized to hir()->max_stack() when the FrameMap is created.
    store_stack_parameter(size->result(), in_ByteSize(i*sizeof(jint) + FrameMap::first_available_sp_in_frame));
  }

  // Runtime call arguments: klass, rank, and the address of the varargs area.
  LIR_Opr klass_reg = FrameMap::Z_R3_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::Z_R4_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::Z_R5_opr;
  __ leal(LIR_OprFact::address(new LIR_Address(FrameMap::Z_SP_opr, FrameMap::first_available_sp_in_frame, T_INT)),
          varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for (x->type());
  __ call_runtime(Runtime1::entry_for (Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  // Transfer the fixed result register into a virtual register.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
979
980void LIRGenerator::do_BlockBegin(BlockBegin* x) {
981  // Nothing to do.
982}
983
// Emits a checkcast. Depending on the kind of check, the slow path either
// throws ClassCastException / IncompatibleClassChangeError via a stub, or
// deoptimizes (invokespecial receiver check).
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // Must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization).
    patching_info = state_for (x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  // Pick the slow-path stub matching the kind of check.
  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    // Ordinary checkcast: failure throws ClassCastException with the
    // offending object.
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  __ checkcast(reg, obj.result(), x->klass(),
               tmp1, tmp2, tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}
1021
1022
1023void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1024  LIRItem obj(x->obj(), this);
1025  CodeEmitInfo* patching_info = NULL;
1026  if (!x->klass()->is_loaded() || PatchALot) {
1027    patching_info = state_for (x, x->state_before());
1028  }
1029  // Ensure the result register is not the input register because the
1030  // result is initialized before the patching safepoint.
1031  obj.load_item();
1032  LIR_Opr out_reg = rlock_result(x);
1033  LIR_Opr tmp1 = new_register(objectType);
1034  LIR_Opr tmp2 = new_register(objectType);
1035  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1036  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
1037                x->direct_compare(), patching_info,
1038                x->profiled_method(), x->profiled_bci());
1039}
1040
1041
1042void LIRGenerator::do_If (If* x) {
1043  assert(x->number_of_sux() == 2, "inconsistency");
1044  ValueTag tag = x->x()->type()->tag();
1045  bool is_safepoint = x->is_safepoint();
1046
1047  If::Condition cond = x->cond();
1048
1049  LIRItem xitem(x->x(), this);
1050  LIRItem yitem(x->y(), this);
1051  LIRItem* xin = &xitem;
1052  LIRItem* yin = &yitem;
1053
1054  if (tag == longTag) {
1055    // For longs, only conditions "eql", "neq", "lss", "geq" are valid;
1056    // mirror for other conditions.
1057    if (cond == If::gtr || cond == If::leq) {
1058      cond = Instruction::mirror(cond);
1059      xin = &yitem;
1060      yin = &xitem;
1061    }
1062    xin->set_destroys_register();
1063  }
1064  xin->load_item();
1065  // TODO: don't load long constants != 0L
1066  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1067    // inline long zero
1068    yin->dont_load_item();
1069  } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
1070    // Longs cannot handle constants at right side.
1071    yin->load_item();
1072  } else {
1073    yin->dont_load_item();
1074  }
1075
1076  // Add safepoint before generating condition code so it can be recomputed.
1077  if (x->is_safepoint()) {
1078    // Increment backedge counter if needed.
1079    increment_backedge_counter(state_for (x, x->state_before()), x->profiled_bci());
1080    // Use safepoint_poll_register() instead of LIR_OprFact::illegalOpr.
1081    __ safepoint(safepoint_poll_register(), state_for (x, x->state_before()));
1082  }
1083  set_no_result(x);
1084
1085  LIR_Opr left = xin->result();
1086  LIR_Opr right = yin->result();
1087  __ cmp(lir_cond(cond), left, right);
1088  // Generate branch profiling. Profiling code doesn't kill flags.
1089  profile_branch(x, cond);
1090  move_to_phi(x->state());
1091  if (x->x()->type()->is_float_kind()) {
1092    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
1093  } else {
1094    __ branch(lir_cond(cond), right->type(), x->tsux());
1095  }
1096  assert(x->default_sux() == x->fsux(), "wrong destination above");
1097  __ jump(x->default_sux());
1098}
1099
// Returns the current thread as a pointer-typed operand (s390 keeps the
// thread in the dedicated register Z_thread).
LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(Z_thread);
}
1103
1104void LIRGenerator::trace_block_entry(BlockBegin* block) {
1105  __ move(LIR_OprFact::intConst(block->block_id()), FrameMap::Z_R2_opr);
1106  LIR_OprList* args = new LIR_OprList(1);
1107  args->append(FrameMap::Z_R2_opr);
1108  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
1109  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
1110}
1111
// Stores a value to a volatile field: a plain LIR store is emitted.
// NOTE(review): no explicit fence is added here — presumably the shared
// code / s390 memory model makes that sufficient; confirm with callers.
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ store(value, address, info);
}
1116
// Loads a volatile field: a plain LIR load is emitted.
// NOTE(review): no explicit fence is added here — presumably the shared
// code / s390 memory model makes that sufficient; confirm with callers.
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  __ load(address, result, info);
}
1121
1122
// Unsafe put support: stores 'data' of the given type at (src + offset).
// For oop stores the GC pre/post write barriers bracket the move.
// NOTE(review): is_volatile is ignored here — TODO confirm a plain store
// suffices for the volatile case on this platform.
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  if (is_obj) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(data, addr);
    assert(src->is_register(), "must be register");
    // Seems to be a precise address.
    post_barrier(LIR_OprFact::address(addr), data);
  } else {
    // Primitive store: no barriers needed.
    __ move(data, addr);
  }
}
1139
1140
// Unsafe get support: loads a value of the given type from (src + offset)
// into dst.
// NOTE(review): is_volatile is ignored here — TODO confirm a plain load
// suffices for the volatile case on this platform.
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  __ load(addr, dst);
}
1146
// Unsafe getAndAdd support: emits an atomic fetch-and-add (LIR xadd).
// Only the add form on primitive (non-oop) types is supported here, as
// the assert documents.
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  assert (x->is_add() && type != T_ARRAY && type != T_OBJECT, "not supported");
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  value.load_item();
  // Offsets fitting into 20 signed bits can stay as an immediate
  // displacement; larger ones are loaded into a register.
  off.load_nonconstant(20);

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  LIR_Opr offset = off.result();

  // Build either a base+displacement or a base+index address.
  LIR_Address* addr;
  if (offset->is_constant()) {
    assert(Immediate::is_simm20(offset->as_jlong()), "should have been loaded into register");
    addr = new LIR_Address(src.result(), offset->as_jlong(), type);
  } else {
    addr = new LIR_Address(src.result(), offset, type);
  }

  __ xadd(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
}
1172
// CRC32 intrinsics. _updateCRC32 updates the checksum with a single value
// entirely in registers; _updateBytesCRC32 / _updateByteBufferCRC32 call
// the StubRoutines::updateBytesCRC32() leaf stub over a memory range.
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "or should not be here");
  LIR_Opr result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // Registers destroyed by update_crc32.
      crc.set_destroys_register();
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      // updateBytes takes a byte[] (array base offset must be added);
      // updateByteBuffer takes a raw address (offset 0).
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      // Fold a constant offset into the displacement; otherwise keep it
      // as an index register (widened to 64 bit below).
      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

      if (index->is_valid()) {
        // Sign-extend the 32-bit index for use in a 64-bit address.
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }

      LIR_Address* a = new LIR_Address(base_op, index, offset, T_BYTE);

      // C calling convention of the stub: (int crc, address buf, int len).
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for (x->type());

      LIR_Opr arg1 = cc->at(0);
      LIR_Opr arg2 = cc->at(1);
      LIR_Opr arg3 = cc->at(2);

      // CCallingConventionRequiresIntsAsLongs
      crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
      __ leal(LIR_OprFact::address(a), arg2);
      load_int_as_long(gen()->lir(), len, arg3);

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}
1241
// CRC32C intrinsics are not implemented on this platform.
void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  Unimplemented();
}
1245
1246void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
1247  assert(x->number_of_arguments() == 3, "wrong type");
1248  assert(UseFMA, "Needs FMA instructions support.");
1249  LIRItem value(x->argument_at(0), this);
1250  LIRItem value1(x->argument_at(1), this);
1251  LIRItem value2(x->argument_at(2), this);
1252
1253  value2.set_destroys_register();
1254
1255  value.load_item();
1256  value1.load_item();
1257  value2.load_item();
1258
1259  LIR_Opr calc_input = value.result();
1260  LIR_Opr calc_input1 = value1.result();
1261  LIR_Opr calc_input2 = value2.result();
1262  LIR_Opr calc_result = rlock_result(x);
1263
1264  switch (x->id()) {
1265  case vmIntrinsics::_fmaD:   __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
1266  case vmIntrinsics::_fmaF:   __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
1267  default:                    ShouldNotReachHere();
1268  }
1269}
1270
// The vectorizedMismatch intrinsic is not implemented on this platform;
// reaching here is a configuration error.
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}
1274
1275