/*
 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "opto/compile.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/matcher.hpp"
#include "prims/methodHandles.hpp"
#include "registerSaver_s390.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif

#include <ucontext.h>

#define BLOCK_COMMENT(str) block_comment(str)
#define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")

// Move 32-bit register if destination and source are different.
void MacroAssembler::lr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_lr(rd, rs); }
}

// Move register if destination and source are different.
void MacroAssembler::lgr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_lgr(rd, rs); }
}

// Zero-extend 32-bit register into 64-bit register if destination and source are different.
void MacroAssembler::llgfr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_llgfr(rd, rs); }
}

// Move float register if destination and source are different.
void MacroAssembler::ldr_if_needed(FloatRegister rd, FloatRegister rs) {
  if (rs != rd) { z_ldr(rd, rs); }
}

// Move integer register if destination and source are different.
// It is assumed that shorter-than-int types are already
// appropriately sign-extended.
void MacroAssembler::move_reg_if_needed(Register dst, BasicType dst_type, Register src,
                                        BasicType src_type) {
  assert((dst_type != T_FLOAT) && (dst_type != T_DOUBLE), "use move_freg for float types");
  assert((src_type != T_FLOAT) && (src_type != T_DOUBLE), "use move_freg for float types");

  if (dst_type == src_type) {
    lgr_if_needed(dst, src); // Just move all 64 bits.
    return;
  }

  switch (dst_type) {
    // Do not support these types for now.
    //  case T_BOOLEAN:
    case T_BYTE:  // signed byte
      switch (src_type) {
        case T_INT:
          z_lgbr(dst, src);
          break;
        default:
          ShouldNotReachHere();
      }
      return;

    case T_CHAR:
    case T_SHORT:
      switch (src_type) {
        case T_INT:
          if (dst_type == T_CHAR) {
            z_llghr(dst, src);
          } else {
            z_lghr(dst, src);
          }
          break;
        default:
          ShouldNotReachHere();
      }
      return;

    case T_INT:
      switch (src_type) {
        case T_BOOLEAN:
        case T_BYTE:
        case T_CHAR:
        case T_SHORT:
        case T_INT:
        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lr_if_needed(dst, src);
          // llgfr_if_needed(dst, src);  // zero-extend (in case we need to find a bug).
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
    case T_LONG:
      switch (src_type) {
        case T_BOOLEAN:
        case T_BYTE:
        case T_CHAR:
        case T_SHORT:
        case T_INT:
          z_lgfr(dst, src); // sign extension
          return;

        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lgr_if_needed(dst, src);
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
      return;
    case T_OBJECT:
    case T_ARRAY:
    case T_VOID:
    case T_ADDRESS:
      switch (src_type) {
        // These types don't make sense to be converted to pointers:
        //      case T_BOOLEAN:
        //      case T_BYTE:
        //      case T_CHAR:
        //      case T_SHORT:

        case T_INT:
          z_llgfr(dst, src); // zero extension
          return;

        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lgr_if_needed(dst, src);
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
      return;
    default:
      assert(false, "non-integer dst type");
      return;
  }
}

// Move float register if destination and source are different.
void MacroAssembler::move_freg_if_needed(FloatRegister dst, BasicType dst_type,
                                         FloatRegister src, BasicType src_type) {
  assert((dst_type == T_FLOAT) || (dst_type == T_DOUBLE), "use move_reg for int types");
  assert((src_type == T_FLOAT) || (src_type == T_DOUBLE), "use move_reg for int types");
  if (dst_type == src_type) {
    ldr_if_needed(dst, src); // Just move all 64 bits.
  } else {
    switch (dst_type) {
      case T_FLOAT:
        assert(src_type == T_DOUBLE, "invalid float type combination");
        z_ledbr(dst, src);
        return;
      case T_DOUBLE:
        assert(src_type == T_FLOAT, "invalid float type combination");
        z_ldebr(dst, src);
        return;
      default:
        assert(false, "non-float dst type");
        return;
    }
  }
}

// Optimized emitter for reg to mem operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
//
// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::freg2mem_opt(FloatRegister reg,
                                  int64_t       disp,
                                  Register      index,
                                  Register      base,
                                  void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                                  void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                                  Register      scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) {   // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp);  // Restore base.
            }
          }
        } else {   // scratch == Z_R0
          z_lgr(scratch, base);
          add2reg(base, disp);
          (this->*classic)(reg, 0, index, base);
          z_lgr(base, scratch);      // Restore base.
        }
      }
    }
  }
}

void MacroAssembler::freg2mem_opt(FloatRegister reg, const Address &a, bool is_double) {
  if (is_double) {
    freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std));
  } else {
    freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stey), CLASSIC_FFUN(z_ste));
  }
}

// Optimized emitter for mem to reg operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
//
// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::mem2freg_opt(FloatRegister reg,
                                  int64_t       disp,
                                  Register      index,
                                  Register      base,
                                  void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                                  void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                                  Register      scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) {   // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp);  // Restore base.
            }
          }
        } else {   // scratch == Z_R0
          z_lgr(scratch, base);
          add2reg(base, disp);
          (this->*classic)(reg, 0, index, base);
          z_lgr(base, scratch);      // Restore base.
        }
      }
    }
  }
}

void MacroAssembler::mem2freg_opt(FloatRegister reg, const Address &a, bool is_double) {
  if (is_double) {
    mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ldy), CLASSIC_FFUN(z_ld));
  } else {
    mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ley), CLASSIC_FFUN(z_le));
  }
}

// Optimized emitter for reg to mem operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
//
// Don't rely on register locking, instead pass a scratch register
// (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::reg2mem_opt(Register reg,
                                 int64_t  disp,
                                 Register index,
                                 Register base,
                                 void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                                 void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
                                 Register scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) {   // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp);  // Restore base.
            }
          }
        } else {   // scratch == Z_R0
          if ((scratch == reg) || (scratch == base) || (reg == base)) {
            (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
          } else {
            z_lgr(scratch, base);
            add2reg(base, disp);
            (this->*classic)(reg, 0, index, base);
            z_lgr(base, scratch);    // Restore base.
          }
        }
      }
    }
  }
}

int MacroAssembler::reg2mem_opt(Register reg, const Address &a, bool is_double) {
  int store_offset = offset();
  if (is_double) {
    reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_stg), CLASSIC_IFUN(z_stg));
  } else {
    reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st));
  }
  return store_offset;
}

// Optimized emitter for mem to reg operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) will be used as work register where possible.
void MacroAssembler::mem2reg_opt(Register reg,
                                 int64_t  disp,
                                 Register index,
                                 Register base,
                                 void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                                 void (MacroAssembler::*classic)(Register, int64_t, Register, Register)) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if ((reg == index) && (reg == base)) {
        z_sllg(reg, reg, 1);
        add2reg(reg, disp);
        (this->*classic)(reg, 0, noreg, reg);
      } else if ((reg == index) && (reg != Z_R0)) {
        add2reg(reg, disp);
        (this->*classic)(reg, 0, reg, base);
      } else if (reg == base) {
        add2reg(reg, disp);
        (this->*classic)(reg, 0, index, reg);
      } else if (reg != Z_R0) {
        add2reg(reg, disp, base);
        (this->*classic)(reg, 0, index, reg);
      } else { // reg == Z_R0 && reg != base here
        add2reg(base, disp);
        (this->*classic)(reg, 0, index, base);
        add2reg(base, -disp);
      }
    }
  }
}

void MacroAssembler::mem2reg_opt(Register reg, const Address &a, bool is_double) {
  if (is_double) {
    z_lg(reg, a);
  } else {
    mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_ly), CLASSIC_IFUN(z_l));
  }
}

void MacroAssembler::mem2reg_signed_opt(Register reg, const Address &a) {
  mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_lgf), CLASSIC_IFUN(z_lgf));
}

void MacroAssembler::and_imm(Register r, long mask,
                             Register tmp /* = Z_R0 */,
                             bool wide    /* = false */) {
  assert(wide || Immediate::is_simm32(mask), "mask value too large");

  if (!wide) {
    z_nilf(r, mask);
    return;
  }

  assert(r != tmp, " need a different temporary register !");
  load_const_optimized(tmp, mask);
  z_ngr(r, tmp);
}

// Calculate the 1's complement.
// Note: The condition code is neither preserved nor correctly set by this code!!!
// Note: (wide == false) does not protect the high order half of the target register
//       from alteration. It only serves as optimization hint for 32-bit results.
void MacroAssembler::not_(Register r1, Register r2, bool wide) {

  if ((r2 == noreg) || (r2 == r1)) { // Calc 1's complement in place.
    z_xilf(r1, -1);
    if (wide) {
      z_xihf(r1, -1);
    }
  } else { // Distinct src and dst registers.
    if (VM_Version::has_DistinctOpnds()) {
      load_const_optimized(r1, -1);
      z_xgrk(r1, r2, r1);
    } else {
      if (wide) {
        z_lgr(r1, r2);
        z_xilf(r1, -1);
        z_xihf(r1, -1);
      } else {
        z_lr(r1, r2);
        z_xilf(r1, -1);
      }
    }
  }
}

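// Create a 64-bit mask with 1s exactly in bit positions lBitPos..rBitPos
// (bit 0 is the leftmost/most significant bit, see the asserts below).
// For example, create_mask(48, 63) == 0x000000000000ffffUL and
// create_mask(0, 31) == 0xffffffff00000000UL.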
unsigned long MacroAssembler::create_mask(int lBitPos, int rBitPos) {
  assert(lBitPos >=  0,      "zero is  leftmost bit position");
  assert(rBitPos <= 63,      "63   is rightmost bit position");
  assert(lBitPos <= rBitPos, "inverted selection interval");
  return (lBitPos == 0 ? (unsigned long)(-1L) : ((1UL<<(63-lBitPos+1))-1)) & (~((1UL<<(63-rBitPos))-1));
}

// Helper function for the "Rotate_then_<logicalOP>" emitters.
// Rotate src, then mask register contents such that only bits in range survive.
// For oneBits == false, all bits not in range are set to 0. Useful for deleting all bits outside range.
// For oneBits == true,  all bits not in range are set to 1. Useful for preserving all bits outside range.
// The caller must ensure that the selected range only contains bits with defined value.
void MacroAssembler::rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
                                      int nRotate, bool src32bit, bool dst32bit, bool oneBits) {
  assert(!(dst32bit && lBitPos < 32), "selection interval out of range for int destination");
  bool sll4rll = (nRotate >= 0) && (nRotate <= (63-rBitPos)); // Substitute SLL(G) for RLL(G).
  bool srl4rll = (nRotate <  0) && (-nRotate <= lBitPos);     // Substitute SRL(G) for RLL(G).
  //  Pre-determine which parts of dst will be zero after shift/rotate.
  bool llZero  =  sll4rll && (nRotate >= 16);
  bool lhZero  = (sll4rll && (nRotate >= 32)) || (srl4rll && (nRotate <= -48));
  bool lfZero  = llZero && lhZero;
  bool hlZero  = (sll4rll && (nRotate >= 48)) || (srl4rll && (nRotate <= -32));
  bool hhZero  =                                 (srl4rll && (nRotate <= -16));
  bool hfZero  = hlZero && hhZero;

  // Rotate then mask src operand.
  // If oneBits == true,  all bits outside selected range are 1s.
  // If oneBits == false, all bits outside selected range are 0s.
  if (src32bit) {   // There might be garbage in the upper 32 bits which will get masked away.
    if (dst32bit) {
      z_rll(dst, src, nRotate);   // Copy and rotate, upper half of reg remains undisturbed.
    } else {
      if      (sll4rll) { z_sllg(dst, src,  nRotate); }
      else if (srl4rll) { z_srlg(dst, src, -nRotate); }
      else              { z_rllg(dst, src,  nRotate); }
    }
  } else {
    if      (sll4rll) { z_sllg(dst, src,  nRotate); }
    else if (srl4rll) { z_srlg(dst, src, -nRotate); }
    else              { z_rllg(dst, src,  nRotate); }
  }

  unsigned long  range_mask    = create_mask(lBitPos, rBitPos);
  unsigned int   range_mask_h  = (unsigned int)(range_mask >> 32);
  unsigned int   range_mask_l  = (unsigned int)range_mask;
  unsigned short range_mask_hh = (unsigned short)(range_mask >> 48);
  unsigned short range_mask_hl = (unsigned short)(range_mask >> 32);
  unsigned short range_mask_lh = (unsigned short)(range_mask >> 16);
  unsigned short range_mask_ll = (unsigned short)range_mask;
  // Works for z9 and newer H/W.
  if (oneBits) {
    if ((~range_mask_l) != 0)                { z_oilf(dst, ~range_mask_l); } // All bits outside range become 1s.
    if (((~range_mask_h) != 0) && !dst32bit) { z_oihf(dst, ~range_mask_h); }
  } else {
    // All bits outside range become 0s.
    if (((~range_mask_l) != 0) &&              !lfZero) {
      z_nilf(dst, range_mask_l);
    }
    if (((~range_mask_h) != 0) && !dst32bit && !hfZero) {
      z_nihf(dst, range_mask_h);
    }
  }
}

// Rotate src, then insert selected range from rotated src into dst.
// Clear dst before, if requested.
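// Example: rotate_then_insert(dst, src, 48, 63, 0, true) copies the low 16 bits
// of src into the low 16 bits of dst and clears all other bits of dst.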
void MacroAssembler::rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos,
                                        int nRotate, bool clear_dst) {
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_risbg(dst, src, lBitPos, rBitPos, nRotate, clear_dst); // Rotate, then insert selected, clear the rest.
}

// Rotate src, then and selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos,
                                     int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                       // For rnsbg, pretend it's an unsigned value.
  z_rnsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then and selected.
}

// Rotate src, then or selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_or(Register dst, Register src,  int  lBitPos,  int  rBitPos,
                                    int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                       // For rosbg, pretend it's an unsigned value.
  z_rosbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then or selected.
}

// Rotate src, then xor selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_xor(Register dst, Register src,  int  lBitPos,  int  rBitPos,
                                     int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                       // For rxsbg, pretend it's an unsigned value.
  z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.
}

void MacroAssembler::add64(Register r1, RegisterOrConstant inc) {
  if (inc.is_register()) {
    z_agr(r1, inc.as_register());
  } else { // constant
    intptr_t imm = inc.as_constant();
    add2reg(r1, imm);
  }
}

// Helper function to multiply the 64bit contents of a register by a 16bit constant.
// The optimization tries to avoid the mghi instruction, since it uses the FPU for
// calculation and is thus rather slow.
//
// There is no handling for special cases, e.g. cval==0 or cval==1.
//
// Returns len of generated code block.
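// Example: cval == 10 (binary 1010) is emitted as two shifts plus an add:
//   work = rval << 1; rval = rval << 3; rval += work.
// Constants that are not the sum of two powers of 2 fall back to mghi.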
unsigned int MacroAssembler::mul_reg64_const16(Register rval, Register work, int cval) {
  int block_start = offset();

  bool sign_flip = cval < 0;
  cval = sign_flip ? -cval : cval;

  BLOCK_COMMENT("Reg64*Con16 {");

  int bit1 = cval & -cval;
  if (bit1 == cval) {
    z_sllg(rval, rval, exact_log2(bit1));
    if (sign_flip) { z_lcgr(rval, rval); }
  } else {
    int bit2 = (cval-bit1) & -(cval-bit1);
    if ((bit1+bit2) == cval) {
      z_sllg(work, rval, exact_log2(bit1));
      z_sllg(rval, rval, exact_log2(bit2));
      z_agr(rval, work);
      if (sign_flip) { z_lcgr(rval, rval); }
    } else {
      if (sign_flip) { z_mghi(rval, -cval); }
      else           { z_mghi(rval,  cval); }
    }
  }
  BLOCK_COMMENT("} Reg64*Con16");

  int block_end = offset();
  return block_end - block_start;
}

// Generic operation r1 := r2 + imm.
//
// Should produce the best code for each supported CPU version.
// r2 == noreg yields r1 := r1 + imm
// imm == 0 emits either no instruction or r1 := r2 !
// NOTES: 1) Don't use this function where fixed sized
//           instruction sequences are required!!!
//        2) Don't use this function if condition code
//           setting is required!
//        3) Despite being declared as int64_t, the parameter imm
//           must be a simm_32 value (= signed 32-bit integer).
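// Example: with PreferLAoverADD and r2 != Z_R0, add2reg(r1, 8, r2) emits a
// single LA r1,8(r2); displacements that only fit 20 bits use LAY instead.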
void MacroAssembler::add2reg(Register r1, int64_t imm, Register r2) {
  assert(Immediate::is_simm32(imm), "probably an implicit conversion went wrong");

  if (r2 == noreg) { r2 = r1; }

  // Handle special case imm == 0.
  if (imm == 0) {
    lgr_if_needed(r1, r2);
    // Nothing else to do.
    return;
  }

  if (!PreferLAoverADD || (r2 == Z_R0)) {
    bool distinctOpnds = VM_Version::has_DistinctOpnds();

    // Can we encode imm in 16 bits signed?
    if (Immediate::is_simm16(imm)) {
      if (r1 == r2) {
        z_aghi(r1, imm);
        return;
      }
      if (distinctOpnds) {
        z_aghik(r1, r2, imm);
        return;
      }
      z_lgr(r1, r2);
      z_aghi(r1, imm);
      return;
    }
  } else {
    // Can we encode imm in 12 bits unsigned?
    if (Displacement::is_shortDisp(imm)) {
      z_la(r1, imm, r2);
      return;
    }
    // Can we encode imm in 20 bits signed?
    if (Displacement::is_validDisp(imm)) {
      // Always use LAY instruction, so we don't need the tmp register.
      z_lay(r1, imm, r2);
      return;
    }

  }

  // Can handle it (all possible values) with long immediates.
  lgr_if_needed(r1, r2);
  z_agfi(r1, imm);
}

// Generic operation r := b + x + d
//
// Addition of several operands with address generation semantics - sort of:
//  - no restriction on the registers. Any register will do for any operand.
//  - x == noreg: operand will be disregarded.
//  - b == noreg: will use (contents of) result reg as operand (r := r + d).
//  - x == Z_R0:  just disregard
//  - b == Z_R0:  use as operand. This is not address generation semantics!!!
//
// The same restrictions as on add2reg() are valid!!!
void MacroAssembler::add2reg_with_index(Register r, int64_t d, Register x, Register b) {
  assert(Immediate::is_simm32(d), "probably an implicit conversion went wrong");

  if (x == noreg) { x = Z_R0; }
  if (b == noreg) { b = r; }

  // Handle special case x == R0.
  if (x == Z_R0) {
    // Can simply add the immediate value to the base register.
    add2reg(r, d, b);
    return;
  }

  if (!PreferLAoverADD || (b == Z_R0)) {
    bool distinctOpnds = VM_Version::has_DistinctOpnds();
    // Handle special case d == 0.
    if (d == 0) {
      if (b == x)        { z_sllg(r, b, 1); return; }
      if (r == x)        { z_agr(r, b);     return; }
      if (r == b)        { z_agr(r, x);     return; }
      if (distinctOpnds) { z_agrk(r, x, b); return; }
      z_lgr(r, b);
      z_agr(r, x);
    } else {
      if (x == b)             { z_sllg(r, x, 1); }
      else if (r == x)        { z_agr(r, b); }
      else if (r == b)        { z_agr(r, x); }
      else if (distinctOpnds) { z_agrk(r, x, b); }
      else {
        z_lgr(r, b);
        z_agr(r, x);
      }
      add2reg(r, d);
    }
  } else {
    // Can we encode imm in 12 bits unsigned?
    if (Displacement::is_shortDisp(d)) {
      z_la(r, d, x, b);
      return;
    }
    // Can we encode imm in 20 bits signed?
    if (Displacement::is_validDisp(d)) {
      z_lay(r, d, x, b);
      return;
    }
    z_la(r, 0, x, b);
    add2reg(r, d);
  }
}

// Generic emitter (32bit) for direct memory increment.
// For optimal code, do not specify Z_R0 as temp register.
void MacroAssembler::add2mem_32(const Address &a, int64_t imm, Register tmp) {
  if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
    z_asi(a, imm);
  } else {
    z_lgf(tmp, a);
    add2reg(tmp, imm);
    z_st(tmp, a);
  }
}

void MacroAssembler::add2mem_64(const Address &a, int64_t imm, Register tmp) {
  if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
    z_agsi(a, imm);
  } else {
    z_lg(tmp, a);
    add2reg(tmp, imm);
    z_stg(tmp, a);
  }
}

void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
  switch (size_in_bytes) {
    case  8: z_lg(dst, src); break;
    case  4: is_signed ? z_lgf(dst, src) : z_llgf(dst, src); break;
    case  2: is_signed ? z_lgh(dst, src) : z_llgh(dst, src); break;
    case  1: is_signed ? z_lgb(dst, src) : z_llgc(dst, src); break;
    default: ShouldNotReachHere();
  }
}

void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
  switch (size_in_bytes) {
    case  8: z_stg(src, dst); break;
    case  4: z_st(src, dst); break;
    case  2: z_sth(src, dst); break;
    case  1: z_stc(src, dst); break;
    default: ShouldNotReachHere();
  }
}

// Split a si20 offset (20bit, signed) into an ui12 offset (12bit, unsigned) and
// a high-order summand in register tmp.
//
// return value: <  0: No split required, si20 actually has property uimm12.
//               >= 0: Split performed. Use return value as uimm12 displacement and
//                     tmp as index register.
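// Example: si20_offset == 0x54321 yields a high-order summand of 0x54000 and a
// returned uimm12 displacement of 0x321 (0x54000 + 0x321 == 0x54321).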
int MacroAssembler::split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate) {
  assert(Immediate::is_simm20(si20_offset), "sanity");
  int lg_off = (int)si20_offset &  0x0fff; // Punch out low-order 12 bits, always positive.
  int ll_off = (int)si20_offset & ~0x0fff; // Force low-order 12 bits to zero.
  assert((Displacement::is_shortDisp(si20_offset) && (ll_off == 0)) ||
         !Displacement::is_shortDisp(si20_offset), "unexpected offset values");
  assert((lg_off+ll_off) == si20_offset, "offset splitup error");

  Register work = accumulate? Z_R0 : tmp;

  if (fixed_codelen) {          // Len of code = 10 = 4 + 6.
    z_lghi(work, ll_off>>12);   // Implicit sign extension.
    z_slag(work, work, 12);
  } else {                      // Len of code = 0..10.
    if (ll_off == 0) { return -1; }
    // ll_off has 8 significant bits (at most) plus sign.
    if ((ll_off & 0x0000f000) == 0) {    // Non-zero bits only in upper halfbyte.
      z_llilh(work, ll_off >> 16);
      if (ll_off < 0) {                  // Sign-extension required.
        z_lgfr(work, work);
      }
    } else {
      if ((ll_off & 0x000f0000) == 0) {  // Non-zero bits only in lower halfbyte.
        z_llill(work, ll_off);
      } else {                           // Non-zero bits in both halfbytes.
        z_lghi(work, ll_off>>12);        // Implicit sign extension.
        z_slag(work, work, 12);
      }
    }
  }
  if (accumulate) { z_algr(tmp, work); } // len of code += 4
  return lg_off;
}

void MacroAssembler::load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
  if (Displacement::is_validDisp(si20)) {
    z_ley(t, si20, a);
  } else {
    // Fixed_codelen = true is a simple way to ensure that the size of load_float_largeoffset
    // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
    // pool loads).
    bool accumulate    = true;
    bool fixed_codelen = true;
    Register work;

    if (fixed_codelen) {
      z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
    } else {
      accumulate = (a == tmp);
    }
    work = tmp;

    int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
    if (disp12 < 0) {
      z_le(t, si20, work);
    } else {
      if (accumulate) {
        z_le(t, disp12, work);
      } else {
        z_le(t, disp12, work, a);
      }
    }
  }
}

void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
  if (Displacement::is_validDisp(si20)) {
    z_ldy(t, si20, a);
  } else {
    // Fixed_codelen = true is a simple way to ensure that the size of load_double_largeoffset
    // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
    // pool loads).
    bool accumulate    = true;
    bool fixed_codelen = true;
    Register work;

    if (fixed_codelen) {
      z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
    } else {
      accumulate = (a == tmp);
    }
    work = tmp;

    int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
    if (disp12 < 0) {
      z_ld(t, si20, work);
    } else {
      if (accumulate) {
        z_ld(t, disp12, work);
      } else {
        z_ld(t, disp12, work, a);
      }
    }
  }
}

// PCrelative TOC access.
// Returns distance (in bytes) from current position to start of consts section.
// Returns 0 (zero) if no consts section exists or if it has size zero.
long MacroAssembler::toc_distance() {
  CodeSection* cs = code()->consts();
  return (long)((cs != NULL) ? cs->start()-pc() : 0);
}

// The implementation on x86/sparc assumes that the constant and instruction sections
// are adjacent, but this doesn't hold here. Two special situations may occur that we
// must be able to handle:
//   1. The const section may be located apart from the inst section.
//   2. The const section may be empty.
// In both cases, we use the const section's start address to compute the "TOC".
// This seems to occur only temporarily; in the final step we always seem to end up
// with the pc-relative variant.
//
// The PC-relative offset could be +/-2**32 -> use long for disp.
// Furthermore, it makes no sense to have special code for
// adjacent const and inst sections.
void MacroAssembler::load_toc(Register Rtoc) {
  // Simply use distance from start of const section (should be patched in the end).
  long disp = toc_distance();

  RelocationHolder rspec = internal_word_Relocation::spec(pc() + disp);
  relocate(rspec);
  z_larl(Rtoc, RelAddr::pcrel_off32(disp));  // Offset is in halfwords.
}

// PCrelative TOC access.
// Load from anywhere pcrelative (with relocation of load instr)
void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {
  address          pc             = this->pc();
  ptrdiff_t        total_distance = dataLocation - pc;
  RelocationHolder rspec          = internal_word_Relocation::spec(dataLocation);

  assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
  assert(total_distance != 0, "sanity");

  // Some extra safety net.
  if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "too far away");
  }

  (this)->relocate(rspec, relocInfo::pcrel_addr_format);
  z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
}


// PCrelative TOC access.
// Load from anywhere pcrelative (with relocation of load instr)
// loaded addr has to be relocated when added to constant pool.
void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {
  address          pc             = this->pc();
  ptrdiff_t        total_distance = addrLocation - pc;
  RelocationHolder rspec          = internal_word_Relocation::spec(addrLocation);

  assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");

  // Some extra safety net.
  if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "too far away");
  }

  (this)->relocate(rspec, relocInfo::pcrel_addr_format);
  z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
}

// Generic operation: load a value from memory and test.
// CondCode indicates the sign (<0, ==0, >0) of the loaded value.
void MacroAssembler::load_and_test_byte(Register dst, const Address &a) {
  z_lb(dst, a);
  z_ltr(dst, dst);
}

void MacroAssembler::load_and_test_short(Register dst, const Address &a) {
  int64_t disp = a.disp20();
  if (Displacement::is_shortDisp(disp)) {
    z_lh(dst, a);
  } else if (Displacement::is_longDisp(disp)) {
    z_lhy(dst, a);
  } else {
    guarantee(false, "displacement out of range");
  }
  z_ltr(dst, dst);
}

void MacroAssembler::load_and_test_int(Register dst, const Address &a) {
  z_lt(dst, a);
}

void MacroAssembler::load_and_test_int2long(Register dst, const Address &a) {
  z_ltgf(dst, a);
}

void MacroAssembler::load_and_test_long(Register dst, const Address &a) {
  z_ltg(dst, a);
}

// Test a bit in memory.
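// Bit 0 denotes the least significant bit of the 32-bit word at a. Since
// z/Architecture stores the word big-endian, bit 0 lives in the byte at
// disp+3, which is why the byte offset decreases as the bit position grows.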
void MacroAssembler::testbit(const Address &a, unsigned int bit) {
  assert(a.index() == noreg, "no index reg allowed in testbit");
  if (bit <= 7) {
    z_tm(a.disp() + 3, a.base(), 1 << bit);
  } else if (bit <= 15) {
    z_tm(a.disp() + 2, a.base(), 1 << (bit - 8));
  } else if (bit <= 23) {
    z_tm(a.disp() + 1, a.base(), 1 << (bit - 16));
  } else if (bit <= 31) {
    z_tm(a.disp() + 0, a.base(), 1 << (bit - 24));
  } else {
    ShouldNotReachHere();
  }
}

// Test a bit in a register. Result is reflected in CC.
void MacroAssembler::testbit(Register r, unsigned int bitPos) {
  if (bitPos < 16) {
    z_tmll(r, 1U<<bitPos);
  } else if (bitPos < 32) {
    z_tmlh(r, 1U<<(bitPos-16));
  } else if (bitPos < 48) {
    z_tmhl(r, 1U<<(bitPos-32));
  } else if (bitPos < 64) {
    z_tmhh(r, 1U<<(bitPos-48));
  } else {
    ShouldNotReachHere();
  }
}

// Clear a register, i.e. load const zero into reg.
// Return len (in bytes) of generated instruction(s).
// whole_reg: Clear 64 bits if true, 32 bits otherwise.
// set_cc:    Use instruction that sets the condition code, if true.
int MacroAssembler::clear_reg(Register r, bool whole_reg, bool set_cc) {
  unsigned int start_off = offset();
  if (whole_reg) {
    set_cc ? z_xgr(r, r) : z_laz(r, 0, Z_R0);
  } else {  // Only 32bit register.
    set_cc ? z_xr(r, r) : z_lhi(r, 0);
  }
  return offset() - start_off;
}

#ifdef ASSERT
int MacroAssembler::preset_reg(Register r, unsigned long pattern, int pattern_len) {
  switch (pattern_len) {
    case 1:
      pattern = (pattern & 0x000000ff)  | ((pattern & 0x000000ff)<<8);
    case 2:
      pattern = (pattern & 0x0000ffff)  | ((pattern & 0x0000ffff)<<16);
    case 4:
      pattern = (pattern & 0xffffffffL) | ((pattern & 0xffffffffL)<<32);
    case 8:
      return load_const_optimized_rtn_len(r, pattern, true);
      break;
    default:
      guarantee(false, "preset_reg: bad len");
  }
  return 0;
}
#endif

// addr: Address descriptor of memory to clear. The index register will not be used!
// size: Number of bytes to clear.
//    !!! DO NOT USE THIS FUNCTION FOR ATOMIC MEMORY CLEARING !!!
//    !!! Use store_const() instead                           !!!
void MacroAssembler::clear_mem(const Address& addr, unsigned size) {
  guarantee(size <= 256, "MacroAssembler::clear_mem: size too large");

  if (size == 1) {
    z_mvi(addr, 0);
    return;
  }

  switch (size) {
    case 2: z_mvhhi(addr, 0);
      return;
    case 4: z_mvhi(addr, 0);
      return;
    case 8: z_mvghi(addr, 0);
      return;
    default: ; // Fallthru to xc.
  }

  z_xc(addr, size, addr);
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) z_nop();
}

// Special version for non-relocatable code if required alignment
// is larger than CodeEntryAlignment.
void MacroAssembler::align_address(int modulus) {
  while ((uintptr_t)pc() % modulus != 0) z_nop();
}

Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         Register temp_reg,
                                         int64_t extra_slot_offset) {
  // On Z, we can have index and disp in an Address. So don't call argument_offset,
  // which issues an unnecessary add instruction.
  int stackElementSize = Interpreter::stackElementSize;
  int64_t offset = extra_slot_offset * stackElementSize;
  const Register argbase = Z_esp;
  if (arg_slot.is_constant()) {
    offset += arg_slot.as_constant() * stackElementSize;
    return Address(argbase, offset);
  }
  // else
  assert(temp_reg != noreg, "must specify");
  assert(temp_reg != Z_ARG1, "base and index are conflicting");
  z_sllg(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); // tempreg = arg_slot << 3
  return Address(argbase, temp_reg, offset);
}


//===================================================================
//===   START   C O N S T A N T S   I N   C O D E   S T R E A M   ===
//===================================================================
//===           P A T C H A B L E   C O N S T A N T S             ===
//===================================================================


//---------------------------------------------------
//  Load (patchable) constant into register
//---------------------------------------------------


// Load absolute address (and try to optimize).
//   Note: This method is usable only for position-fixed code,
//         referring to a position-fixed target location.
//         If not so, relocations and patching must be used.
void MacroAssembler::load_absolute_address(Register d, address addr) {
  assert(addr != NULL, "should not happen");
  BLOCK_COMMENT("load_absolute_address:");
  if (addr == NULL) {
    z_larl(d, pc()); // Dummy emit for size calc.
    return;
  }

  if (RelAddr::is_in_range_of_RelAddr32(addr, pc())) {
    z_larl(d, addr);
    return;
  }

  load_const_optimized(d, (long)addr);
}

// Load a 64bit constant.
// Patchable code sequence, but not atomically patchable.
// Make sure to keep code size constant -> no value-dependent optimizations.
// Do not kill condition code.
void MacroAssembler::load_const(Register t, long x) {
  Assembler::z_iihf(t, (int)(x >> 32));
  Assembler::z_iilf(t, (int)(x & 0xffffffff));
}

// Load a 32bit constant into a 64bit register, sign-extend or zero-extend.
// Patchable code sequence, but not atomically patchable.
// Make sure to keep code size constant -> no value-dependent optimizations.
// Do not kill condition code.
void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend) {
  if (sign_extend) { Assembler::z_lgfi(t, x); }
  else             { Assembler::z_llilf(t, x); }
}

// Load narrow oop constant, no decompression.
void MacroAssembler::load_narrow_oop(Register t, narrowOop a) {
  assert(UseCompressedOops, "must be on to call this method");
  load_const_32to64(t, a, false /*sign_extend*/);
}

// Load narrow klass constant, compression required.
void MacroAssembler::load_narrow_klass(Register t, Klass* k) {
  assert(UseCompressedClassPointers, "must be on to call this method");
  narrowKlass encoded_k = Klass::encode_klass(k);
  load_const_32to64(t, encoded_k, false /*sign_extend*/);
}

//------------------------------------------------------
//  Compare (patchable) constant with register.
//------------------------------------------------------

// Compare narrow oop in reg with narrow oop constant, no decompression.
void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) {
  assert(UseCompressedOops, "must be on to call this method");

  Assembler::z_clfi(oop1, oop2);
}

// Compare narrow klass in reg with narrow klass constant, no decompression.
void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) {
  assert(UseCompressedClassPointers, "must be on to call this method");
  narrowKlass encoded_k = Klass::encode_klass(klass2);

  Assembler::z_clfi(klass1, encoded_k);
}

//----------------------------------------------------------
//  Check which kind of load_constant we have here.
//----------------------------------------------------------

// Detection of CPU version dependent load_const sequence.
// The detection is valid only for code sequences generated by load_const,
// not load_const_optimized.
bool MacroAssembler::is_load_const(address a) {
  unsigned long inst1, inst2;
  unsigned int  len1,  len2;

  len1 = get_instruction(a, &inst1);
  len2 = get_instruction(a + len1, &inst2);

  return is_z_iihf(inst1) && is_z_iilf(inst2);
}

// Detection of CPU version dependent load_const_32to64 sequence.
// Mostly used for narrow oops and narrow Klass pointers.
// The detection is valid only for code sequences generated by load_const_32to64.
bool MacroAssembler::is_load_const_32to64(address pos) {
  unsigned long inst1, inst2;
  unsigned int len1;

  len1 = get_instruction(pos, &inst1);
  return is_z_llilf(inst1);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
bool MacroAssembler::is_compare_immediate32(address pos) {
  return is_equal(pos, CLFI_ZOPC, RIL_MASK);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
bool MacroAssembler::is_compare_immediate_narrow_oop(address pos) {
  return is_compare_immediate32(pos);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_klass.
bool MacroAssembler::is_compare_immediate_narrow_klass(address pos) {
  return is_compare_immediate32(pos);
}

//-----------------------------------
//  patch the load_constant
//-----------------------------------

// CPU-version dependent patching of load_const.
void MacroAssembler::patch_const(address a, long x) {
  assert(is_load_const(a), "not a load of a constant");
  set_imm32((address)a, (int) ((x >> 32) & 0xffffffff));
  set_imm32((address)(a + 6), (int)(x & 0xffffffff));
}

// Patching the value of CPU version dependent load_const_32to64 sequence.
// The passed ptr MUST be in compressed format!
int MacroAssembler::patch_load_const_32to64(address pos, int64_t np) {
  assert(is_load_const_32to64(pos), "not a load of a narrow ptr (oop or klass)");

  set_imm32(pos, np);
  return 6;
}

// Patching the value of CPU version dependent compare_immediate_narrow sequence.
// The passed ptr MUST be in compressed format!
int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {
  assert(is_compare_immediate32(pos), "not a compressed ptr compare");

  set_imm32(pos, np);
  return 6;
}

// Patching the immediate value of CPU version dependent load_narrow_oop sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
  assert(UseCompressedOops, "Can only patch compressed oops");

  narrowOop no = oopDesc::encode_heap_oop(o);
  return patch_load_const_32to64(pos, no);
}

// Patching the immediate value of CPU version dependent load_narrow_klass sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
  assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");

  narrowKlass nk = Klass::encode_klass(k);
  return patch_load_const_32to64(pos, nk);
}

// Patching the immediate value of CPU version dependent compare_immediate_narrow_oop sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
  assert(UseCompressedOops, "Can only patch compressed oops");

  narrowOop no = oopDesc::encode_heap_oop(o);
  return patch_compare_immediate_32(pos, no);
}

// Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {
  assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");

  narrowKlass nk = Klass::encode_klass(k);
  return patch_compare_immediate_32(pos, nk);
}

//------------------------------------------------------------------------
//  Extract the constant from a load_constant instruction stream.
//------------------------------------------------------------------------

// Get constant from a load_const sequence.
long MacroAssembler::get_const(address a) {
  assert(is_load_const(a), "not a load of a constant");
  unsigned long x;
  x =  (((unsigned long) (get_imm32(a,0) & 0xffffffff)) << 32);
  x |= (((unsigned long) (get_imm32(a,1) & 0xffffffff)));
  return (long) x;
}

//--------------------------------------
//  Store a constant in memory.
//--------------------------------------

// General emitter to move a constant to memory.
// The store is atomic.
//  o Address must be given in RS format (no index register)
//  o Displacement should be 12bit unsigned for efficiency. 20bit signed also supported.
//  o Constant can be 1, 2, 4, or 8 bytes, signed or unsigned.
//  o Memory slot can be 1, 2, 4, or 8 bytes, signed or unsigned.
//  o Memory slot must be at least as wide as constant, will assert otherwise.
//  o Signed constants will sign-extend, unsigned constants will zero-extend to slot width.
int MacroAssembler::store_const(const Address &dest, long imm,
                                unsigned int lm, unsigned int lc,
                                Register scratch) {
  int64_t  disp = dest.disp();
  Register base = dest.base();
  assert(!dest.has_index(), "not supported");
  assert((lm==1)||(lm==2)||(lm==4)||(lm==8), "memory   length not supported");
  assert((lc==1)||(lc==2)||(lc==4)||(lc==8), "constant length not supported");
  assert(lm>=lc, "memory slot too small");
  assert(lc==8 || Immediate::is_simm(imm, lc*8), "const out of range");
  assert(Displacement::is_validDisp(disp), "displacement out of range");

  bool is_shortDisp = Displacement::is_shortDisp(disp);
  int store_offset = -1;

  // For target len == 1 it's easy.
  if (lm == 1) {
    store_offset = offset();
    if (is_shortDisp) {
      z_mvi(disp, base, imm);
      return store_offset;
    } else {
      z_mviy(disp, base, imm);
      return store_offset;
    }
  }

  // All the "good stuff" takes an unsigned displacement.
  if (is_shortDisp) {
    // NOTE: Cannot use clear_mem for imm==0, because it is not atomic.

    store_offset = offset();
    switch (lm) {
      case 2:  // Lc == 1 handled correctly here, even for unsigned. Instruction does no widening.
        z_mvhhi(disp, base, imm);
        return store_offset;
      case 4:
        if (Immediate::is_simm16(imm)) {
          z_mvhi(disp, base, imm);
          return store_offset;
        }
        break;
      case 8:
        if (Immediate::is_simm16(imm)) {
          z_mvghi(disp, base, imm);
          return store_offset;
        }
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }

  //  Can't optimize, so load value and store it.
  guarantee(scratch != noreg, " need a scratch register here !");
  if (imm != 0) {
    load_const_optimized(scratch, imm);  // Preserves CC anyway.
  } else {
    // Leave CC alone!!
    (void) clear_reg(scratch, true, false); // Indicate unused result.
  }

  store_offset = offset();
  if (is_shortDisp) {
    switch (lm) {
      case 2:
        z_sth(scratch, disp, Z_R0, base);
        return store_offset;
      case 4:
        z_st(scratch, disp, Z_R0, base);
        return store_offset;
      case 8:
        z_stg(scratch, disp, Z_R0, base);
        return store_offset;
      default:
        ShouldNotReachHere();
        break;
    }
  } else {
    switch (lm) {
      case 2:
        z_sthy(scratch, disp, Z_R0, base);
        return store_offset;
      case 4:
        z_sty(scratch, disp, Z_R0, base);
        return store_offset;
      case 8:
        z_stg(scratch, disp, Z_R0, base);
        return store_offset;
      default:
        ShouldNotReachHere();
        break;
    }
  }
  return -1; // should not reach here
}

//===================================================================
//===     N O T   P A T C H A B L E   C O N S T A N T S           ===
//===================================================================

// Load constant x into register t with a fast instruction sequence
// depending on the bits in x. Preserves CC under all circumstances.
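// Example: x == 0x0000000500000006 needs two halfword-immediate instructions,
// z_llihl(t, 5) followed by z_iill(t, 6), for a total length of 8 bytes.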
1438int MacroAssembler::load_const_optimized_rtn_len(Register t, long x, bool emit) {
1439  if (x == 0) {
1440    int len;
1441    if (emit) {
1442      len = clear_reg(t, true, false);
1443    } else {
1444      len = 4;
1445    }
1446    return len;
1447  }
1448
1449  if (Immediate::is_simm16(x)) {
1450    if (emit) { z_lghi(t, x); }
1451    return 4;
1452  }
1453
1454  // 64 bit value: | part1 | part2 | part3 | part4 |
1455  // At least one part is not zero!
1456  int part1 = ((x >> 32) & 0xffff0000) >> 16;
1457  int part2 = (x >> 32) & 0x0000ffff;
1458  int part3 = (x & 0xffff0000) >> 16;
1459  int part4 = (x & 0x0000ffff);
1460
1461  // Lower word only (unsigned).
1462  if ((part1 == 0) && (part2 == 0)) {
1463    if (part3 == 0) {
1464      if (emit) z_llill(t, part4);
1465      return 4;
1466    }
1467    if (part4 == 0) {
1468      if (emit) z_llilh(t, part3);
1469      return 4;
1470    }
1471    if (emit) z_llilf(t, (int)(x & 0xffffffff));
1472    return 6;
1473  }
1474
1475  // Upper word only.
1476  if ((part3 == 0) && (part4 == 0)) {
1477    if (part1 == 0) {
1478      if (emit) z_llihl(t, part2);
1479      return 4;
1480    }
1481    if (part2 == 0) {
1482      if (emit) z_llihh(t, part1);
1483      return 4;
1484    }
1485    if (emit) z_llihf(t, (int)(x >> 32));
1486    return 6;
1487  }
1488
1489  // Lower word only (signed).
1490  if ((part1 == 0x0000ffff) && (part2 == 0x0000ffff) && ((part3 & 0x00008000) != 0)) {
1491    if (emit) z_lgfi(t, (int)(x & 0xffffffff));
1492    return 6;
1493  }
1494
1495  int len = 0;
1496
1497  if ((part1 == 0) || (part2 == 0)) {
1498    if (part1 == 0) {
1499      if (emit) z_llihl(t, part2);
1500      len += 4;
1501    } else {
1502      if (emit) z_llihh(t, part1);
1503      len += 4;
1504    }
1505  } else {
1506    if (emit) z_llihf(t, (int)(x >> 32));
1507    len += 6;
1508  }
1509
1510  if ((part3 == 0) || (part4 == 0)) {
1511    if (part3 == 0) {
1512      if (emit) z_iill(t, part4);
1513      len += 4;
1514    } else {
1515      if (emit) z_iilh(t, part3);
1516      len += 4;
1517    }
1518  } else {
1519    if (emit) z_iilf(t, (int)(x & 0xffffffff));
1520    len += 6;
1521  }
1522  return len;
1523}
1524
1525//=====================================================================
1526//===     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1527//=====================================================================
1528
1529// Note: In the worst case, one of the scratch registers is destroyed!!!
1530void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1531  // Right operand is constant.
1532  if (x2.is_constant()) {
1533    jlong value = x2.as_constant();
1534    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true);
1535    return;
1536  }
1537
1538  // Right operand is in register.
1539  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true);
1540}
1541
1542// Note: In the worst case, one of the scratch registers is destroyed!!!
1543void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1544  // Right operand is constant.
1545  if (x2.is_constant()) {
1546    jlong value = x2.as_constant();
1547    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false);
1548    return;
1549  }
1550
1551  // Right operand is in register.
1552  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false);
1553}
1554
1555// Note: In the worst case, one of the scratch registers is destroyed!!!
1556void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1557  // Right operand is constant.
1558  if (x2.is_constant()) {
1559    jlong value = x2.as_constant();
1560    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true);
1561    return;
1562  }
1563
1564  // Right operand is in register.
1565  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true);
1566}
1567
1568void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1569  // Right operand is constant.
1570  if (x2.is_constant()) {
1571    jlong value = x2.as_constant();
1572    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false);
1573    return;
1574  }
1575
1576  // Right operand is in register.
1577  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false);
1578}
1579
1580// Generate an optimal branch to the branch target.
1581// Optimal means that a relative branch (brc or brcl) is used if the
1582// branch distance is short enough. Loading the target address into a
1583// register and branching via reg is used as fallback only.
1584//
1585// Used registers:
1586//   Z_R1 - work reg. Holds branch target address.
1587//          Used in fallback case only.
1588//
1589// This version of branch_optimized is good for cases where the target address is known
1590// and constant, i.e. is never changed (no relocation, no patching).
1591void MacroAssembler::branch_optimized(Assembler::branch_condition cond, address branch_addr) {
1592  address branch_origin = pc();
1593
1594  if (RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1595    z_brc(cond, branch_addr);
1596  } else if (RelAddr::is_in_range_of_RelAddr32(branch_addr, branch_origin)) {
1597    z_brcl(cond, branch_addr);
1598  } else {
1599    load_const_optimized(Z_R1, branch_addr);  // CC must not get killed by load_const_optimized.
1600    z_bcr(cond, Z_R1);
1601  }
1602}
1603
1604// This version of branch_optimized is good for cases where the target address
1605// is potentially not yet known at the time the code is emitted.
1606//
// One very common case is a branch to an unbound label, which is handled here.
// The caller might know (or hope) that the branch distance is short enough
// to be encoded in a 16bit relative address. In that case, the caller passes a
// NearLabel branch_target.
1611// Care must be taken with unbound labels. Each call to target(label) creates
1612// an entry in the patch queue for that label to patch all references of the label
1613// once it gets bound. Those recorded patch locations must be patchable. Otherwise,
1614// an assertion fires at patch time.
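// Typical use (illustration): a NearLabel promises a short branch distance and
// thus always yields the 4-byte brc form:
//   NearLabel done;
//   branch_optimized(bcondEqual, done);
//   ...   // code within the 16-bit relative reach
//   bind(done);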
1615void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& branch_target) {
1616  if (branch_target.is_bound()) {
1617    address branch_addr = target(branch_target);
1618    branch_optimized(cond, branch_addr);
1619  } else if (branch_target.is_near()) {
1620    z_brc(cond, branch_target);  // Caller assures that the target will be in range for z_brc.
1621  } else {
1622    z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
1623  }
1624}
1625
1626// Generate an optimal compare and branch to the branch target.
1627// Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1628// branch distance is short enough. Loading the target address into a
1629// register and branching via reg is used as fallback only.
1630//
1631// Input:
1632//   r1 - left compare operand
1633//   r2 - right compare operand
1634void MacroAssembler::compare_and_branch_optimized(Register r1,
1635                                                  Register r2,
1636                                                  Assembler::branch_condition cond,
1637                                                  address  branch_addr,
1638                                                  bool     len64,
1639                                                  bool     has_sign) {
  unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);
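  // casenum: 0 = signed 32 bit, 1 = unsigned 32 bit, 2 = signed 64 bit, 3 = unsigned 64 bit.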
1641
1642  address branch_origin = pc();
1643  if (VM_Version::has_CompareBranch() && RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1644    switch (casenum) {
      case 0: z_crj(  r1, r2, cond, branch_addr); break;
      case 1: z_clrj( r1, r2, cond, branch_addr); break;
      case 2: z_cgrj( r1, r2, cond, branch_addr); break;
      case 3: z_clgrj(r1, r2, cond, branch_addr); break;
1649      default: ShouldNotReachHere(); break;
1650    }
1651  } else {
1652    switch (casenum) {
1653      case 0: z_cr( r1, r2); break;
1654      case 1: z_clr(r1, r2); break;
1655      case 2: z_cgr(r1, r2); break;
1656      case 3: z_clgr(r1, r2); break;
1657      default: ShouldNotReachHere(); break;
1658    }
1659    branch_optimized(cond, branch_addr);
1660  }
1661}
1662
1663// Generate an optimal compare and branch to the branch target.
1664// Optimal means that a relative branch (clgij, brc or brcl) is used if the
1665// branch distance is short enough. Loading the target address into a
1666// register and branching via reg is used as fallback only.
1667//
1668// Input:
1669//   r1 - left compare operand (in register)
1670//   x2 - right compare operand (immediate)
1671void MacroAssembler::compare_and_branch_optimized(Register r1,
1672                                                  jlong    x2,
1673                                                  Assembler::branch_condition cond,
1674                                                  Label&   branch_target,
1675                                                  bool     len64,
1676                                                  bool     has_sign) {
1677  address      branch_origin = pc();
1678  bool         x2_imm8       = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
1679  bool         is_RelAddr16  = branch_target.is_near() ||
1680                               (branch_target.is_bound() &&
1681                                RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
  unsigned int casenum       = (len64 ? 2 : 0) + (has_sign ? 0 : 1);
1683
1684  if (VM_Version::has_CompareBranch() && is_RelAddr16 && x2_imm8) {
1685    switch (casenum) {
1686      case 0: z_cij( r1, x2, cond, branch_target); break;
1687      case 1: z_clij(r1, x2, cond, branch_target); break;
1688      case 2: z_cgij(r1, x2, cond, branch_target); break;
1689      case 3: z_clgij(r1, x2, cond, branch_target); break;
1690      default: ShouldNotReachHere(); break;
1691    }
1692    return;
1693  }
1694
1695  if (x2 == 0) {
1696    switch (casenum) {
1697      case 0: z_ltr(r1, r1); break;
1698      case 1: z_ltr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1699      case 2: z_ltgr(r1, r1); break;
1700      case 3: z_ltgr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1701      default: ShouldNotReachHere(); break;
1702    }
1703  } else {
1704    if ((has_sign && Immediate::is_simm16(x2)) || (!has_sign && Immediate::is_uimm(x2, 15))) {
1705      switch (casenum) {
1706        case 0: z_chi(r1, x2); break;
1707        case 1: z_chi(r1, x2); break; // positive immediate < 2**15
1708        case 2: z_cghi(r1, x2); break;
1709        case 3: z_cghi(r1, x2); break; // positive immediate < 2**15
1710        default: break;
1711      }
1712    } else if ( (has_sign && Immediate::is_simm32(x2)) || (!has_sign && Immediate::is_uimm32(x2)) ) {
1713      switch (casenum) {
1714        case 0: z_cfi( r1, x2); break;
1715        case 1: z_clfi(r1, x2); break;
1716        case 2: z_cgfi(r1, x2); break;
1717        case 3: z_clgfi(r1, x2); break;
1718        default: ShouldNotReachHere(); break;
1719      }
1720    } else {
1721      // No instruction with immediate operand possible, so load into register.
1722      Register scratch = (r1 != Z_R0) ? Z_R0 : Z_R1;
1723      load_const_optimized(scratch, x2);
1724      switch (casenum) {
1725        case 0: z_cr( r1, scratch); break;
1726        case 1: z_clr(r1, scratch); break;
1727        case 2: z_cgr(r1, scratch); break;
1728        case 3: z_clgr(r1, scratch); break;
1729        default: ShouldNotReachHere(); break;
1730      }
1731    }
1732  }
1733  branch_optimized(cond, branch_target);
1734}
1735
1736// Generate an optimal compare and branch to the branch target.
1737// Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1738// branch distance is short enough. Loading the target address into a
1739// register and branching via reg is used as fallback only.
1740//
1741// Input:
1742//   r1 - left compare operand
1743//   r2 - right compare operand
1744void MacroAssembler::compare_and_branch_optimized(Register r1,
1745                                                  Register r2,
1746                                                  Assembler::branch_condition cond,
1747                                                  Label&   branch_target,
1748                                                  bool     len64,
1749                                                  bool     has_sign) {
1750  unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);
1751
1752  if (branch_target.is_bound()) {
1753    address branch_addr = target(branch_target);
1754    compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
1755  } else {
1756    if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
1757      switch (casenum) {
1758        case 0: z_crj(  r1, r2, cond, branch_target); break;
1759        case 1: z_clrj( r1, r2, cond, branch_target); break;
1760        case 2: z_cgrj( r1, r2, cond, branch_target); break;
1761        case 3: z_clgrj(r1, r2, cond, branch_target); break;
1762        default: ShouldNotReachHere(); break;
1763      }
1764    } else {
1765      switch (casenum) {
1766        case 0: z_cr( r1, r2); break;
1767        case 1: z_clr(r1, r2); break;
1768        case 2: z_cgr(r1, r2); break;
1769        case 3: z_clgr(r1, r2); break;
1770        default: ShouldNotReachHere(); break;
1771      }
1772      branch_optimized(cond, branch_target);
1773    }
1774  }
1775}
1776
1777//===========================================================================
1778//===   END     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1779//===========================================================================
1780
1781AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
1782  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1783  int index = oop_recorder()->allocate_metadata_index(obj);
1784  RelocationHolder rspec = metadata_Relocation::spec(index);
1785  return AddressLiteral((address)obj, rspec);
1786}
1787
1788AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
1789  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1790  int index = oop_recorder()->find_index(obj);
1791  RelocationHolder rspec = metadata_Relocation::spec(index);
1792  return AddressLiteral((address)obj, rspec);
1793}
1794
1795AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
1796  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1797  int oop_index = oop_recorder()->allocate_oop_index(obj);
1798  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1799}
1800
1801AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
1802  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1803  int oop_index = oop_recorder()->find_index(obj);
1804  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1805}
1806
1807// NOTE: destroys r
1808void MacroAssembler::c2bool(Register r, Register t) {
1809  z_lcr(t, r);   // t = -r
1810  z_or(r, t);    // r = -r OR r
1811  z_srl(r, 31);  // Yields 0 if r was 0, 1 otherwise.
1812}
1813
1814RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
1815                                                      Register tmp,
1816                                                      int offset) {
1817  intptr_t value = *delayed_value_addr;
1818  if (value != 0) {
1819    return RegisterOrConstant(value + offset);
1820  }
1821
1822  BLOCK_COMMENT("delayed_value {");
1823  // Load indirectly to solve generation ordering problem.
1824  load_absolute_address(tmp, (address) delayed_value_addr); // tmp = a;
1825  z_lg(tmp, 0, tmp);                   // tmp = *tmp;
1826
1827#ifdef ASSERT
1828  NearLabel L;
1829  compare64_and_branch(tmp, (intptr_t)0L, Assembler::bcondNotEqual, L);
1830  z_illtrap();
1831  bind(L);
1832#endif
1833
1834  if (offset != 0) {
1835    z_agfi(tmp, offset);               // tmp = tmp + offset;
1836  }
1837
1838  BLOCK_COMMENT("} delayed_value");
1839  return RegisterOrConstant(tmp);
1840}
1841
1842// Patch instruction `inst' at offset `inst_pos' to refer to `dest_pos'
1843// and return the resulting instruction.
1844// Dest_pos and inst_pos are 32 bit only. These parms can only designate
1845// relative positions.
1846// Use correct argument types. Do not pre-calculate distance.
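// Note: relative offsets on z/Architecture are encoded in halfwords (2-byte units).
// The RelAddr::pcrel_off16/off32 helpers deliver the already scaled value; the code
// below merely places it into the 16-bit or 32-bit immediate field of the instruction.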
1847unsigned long MacroAssembler::patched_branch(address dest_pos, unsigned long inst, address inst_pos) {
1848  int c = 0;
1849  unsigned long patched_inst = 0;
1850  if (is_call_pcrelative_short(inst) ||
1851      is_branch_pcrelative_short(inst) ||
1852      is_branchoncount_pcrelative_short(inst) ||
1853      is_branchonindex32_pcrelative_short(inst)) {
1854    c = 1;
1855    int m = fmask(15, 0);    // simm16(-1, 16, 32);
1856    int v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 32);
1857    patched_inst = (inst & ~m) | v;
1858  } else if (is_compareandbranch_pcrelative_short(inst)) {
1859    c = 2;
1860    long m = fmask(31, 16);  // simm16(-1, 16, 48);
1861    long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1862    patched_inst = (inst & ~m) | v;
1863  } else if (is_branchonindex64_pcrelative_short(inst)) {
1864    c = 3;
1865    long m = fmask(31, 16);  // simm16(-1, 16, 48);
1866    long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1867    patched_inst = (inst & ~m) | v;
1868  } else if (is_call_pcrelative_long(inst) || is_branch_pcrelative_long(inst)) {
1869    c = 4;
1870    long m = fmask(31, 0);  // simm32(-1, 16, 48);
1871    long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1872    patched_inst = (inst & ~m) | v;
1873  } else if (is_pcrelative_long(inst)) { // These are the non-branch pc-relative instructions.
1874    c = 5;
1875    long m = fmask(31, 0);  // simm32(-1, 16, 48);
1876    long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1877    patched_inst = (inst & ~m) | v;
1878  } else {
1879    print_dbg_msg(tty, inst, "not a relative branch", 0);
1880    dump_code_range(tty, inst_pos, 32, "not a pcrelative branch");
1881    ShouldNotReachHere();
1882  }
1883
1884  long new_off = get_pcrel_offset(patched_inst);
1885  if (new_off != (dest_pos-inst_pos)) {
1886    tty->print_cr("case %d: dest_pos = %p, inst_pos = %p, disp = %ld(%12.12lx)", c, dest_pos, inst_pos, new_off, new_off);
1887    print_dbg_msg(tty, inst,         "<- original instruction: branch patching error", 0);
1888    print_dbg_msg(tty, patched_inst, "<- patched  instruction: branch patching error", 0);
1889#ifdef LUCY_DBG
1890    VM_Version::z_SIGSEGV();
1891#endif
1892    ShouldNotReachHere();
1893  }
1894  return patched_inst;
1895}
1896
1897// Only called when binding labels (share/vm/asm/assembler.cpp)
1898// Pass arguments as intended. Do not pre-calculate distance.
1899void MacroAssembler::pd_patch_instruction(address branch, address target) {
1900  unsigned long stub_inst;
1901  int           inst_len = get_instruction(branch, &stub_inst);
1902
1903  set_instruction(branch, patched_branch(target, stub_inst, branch), inst_len);
1904}
1905
1906
1907// Extract relative address (aka offset).
// inv_simm16 works for 4-byte instructions only.
// Compare-and-branch instructions are 6 bytes long and have a 16-bit offset "in the middle".
1910long MacroAssembler::get_pcrel_offset(unsigned long inst) {
1911
1912  if (MacroAssembler::is_pcrelative_short(inst)) {
1913    if (((inst&0xFFFFffff00000000UL) == 0) && ((inst&0x00000000FFFF0000UL) != 0)) {
1914      return RelAddr::inv_pcrel_off16(inv_simm16(inst));
1915    } else {
1916      return RelAddr::inv_pcrel_off16(inv_simm16_48(inst));
1917    }
1918  }
1919
1920  if (MacroAssembler::is_pcrelative_long(inst)) {
1921    return RelAddr::inv_pcrel_off32(inv_simm32(inst));
1922  }
1923
1924  print_dbg_msg(tty, inst, "not a pcrelative instruction", 6);
1925#ifdef LUCY_DBG
1926  VM_Version::z_SIGSEGV();
1927#else
1928  ShouldNotReachHere();
1929#endif
1930  return -1;
1931}
1932
1933long MacroAssembler::get_pcrel_offset(address pc) {
1934  unsigned long inst;
1935  unsigned int  len = get_instruction(pc, &inst);
1936
1937#ifdef ASSERT
1938  long offset;
1939  if (MacroAssembler::is_pcrelative_short(inst) || MacroAssembler::is_pcrelative_long(inst)) {
1940    offset = get_pcrel_offset(inst);
1941  } else {
1942    offset = -1;
1943  }
1944
1945  if (offset == -1) {
1946    dump_code_range(tty, pc, 32, "not a pcrelative instruction");
1947#ifdef LUCY_DBG
1948    VM_Version::z_SIGSEGV();
1949#else
1950    ShouldNotReachHere();
1951#endif
1952  }
1953  return offset;
1954#else
1955  return get_pcrel_offset(inst);
1956#endif // ASSERT
1957}
1958
1959// Get target address from pc-relative instructions.
1960address MacroAssembler::get_target_addr_pcrel(address pc) {
1961  assert(is_pcrelative_long(pc), "not a pcrelative instruction");
1962  return pc + get_pcrel_offset(pc);
1963}
1964
1965// Patch pc relative load address.
1966void MacroAssembler::patch_target_addr_pcrel(address pc, address con) {
1967  unsigned long inst;
1968  // Offset is +/- 2**32 -> use long.
1969  ptrdiff_t distance = con - pc;
1970
1971  get_instruction(pc, &inst);
1972
1973  if (is_pcrelative_short(inst)) {
1974    *(short *)(pc+2) = RelAddr::pcrel_off16(con, pc);  // Instructions are at least 2-byte aligned, no test required.
1975
1976    // Some extra safety net.
1977    if (!RelAddr::is_in_range_of_RelAddr16(distance)) {
1978      print_dbg_msg(tty, inst, "distance out of range (16bit)", 4);
1979      dump_code_range(tty, pc, 32, "distance out of range (16bit)");
      guarantee(RelAddr::is_in_range_of_RelAddr16(distance), "too far away (more than +/- 2**16)");
1981    }
1982    return;
1983  }
1984
1985  if (is_pcrelative_long(inst)) {
1986    *(int *)(pc+2)   = RelAddr::pcrel_off32(con, pc);
1987
    // Some extra safety net.
1989    if (!RelAddr::is_in_range_of_RelAddr32(distance)) {
1990      print_dbg_msg(tty, inst, "distance out of range (32bit)", 6);
1991      dump_code_range(tty, pc, 32, "distance out of range (32bit)");
      guarantee(RelAddr::is_in_range_of_RelAddr32(distance), "too far away (more than +/- 2**32)");
1993    }
1994    return;
1995  }
1996
1997  guarantee(false, "not a pcrelative instruction to patch!");
1998}
1999
2000// "Current PC" here means the address just behind the basr instruction.
2001address MacroAssembler::get_PC(Register result) {
2002  z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.
2003  return pc();
2004}
2005
2006// Get current PC + offset.
2007// Offset given in bytes, must be even!
2008// "Current PC" here means the address of the larl instruction plus the given offset.
2009address MacroAssembler::get_PC(Register result, int64_t offset) {
2010  address here = pc();
2011  z_larl(result, offset/2); // Save target instruction address in result.
2012  return here + offset;
2013}
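// Example (illustration): get_PC(Z_R1, 6) loads the address right behind the
// 6-byte larl instruction into Z_R1 and returns that same address.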
2014
2015// Resize_frame with SP(new) = SP(old) - [offset].
2016void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp)
2017{
2018  assert_different_registers(offset, fp, Z_SP);
2019  if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }
2020
2021  z_sgr(Z_SP, offset);
2022  z_stg(fp, _z_abi(callers_sp), Z_SP);
2023}
2024
2025// Resize_frame with SP(new) = [newSP] + offset.
2026//   This emitter is useful if we already have calculated a pointer
2027//   into the to-be-allocated stack space, e.g. with special alignment properties,
2028//   but need some additional space, e.g. for spilling.
2029//   newSP    is the pre-calculated pointer. It must not be modified.
2030//   fp       holds, or is filled with, the frame pointer.
2031//   offset   is the additional increment which is added to addr to form the new SP.
2032//            Note: specify a negative value to reserve more space!
2033//   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
2034//                    It does not guarantee that fp contains the frame pointer at the end.
2035void MacroAssembler::resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp) {
2036  assert_different_registers(newSP, fp, Z_SP);
2037
2038  if (load_fp) {
2039    z_lg(fp, _z_abi(callers_sp), Z_SP);
2040  }
2041
2042  add2reg(Z_SP, offset, newSP);
2043  z_stg(fp, _z_abi(callers_sp), Z_SP);
2044}
2045
2046// Resize_frame with SP(new) = [newSP].
2047//   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
2048//                    It does not guarantee that fp contains the frame pointer at the end.
2049void MacroAssembler::resize_frame_absolute(Register newSP, Register fp, bool load_fp) {
2050  assert_different_registers(newSP, fp, Z_SP);
2051
2052  if (load_fp) {
2053    z_lg(fp, _z_abi(callers_sp), Z_SP); // need to use load/store.
2054  }
2055
2056  z_lgr(Z_SP, newSP);
2057  if (newSP != Z_R0) { // make sure we generate correct code, no matter what register newSP uses.
2058    z_stg(fp, _z_abi(callers_sp), newSP);
2059  } else {
2060    z_stg(fp, _z_abi(callers_sp), Z_SP);
2061  }
2062}
2063
2064// Resize_frame with SP(new) = SP(old) + offset.
2065void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) {
2066  assert_different_registers(fp, Z_SP);
2067
2068  if (load_fp) {
2069    z_lg(fp, _z_abi(callers_sp), Z_SP);
2070  }
2071  add64(Z_SP, offset);
2072  z_stg(fp, _z_abi(callers_sp), Z_SP);
2073}
2074
2075void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) {
2076#ifdef ASSERT
2077  assert_different_registers(bytes, old_sp, Z_SP);
2078  if (!copy_sp) {
2079    z_cgr(old_sp, Z_SP);
2080    asm_assert_eq("[old_sp]!=[Z_SP]", 0x211);
2081  }
2082#endif
2083  if (copy_sp) { z_lgr(old_sp, Z_SP); }
2084  if (bytes_with_inverted_sign) {
2085    z_agr(Z_SP, bytes);
2086  } else {
2087    z_sgr(Z_SP, bytes); // Z_sgfr sufficient, but probably not faster.
2088  }
2089  z_stg(old_sp, _z_abi(callers_sp), Z_SP);
2090}
2091
2092unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) {
2093  long offset = Assembler::align(bytes, frame::alignment_in_bytes);
2094  assert(offset > 0, "should push a frame with positive size, size = %ld.", offset);
2095  assert(Displacement::is_validDisp(-offset), "frame size out of range, size = %ld", offset);
2096
2097  // We must not write outside the current stack bounds (given by Z_SP).
2098  // Thus, we have to first update Z_SP and then store the previous SP as stack linkage.
2099  // We rely on Z_R0 by default to be available as scratch.
2100  z_lgr(scratch, Z_SP);
2101  add2reg(Z_SP, -offset);
2102  z_stg(scratch, _z_abi(callers_sp), Z_SP);
2103#ifdef ASSERT
2104  // Just make sure nobody uses the value in the default scratch register.
2105  // When another register is used, the caller might rely on it containing the frame pointer.
2106  if (scratch == Z_R0) {
2107    z_iihf(scratch, 0xbaadbabe);
2108    z_iilf(scratch, 0xdeadbeef);
2109  }
2110#endif
2111  return offset;
2112}
2113
2114// Push a frame of size `bytes' plus abi160 on top.
2115unsigned int MacroAssembler::push_frame_abi160(unsigned int bytes) {
2116  BLOCK_COMMENT("push_frame_abi160 {");
2117  unsigned int res = push_frame(bytes + frame::z_abi_160_size);
2118  BLOCK_COMMENT("} push_frame_abi160");
2119  return res;
2120}
2121
2122// Pop current C frame.
2123void MacroAssembler::pop_frame() {
2124  BLOCK_COMMENT("pop_frame:");
2125  Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP);
2126}
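// Typical pairing of the frame emitters (illustration, see also reserved_stack_check below):
//   save_return_pc();
//   push_frame_abi160(0);   // C frame with just the z/ABI register save area on top
//   call_VM_leaf(...);
//   pop_frame();
//   restore_return_pc();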
2127
2128// Pop current C frame and restore return PC register (Z_R14).
2129void MacroAssembler::pop_frame_restore_retPC(int frame_size_in_bytes) {
2130  BLOCK_COMMENT("pop_frame_restore_retPC:");
2131  int retPC_offset = _z_abi16(return_pc) + frame_size_in_bytes;
2132  // If possible, pop frame by add instead of load (a penny saved is a penny got :-).
2133  if (Displacement::is_validDisp(retPC_offset)) {
2134    z_lg(Z_R14, retPC_offset, Z_SP);
2135    add2reg(Z_SP, frame_size_in_bytes);
2136  } else {
2137    add2reg(Z_SP, frame_size_in_bytes);
2138    restore_return_pc();
2139  }
2140}
2141
2142void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) {
2143  if (allow_relocation) {
2144    call_c(entry_point);
2145  } else {
2146    call_c_static(entry_point);
2147  }
2148}
2149
2150void MacroAssembler::call_VM_leaf_base(address entry_point) {
2151  bool allow_relocation = true;
2152  call_VM_leaf_base(entry_point, allow_relocation);
2153}
2154
2155void MacroAssembler::call_VM_base(Register oop_result,
2156                                  Register last_java_sp,
2157                                  address  entry_point,
2158                                  bool     allow_relocation,
2159                                  bool     check_exceptions) { // Defaults to true.
  // If allow_relocation is true, the generated code must be fit for code
  // relocation or referenced data relocation. In other words: all addresses
  // must be considered variable; PC-relative addressing is not possible then.
  // If allow_relocation is false, addresses and offsets may be considered
  // stable, enabling us to take advantage of some PC-relative addressing
  // tweaks. These might improve performance and reduce code size.
2167
2168  // Determine last_java_sp register.
2169  if (!last_java_sp->is_valid()) {
2170    last_java_sp = Z_SP;  // Load Z_SP as SP.
2171  }
2172
2173  set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, Z_R1, allow_relocation);
2174
2175  // ARG1 must hold thread address.
2176  z_lgr(Z_ARG1, Z_thread);
2177
2178  address return_pc = NULL;
2179  if (allow_relocation) {
2180    return_pc = call_c(entry_point);
2181  } else {
2182    return_pc = call_c_static(entry_point);
2183  }
2184
2185  reset_last_Java_frame(allow_relocation);
2186
2187  // C++ interp handles this in the interpreter.
2188  check_and_handle_popframe(Z_thread);
2189  check_and_handle_earlyret(Z_thread);
2190
2191  // Check for pending exceptions.
2192  if (check_exceptions) {
2193    // Check for pending exceptions (java_thread is set upon return).
2194    load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));
2195
    // This used to conditionally jump to forward_exception. However, after
    // relocation that branch might not reach its target anymore. So we jump
    // around a stub call instead, which is always in range.
2199
2200    Label ok;
2201    z_bre(ok); // Bcondequal is the same as bcondZero.
2202    call_stub(StubRoutines::forward_exception_entry());
2203    bind(ok);
2204  }
2205
2206  // Get oop result if there is one and reset the value in the thread.
2207  if (oop_result->is_valid()) {
2208    get_vm_result(oop_result);
2209  }
2210
2211  _last_calls_return_pc = return_pc;  // Wipe out other (error handling) calls.
2212}
2213
2214void MacroAssembler::call_VM_base(Register oop_result,
2215                                  Register last_java_sp,
2216                                  address  entry_point,
2217                                  bool     check_exceptions) { // Defaults to true.
2218  bool allow_relocation = true;
2219  call_VM_base(oop_result, last_java_sp, entry_point, allow_relocation, check_exceptions);
2220}
2221
2222// VM calls without explicit last_java_sp.
2223
2224void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
2225  // Call takes possible detour via InterpreterMacroAssembler.
2226  call_VM_base(oop_result, noreg, entry_point, true, check_exceptions);
2227}
2228
2229void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
2230  // Z_ARG1 is reserved for the thread.
2231  lgr_if_needed(Z_ARG2, arg_1);
2232  call_VM(oop_result, entry_point, check_exceptions);
2233}
2234
2235void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
2236  // Z_ARG1 is reserved for the thread.
2237  lgr_if_needed(Z_ARG2, arg_1);
2238  assert(arg_2 != Z_ARG2, "smashed argument");
2239  lgr_if_needed(Z_ARG3, arg_2);
2240  call_VM(oop_result, entry_point, check_exceptions);
2241}
2242
2243void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2244                             Register arg_3, bool check_exceptions) {
2245  // Z_ARG1 is reserved for the thread.
2246  lgr_if_needed(Z_ARG2, arg_1);
2247  assert(arg_2 != Z_ARG2, "smashed argument");
2248  lgr_if_needed(Z_ARG3, arg_2);
2249  assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2250  lgr_if_needed(Z_ARG4, arg_3);
2251  call_VM(oop_result, entry_point, check_exceptions);
2252}
2253
2254// VM static calls without explicit last_java_sp.
2255
2256void MacroAssembler::call_VM_static(Register oop_result, address entry_point, bool check_exceptions) {
2257  // Call takes possible detour via InterpreterMacroAssembler.
2258  call_VM_base(oop_result, noreg, entry_point, false, check_exceptions);
2259}
2260
2261void MacroAssembler::call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2262                                    Register arg_3, bool check_exceptions) {
2263  // Z_ARG1 is reserved for the thread.
2264  lgr_if_needed(Z_ARG2, arg_1);
2265  assert(arg_2 != Z_ARG2, "smashed argument");
2266  lgr_if_needed(Z_ARG3, arg_2);
2267  assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2268  lgr_if_needed(Z_ARG4, arg_3);
2269  call_VM_static(oop_result, entry_point, check_exceptions);
2270}
2271
2272// VM calls with explicit last_java_sp.
2273
2274void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions) {
2275  // Call takes possible detour via InterpreterMacroAssembler.
2276  call_VM_base(oop_result, last_java_sp, entry_point, true, check_exceptions);
2277}
2278
2279void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
2280   // Z_ARG1 is reserved for the thread.
2281   lgr_if_needed(Z_ARG2, arg_1);
2282   call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2283}
2284
2285void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2286                             Register arg_2, bool check_exceptions) {
2287   // Z_ARG1 is reserved for the thread.
2288   lgr_if_needed(Z_ARG2, arg_1);
2289   assert(arg_2 != Z_ARG2, "smashed argument");
2290   lgr_if_needed(Z_ARG3, arg_2);
2291   call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2292}
2293
2294void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2295                             Register arg_2, Register arg_3, bool check_exceptions) {
2296  // Z_ARG1 is reserved for the thread.
2297  lgr_if_needed(Z_ARG2, arg_1);
2298  assert(arg_2 != Z_ARG2, "smashed argument");
2299  lgr_if_needed(Z_ARG3, arg_2);
2300  assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2301  lgr_if_needed(Z_ARG4, arg_3);
2302  call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2303}
2304
2305// VM leaf calls.
2306
2307void MacroAssembler::call_VM_leaf(address entry_point) {
2308  // Call takes possible detour via InterpreterMacroAssembler.
2309  call_VM_leaf_base(entry_point, true);
2310}
2311
2312void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
2313  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2314  call_VM_leaf(entry_point);
2315}
2316
2317void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
2318  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2319  assert(arg_2 != Z_ARG1, "smashed argument");
2320  if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2321  call_VM_leaf(entry_point);
2322}
2323
2324void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2325  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2326  assert(arg_2 != Z_ARG1, "smashed argument");
2327  if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2328  assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2329  if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2330  call_VM_leaf(entry_point);
2331}
2332
2333// Static VM leaf calls.
2334// Really static VM leaf calls are never patched.
2335
2336void MacroAssembler::call_VM_leaf_static(address entry_point) {
2337  // Call takes possible detour via InterpreterMacroAssembler.
2338  call_VM_leaf_base(entry_point, false);
2339}
2340
2341void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1) {
2342  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2343  call_VM_leaf_static(entry_point);
2344}
2345
2346void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2) {
2347  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2348  assert(arg_2 != Z_ARG1, "smashed argument");
2349  if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2350  call_VM_leaf_static(entry_point);
2351}
2352
2353void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2354  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2355  assert(arg_2 != Z_ARG1, "smashed argument");
2356  if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2357  assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2358  if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2359  call_VM_leaf_static(entry_point);
2360}
2361
2362// Don't use detour via call_c(reg).
2363address MacroAssembler::call_c(address function_entry) {
2364  load_const(Z_R1, function_entry);
2365  return call(Z_R1);
2366}
2367
2368// Variant for really static (non-relocatable) calls which are never patched.
2369address MacroAssembler::call_c_static(address function_entry) {
2370  load_absolute_address(Z_R1, function_entry);
2371#if 0 // def ASSERT
2372  // Verify that call site did not move.
2373  load_const_optimized(Z_R0, function_entry);
2374  z_cgr(Z_R1, Z_R0);
2375  z_brc(bcondEqual, 3);
2376  z_illtrap(0xba);
2377#endif
2378  return call(Z_R1);
2379}
2380
2381address MacroAssembler::call_c_opt(address function_entry) {
2382  bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */);
2383  _last_calls_return_pc = success ? pc() : NULL;
2384  return _last_calls_return_pc;
2385}
2386
2387// Identify a call_far_patchable instruction: LARL + LG + BASR
2388//
2389//    nop                   ; optionally, if required for alignment
2390//    lgrl rx,A(TOC entry)  ; PC-relative access into constant pool
2391//    basr Z_R14,rx         ; end of this instruction must be aligned to a word boundary
2392//
2393// Code pattern will eventually get patched into variant2 (see below for detection code).
2394//
2395bool MacroAssembler::is_call_far_patchable_variant0_at(address instruction_addr) {
2396  address iaddr = instruction_addr;
2397
2398  // Check for the actual load instruction.
2399  if (!is_load_const_from_toc(iaddr)) { return false; }
2400  iaddr += load_const_from_toc_size();
2401
2402  // Check for the call (BASR) instruction, finally.
2403  assert(iaddr-instruction_addr+call_byregister_size() == call_far_patchable_size(), "size mismatch");
2404  return is_call_byregister(iaddr);
2405}
2406
2407// Identify a call_far_patchable instruction: BRASL
2408//
// Code pattern that suits atomic patching:
2410//    nop                       ; Optionally, if required for alignment.
2411//    nop    ...                ; Multiple filler nops to compensate for size difference (variant0 is longer).
2412//    nop                       ; For code pattern detection: Prepend each BRASL with a nop.
2413//    brasl  Z_R14,<reladdr>    ; End of code must be 4-byte aligned !
2414bool MacroAssembler::is_call_far_patchable_variant2_at(address instruction_addr) {
2415  const address call_addr = (address)((intptr_t)instruction_addr + call_far_patchable_size() - call_far_pcrelative_size());
2416
2417  // Check for correct number of leading nops.
2418  address iaddr;
2419  for (iaddr = instruction_addr; iaddr < call_addr; iaddr += nop_size()) {
2420    if (!is_z_nop(iaddr)) { return false; }
2421  }
2422  assert(iaddr == call_addr, "sanity");
2423
2424  // --> Check for call instruction.
2425  if (is_call_far_pcrelative(call_addr)) {
2426    assert(call_addr-instruction_addr+call_far_pcrelative_size() == call_far_patchable_size(), "size mismatch");
2427    return true;
2428  }
2429
2430  return false;
2431}
2432
2433// Emit a NOT mt-safely patchable 64 bit absolute call.
2434// If toc_offset == -2, then the destination of the call (= target) is emitted
2435//                      to the constant pool and a runtime_call relocation is added
2436//                      to the code buffer.
2437// If toc_offset != -2, target must already be in the constant pool at
2438//                      _ctableStart+toc_offset (a caller can retrieve toc_offset
2439//                      from the runtime_call relocation).
2440// Special handling of emitting to scratch buffer when there is no constant pool.
2441// Slightly changed code pattern. We emit an additional nop if we would
2442// not end emitting at a word aligned address. This is to ensure
2443// an atomically patchable displacement in brasl instructions.
2444//
2445// A call_far_patchable comes in different flavors:
2446//  - LARL(CP) / LG(CP) / BR (address in constant pool, access via CP register)
//  - LGRL(CP) / BR          (address in constant pool, pc-relative access)
2448//  - BRASL                  (relative address of call target coded in instruction)
2449// All flavors occupy the same amount of space. Length differences are compensated
2450// by leading nops, such that the instruction sequence always ends at the same
2451// byte offset. This is required to keep the return offset constant.
2452// Furthermore, the return address (the end of the instruction sequence) is forced
2453// to be on a 4-byte boundary. This is required for atomic patching, should we ever
2454// need to patch the call target of the BRASL flavor.
2455// RETURN value: false, if no constant pool entry could be allocated, true otherwise.
2456bool MacroAssembler::call_far_patchable(address target, int64_t tocOffset) {
2457  // Get current pc and ensure word alignment for end of instr sequence.
2458  const address start_pc = pc();
2459  const intptr_t       start_off = offset();
2460  assert(!call_far_patchable_requires_alignment_nop(start_pc), "call_far_patchable requires aligned address");
2461  const ptrdiff_t      dist      = (ptrdiff_t)(target - (start_pc + 2)); // Prepend each BRASL with a nop.
2462  const bool emit_target_to_pool = (tocOffset == -2) && !code_section()->scratch_emit();
2463  const bool emit_relative_call  = !emit_target_to_pool &&
2464                                   RelAddr::is_in_range_of_RelAddr32(dist) &&
2465                                   ReoptimizeCallSequences &&
2466                                   !code_section()->scratch_emit();
2467
2468  if (emit_relative_call) {
2469    // Add padding to get the same size as below.
2470    const unsigned int padding = call_far_patchable_size() - call_far_pcrelative_size();
2471    unsigned int current_padding;
2472    for (current_padding = 0; current_padding < padding; current_padding += nop_size()) { z_nop(); }
2473    assert(current_padding == padding, "sanity");
2474
2475    // relative call: len = 2(nop) + 6 (brasl)
2476    // CodeBlob resize cannot occur in this case because
2477    // this call is emitted into pre-existing space.
2478    z_nop(); // Prepend each BRASL with a nop.
2479    z_brasl(Z_R14, target);
2480  } else {
2481    // absolute call: Get address from TOC.
2482    // len = (load TOC){6|0} + (load from TOC){6} + (basr){2} = {14|8}
2483    if (emit_target_to_pool) {
2484      // When emitting the call for the first time, we do not need to use
2485      // the pc-relative version. It will be patched anyway, when the code
2486      // buffer is copied.
2487      // Relocation is not needed when !ReoptimizeCallSequences.
2488      relocInfo::relocType rt = ReoptimizeCallSequences ? relocInfo::runtime_call_w_cp_type : relocInfo::none;
2489      AddressLiteral dest(target, rt);
      // Store_oop_in_toc() adds dest to the constant table. As a side effect,
      // this kills inst_mark(). Reset if possible.
2492      bool reset_mark = (inst_mark() == pc());
2493      tocOffset = store_oop_in_toc(dest);
2494      if (reset_mark) { set_inst_mark(); }
2495      if (tocOffset == -1) {
2496        return false; // Couldn't create constant pool entry.
2497      }
2498    }
2499    assert(offset() == start_off, "emit no code before this point!");
2500
2501    address tocPos = pc() + tocOffset;
2502    if (emit_target_to_pool) {
2503      tocPos = code()->consts()->start() + tocOffset;
2504    }
2505    load_long_pcrelative(Z_R14, tocPos);
2506    z_basr(Z_R14, Z_R14);
2507  }
2508
2509#ifdef ASSERT
2510  // Assert that we can identify the emitted call.
2511  assert(is_call_far_patchable_at(addr_at(start_off)), "can't identify emitted call");
2512  assert(offset() == start_off+call_far_patchable_size(), "wrong size");
2513
2514  if (emit_target_to_pool) {
2515    assert(get_dest_of_call_far_patchable_at(addr_at(start_off), code()->consts()->start()) == target,
2516           "wrong encoding of dest address");
2517  }
2518#endif
2519  return true; // success
2520}
2521
2522// Identify a call_far_patchable instruction.
2523// For more detailed information see header comment of call_far_patchable.
2524bool MacroAssembler::is_call_far_patchable_at(address instruction_addr) {
2525  return is_call_far_patchable_variant2_at(instruction_addr)  || // short version: BRASL
2526         is_call_far_patchable_variant0_at(instruction_addr);    // long version LARL + LG + BASR
2527}
2528
2529// Does the call_far_patchable instruction use a pc-relative encoding
2530// of the call destination?
2531bool MacroAssembler::is_call_far_patchable_pcrelative_at(address instruction_addr) {
2532  // Variant 2 is pc-relative.
2533  return is_call_far_patchable_variant2_at(instruction_addr);
2534}
2535
2536bool MacroAssembler::is_call_far_pcrelative(address instruction_addr) {
2537  // Prepend each BRASL with a nop.
2538  return is_z_nop(instruction_addr) && is_z_brasl(instruction_addr + nop_size());  // Match at position after one nop required.
2539}
2540
2541// Set destination address of a call_far_patchable instruction.
2542void MacroAssembler::set_dest_of_call_far_patchable_at(address instruction_addr, address dest, int64_t tocOffset) {
2543  ResourceMark rm;
2544
2545  // Now that CP entry is verified, patch call to a pc-relative call (if circumstances permit).
2546  int code_size = MacroAssembler::call_far_patchable_size();
2547  CodeBuffer buf(instruction_addr, code_size);
2548  MacroAssembler masm(&buf);
2549  masm.call_far_patchable(dest, tocOffset);
2550  ICache::invalidate_range(instruction_addr, code_size); // Empty on z.
2551}
2552
2553// Get dest address of a call_far_patchable instruction.
2554address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_addr, address ctable) {
2555  // Dynamic TOC: absolute address in constant pool.
2556  // Check variant2 first, it is more frequent.
2557
2558  // Relative address encoded in call instruction.
2559  if (is_call_far_patchable_variant2_at(instruction_addr)) {
2560    return MacroAssembler::get_target_addr_pcrel(instruction_addr + nop_size()); // Prepend each BRASL with a nop.
2561
2562  // Absolute address in constant pool.
2563  } else if (is_call_far_patchable_variant0_at(instruction_addr)) {
2564    address iaddr = instruction_addr;
2565
2566    long    tocOffset = get_load_const_from_toc_offset(iaddr);
2567    address tocLoc    = iaddr + tocOffset;
2568    return *(address *)(tocLoc);
2569  } else {
2570    fprintf(stderr, "MacroAssembler::get_dest_of_call_far_patchable_at has a problem at %p:\n", instruction_addr);
2571    fprintf(stderr, "not a call_far_patchable: %16.16lx %16.16lx, len = %d\n",
2572            *(unsigned long*)instruction_addr,
2573            *(unsigned long*)(instruction_addr+8),
2574            call_far_patchable_size());
2575    Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size());
2576    ShouldNotReachHere();
2577    return NULL;
2578  }
2579}
2580
2581void MacroAssembler::align_call_far_patchable(address pc) {
2582  if (call_far_patchable_requires_alignment_nop(pc)) { z_nop(); }
2583}
2584
2585void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
2586}
2587
2588void MacroAssembler::check_and_handle_popframe(Register java_thread) {
2589}
2590
2591// Read from the polling page.
2592// Use TM or TMY instruction, depending on read offset.
2593//   offset = 0: Use TM, safepoint polling.
2594//   offset < 0: Use TMY, profiling safepoint polling.
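// Example (illustration): load_from_polling_page(Z_R1, 0) emits a 4-byte TM that
// tests the byte at 0(Z_R1) under mask_safepoint, i.e. a read-only touch of the page.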
2595void MacroAssembler::load_from_polling_page(Register polling_page_address, int64_t offset) {
2596  if (Immediate::is_uimm12(offset)) {
2597    z_tm(offset, polling_page_address, mask_safepoint);
2598  } else {
2599    z_tmy(offset, polling_page_address, mask_profiling);
2600  }
2601}
2602
2603// Check whether z_instruction is a read access to the polling page
2604// which was emitted by load_from_polling_page(..).
2605bool MacroAssembler::is_load_from_polling_page(address instr_loc) {
2606  unsigned long z_instruction;
2607  unsigned int  ilen = get_instruction(instr_loc, &z_instruction);
2608
2609  if (ilen == 2) { return false; } // It's none of the allowed instructions.
2610
2611  if (ilen == 4) {
2612    if (!is_z_tm(z_instruction)) { return false; } // It's len=4, but not a z_tm. fail.
2613
2614    int ms = inv_mask(z_instruction,8,32);  // mask
2615    int ra = inv_reg(z_instruction,16,32);  // base register
2616    int ds = inv_uimm12(z_instruction);     // displacement
2617
2618    if (!(ds == 0 && ra != 0 && ms == mask_safepoint)) {
2619      return false; // It's not a z_tm(0, ra, mask_safepoint). Fail.
2620    }
2621
2622  } else { /* if (ilen == 6) */
2623
2624    assert(!is_z_lg(z_instruction), "old form (LG) polling page access. Please fix and use TM(Y).");
2625
2626    if (!is_z_tmy(z_instruction)) { return false; } // It's len=6, but not a z_tmy. fail.
2627
2628    int ms = inv_mask(z_instruction,8,48);  // mask
2629    int ra = inv_reg(z_instruction,16,48);  // base register
2630    int ds = inv_simm20(z_instruction);     // displacement
2631  }
2632
2633  return true;
2634}
2635
2636// Extract poll address from instruction and ucontext.
2637address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) {
2638  assert(ucontext != NULL, "must have ucontext");
2639  ucontext_t* uc = (ucontext_t*) ucontext;
2640  unsigned long z_instruction;
2641  unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2642
2643  if (ilen == 4 && is_z_tm(z_instruction)) {
2644    int ra = inv_reg(z_instruction, 16, 32);  // base register
2645    int ds = inv_uimm12(z_instruction);       // displacement
2646    address addr = (address)uc->uc_mcontext.gregs[ra];
2647    return addr + ds;
2648  } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2649    int ra = inv_reg(z_instruction, 16, 48);  // base register
2650    int ds = inv_simm20(z_instruction);       // displacement
2651    address addr = (address)uc->uc_mcontext.gregs[ra];
2652    return addr + ds;
2653  }
2654
2655  ShouldNotReachHere();
2656  return NULL;
2657}
2658
2659// Extract poll register from instruction.
2660uint MacroAssembler::get_poll_register(address instr_loc) {
2661  unsigned long z_instruction;
2662  unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2663
2664  if (ilen == 4 && is_z_tm(z_instruction)) {
2665    return (uint)inv_reg(z_instruction, 16, 32);  // base register
2666  } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2667    return (uint)inv_reg(z_instruction, 16, 48);  // base register
2668  }
2669
2670  ShouldNotReachHere();
2671  return 0;
2672}
2673
2674bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
2675  ShouldNotCallThis();
2676  return false;
2677}
2678
// Write the serialization page so the VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread-specific offset to
// write to within the page. This minimizes bus traffic due to cache line
// collision.
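// The effective store address computed below is (illustration):
//   serialize_page_base + ((thread << serialize_page_shift) & serialize_page_mask)
// so every thread writes to its own offset within the page.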
2683void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
2684  assert_different_registers(tmp1, tmp2);
2685  z_sllg(tmp2, thread, os::get_serialize_page_shift_count());
2686  load_const_optimized(tmp1, (long) os::get_memory_serialize_page());
2687
2688  int mask = os::get_serialize_page_mask();
2689  if (Immediate::is_uimm16(mask)) {
2690    z_nill(tmp2, mask);
2691    z_llghr(tmp2, tmp2);
2692  } else {
2693    z_nilf(tmp2, mask);
2694    z_llgfr(tmp2, tmp2);
2695  }
2696
2697  z_release();
2698  z_st(Z_R0, 0, tmp2, tmp1);
2699}
2700
2701// Don't rely on register locking, always use Z_R1 as scratch register instead.
2702void MacroAssembler::bang_stack_with_offset(int offset) {
2703  // Stack grows down, caller passes positive offset.
2704  assert(offset > 0, "must bang with positive offset");
2705  if (Displacement::is_validDisp(-offset)) {
2706    z_tmy(-offset, Z_SP, mask_stackbang);
2707  } else {
2708    add2reg(Z_R1, -offset, Z_SP);    // Do not destroy Z_SP!!!
2709    z_tm(0, Z_R1, mask_stackbang);  // Just banging.
2710  }
2711}
2712
2713void MacroAssembler::reserved_stack_check(Register return_pc) {
2714  // Test if reserved zone needs to be enabled.
2715  Label no_reserved_zone_enabling;
2716  assert(return_pc == Z_R14, "Return pc must be in R14 before z_br() to StackOverflow stub.");
2717  BLOCK_COMMENT("reserved_stack_check {");
2718
2719  z_clg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset()));
2720  z_brl(no_reserved_zone_enabling);
2721
2722  // Enable reserved zone again, throw stack overflow exception.
2723  save_return_pc();
2724  push_frame_abi160(0);
2725  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread);
2726  pop_frame();
2727  restore_return_pc();
2728
2729  load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry());
2730  // Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc.
2731  z_br(Z_R1);
2732
2733  should_not_reach_here();
2734
2735  bind(no_reserved_zone_enabling);
2736  BLOCK_COMMENT("} reserved_stack_check");
2737}
2738
2739// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
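// Conceptually (illustration):
//   obj = thread->tlab_top;
//   end = obj + size;                           // size from var_size_in_bytes or con_size_in_bytes
//   if (end > thread->tlab_end) goto slow_case;
//   thread->tlab_top = end;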
2740void MacroAssembler::tlab_allocate(Register obj,
2741                                   Register var_size_in_bytes,
2742                                   int con_size_in_bytes,
2743                                   Register t1,
2744                                   Label& slow_case) {
2745  assert_different_registers(obj, var_size_in_bytes, t1);
2746  Register end = t1;
2747  Register thread = Z_thread;
2748
2749  z_lg(obj, Address(thread, JavaThread::tlab_top_offset()));
2750  if (var_size_in_bytes == noreg) {
2751    z_lay(end, Address(obj, con_size_in_bytes));
2752  } else {
2753    z_lay(end, Address(obj, var_size_in_bytes));
2754  }
2755  z_cg(end, Address(thread, JavaThread::tlab_end_offset()));
2756  branch_optimized(bcondHigh, slow_case);
2757
2758  // Update the tlab top pointer.
2759  z_stg(end, Address(thread, JavaThread::tlab_top_offset()));
2760
2761  // Recover var_size_in_bytes if necessary.
2762  if (var_size_in_bytes == end) {
2763    z_sgr(var_size_in_bytes, obj);
2764  }
2765}
2766
2767// Emitter for interface method lookup.
2768//   input: recv_klass, intf_klass, itable_index
2769//   output: method_result
2770//   kills: itable_index, temp1_reg, Z_R0, Z_R1
// TODO: temp2_reg is unused. We may use this emitter also in the itable stubs.
// If the register is still not needed by then, remove it.
2773void MacroAssembler::lookup_interface_method(Register           recv_klass,
2774                                             Register           intf_klass,
2775                                             RegisterOrConstant itable_index,
2776                                             Register           method_result,
2777                                             Register           temp1_reg,
2778                                             Register           temp2_reg,
2779                                             Label&             no_such_interface) {
2780
2781  const Register vtable_len = temp1_reg;    // Used to compute itable_entry_addr.
2782  const Register itable_entry_addr = Z_R1_scratch;
2783  const Register itable_interface = Z_R0_scratch;
2784
2785  BLOCK_COMMENT("lookup_interface_method {");
2786
2787  // Load start of itable entries into itable_entry_addr.
2788  z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
2789  z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
2790
2791  // Loop over all itable entries until the desired interface (intf_klass) is found.
2792  const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
2793
2794  add2reg_with_index(itable_entry_addr,
2795                     vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
2796                     recv_klass, vtable_len);
2797
2798  const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
2799  Label     search;
2800
2801  bind(search);
2802
2803  // Handle IncompatibleClassChangeError.
2804  // If the entry is NULL then we've reached the end of the table
2805  // without finding the expected interface, so throw an exception.
2806  load_and_test_long(itable_interface, Address(itable_entry_addr));
2807  z_bre(no_such_interface);
2808
2809  add2reg(itable_entry_addr, itable_offset_search_inc);
2810  z_cgr(itable_interface, intf_klass);
2811  z_brne(search);
2812
2813  // Entry found; itable_entry_addr points just past it. Get the offset of the vtable for the interface.
2814
2815  const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
2816                                    itableOffsetEntry::interface_offset_in_bytes()) -
2817                                   itable_offset_search_inc;
2818
2819  // Compute the itableMethodEntry and get the method and entry point.
2820  // We use addressing with index and displacement, since the formula
2821  // for computing the entry's offset has a fixed and a dynamic part;
2822  // the latter depends on the matched interface entry and on whether
2823  // the itable index has been passed as a register or as a constant value.
2824  int method_offset = itableMethodEntry::method_offset_in_bytes();
2825                           // Fixed part (displacement), common operand.
2826  Register itable_offset;  // Dynamic part (index register).
2827
2828  if (itable_index.is_register()) {
2829     // Compute the method's offset in that register; for the formula,
2830     // see the else-clause below.
2831     itable_offset = itable_index.as_register();
2832
2833     z_sllg(itable_offset, itable_offset, exact_log2(itableMethodEntry::size() * wordSize));
2834     z_agf(itable_offset, vtable_offset_offset, itable_entry_addr);
2835  } else {
2836    itable_offset = Z_R1_scratch;
2837    // Displacement increases.
2838    method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant();
2839
2840    // Load the interface's vtable offset from the itable.
2841    z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr);
2842  }
2843
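  // In both cases the method entry is addressed below as
  //   recv_klass + itable_offset + method_offset   (base + index + displacement).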
2844  // Finally load the method's oop.
2845  z_lg(method_result, method_offset, itable_offset, recv_klass);
2846  BLOCK_COMMENT("} lookup_interface_method");
2847}
2848
2849// Lookup for virtual method invocation.
2850void MacroAssembler::lookup_virtual_method(Register           recv_klass,
2851                                           RegisterOrConstant vtable_index,
2852                                           Register           method_result) {
2853  assert_different_registers(recv_klass, vtable_index.register_or_noreg());
2854  assert(vtableEntry::size() * wordSize == wordSize,
2855         "else adjust the scaling in the code below");
2856
2857  BLOCK_COMMENT("lookup_virtual_method {");
2858
2859  const int base = in_bytes(Klass::vtable_start_offset());
2860
2861  if (vtable_index.is_constant()) {
2862    // Load with base + disp.
2863    Address vtable_entry_addr(recv_klass,
2864                              vtable_index.as_constant() * wordSize +
2865                              base +
2866                              vtableEntry::method_offset_in_bytes());
2867
2868    z_lg(method_result, vtable_entry_addr);
2869  } else {
2870    // Shift index properly and load with base + index + disp.
2871    Register vindex = vtable_index.as_register();
2872    Address  vtable_entry_addr(recv_klass, vindex,
2873                               base + vtableEntry::method_offset_in_bytes());
2874
2875    z_sllg(vindex, vindex, exact_log2(wordSize));
2876    z_lg(method_result, vtable_entry_addr);
2877  }
2878  BLOCK_COMMENT("} lookup_virtual_method");
2879}
2880
2881// Factor out code to call ic_miss_handler.
2882// Generate code to call the inline cache miss handler.
2883//
2884// In most cases, this code will be generated out-of-line.
2885// The method parameters are intended to provide some variability.
2886//   ICM          - Label which has to be bound to the start of useful code (past any traps).
2887//   trapMarker   - Marking byte for the generated illtrap instructions (if any).
2888//                  Any value except 0x00 is supported.
2889//                  = 0x00 - do not generate illtrap instructions.
2890//                         Use nops to fill unused space.
2891//   requiredSize - required size of the generated code. If the actually
2892//                  generated code is smaller, use padding instructions to fill up.
2893//                  = 0 - no size requirement, no padding.
2894//   scratch      - scratch register to hold branch target address.
2895//
2896//  The method returns the code offset of the bound label.
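//
//  Illustrative use (a sketch only; the call site and the trap marker 0x11
//  are assumptions, not taken from this file):
//    Label ic_miss;
//    unsigned int icm_off = call_ic_miss_handler(ic_miss, 0x11, 0, Z_R1_scratch);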
2897unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch) {
2898  intptr_t startOffset = offset();
2899
2900  // Prevent entry at content_begin().
2901  if (trapMarker != 0) {
2902    z_illtrap(trapMarker);
2903  }
2904
2905  // Load address of inline cache miss code into scratch register
2906  // and branch to cache miss handler.
2907  BLOCK_COMMENT("IC miss handler {");
2908  BIND(ICM);
2909  unsigned int   labelOffset = offset();
2910  AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());
2911
2912  load_const_optimized(scratch, icmiss);
2913  z_br(scratch);
2914
2915  // Fill unused space.
2916  if (requiredSize > 0) {
2917    while ((offset() - startOffset) < requiredSize) {
2918      if (trapMarker == 0) {
2919        z_nop();
2920      } else {
2921        z_illtrap(trapMarker);
2922      }
2923    }
2924  }
2925  BLOCK_COMMENT("} IC miss handler");
2926  return labelOffset;
2927}
2928
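// Unverified Entry Point (UEP) of an nmethod: null-check the receiver in Z_ARG1
// (unless an implicit null check suffices) and compare its klass against the
// klass cached in the inline cache register; branch to ic_miss on mismatch.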
2929void MacroAssembler::nmethod_UEP(Label& ic_miss) {
2930  Register ic_reg       = as_Register(Matcher::inline_cache_reg_encode());
2931  int      klass_offset = oopDesc::klass_offset_in_bytes();
2932  if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
2933    if (VM_Version::has_CompareBranch()) {
2934      z_cgij(Z_ARG1, 0, Assembler::bcondEqual, ic_miss);
2935    } else {
2936      z_ltgr(Z_ARG1, Z_ARG1);
2937      z_bre(ic_miss);
2938    }
2939  }
2940  // Compare cached class against klass from receiver.
2941  compare_klass_ptr(ic_reg, klass_offset, Z_ARG1, false);
2942  z_brne(ic_miss);
2943}
2944
2945void MacroAssembler::check_klass_subtype_fast_path(Register   sub_klass,
2946                                                   Register   super_klass,
2947                                                   Register   temp1_reg,
2948                                                   Label*     L_success,
2949                                                   Label*     L_failure,
2950                                                   Label*     L_slow_path,
2951                                                   RegisterOrConstant super_check_offset) {
2952
2953  const int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
2954  const int sco_offset = in_bytes(Klass::super_check_offset_offset());
2955
2956  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
2957  bool need_slow_path = (must_load_sco ||
2958                         super_check_offset.constant_or_zero() == sc_offset);
2959
2960  // Input registers must not overlap.
2961  assert_different_registers(sub_klass, super_klass, temp1_reg);
2962  if (super_check_offset.is_register()) {
2963    assert_different_registers(sub_klass, super_klass,
2964                               super_check_offset.as_register());
2965  } else if (must_load_sco) {
2966    assert(temp1_reg != noreg, "supply either a temp or a register offset");
2967  }
2968
2969  const Register Rsuper_check_offset = temp1_reg;
2970
2971  NearLabel L_fallthrough;
2972  int label_nulls = 0;
2973  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
2974  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
2975  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
2976  assert(label_nulls <= 1 ||
2977         (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
2978         "at most one NULL in the batch, usually");
2979
2980  BLOCK_COMMENT("check_klass_subtype_fast_path {");
2981  // If the pointers are equal, we are done (e.g., String[] elements).
2982  // This self-check enables sharing of secondary supertype arrays among
2983  // non-primary types such as array-of-interface. Otherwise, each such
2984  // type would need its own customized SSA.
2985  // We move this check to the front of the fast path because many
2986  // type checks are in fact trivially successful in this manner,
2987  // so we get a nicely predicted branch right at the start of the check.
2988  compare64_and_branch(sub_klass, super_klass, bcondEqual, *L_success);
2989
2990  // Check the supertype display, which is uint.
2991  if (must_load_sco) {
2992    z_llgf(Rsuper_check_offset, sco_offset, super_klass);
2993    super_check_offset = RegisterOrConstant(Rsuper_check_offset);
2994  }
2995  Address super_check_addr(sub_klass, super_check_offset, 0);
2996  z_cg(super_klass, super_check_addr); // compare w/ displayed supertype
2997
2998  // This check has worked decisively for primary supers.
2999  // Secondary supers are sought in the super_cache ('super_cache_addr').
3000  // (Secondary supers are interfaces and very deeply nested subtypes.)
3001  // This works in the same check above because of a tricky aliasing
3002  // between the super_cache and the primary super display elements.
3003  // (The 'super_check_addr' can address either, as the case requires.)
3004  // Note that the cache is updated below if it does not help us find
3005  // what we need immediately.
3006  // So if it was a primary super, we can just fail immediately.
3007  // Otherwise, it's the slow path for us (no success at this point).
3008
3009  // Hacked jmp, which may only be used just before L_fallthrough.
3010#define final_jmp(label)                                                \
3011  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
3012  else                            { branch_optimized(Assembler::bcondAlways, label); } /*omit semicolon*/
3013
3014  if (super_check_offset.is_register()) {
3015    branch_optimized(Assembler::bcondEqual, *L_success);
3016    z_cfi(super_check_offset.as_register(), sc_offset);
3017    if (L_failure == &L_fallthrough) {
3018      branch_optimized(Assembler::bcondEqual, *L_slow_path);
3019    } else {
3020      branch_optimized(Assembler::bcondNotEqual, *L_failure);
3021      final_jmp(*L_slow_path);
3022    }
3023  } else if (super_check_offset.as_constant() == sc_offset) {
3024    // Need a slow path; fast failure is impossible.
3025    if (L_slow_path == &L_fallthrough) {
3026      branch_optimized(Assembler::bcondEqual, *L_success);
3027    } else {
3028      branch_optimized(Assembler::bcondNotEqual, *L_slow_path);
3029      final_jmp(*L_success);
3030    }
3031  } else {
3032    // No slow path; it's a fast decision.
3033    if (L_failure == &L_fallthrough) {
3034      branch_optimized(Assembler::bcondEqual, *L_success);
3035    } else {
3036      branch_optimized(Assembler::bcondNotEqual, *L_failure);
3037      final_jmp(*L_success);
3038    }
3039  }
3040
3041  bind(L_fallthrough);
3042#undef local_brc
3043#undef final_jmp
3044  BLOCK_COMMENT("} check_klass_subtype_fast_path");
3045  // fallthru (to slow path)
3046}
3047
3048void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass,
3049                                                   Register Rsuperklass,
3050                                                   Register Rarray_ptr,  // tmp
3051                                                   Register Rlength,     // tmp
3052                                                   Label* L_success,
3053                                                   Label* L_failure) {
3054  // Input registers must not overlap.
3055  // Also check for Z_R1, which is explicitly used here.
3056  assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength);
3057  NearLabel L_fallthrough, L_loop;
3058  int label_nulls = 0;
3059  if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3060  if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3061  assert(label_nulls <= 1, "at most one NULL in the batch");
3062
3063  const int ss_offset = in_bytes(Klass::secondary_supers_offset());
3064  const int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3065
3066  const int length_offset = Array<Klass*>::length_offset_in_bytes();
3067  const int base_offset   = Array<Klass*>::base_offset_in_bytes();
3068
3069  // Hacked jmp, which may only be used just before L_fallthrough.
3070#define final_jmp(label)                                                \
3071  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
3072  else                            branch_optimized(Assembler::bcondAlways, label) /*omit semicolon*/
3073
3074  NearLabel loop_iterate, loop_count, match;
3075
3076  BLOCK_COMMENT("check_klass_subtype_slow_path {");
3077  z_lg(Rarray_ptr, ss_offset, Rsubklass);
3078
3079  load_and_test_int(Rlength, Address(Rarray_ptr, length_offset));
3080  branch_optimized(Assembler::bcondZero, *L_failure);
3081
3082  // Oops in the table are no longer compressed.
3083  z_cg(Rsuperklass, base_offset, Rarray_ptr); // Check array element for match.
3084  z_bre(match);                               // Shortcut for array length = 1.
3085
3086  // No match yet, so we must walk the array's elements.
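  // Scan the remaining elements with a negative byte index in Rlength that
  // z_brxlg advances by BytesPerWord each iteration; element 0 was already
  // checked by the shortcut above.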
3087  z_lngfr(Rlength, Rlength);
3088  z_sllg(Rlength, Rlength, LogBytesPerWord); // -#bytes of cache array
3089  z_llill(Z_R1, BytesPerWord);               // Set increment/end index.
3090  add2reg(Rlength, 2 * BytesPerWord);        // start index  = -(n-2)*BytesPerWord
3091  z_slgr(Rarray_ptr, Rlength);               // start addr: +=  (n-2)*BytesPerWord
3092  z_bru(loop_count);
3093
3094  BIND(loop_iterate);
3095  z_cg(Rsuperklass, base_offset, Rlength, Rarray_ptr); // Check array element for match.
3096  z_bre(match);
3097  BIND(loop_count);
3098  z_brxlg(Rlength, Z_R1, loop_iterate);
3099
3100  // Rsuperklass not found among secondary super classes -> failure.
3101  branch_optimized(Assembler::bcondAlways, *L_failure);
3102
3103  // Got a hit. Return success (zero result). Set cache.
3104  // Cache load doesn't happen here. For speed it is directly emitted by the compiler.
3105
3106  BIND(match);
3107
3108  z_stg(Rsuperklass, sc_offset, Rsubklass); // Save result to cache.
3109
3110  final_jmp(*L_success);
3111
3112  // Exit to the surrounding code.
3113  BIND(L_fallthrough);
3114#undef local_brc
3115#undef final_jmp
3116  BLOCK_COMMENT("} check_klass_subtype_slow_path");
3117}
3118
3119// Emitter for combining fast and slow path.
3120void MacroAssembler::check_klass_subtype(Register sub_klass,
3121                                         Register super_klass,
3122                                         Register temp1_reg,
3123                                         Register temp2_reg,
3124                                         Label&   L_success) {
3125  NearLabel failure;
3126  BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name()));
3127  check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg,
3128                                &L_success, &failure, NULL);
3129  check_klass_subtype_slow_path(sub_klass, super_klass,
3130                                temp1_reg, temp2_reg, &L_success, NULL);
3131  BIND(failure);
3132  BLOCK_COMMENT("} check_klass_subtype");
3133}
3134
3135// Increment a counter at counter_address when the eq condition code is
3136// set. Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
3137void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg) {
3138  Label l;
3139  z_brne(l);
3140  load_const(tmp1_reg, counter_address);
3141  add2mem_32(Address(tmp1_reg), 1, tmp2_reg);
3142  z_cr(tmp1_reg, tmp1_reg); // Set cc to eq.
3143  bind(l);
3144}
3145
3146// Semantics are dependent on the slow_case label:
3147//   If the slow_case label is not NULL, failure to biased-lock the object
3148//   transfers control to the location of the slow_case label. If the
3149//   object could be biased-locked, control is transferred to the done label.
3150//   The condition code is unpredictable.
3151//
3152//   If the slow_case label is NULL, failure to biased-lock the object results
3153//   in a transfer of control to the done label with a condition code of not_equal.
3154//   If the biased lock could be successfully obtained, control is transferred to
3155//   the done label with a condition code of equal.
3156//   It is mandatory to act on the condition code at the done label.
3157//
3158void MacroAssembler::biased_locking_enter(Register  obj_reg,
3159                                          Register  mark_reg,
3160                                          Register  temp_reg,
3161                                          Register  temp2_reg,    // May be Z_R0!
3162                                          Label    &done,
3163                                          Label    *slow_case) {
3164  assert(UseBiasedLocking, "why call this otherwise?");
3165  assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
3166
3167  Label cas_label; // Try, if implemented, CAS locking. Fall thru to slow path otherwise.
3168
3169  BLOCK_COMMENT("biased_locking_enter {");
3170
3171  // Biased locking
3172  // See whether the lock is currently biased toward our thread and
3173  // whether the epoch is still valid.
3174  // Note that the runtime guarantees sufficient alignment of JavaThread
3175  // pointers to allow age to be placed into low bits.
3176  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
3177         "biased locking makes assumptions about bit layout");
3178  z_lr(temp_reg, mark_reg);
3179  z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
3180  z_chi(temp_reg, markOopDesc::biased_lock_pattern);
3181  z_brne(cas_label);  // Try CAS if the object is not biased, i.e. it cannot be biased-locked.
3182
3183  load_prototype_header(temp_reg, obj_reg);
3184  load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
3185
3186  z_ogr(temp_reg, Z_thread);
3187  z_xgr(temp_reg, mark_reg);
3188  z_ngr(temp_reg, temp2_reg);
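  // temp_reg is now zero iff the mark word equals (prototype header | thread),
  // ignoring the age bits, i.e. the object is already biased toward us and the
  // epoch is still valid.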
3189  if (PrintBiasedLockingStatistics) {
3190    increment_counter_eq((address) BiasedLocking::biased_lock_entry_count_addr(), mark_reg, temp2_reg);
3191    // Restore mark_reg.
3192    z_lg(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
3193  }
3194  branch_optimized(Assembler::bcondEqual, done);  // Biased lock obtained, return success.
3195
3196  Label try_revoke_bias;
3197  Label try_rebias;
3198  Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
3199
3200  //----------------------------------------------------------------------------
3201  // At this point we know that the header has the bias pattern and
3202  // that we are not the bias owner in the current epoch. We need to
3203  // figure out more details about the state of the header in order to
3204  // know what operations can be legally performed on the object's
3205  // header.
3206
3207  // If the low three bits in the xor result aren't clear, that means
3208  // the prototype header is no longer biased and we have to revoke
3209  // the bias on this object.
3210  z_tmll(temp_reg, markOopDesc::biased_lock_mask_in_place);
3211  z_brnaz(try_revoke_bias);
3212
3213  // Biasing is still enabled for this data type. See whether the
3214  // epoch of the current bias is still valid, meaning that the epoch
3215  // bits of the mark word are equal to the epoch bits of the
3216  // prototype header. (Note that the prototype header's epoch bits
3217  // only change at a safepoint.) If not, attempt to rebias the object
3218  // toward the current thread. Note that we must be absolutely sure
3219  // that the current epoch is invalid in order to do this because
3220  // otherwise the manipulations it performs on the mark word are
3221  // illegal.
3222  z_tmll(temp_reg, markOopDesc::epoch_mask_in_place);
3223  z_brnaz(try_rebias);
3224
3225  //----------------------------------------------------------------------------
3226  // The epoch of the current bias is still valid but we know nothing
3227  // about the owner; it might be set or it might be clear. Try to
3228  // acquire the bias of the object using an atomic operation. If this
3229  // fails we will go in to the runtime to revoke the object's bias.
3230  // Note that we first construct the presumed unbiased header so we
3231  // don't accidentally blow away another thread's valid bias.
3232  z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place |
3233         markOopDesc::epoch_mask_in_place);
3234  z_lgr(temp_reg, Z_thread);
3235  z_llgfr(mark_reg, mark_reg);
3236  z_ogr(temp_reg, mark_reg);
3237
3238  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3239
3240  z_csg(mark_reg, temp_reg, 0, obj_reg);
3241
3242  // If the biasing toward our thread failed, this means that
3243  // another thread succeeded in biasing it toward itself and we
3244  // need to revoke that bias. The revocation will occur in the
3245  // interpreter runtime in the slow case.
3246
3247  if (PrintBiasedLockingStatistics) {
3248    increment_counter_eq((address) BiasedLocking::anonymously_biased_lock_entry_count_addr(),
3249                         temp_reg, temp2_reg);
3250  }
3251  if (slow_case != NULL) {
3252    branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.
3253  }
3254  branch_optimized(Assembler::bcondAlways, done);           // Biased lock status given in condition code.
3255
3256  //----------------------------------------------------------------------------
3257  bind(try_rebias);
3258  // At this point we know the epoch has expired, meaning that the
3259  // current "bias owner", if any, is actually invalid. Under these
3260  // circumstances _only_, we are allowed to use the current header's
3261  // value as the comparison value when doing the cas to acquire the
3262  // bias in the current epoch. In other words, we allow transfer of
3263  // the bias from one thread to another directly in this situation.
3264
3265  z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
3266  load_prototype_header(temp_reg, obj_reg);
3267  z_llgfr(mark_reg, mark_reg);
3268
3269  z_ogr(temp_reg, Z_thread);
3270
3271  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3272
3273  z_csg(mark_reg, temp_reg, 0, obj_reg);
3274
3275  // If the biasing toward our thread failed, this means that
3276  // another thread succeeded in biasing it toward itself and we
3277  // need to revoke that bias. The revocation will occur in the
3278  // interpreter runtime in the slow case.
3279
3280  if (PrintBiasedLockingStatistics) {
3281    increment_counter_eq((address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg, temp2_reg);
3282  }
3283  if (slow_case != NULL) {
3284    branch_optimized(Assembler::bcondNotEqual, *slow_case);  // Biased lock not obtained, need to go the long way.
3285  }
3286  z_bru(done);           // Biased lock status given in condition code.
3287
3288  //----------------------------------------------------------------------------
3289  bind(try_revoke_bias);
3290  // The prototype mark in the klass doesn't have the bias bit set any
3291  // more, indicating that objects of this data type are not supposed
3292  // to be biased any more. We are going to try to reset the mark of
3293  // this object to the prototype value and fall through to the
3294  // CAS-based locking scheme. Note that if our CAS fails, it means
3295  // that another thread raced us for the privilege of revoking the
3296  // bias of this particular object, so it's okay to continue in the
3297  // normal locking code.
3298  load_prototype_header(temp_reg, obj_reg);
3299
3300  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3301
3302  z_csg(mark_reg, temp_reg, 0, obj_reg);
3303
3304  // Fall through to the normal CAS-based lock, because no matter what
3305  // the result of the above CAS, some thread must have succeeded in
3306  // removing the bias bit from the object's header.
3307  if (PrintBiasedLockingStatistics) {
3308    // z_cgr(mark_reg, temp2_reg);
3309    increment_counter_eq((address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg, temp2_reg);
3310  }
3311
3312  bind(cas_label);
3313  BLOCK_COMMENT("} biased_locking_enter");
3314}
3315
3316void MacroAssembler::biased_locking_exit(Register mark_addr, Register temp_reg, Label& done) {
3317  // Check for the biased locking unlock case, which is a no-op.
3318  // Note: we do not have to check the thread ID for two reasons.
3319  // First, the interpreter checks for IllegalMonitorStateException at
3320  // a higher level. Second, if the bias was revoked while we held the
3321  // lock, the object could not be rebiased toward another thread, so
3322  // the bias bit would be clear.
3323  BLOCK_COMMENT("biased_locking_exit {");
3324
3325  z_lg(temp_reg, 0, mark_addr);
3326  z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
3327
3328  z_chi(temp_reg, markOopDesc::biased_lock_pattern);
3329  z_bre(done);
3330  BLOCK_COMMENT("} biased_locking_exit");
3331}
3332
3333void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3334  Register displacedHeader = temp1;
3335  Register currentHeader = temp1;
3336  Register temp = temp2;
3337  NearLabel done, object_has_monitor;
3338
3339  BLOCK_COMMENT("compiler_fast_lock_object {");
3340
3341  // Load markOop from oop into mark.
3342  z_lg(displacedHeader, 0, oop);
3343
3344  if (try_bias) {
3345    biased_locking_enter(oop, displacedHeader, temp, Z_R0, done);
3346  }
3347
3348  // Handle existing monitor.
3349  if ((EmitSync & 0x01) == 0) {
3350    // The object has an existing monitor iff (mark & monitor_value) != 0.
3351    guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
3352    z_lr(temp, displacedHeader);
3353    z_nill(temp, markOopDesc::monitor_value);
3354    z_brne(object_has_monitor);
3355  }
3356
3357  // Set mark to markOop | markOopDesc::unlocked_value.
3358  z_oill(displacedHeader, markOopDesc::unlocked_value);
3359
3360  // Load Compare Value application register.
3361
3362  // Initialize the box (must happen before we update the object mark).
3363  z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);
3364
3365  // Memory fence is provided by the csg instruction below.
3366  // Compare the object markOop with displacedHeader; if equal, exchange it with box.
3367
3368  // If the compare-and-swap succeeded, then we found an unlocked object and we
3369  // have now locked it.
3370  z_csg(displacedHeader, box, 0, oop);
3371  assert(currentHeader == displacedHeader, "must be same register"); // Both names alias the same register (temp1).
3372  z_bre(done);
3373
3374  // We did not see an unlocked object so try the fast recursive case.
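  // currentHeader still holds the mark word. Subtracting SP and masking with
  // ~(page_size-1) | lock_mask yields zero iff the mark is a stack address
  // near our SP with clear lock bits, i.e. a recursive lock by this thread.
  // The masked value (0 on success) becomes the displaced header in the box.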
3375
3376  z_sgr(currentHeader, Z_SP);
3377  load_const_optimized(temp, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
3378
3379  z_ngr(currentHeader, temp);
3380  //   z_brne(done);
3381  //   z_release();
3382  z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box);
3383
3384  z_bru(done);
3385
3386  if ((EmitSync & 0x01) == 0) {
3387    Register zero = temp;
3388    Register monitor_tagged = displacedHeader; // Tagged with markOopDesc::monitor_value.
3389    bind(object_has_monitor);
3390    // The object's monitor m is unlocked iff m->owner == NULL,
3391    // otherwise m->owner may contain a thread or a stack address.
3392    //
3393    // Try to CAS m->owner from NULL to current thread.
3394    z_lghi(zero, 0);
3395    // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
3396    z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
3397    // Store a non-null value into the box.
3398    z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
3399#ifdef ASSERT
3400      z_brne(done);
3401      // We've acquired the monitor, check some invariants.
3402      // Invariant 1: _recursions should be 0.
3403      asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged,
3404                              "monitor->_recursions should be 0", -1);
3405      z_ltgr(zero, zero); // Set CR=EQ.
3406#endif
3407  }
3408  bind(done);
3409
3410  BLOCK_COMMENT("} compiler_fast_lock_object");
3411  // If locking was successful, CR should indicate 'EQ'.
3412  // The compiler or the native wrapper generates a branch to the runtime call
3413  // _complete_monitor_locking_Java.
3414}
3415
3416void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3417  Register displacedHeader = temp1;
3418  Register currentHeader = temp2;
3419  Register temp = temp1;
3420  Register monitor = temp2;
3421
3422  Label done, object_has_monitor;
3423
3424  BLOCK_COMMENT("compiler_fast_unlock_object {");
3425
3426  if (try_bias) {
3427    biased_locking_exit(oop, currentHeader, done);
3428  }
3429
3430  // Find the lock address and load the displaced header from the stack.
3431  // If the displaced header is zero, we have a recursive unlock.
3432  load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3433  z_bre(done);
3434
3435  // Handle existing monitor.
3436  if ((EmitSync & 0x02) == 0) {
3437    // The object has an existing monitor iff (mark & monitor_value) != 0.
3438    z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);
3439    guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
3440    z_nill(currentHeader, markOopDesc::monitor_value);
3441    z_brne(object_has_monitor);
3442  }
3443
3444  // Check if it is still a lightweight lock. This is true if we see
3445  // the stack address of the basicLock in the markOop of the object.
3446  // Copy box to currentHeader so that csg does not kill it.
3447  z_lgr(currentHeader, box);
3448  z_csg(currentHeader, displacedHeader, 0, oop);
3449  z_bru(done); // Csg sets CR as desired.
3450
3451  // Handle existing monitor.
3452  if ((EmitSync & 0x02) == 0) {
3453    bind(object_has_monitor);
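    // Fast unlock of an inflated monitor: every field tested below must be
    // zero (recursions, owner, EntryList, cxq) or we branch to done with
    // CC != EQ and leave the job to the slow path. If all tests pass, the
    // monitor is released by storing zero into the owner field.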
3454    z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);    // CurrentHeader is tagged with monitor_value set.
3455    load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
3456    z_brne(done);
3457    load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
3458    z_brne(done);
3459    load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
3460    z_brne(done);
3461    load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
3462    z_brne(done);
3463    z_release();
3464    z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader);
3465  }
3466
3467  bind(done);
3468
3469  BLOCK_COMMENT("} compiler_fast_unlock_object");
3470  // flag == EQ indicates success
3471  // flag == NE indicates failure
3472}
3473
3474// Write to card table for modification at store_addr - register is destroyed afterwards.
3475void MacroAssembler::card_write_barrier_post(Register store_addr, Register tmp) {
3476  CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
3477  assert(bs->kind() == BarrierSet::CardTableForRS ||
3478         bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
3479  assert_different_registers(store_addr, tmp);
3480  z_srlg(store_addr, store_addr, CardTableModRefBS::card_shift);
3481  load_absolute_address(tmp, (address)bs->byte_map_base);
3482  z_agr(store_addr, tmp);
3483  z_mvi(0, store_addr, 0); // Store byte 0.
3484}
3485
3486void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
3487  NearLabel Ldone;
3488  z_ltgr(tmp1, value);
3489  z_bre(Ldone);          // Use NULL result as-is.
3490
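  // Clear the weak tag bit(s) so the (possibly tagged) handle can be
  // dereferenced; the saved copy in tmp1 still carries the tag for the
  // jweak test further down.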
3491  z_nill(value, ~JNIHandles::weak_tag_mask);
3492  z_lg(value, 0, value); // Resolve (untagged) jobject.
3493
3494#if INCLUDE_ALL_GCS
3495  if (UseG1GC) {
3496    NearLabel Lnot_weak;
3497    z_tmll(tmp1, JNIHandles::weak_tag_mask); // Test for jweak tag.
3498    z_braz(Lnot_weak);
3499    verify_oop(value);
3500    g1_write_barrier_pre(noreg /* obj */,
3501                         noreg /* offset */,
3502                         value /* pre_val */,
3503                         noreg /* val */,
3504                         tmp1  /* tmp1 */,
3505                         tmp2  /* tmp2 */,
3506                         true  /* pre_val_needed */);
3507    bind(Lnot_weak);
3508  }
3509#endif // INCLUDE_ALL_GCS
3510  verify_oop(value);
3511  bind(Ldone);
3512}
3513
3514#if INCLUDE_ALL_GCS
3515
3516//------------------------------------------------------
3517// General G1 pre-barrier generator.
3518// Purpose: record the previous value if it is not null.
3519// All non-tmps are preserved.
3520//------------------------------------------------------
3521// Note: Rpre_val needs special attention.
3522//   The flag pre_val_needed indicates that the caller of this emitter function
3523//   relies on Rpre_val containing the correct value, that is:
3524//     either the value it contained on entry to this code segment
3525//     or the value that was loaded into the register from (Robj+offset).
3526//
3527//   Independent from this requirement, the contents of Rpre_val must survive
3528//   the push_frame() operation. push_frame() uses Z_R0_scratch by default
3529//   to temporarily remember the frame pointer.
3530//   If Rpre_val is assigned Z_R0_scratch by the caller, code must be emitted to
3531//   save its value.
3532void MacroAssembler::g1_write_barrier_pre(Register           Robj,
3533                                          RegisterOrConstant offset,
3534                                          Register           Rpre_val,      // Ideally, this is a non-volatile register.
3535                                          Register           Rval,          // Will be preserved.
3536                                          Register           Rtmp1,         // If Rpre_val is volatile, either Rtmp1
3537                                          Register           Rtmp2,         // or Rtmp2 has to be non-volatile.
3538                                          bool               pre_val_needed // Save Rpre_val across runtime call, caller uses it.
3539                                       ) {
3540  Label callRuntime, filtered;
3541  const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active());
3542  const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_buf());
3543  const int index_offset  = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_index());
3544  assert_different_registers(Rtmp1, Rtmp2, Z_R0_scratch); // None of the Rtmp<i> must be Z_R0!!
3545  assert_different_registers(Robj, Z_R0_scratch);         // Used for addressing. Furthermore, push_frame destroys Z_R0!!
3546  assert_different_registers(Rval, Z_R0_scratch);         // push_frame destroys Z_R0!!
3547
3548#ifdef ASSERT
3549  // Make sure the register is not Z_R0. It is used for addressing and would be destroyed by push_frame.
3550  if (offset.is_register() && offset.as_register()->encoding() == 0) {
3551    tty->print_cr("Roffset(g1_write_barrier_pre)  = %%r%d", offset.as_register()->encoding());
3552    assert(false, "bad register for offset");
3553  }
3554#endif
3555
3556  BLOCK_COMMENT("g1_write_barrier_pre {");
3557
3558  // Is marking active?
3559  // Note: value is loaded for test purposes only. No further use here.
3560  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
3561    load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
3562  } else {
3563    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
3564    load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
3565  }
3566  z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.
3567
3568  assert(Rpre_val != noreg, "must have a real register");
3569
3570
3571  // If an object is given, we need to load the previous value into Rpre_val.
3572  if (Robj != noreg) {
3573    // Load the previous value...
3574    Register ixReg = offset.is_register() ? offset.register_or_noreg() : Z_R0;
3575    if (UseCompressedOops) {
3576      z_llgf(Rpre_val, offset.constant_or_zero(), ixReg, Robj);
3577    } else {
3578      z_lg(Rpre_val, offset.constant_or_zero(), ixReg, Robj);
3579    }
3580  }
3581
3582  // Is the previous value NULL?
3583  // If so, we don't need to record it and we're done.
3584  // Note: pre_val is loaded, decompressed, and stored (directly or via runtime call).
3585  //       Register contents are preserved across the runtime call if the caller requests it.
3586  z_ltgr(Rpre_val, Rpre_val);
3587  z_bre(filtered); // previous value is NULL, so we don't need to record it.
3588
3589  // Decode the oop now. We know it's not NULL.
3590  if (Robj != noreg && UseCompressedOops) {
3591    oop_decoder(Rpre_val, Rpre_val, /*maybeNULL=*/false);
3592  }
3593
3594  // OK, it's not filtered, so we'll need to call enqueue.
3595
3596  // We can store the original value in the thread's buffer
3597  // only if index > 0. Otherwise, we need the runtime to handle it.
3598  // (The index field is typed as size_t.)
3599  Register Rbuffer = Rtmp1, Rindex = Rtmp2;
3600  assert_different_registers(Rbuffer, Rindex, Rpre_val);
3601
3602  z_lg(Rbuffer, buffer_offset, Z_thread);
3603
3604  load_and_test_long(Rindex, Address(Z_thread, index_offset));
3605  z_bre(callRuntime); // If index == 0, goto runtime.
3606
3607  add2reg(Rindex, -wordSize); // Decrement index.
3608  z_stg(Rindex, index_offset, Z_thread);
3609
3610  // Record the previous value.
3611  z_stg(Rpre_val, 0, Rbuffer, Rindex);
3612  z_bru(filtered);  // We are done.
3613
3614  Rbuffer = noreg;  // end of life
3615  Rindex  = noreg;  // end of life
3616
3617  bind(callRuntime);
3618
3619  // Save some registers (inputs and result) over runtime call
3620  // by spilling them into the top frame.
3621  if (Robj != noreg && Robj->is_volatile()) {
3622    z_stg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
3623  }
3624  if (offset.is_register() && offset.as_register()->is_volatile()) {
3625    Register Roff = offset.as_register();
3626    z_stg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
3627  }
3628  if (Rval != noreg && Rval->is_volatile()) {
3629    z_stg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
3630  }
3631
3632  // Save Rpre_val (result) over runtime call.
3633  Register Rpre_save = Rpre_val;
3634  if ((Rpre_val == Z_R0_scratch) || (pre_val_needed && Rpre_val->is_volatile())) {
3635    guarantee(!Rtmp1->is_volatile() || !Rtmp2->is_volatile(), "oops!");
3636    Rpre_save = !Rtmp1->is_volatile() ? Rtmp1 : Rtmp2;
3637  }
3638  lgr_if_needed(Rpre_save, Rpre_val);
3639
3640  // Push frame to protect top frame with return pc and spilled register values.
3641  save_return_pc();
3642  push_frame_abi160(0); // Will use Z_R0 as tmp.
3643
3644  // Rpre_val may be destroyed by push_frame().
3645  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_save, Z_thread);
3646
3647  pop_frame();
3648  restore_return_pc();
3649
3650  // Restore spilled values.
3651  if (Robj != noreg && Robj->is_volatile()) {
3652    z_lg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
3653  }
3654  if (offset.is_register() && offset.as_register()->is_volatile()) {
3655    Register Roff = offset.as_register();
3656    z_lg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
3657  }
3658  if (Rval != noreg && Rval->is_volatile()) {
3659    z_lg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
3660  }
3661  if (pre_val_needed && Rpre_val->is_volatile()) {
3662    lgr_if_needed(Rpre_val, Rpre_save);
3663  }
3664
3665  bind(filtered);
3666  BLOCK_COMMENT("} g1_write_barrier_pre");
3667}
3668
3669// General G1 post-barrier generator.
3670// Purpose: Store cross-region card.
3671void MacroAssembler::g1_write_barrier_post(Register Rstore_addr,
3672                                           Register Rnew_val,
3673                                           Register Rtmp1,
3674                                           Register Rtmp2,
3675                                           Register Rtmp3) {
3676  Label callRuntime, filtered;
3677
3678  assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3.
3679
3680  G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
3681  assert(bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
3682
3683  BLOCK_COMMENT("g1_write_barrier_post {");
3684
3685  // Does store cross heap regions?
3686  // It does if the two addresses specify different grain addresses.
3687  if (G1RSBarrierRegionFilter) {
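    // XOR the store address with the new value and shift right by the region
    // size log: a zero result means both addresses lie in the same heap
    // region, so no post-barrier work is needed.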
3688    if (VM_Version::has_DistinctOpnds()) {
3689      z_xgrk(Rtmp1, Rstore_addr, Rnew_val);
3690    } else {
3691      z_lgr(Rtmp1, Rstore_addr);
3692      z_xgr(Rtmp1, Rnew_val);
3693    }
3694    z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
3695    z_bre(filtered);
3696  }
3697
3698  // Crosses regions, storing NULL?
3699#ifdef ASSERT
3700  z_ltgr(Rnew_val, Rnew_val);
3701  asm_assert_ne("null oop not allowed (G1)", 0x255); // TODO: also on z? Checked by caller on PPC64, so following branch is obsolete:
3702  z_bre(filtered);  // Safety net: don't break if we have a NULL oop.
3703#endif
3704  Rnew_val = noreg; // end of lifetime
3705
3706  // Storing region crossing non-NULL, is card already dirty?
3707  assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
3708  assert_different_registers(Rtmp1, Rtmp2, Rtmp3);
3709  // Make sure not to use Z_R0 for any of these registers.
3710  Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3;
3711  Register Rbase      = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp3;
3712
3713  // calculate address of card
3714  load_const_optimized(Rbase, (address)bs->byte_map_base);        // Card table base.
3715  z_srlg(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift); // Index into card table.
3716  z_algr(Rcard_addr, Rbase);                                      // Explicit calculation needed for cli.
3717  Rbase = noreg; // end of lifetime
3718
3719  // Filter young.
3720  assert((unsigned int)G1SATBCardTableModRefBS::g1_young_card_val() <= 255, "otherwise check this code");
3721  z_cli(0, Rcard_addr, (int)G1SATBCardTableModRefBS::g1_young_card_val());
3722  z_bre(filtered);
3723
3724  // Check the card value. If dirty, we're done.
3725  // This also avoids false sharing of the (already dirty) card.
3726  z_sync(); // Required to support concurrent cleaning.
3727  assert((unsigned int)CardTableModRefBS::dirty_card_val() <= 255, "otherwise check this code");
3728  z_cli(0, Rcard_addr, CardTableModRefBS::dirty_card_val()); // Reload after membar.
3729  z_bre(filtered);
3730
3731  // Storing a region crossing, non-NULL oop, card is clean.
3732  // Dirty card and log.
3733  z_mvi(0, Rcard_addr, CardTableModRefBS::dirty_card_val());
3734
3735  Register Rcard_addr_x = Rcard_addr;
3736  Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp1;
3737  Register Rqueue_buf   = (Rtmp3 != Z_R0_scratch) ? Rtmp3 : Rtmp1;
3738  const int qidx_off    = in_bytes(JavaThread::dirty_card_queue_offset() + SATBMarkQueue::byte_offset_of_index());
3739  const int qbuf_off    = in_bytes(JavaThread::dirty_card_queue_offset() + SATBMarkQueue::byte_offset_of_buf());
3740  if ((Rcard_addr == Rqueue_buf) || (Rcard_addr == Rqueue_index)) {
3741    Rcard_addr_x = Z_R0_scratch;  // Register shortage. We have to use Z_R0.
3742  }
3743  lgr_if_needed(Rcard_addr_x, Rcard_addr);
3744
3745  load_and_test_long(Rqueue_index, Address(Z_thread, qidx_off));
3746  z_bre(callRuntime); // If index == 0, jump to runtime.
3747
3748  z_lg(Rqueue_buf, qbuf_off, Z_thread);
3749
3750  add2reg(Rqueue_index, -wordSize); // Decrement index.
3751  z_stg(Rqueue_index, qidx_off, Z_thread);
3752
3753  z_stg(Rcard_addr_x, 0, Rqueue_index, Rqueue_buf); // Store card.
3754  z_bru(filtered);
3755
3756  bind(callRuntime);
3757
3758  // TODO: do we need a frame? Introduced to be on the safe side.
3759  bool needs_frame = true;
3760  lgr_if_needed(Rcard_addr, Rcard_addr_x); // copy back asap. push_frame will destroy Z_R0_scratch!
3761
3762  // The VM call needs a frame to access (write) registers.
3763  if (needs_frame) {
3764    save_return_pc();
3765    push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs.
3766  }
3767
3768  // Save the live input values.
3769  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, Z_thread);
3770
3771  if (needs_frame) {
3772    pop_frame();
3773    restore_return_pc();
3774  }
3775
3776  bind(filtered);
3777
3778  BLOCK_COMMENT("} g1_write_barrier_post");
3779}
3780#endif // INCLUDE_ALL_GCS
3781
3782// Last_Java_sp must comply to the rules in frame_s390.hpp.
3783void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) {
3784  BLOCK_COMMENT("set_last_Java_frame {");
3785
3786  // Always set last_Java_pc and flags first because once last_Java_sp
3787  // is visible, has_last_Java_frame is true and users will look at the
3788  // rest of the fields. (Note: flags should always be zero before we
3789  // get here, so they don't need to be set.)
3790
3791  // Verify that last_Java_pc was zeroed on return to Java.
3792  if (allow_relocation) {
3793    asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()),
3794                            Z_thread,
3795                            "last_Java_pc not zeroed before leaving Java",
3796                            0x200);
3797  } else {
3798    asm_assert_mem8_is_zero_static(in_bytes(JavaThread::last_Java_pc_offset()),
3799                                   Z_thread,
3800                                   "last_Java_pc not zeroed before leaving Java",
3801                                   0x200);
3802  }
3803
3804  // When returning from calling out from Java mode the frame anchor's
3805  // last_Java_pc will always be set to NULL. It is set here so that
3806  // if we are doing a call to native (not VM) that we capture the
3807  // known pc and don't have to rely on the native call having a
3808  // standard frame linkage where we can find the pc.
3809  if (last_Java_pc != noreg) {
3810    z_stg(last_Java_pc, Address(Z_thread, JavaThread::last_Java_pc_offset()));
3811  }
3812
3813  // This membar release is not required on z/Architecture, since the sequence of stores
3814  // is maintained. Nevertheless, we leave it in to document the required ordering.
3815  // The implementation of z_release() should be empty.
3816  // z_release();
3817
3818  z_stg(last_Java_sp, Address(Z_thread, JavaThread::last_Java_sp_offset()));
3819  BLOCK_COMMENT("} set_last_Java_frame");
3820}
3821
3822void MacroAssembler::reset_last_Java_frame(bool allow_relocation) {
3823  BLOCK_COMMENT("reset_last_Java_frame {");
3824
3825  if (allow_relocation) {
3826    asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
3827                               Z_thread,
3828                               "SP was not set, still zero",
3829                               0x202);
3830  } else {
3831    asm_assert_mem8_isnot_zero_static(in_bytes(JavaThread::last_Java_sp_offset()),
3832                                      Z_thread,
3833                                      "SP was not set, still zero",
3834                                      0x202);
3835  }
3836
3837  // _last_Java_sp = 0
3838  // Clearing storage must be atomic here, so don't use clear_mem()!
3839  store_const(Address(Z_thread, JavaThread::last_Java_sp_offset()), 0);
3840
3841  // _last_Java_pc = 0
3842  store_const(Address(Z_thread, JavaThread::last_Java_pc_offset()), 0);
3843
3844  BLOCK_COMMENT("} reset_last_Java_frame");
3845  return;
3846}
3847
3848void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation) {
3849  assert_different_registers(sp, tmp1);
3850
3851  // We cannot trust that code generated by the C++ compiler saves R14
3852  // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
3853  // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
3854  // Therefore we load the PC into tmp1 and let set_last_Java_frame() save
3855  // it into the frame anchor.
3856  get_PC(tmp1);
3857  set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1, allow_relocation);
3858}
3859
3860void MacroAssembler::set_thread_state(JavaThreadState new_state) {
3861  z_release();
3862
3863  assert(Immediate::is_uimm16(_thread_max_state), "enum value out of range for instruction");
3864  assert(sizeof(JavaThreadState) == sizeof(int), "enum value must have base type int");
3865  store_const(Address(Z_thread, JavaThread::thread_state_offset()), new_state, Z_R0, false);
3866}
3867
3868void MacroAssembler::get_vm_result(Register oop_result) {
3869  verify_thread();
3870
3871  z_lg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3872  clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(void*));
3873
3874  verify_oop(oop_result);
3875}
3876
3877void MacroAssembler::get_vm_result_2(Register result) {
3878  verify_thread();
3879
3880  z_lg(result, Address(Z_thread, JavaThread::vm_result_2_offset()));
3881  clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(void*));
3882}
3883
3884// We require that C code which does not return a value in vm_result will
3885// leave it undisturbed.
3886void MacroAssembler::set_vm_result(Register oop_result) {
3887  z_stg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3888}
3889
3890// Explicit null checks (used for method handle code).
3891void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) {
3892  if (!ImplicitNullChecks) {
3893    NearLabel ok;
3894
3895    compare64_and_branch(reg, (intptr_t) 0, Assembler::bcondNotEqual, ok);
3896
3897    // We just put the address into reg if it was 0 (tmp==Z_R0 is allowed so we can't use it for the address).
3898    address exception_entry = Interpreter::throw_NullPointerException_entry();
3899    load_absolute_address(reg, exception_entry);
3900    z_br(reg);
3901
3902    bind(ok);
3903  } else {
3904    if (needs_explicit_null_check((intptr_t)offset)) {
3905      // Provoke OS NULL exception if reg = NULL by
3906      // accessing M[reg] w/o changing any registers.
3907      z_lg(tmp, 0, reg);
3908    }
3909    // else
3910      // Nothing to do, (later) access of M[reg + offset]
3911      // will provoke OS NULL exception if reg = NULL.
3912  }
3913}
3914
3915//-------------------------------------
3916//  Compressed Klass Pointers
3917//-------------------------------------
3918
3919// Klass oop manipulations if compressed.
3920void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3921  Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.
3922  address  base    = Universe::narrow_klass_base();
3923  int      shift   = Universe::narrow_klass_shift();
3924  assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3925
3926  BLOCK_COMMENT("cKlass encoder {");
3927
3928#ifdef ASSERT
3929  Label ok;
3930  z_tmll(current, KlassAlignmentInBytes-1); // Check alignment.
3931  z_brc(Assembler::bcondAllZero, ok);
3932  // The plain disassembler does not recognize illtrap. It instead displays
3933  // a 32-bit value. Issuing two illtraps ensures that the disassembler finds
3934  // the proper beginning of the next instruction.
3935  z_illtrap(0xee);
3936  z_illtrap(0xee);
3937  bind(ok);
3938#endif
3939
3940  if (base != NULL) {
3941    unsigned int base_h = ((unsigned long)base)>>32;
3942    unsigned int base_l = (unsigned int)((unsigned long)base);
3943    if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3944      lgr_if_needed(dst, current);
3945      z_aih(dst, -((int)base_h));     // Base has no set bits in lower half.
3946    } else if ((base_h == 0) && (base_l != 0)) {
3947      lgr_if_needed(dst, current);
3948      z_agfi(dst, -(int)base_l);
3949    } else {
3950      load_const(Z_R0, base);
3951      lgr_if_needed(dst, current);
3952      z_sgr(dst, Z_R0);
3953    }
3954    current = dst;
3955  }
3956  if (shift != 0) {
3957    assert (LogKlassAlignmentInBytes == shift, "decode alg wrong");
3958    z_srlg(dst, current, shift);
3959    current = dst;
3960  }
3961  lgr_if_needed(dst, current); // Move may still be required (if both base and shift are zero, nothing above has moved current to dst).
3962
3963  BLOCK_COMMENT("} cKlass encoder");
3964}
3965
3966// This function calculates the size of the code generated by
3967//   decode_klass_not_null(register dst, Register src)
3968// when (Universe::heap() != NULL). Hence, if the instructions
3969// it generates change, then this method needs to be updated.
3970int MacroAssembler::instr_size_for_decode_klass_not_null() {
3971  address  base    = Universe::narrow_klass_base();
3972  int shift_size   = Universe::narrow_klass_shift() == 0 ? 0 : 6; /* sllg */
3973  int addbase_size = 0;
3974  assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3975
3976  if (base != NULL) {
3977    unsigned int base_h = ((unsigned long)base)>>32;
3978    unsigned int base_l = (unsigned int)((unsigned long)base);
3979    if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3980      addbase_size += 6; /* aih */
3981    } else if ((base_h == 0) && (base_l != 0)) {
3982      addbase_size += 6; /* algfi */
3983    } else {
3984      addbase_size += load_const_size();
3985      addbase_size += 4; /* algr */
3986    }
3987  }
3988#ifdef ASSERT
3989  addbase_size += 10;
3990  addbase_size += 2; // Extra sigill.
3991#endif
3992  return addbase_size + shift_size;
3993}
3994
3995// !!! If the instructions that get generated here change
3996//     then function instr_size_for_decode_klass_not_null()
3997//     needs to get updated.
3998// This variant of decode_klass_not_null() must generate predictable code!
3999// The code must only depend on globally known parameters.
4000void MacroAssembler::decode_klass_not_null(Register dst) {
4001  address  base    = Universe::narrow_klass_base();
4002  int      shift   = Universe::narrow_klass_shift();
4003  int      beg_off = offset();
4004  assert(UseCompressedClassPointers, "only for compressed klass ptrs");
4005
4006  BLOCK_COMMENT("cKlass decoder (const size) {");
4007
4008  if (shift != 0) { // Shift required?
4009    z_sllg(dst, dst, shift);
4010  }
4011  if (base != NULL) {
4012    unsigned int base_h = ((unsigned long)base)>>32;
4013    unsigned int base_l = (unsigned int)((unsigned long)base);
4014    if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
4015      z_aih(dst, base_h);     // Base has no set bits in lower half.
4016    } else if ((base_h == 0) && (base_l != 0)) {
4017      z_algfi(dst, base_l);   // Base has no set bits in upper half.
4018    } else {
4019      load_const(Z_R0, base); // Base has set bits everywhere.
4020      z_algr(dst, Z_R0);
4021    }
4022  }
4023
4024#ifdef ASSERT
4025  Label ok;
4026  z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
4027  z_brc(Assembler::bcondAllZero, ok);
4028  // The plain disassembler does not recognize illtrap. It instead displays
4029  // a 32-bit value. Issuing two illtraps ensures the disassembler finds
4030  // the proper beginning of the next instruction.
4031  z_illtrap(0xd1);
4032  z_illtrap(0xd1);
4033  bind(ok);
4034#endif
4035  assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch.");
4036
4037  BLOCK_COMMENT("} cKlass decoder (const size)");
4038}
4039
4040// This variant of decode_klass_not_null() is for cases where
4041//  1) the size of the generated instructions may vary
4042//  2) the result is (potentially) stored in a register different from the source.
4043void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
4044  address base  = Universe::narrow_klass_base();
4045  int     shift = Universe::narrow_klass_shift();
4046  assert(UseCompressedClassPointers, "only for compressed klass ptrs");
4047
4048  BLOCK_COMMENT("cKlass decoder {");
4049
4050  if (src == noreg) src = dst;
4051
4052  if (shift != 0) { // Shift or at least move required?
4053    z_sllg(dst, src, shift);
4054  } else {
4055    lgr_if_needed(dst, src);
4056  }
4057
4058  if (base != NULL) {
4059    unsigned int base_h = ((unsigned long)base)>>32;
4060    unsigned int base_l = (unsigned int)((unsigned long)base);
4061    if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
4062      z_aih(dst, base_h);     // Base has no set bits in lower half.
4063    } else if ((base_h == 0) && (base_l != 0)) {
4064      z_algfi(dst, base_l);   // Base has no set bits in upper half.
4065    } else {
4066      load_const_optimized(Z_R0, base); // Base has set bits everywhere.
4067      z_algr(dst, Z_R0);
4068    }
4069  }
4070
4071#ifdef ASSERT
4072  Label ok;
4073  z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
4074  z_brc(Assembler::bcondAllZero, ok);
4075  // The plain disassembler does not recognize illtrap. It instead displays
4076  // a 32-bit value. Issuing two illtraps ensures the disassembler finds
4077  // the proper beginning of the next instruction.
4078  z_illtrap(0xd2);
4079  z_illtrap(0xd2);
4080  bind(ok);
4081#endif
4082  BLOCK_COMMENT("} cKlass decoder");
4083}
4084
4085void MacroAssembler::load_klass(Register klass, Address mem) {
4086  if (UseCompressedClassPointers) {
4087    z_llgf(klass, mem);
4088    // Attention: no null check here!
4089    decode_klass_not_null(klass);
4090  } else {
4091    z_lg(klass, mem);
4092  }
4093}
4094
4095void MacroAssembler::load_klass(Register klass, Register src_oop) {
4096  if (UseCompressedClassPointers) {
4097    z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop);
4098    // Attention: no null check here!
4099    decode_klass_not_null(klass);
4100  } else {
4101    z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop);
4102  }
4103}
4104
4105void MacroAssembler::load_prototype_header(Register Rheader, Register Rsrc_oop) {
4106  assert_different_registers(Rheader, Rsrc_oop);
4107  load_klass(Rheader, Rsrc_oop);
4108  z_lg(Rheader, Address(Rheader, Klass::prototype_header_offset()));
4109}
4110
4111void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) {
4112  if (UseCompressedClassPointers) {
4113    assert_different_registers(dst_oop, klass, Z_R0);
4114    if (ck == noreg) ck = klass;
4115    encode_klass_not_null(ck, klass);
4116    z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
4117  } else {
4118    z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
4119  }
4120}
4121
4122void MacroAssembler::store_klass_gap(Register s, Register d) {
4123  if (UseCompressedClassPointers) {
4124    assert(s != d, "not enough registers");
4125    // Support s = noreg.
4126    if (s != noreg) {
4127      z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes()));
4128    } else {
4129      z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0);
4130    }
4131  }
4132}
4133
4134// Compare klass ptr in memory against klass ptr in register.
4135//
4136// Rop1            - klass in register, always uncompressed.
4137// disp            - Offset of klass in memory, compressed/uncompressed, depending on runtime flag.
4138// Rbase           - Base address of cKlass in memory.
4139// maybeNULL       - True if Rop1 may be NULL.
4140void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) {
4141
4142  BLOCK_COMMENT("compare klass ptr {");
4143
4144  if (UseCompressedClassPointers) {
4145    const int shift = Universe::narrow_klass_shift();
4146    address   base  = Universe::narrow_klass_base();
4147
4148    assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift");
4149    assert_different_registers(Rop1, Z_R0);
4150    assert_different_registers(Rop1, Rbase, Z_R1);
4151
4152    // First encode the klass in the register, then compare it with the cKlass in memory.
4153    // This sequence saves an unnecessary cKlass load and decode.
4154    if (base == NULL) {
4155      if (shift == 0) {
4156        z_cl(Rop1, disp, Rbase);     // Unscaled
4157      } else {
4158        z_srlg(Z_R0, Rop1, shift);   // ZeroBased
4159        z_cl(Z_R0, disp, Rbase);
4160      }
4161    } else {                         // HeapBased
4162#ifdef ASSERT
4163      bool     used_R0 = true;
4164      bool     used_R1 = true;
4165#endif
4166      Register current = Rop1;
4167      Label    done;
4168
4169      if (maybeNULL) {       // NULL ptr must be preserved!
4170        z_ltgr(Z_R0, current);
4171        z_bre(done);
4172        current = Z_R0;
4173      }
4174
4175      unsigned int base_h = ((unsigned long)base)>>32;
4176      unsigned int base_l = (unsigned int)((unsigned long)base);
4177      if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
4178        lgr_if_needed(Z_R0, current);
4179        z_aih(Z_R0, -((int)base_h));     // Base has no set bits in lower half.
4180      } else if ((base_h == 0) && (base_l != 0)) {
4181        lgr_if_needed(Z_R0, current);
4182        z_agfi(Z_R0, -(int)base_l);
4183      } else {
4184        int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
4185        add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement.
4186      }
4187
4188      if (shift != 0) {
4189        z_srlg(Z_R0, Z_R0, shift);
4190      }
4191      bind(done);
4192      z_cl(Z_R0, disp, Rbase);
4193#ifdef ASSERT
4194      if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
4195      if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
4196#endif
4197    }
4198  } else {
4199    z_clg(Rop1, disp, Z_R0, Rbase);
4200  }
4201  BLOCK_COMMENT("} compare klass ptr");
4202}
4203
4204//---------------------------
4205//  Compressed oops
4206//---------------------------
4207
4208void MacroAssembler::encode_heap_oop(Register oop) {
4209  oop_encoder(oop, oop, true /*maybe null*/);
4210}
4211
4212void MacroAssembler::encode_heap_oop_not_null(Register oop) {
4213  oop_encoder(oop, oop, false /*not null*/);
4214}
4215
4216// Called with something derived from the oop base, e.g. oop_base>>3.
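// Example (illustrative): for oop_base == 0x7FFFF000 the 16-bit parts are
// (ll, lh, hl, hh) = (0xF000, 0x7FFF, 0x0000, 0x0000), i.e. two nonzero parts.
// Adding pow2_offset = 0x1000 yields 0x80000000 with a single nonzero part,
// so -0x1000 is returned to the caller.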
4217int MacroAssembler::get_oop_base_pow2_offset(uint64_t oop_base) {
4218  unsigned int oop_base_ll = ((unsigned int)(oop_base >>  0)) & 0xffff;
4219  unsigned int oop_base_lh = ((unsigned int)(oop_base >> 16)) & 0xffff;
4220  unsigned int oop_base_hl = ((unsigned int)(oop_base >> 32)) & 0xffff;
4221  unsigned int oop_base_hh = ((unsigned int)(oop_base >> 48)) & 0xffff;
4222  unsigned int n_notzero_parts = (oop_base_ll == 0 ? 0:1)
4223                               + (oop_base_lh == 0 ? 0:1)
4224                               + (oop_base_hl == 0 ? 0:1)
4225                               + (oop_base_hh == 0 ? 0:1);
4226
4227  assert(oop_base != 0, "This is for HeapBased cOops only");
4228
4229  if (n_notzero_parts != 1) { //  Check if oop_base is just a few pages shy of a power of 2.
4230    uint64_t pow2_offset = 0x10000 - oop_base_ll;
4231    if (pow2_offset < 0x8000) {  // This might not be necessary.
4232      uint64_t oop_base2 = oop_base + pow2_offset;
4233
4234      oop_base_ll = ((unsigned int)(oop_base2 >>  0)) & 0xffff;
4235      oop_base_lh = ((unsigned int)(oop_base2 >> 16)) & 0xffff;
4236      oop_base_hl = ((unsigned int)(oop_base2 >> 32)) & 0xffff;
4237      oop_base_hh = ((unsigned int)(oop_base2 >> 48)) & 0xffff;
4238      n_notzero_parts = (oop_base_ll == 0 ? 0:1) +
4239                        (oop_base_lh == 0 ? 0:1) +
4240                        (oop_base_hl == 0 ? 0:1) +
4241                        (oop_base_hh == 0 ? 0:1);
4242      if (n_notzero_parts == 1) {
4243        assert(-(int64_t)pow2_offset != (int64_t)-1, "We use -1 to signal uninitialized base register");
4244        return -pow2_offset;
4245      }
4246    }
4247  }
4248  return 0;
4249}
4250
4251// If base address is offset from a straight power of two by just a few pages,
4252// return this offset to the caller for a possible later composite add.
4253// TODO/FIX: will only work correctly for 4k pages.
4254int MacroAssembler::get_oop_base(Register Rbase, uint64_t oop_base) {
4255  int pow2_offset = get_oop_base_pow2_offset(oop_base);
4256
4257  load_const_optimized(Rbase, oop_base - pow2_offset); // Best job possible.
4258
4259  return pow2_offset;
4260}
4261
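// Like get_oop_base(), but loads the negated base. This lets callers subtract the
// oop base from an address by adding the complement (see the add2reg_with_index uses).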
4262int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) {
4263  int offset = get_oop_base(Rbase, oop_base);
4264  z_lcgr(Rbase, Rbase);
4265  return -offset;
4266}
4267
4268// Compare compressed oop in memory against oop in register.
4269// Rop1            - Oop in register.
4270// disp            - Offset of cOop in memory.
4271// Rbase           - Base address of cOop in memory.
4272// maybeNULL       - True if Rop1 may be NULL.
4274void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) {
4275  Register Rbase  = mem.baseOrR0();
4276  Register Rindex = mem.indexOrR0();
4277  int64_t  disp   = mem.disp();
4278
4279  const int shift = Universe::narrow_oop_shift();
4280  address   base  = Universe::narrow_oop_base();
4281
4282  assert(UseCompressedOops, "must be on to call this method");
4283  assert(Universe::heap() != NULL, "java heap must be initialized to call this method");
4284  assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
4285  assert_different_registers(Rop1, Z_R0);
4286  assert_different_registers(Rop1, Rbase, Z_R1);
4287  assert_different_registers(Rop1, Rindex, Z_R1);
4288
4289  BLOCK_COMMENT("compare heap oop {");
4290
4291  // First encode register oop and then compare with cOop in memory.
4292  // This sequence saves an unnecessary cOop load and decode.
4293  if (base == NULL) {
4294    if (shift == 0) {
4295      z_cl(Rop1, disp, Rindex, Rbase);  // Unscaled
4296    } else {
4297      z_srlg(Z_R0, Rop1, shift);        // ZeroBased
4298      z_cl(Z_R0, disp, Rindex, Rbase);
4299    }
4300  } else {                              // HeapBased
4301#ifdef ASSERT
4302    bool  used_R0 = true;
4303    bool  used_R1 = true;
4304#endif
4305    Label done;
4306    int   pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
4307
4308    if (maybeNULL) {       // NULL ptr must be preserved!
4309      z_ltgr(Z_R0, Rop1);
4310      z_bre(done);
4311    }
4312
4313    add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1);
4314    z_srlg(Z_R0, Z_R0, shift);
4315
4316    bind(done);
4317    z_cl(Z_R0, disp, Rindex, Rbase);
4318#ifdef ASSERT
4319    if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
4320    if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
4321#endif
4322  }
4323  BLOCK_COMMENT("} compare heap oop");
4324}
4325
4326// Load heap oop and decompress, if necessary.
4327void  MacroAssembler::load_heap_oop(Register dest, const Address &a) {
4328  if (UseCompressedOops) {
4329    z_llgf(dest, a.disp(), a.indexOrR0(), a.baseOrR0());
4330    oop_decoder(dest, dest, true);
4331  } else {
4332    z_lg(dest, a.disp(), a.indexOrR0(), a.baseOrR0());
4333  }
4334}
4335
4336// Load heap oop and decompress, if necessary.
4337void MacroAssembler::load_heap_oop(Register dest, int64_t disp, Register base) {
4338  if (UseCompressedOops) {
4339    z_llgf(dest, disp, base);
4340    oop_decoder(dest, dest, true);
4341  } else {
4342    z_lg(dest, disp, base);
4343  }
4344}
4345
4346// Load heap oop and decompress, if necessary.
4347void MacroAssembler::load_heap_oop_not_null(Register dest, int64_t disp, Register base) {
4348  if (UseCompressedOops) {
4349    z_llgf(dest, disp, base);
4350    oop_decoder(dest, dest, false);
4351  } else {
4352    z_lg(dest, disp, base);
4353  }
4354}
4355
4356// Compress, if necessary, and store oop to heap.
4357void MacroAssembler::store_heap_oop(Register Roop, RegisterOrConstant offset, Register base) {
4358  Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0;
4359  if (UseCompressedOops) {
4360    assert_different_registers(Roop, offset.register_or_noreg(), base);
4361    encode_heap_oop(Roop);
4362    z_st(Roop, offset.constant_or_zero(), Ridx, base);
4363  } else {
4364    z_stg(Roop, offset.constant_or_zero(), Ridx, base);
4365  }
4366}
4367
4368// Compress, if necessary, and store oop to heap. Oop is guaranteed to be not NULL.
4369void MacroAssembler::store_heap_oop_not_null(Register Roop, RegisterOrConstant offset, Register base) {
4370  Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0;
4371  if (UseCompressedOops) {
4372    assert_different_registers(Roop, offset.register_or_noreg(), base);
4373    encode_heap_oop_not_null(Roop);
4374    z_st(Roop, offset.constant_or_zero(), Ridx, base);
4375  } else {
4376    z_stg(Roop, offset.constant_or_zero(), Ridx, base);
4377  }
4378}
4379
4380// Store NULL oop to heap.
4381void MacroAssembler::store_heap_oop_null(Register zero, RegisterOrConstant offset, Register base) {
4382  Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0;
4383  if (UseCompressedOops) {
4384    z_st(zero, offset.constant_or_zero(), Ridx, base);
4385  } else {
4386    z_stg(zero, offset.constant_or_zero(), Ridx, base);
4387  }
4388}
4389
4390//-------------------------------------------------
4391// Encode compressed oop. Generally usable encoder.
4392//-------------------------------------------------
4393// Rsrc - contains regular oop on entry. It remains unchanged.
4394// Rdst - contains compressed oop on exit.
4395// Rdst and Rsrc may indicate same register, in which case Rsrc is overwritten.
4396//
4397// Rdst must not indicate scratch register Z_R1 (Z_R1_scratch) for functionality.
4398// Rdst should not indicate scratch register Z_R0 (Z_R0_scratch) for performance.
4399//
4400// only32bitValid is set if later code only uses the lower 32 bits. In this
4401// case we need not fix up the upper 32 bits.
4402void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
4403                                 Register Rbase, int pow2_offset, bool only32bitValid) {
4404
4405  const address oop_base  = Universe::narrow_oop_base();
4406  const int     oop_shift = Universe::narrow_oop_shift();
4407  const bool    disjoint  = Universe::narrow_oop_base_disjoint();
4408
4409  assert(UseCompressedOops, "must be on to call this method");
4410  assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder");
4411  assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
4412
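  // Encoding computes narrowOop = (oop - base) >> shift. If the base is NULL or
  // disjoint from the oop address range, no subtraction is emitted; at most the
  // upper 32 bits are cleared.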
4413  if (disjoint || (oop_base == NULL)) {
4414    BLOCK_COMMENT("cOop encoder zeroBase {");
4415    if (oop_shift == 0) {
4416      if (oop_base != NULL && !only32bitValid) {
4417        z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again.
4418      } else {
4419        lgr_if_needed(Rdst, Rsrc);
4420      }
4421    } else {
4422      z_srlg(Rdst, Rsrc, oop_shift);
4423      if (oop_base != NULL && !only32bitValid) {
4424        z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4425      }
4426    }
4427    BLOCK_COMMENT("} cOop encoder zeroBase");
4428    return;
4429  }
4430
4431  bool used_R0 = false;
4432  bool used_R1 = false;
4433
4434  BLOCK_COMMENT("cOop encoder general {");
4435  assert_different_registers(Rdst, Z_R1);
4436  assert_different_registers(Rsrc, Rbase);
4437  if (maybeNULL) {
4438    Label done;
4439    // We reorder shifting and subtracting, so that we can compare
4440    // and shift in parallel:
4441    //
4442    // cycle 0:  potential LoadN, base = <const>
4443    // cycle 1:  base = !base     dst = src >> 3,    cmp cr = (src != 0)
4444    // cycle 2:  if (cr) br,      dst = dst + base + offset
4445
4446    // Get oop_base components.
4447    if (pow2_offset == -1) {
4448      if (Rdst == Rbase) {
4449        if (Rdst == Z_R1 || Rsrc == Z_R1) {
4450          Rbase = Z_R0;
4451          used_R0 = true;
4452        } else {
4453          Rdst = Z_R1;
4454          used_R1 = true;
4455        }
4456      }
4457      if (Rbase == Z_R1) {
4458        used_R1 = true;
4459      }
4460      pow2_offset = get_oop_base_complement(Rbase, ((uint64_t)(intptr_t)oop_base) >> oop_shift);
4461    }
4462    assert_different_registers(Rdst, Rbase);
4463
4464    // Check for NULL oop (must be left alone) and shift.
4465    if (oop_shift != 0) {  // Shift out alignment bits
4466      if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set.
4467        z_srag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4468      } else {
4469        z_srlg(Rdst, Rsrc, oop_shift);
4470        z_ltgr(Rsrc, Rsrc);  // This is the recommended way of testing for zero.
4471        // Using z_cghi(Rsrc, 0) instead might seem faster, as it does not write
4472        // a register, but it is not.
4473      }
4474    } else {
4475      z_ltgr(Rdst, Rsrc);   // Move NULL to result register.
4476    }
4477    z_bre(done);
4478
4479    // Subtract oop_base components.
4480    if ((Rdst == Z_R0) || (Rbase == Z_R0)) {
4481      z_algr(Rdst, Rbase);
4482      if (pow2_offset != 0) { add2reg(Rdst, pow2_offset); }
4483    } else {
4484      add2reg_with_index(Rdst, pow2_offset, Rbase, Rdst);
4485    }
4486    if (!only32bitValid) {
4487      z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4488    }
4489    bind(done);
4490
4491  } else {  // not null
4492    // Get oop_base components.
4493    if (pow2_offset == -1) {
4494      pow2_offset = get_oop_base_complement(Rbase, (uint64_t)(intptr_t)oop_base);
4495    }
4496
4497    // Subtract oop_base components and shift.
4498    if (Rdst == Z_R0 || Rsrc == Z_R0 || Rbase == Z_R0) {
4499      // Don't use lay instruction.
4500      if (Rdst == Rsrc) {
4501        z_algr(Rdst, Rbase);
4502      } else {
4503        lgr_if_needed(Rdst, Rbase);
4504        z_algr(Rdst, Rsrc);
4505      }
4506      if (pow2_offset != 0) add2reg(Rdst, pow2_offset);
4507    } else {
4508      add2reg_with_index(Rdst, pow2_offset, Rbase, Rsrc);
4509    }
4510    if (oop_shift != 0) {   // Shift out alignment bits.
4511      z_srlg(Rdst, Rdst, oop_shift);
4512    }
4513    if (!only32bitValid) {
4514      z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4515    }
4516  }
4517#ifdef ASSERT
4518  if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb01bUL, 2); }
4519  if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb02bUL, 2); }
4520#endif
4521  BLOCK_COMMENT("} cOop encoder general");
4522}
4523
4524//-------------------------------------------------
4525// Decode compressed oop. Generally usable decoder.
4526//-------------------------------------------------
4527// Rsrc - contains compressed oop on entry.
4528// Rdst - contains regular oop on exit.
4529// Rdst and Rsrc may indicate same register.
4530// Rdst must not be the same register as Rbase, if Rbase was preloaded (before call).
4531// Rdst can be the same register as Rbase. Then, either Z_R0 or Z_R1 must be available as scratch.
4532// Rbase - register to use for the base
4533// pow2_offset - offset of base to nice value. If -1, base must be loaded.
4534// For performance, it is good to
4535//  - avoid Z_R0 for any of the argument registers.
4536//  - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance.
4537//  - avoid Z_R1 for Rdst if Rdst == Rbase.
4538void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) {
4539
4540  const address oop_base  = Universe::narrow_oop_base();
4541  const int     oop_shift = Universe::narrow_oop_shift();
4542  const bool    disjoint  = Universe::narrow_oop_base_disjoint();
4543
4544  assert(UseCompressedOops, "must be on to call this method");
4545  assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder");
4546  assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes),
4547         "cOop encoder detected bad shift");
4548
4549  // cOops are always loaded zero-extended from memory. No explicit zero-extension necessary.
4550
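  // Decoding computes oop = (narrowOop << shift) + base. With maybeNULL, a
  // narrowOop of 0 must decode to NULL, so the add/OR is skipped for 0.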
4551  if (oop_base != NULL) {
4552    unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff;
4553    unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff;
4554    unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff;
4555    if (disjoint && (oop_base_hl == 0 || oop_base_hh == 0)) {
4556      BLOCK_COMMENT("cOop decoder disjointBase {");
4557      // We do not need to load the base. Instead, we can install the upper bits
4558      // with an OR instead of an ADD.
4559      Label done;
4560
4561      // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4562      if (maybeNULL) {  // NULL ptr must be preserved!
4563        z_slag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4564        z_bre(done);
4565      } else {
4566        z_sllg(Rdst, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4567      }
4568      if ((oop_base_hl != 0) && (oop_base_hh != 0)) {
4569        z_oihf(Rdst, oop_base_hf);
4570      } else if (oop_base_hl != 0) {
4571        z_oihl(Rdst, oop_base_hl);
4572      } else {
4573        assert(oop_base_hh != 0, "not heapbased mode");
4574        z_oihh(Rdst, oop_base_hh);
4575      }
4576      bind(done);
4577      BLOCK_COMMENT("} cOop decoder disjointBase");
4578    } else {
4579      BLOCK_COMMENT("cOop decoder general {");
4580      // There are three decode steps:
4581      //   scale oop offset (shift left)
4582      //   get base (in reg) and pow2_offset (constant)
4583      //   add base, pow2_offset, and oop offset
4584      // The following register overlap situations may exist:
4585      // Rdst == Rsrc,  Rbase any other
4586      //   not a problem. Scaling in-place leaves Rbase undisturbed.
4587      //   Loading Rbase does not impact the scaled offset.
4588      // Rdst == Rbase, Rsrc  any other
4589      //   scaling would destroy a possibly preloaded Rbase. Loading Rbase
4590      //   would destroy the scaled offset.
4591      //   Remedy: use Rdst_tmp if Rbase has been preloaded.
4592      //           use Rbase_tmp if base has to be loaded.
4593      // Rsrc == Rbase, Rdst  any other
4594      //   Only possible without preloaded Rbase.
4595      //   Loading Rbase does not destroy compressed oop because it was scaled into Rdst before.
4596      // Rsrc == Rbase, Rdst == Rbase
4597      //   Only possible without preloaded Rbase.
4598      //   Loading Rbase would destroy compressed oop. Scaling in-place is ok.
4599      //   Remedy: use Rbase_tmp.
4600      //
4601      Label    done;
4602      Register Rdst_tmp       = Rdst;
4603      Register Rbase_tmp      = Rbase;
4604      bool     used_R0        = false;
4605      bool     used_R1        = false;
4606      bool     base_preloaded = pow2_offset >= 0;
4607      guarantee(!(base_preloaded && (Rsrc == Rbase)), "Register clash, check caller");
4608      assert(oop_shift != 0, "room for optimization");
4609
4610      // Check if we need to use scratch registers.
4611      if (Rdst == Rbase) {
4612        assert(!(((Rdst == Z_R0) && (Rsrc == Z_R1)) || ((Rdst == Z_R1) && (Rsrc == Z_R0))), "need a scratch reg");
4613        if (Rdst != Rsrc) {
4614          if (base_preloaded) { Rdst_tmp  = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4615          else                { Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4616        } else {
4617          Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1;
4618        }
4619      }
4620      if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase);
4621
4622      // Scale oop and check for NULL.
4623      // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4624      if (maybeNULL) {  // NULL ptr must be preserved!
4625        z_slag(Rdst_tmp, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4626        z_bre(done);
4627      } else {
4628        z_sllg(Rdst_tmp, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4629      }
4630
4631      // Get oop_base components.
4632      if (!base_preloaded) {
4633        pow2_offset = get_oop_base(Rbase_tmp, (uint64_t)(intptr_t)oop_base);
4634      }
4635
4636      // Add up all components.
4637      if ((Rbase_tmp == Z_R0) || (Rdst_tmp == Z_R0)) {
4638        z_algr(Rdst_tmp, Rbase_tmp);
4639        if (pow2_offset != 0) { add2reg(Rdst_tmp, pow2_offset); }
4640      } else {
4641        add2reg_with_index(Rdst_tmp, pow2_offset, Rbase_tmp, Rdst_tmp);
4642      }
4643
4644      bind(done);
4645      lgr_if_needed(Rdst, Rdst_tmp);
4646#ifdef ASSERT
4647      if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb03bUL, 2); }
4648      if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb04bUL, 2); }
4649#endif
4650      BLOCK_COMMENT("} cOop decoder general");
4651    }
4652  } else {
4653    BLOCK_COMMENT("cOop decoder zeroBase {");
4654    if (oop_shift == 0) {
4655      lgr_if_needed(Rdst, Rsrc);
4656    } else {
4657      z_sllg(Rdst, Rsrc, oop_shift);
4658    }
4659    BLOCK_COMMENT("} cOop decoder zeroBase");
4660  }
4661}
4662
4663// ((OopHandle)result).resolve();
4664void MacroAssembler::resolve_oop_handle(Register result) {
4665  // OopHandle::resolve is an indirection.
4666  z_lg(result, 0, result);
4667}
4668
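// mirror = method->constMethod()->constants()->pool_holder()->java_mirror()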
4669void MacroAssembler::load_mirror(Register mirror, Register method) {
4670  mem2reg_opt(mirror, Address(method, Method::const_offset()));
4671  mem2reg_opt(mirror, Address(mirror, ConstMethod::constants_offset()));
4672  mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
4673  mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));
4674}
4675
4676//---------------------------------------------------------------
4677//---  Operations on arrays.
4678//---------------------------------------------------------------
4679
4680// Compiler ensures base is doubleword aligned and cnt is #doublewords.
4681// Emitter does not KILL cnt and base arguments, since they need to be copied to
4682// work registers anyway.
4683// Actually, only r0, r1, and r5 are killed.
4684unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register src_addr, Register src_len) {
4685  // Src_addr is evenReg.
4686  // Src_len is odd_Reg.
4687
4688  int      block_start = offset();
4689  Register tmp_reg  = src_len; // Holds target instr addr for EX.
4690  Register dst_len  = Z_R1;    // Holds dst len  for MVCLE.
4691  Register dst_addr = Z_R0;    // Holds dst addr for MVCLE.
4692
4693  Label doXC, doMVCLE, done;
4694
4695  BLOCK_COMMENT("Clear_Array {");
4696
4697  // Check for zero len and convert to long.
4698  z_ltgfr(src_len, cnt_arg);      // Sign-extend cnt to 64 bit (also sets the condition code).
4699  z_bre(done);                    // Nothing to do if len == 0.
4700
4701  // Prefetch data to be cleared.
4702  if (VM_Version::has_Prefetch()) {
4703    z_pfd(0x02,   0, Z_R0, base_pointer_arg);
4704    z_pfd(0x02, 256, Z_R0, base_pointer_arg);
4705  }
4706
4707  z_sllg(dst_len, src_len, 3);    // #bytes to clear.
4708  z_cghi(src_len, 32);            // Check for len <= 256 bytes (<=32 DW).
4709  z_brnh(doXC);                   // If so, use executed XC to clear.
4710
4711  // MVCLE: initialize long arrays (general case).
4712  bind(doMVCLE);
4713  z_lgr(dst_addr, base_pointer_arg);
4714  clear_reg(src_len, true, false); // Src len of MVCLE is zero.
4715
4716  MacroAssembler::move_long_ext(dst_addr, src_addr, 0);
4717  z_bru(done);
4718
4719  // XC: initialize short arrays.
4720  Label XC_template; // Instr template, never exec directly!
4721    bind(XC_template);
4722    z_xc(0,0,base_pointer_arg,0,base_pointer_arg);
4723
4724  bind(doXC);
4725    add2reg(dst_len, -1);             // Get #bytes-1 for EXECUTE.
4726    if (VM_Version::has_ExecuteExtensions()) {
4727      z_exrl(dst_len, XC_template);   // Execute XC with var. len.
4728    } else {
4729      z_larl(tmp_reg, XC_template);
4730      z_ex(dst_len,0,Z_R0,tmp_reg);   // Execute XC with var. len.
4731    }
4732    // z_bru(done);      // fallthru
4733
4734  bind(done);
4735
4736  BLOCK_COMMENT("} Clear_Array");
4737
4738  int block_end = offset();
4739  return block_end - block_start;
4740}
4741
4742// Compiler ensures base is doubleword aligned and cnt is count of doublewords.
4743// Emitter does not KILL any arguments or work registers.
4744// Emitter generates up to 16 XC instructions, depending on the array length.
4745unsigned int MacroAssembler::Clear_Array_Const(long cnt, Register base) {
4746  int  block_start    = offset();
4747  int  off;
4748  int  lineSize_Bytes = AllocatePrefetchStepSize;
4749  int  lineSize_DW    = AllocatePrefetchStepSize>>LogBytesPerWord;
4750  bool doPrefetch     = VM_Version::has_Prefetch();
4751  int  XC_maxlen      = 256;
4752  int  numXCInstr     = cnt > 0 ? (cnt*BytesPerWord-1)/XC_maxlen+1 : 0;
4753
4754  BLOCK_COMMENT("Clear_Array_Const {");
4755  assert(cnt*BytesPerWord <= 4096, "ClearArrayConst can handle 4k only");
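  // Example: cnt = 100 doublewords = 800 bytes => numXCInstr = 4, i.e. three full
  // 256-byte XC instructions plus one trailing 32-byte XC.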
4756
4757  // Do less prefetching for very short arrays.
4758  if (numXCInstr > 0) {
4759    // Prefetch only some cache lines, then begin clearing.
4760    if (doPrefetch) {
4761      if (cnt*BytesPerWord <= lineSize_Bytes/4) {  // If less than 1/4 of a cache line to clear,
4762        z_pfd(0x02, 0, Z_R0, base);                // prefetch just the first cache line.
4763      } else {
4764        assert(XC_maxlen == lineSize_Bytes, "ClearArrayConst needs 256B cache lines");
4765        for (off = 0; (off < AllocatePrefetchLines) && (off <= numXCInstr); off ++) {
4766          z_pfd(0x02, off*lineSize_Bytes, Z_R0, base);
4767        }
4768      }
4769    }
4770
4771    for (off=0; off<(numXCInstr-1); off++) {
4772      z_xc(off*XC_maxlen, XC_maxlen-1, base, off*XC_maxlen, base);
4773
4774      // Prefetch some cache lines in advance.
4775      if (doPrefetch && (off <= numXCInstr-AllocatePrefetchLines)) {
4776        z_pfd(0x02, (off+AllocatePrefetchLines)*lineSize_Bytes, Z_R0, base);
4777      }
4778    }
4779    if (off*XC_maxlen < cnt*BytesPerWord) {
4780      z_xc(off*XC_maxlen, (cnt*BytesPerWord-off*XC_maxlen)-1, base, off*XC_maxlen, base);
4781    }
4782  }
4783  BLOCK_COMMENT("} Clear_Array_Const");
4784
4785  int block_end = offset();
4786  return block_end - block_start;
4787}
4788
4789// Compiler ensures base is doubleword aligned and cnt is #doublewords.
4790// Emitter does not KILL cnt and base arguments, since they need to be copied to
4791// work registers anyway.
4792// Actually, only r0, r1, r4, and r5 (which are work registers) are killed.
4793//
4794// For very large arrays, exploit MVCLE H/W support.
4795// MVCLE instruction automatically exploits H/W-optimized page mover.
4796// - Bytes up to next page boundary are cleared with a series of XC to self.
4797// - All full pages are cleared with the page mover H/W assist.
4798// - Remaining bytes are again cleared by a series of XC to self.
4799//
4800unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register src_addr, Register src_len) {
4801  // Src_addr is evenReg.
4802  // Src_len is odd_Reg.
4803
4804  int      block_start = offset();
4805  Register dst_len  = Z_R1;      // Holds dst len  for MVCLE.
4806  Register dst_addr = Z_R0;      // Holds dst addr for MVCLE.
4807
4808  BLOCK_COMMENT("Clear_Array_Const_Big {");
4809
4810  // Get len to clear.
4811  load_const_optimized(dst_len, (long)cnt*8L);  // in Bytes = #DW*8
4812
4813  // Prepare other args to MVCLE.
4814  z_lgr(dst_addr, base_pointer_arg);
4815  // Indicate unused result.
4816  (void) clear_reg(src_len, true, false);  // Src len of MVCLE is zero.
4817
4818  // Clear.
4819  MacroAssembler::move_long_ext(dst_addr, src_addr, 0);
4820  BLOCK_COMMENT("} Clear_Array_Const_Big");
4821
4822  int block_end = offset();
4823  return block_end - block_start;
4824}
4825
4826// Allocator.
4827unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
4828                                                           Register cnt_reg,
4829                                                           Register tmp1_reg, Register tmp2_reg) {
4830  // Tmp1 is oddReg.
4831  // Tmp2 is evenReg.
4832
4833  int block_start = offset();
4834  Label doMVC, doMVCLE, done, MVC_template;
4835
4836  BLOCK_COMMENT("CopyRawMemory_AlignedDisjoint {");
4837
4838  // Check for zero len and convert to long.
4839  z_ltgfr(cnt_reg, cnt_reg);      // Sign-extend cnt to 64 bit (also sets the condition code).
4840  z_bre(done);                    // Nothing to do if len == 0.
4841
4842  z_sllg(Z_R1, cnt_reg, 3);       // Dst len in bytes. Calc early to have the result ready.
4843
4844  z_cghi(cnt_reg, 32);            // Check for len <= 256 bytes (<=32 DW).
4845  z_brnh(doMVC);                  // If so, use executed MVC to clear.
4846
4847  bind(doMVCLE);                  // A lot of data (more than 256 bytes).
4848  // Prep dest reg pair.
4849  z_lgr(Z_R0, dst_reg);           // dst addr
4850  // Dst len already in Z_R1.
4851  // Prep src reg pair.
4852  z_lgr(tmp2_reg, src_reg);       // src addr
4853  z_lgr(tmp1_reg, Z_R1);          // Src len same as dst len.
4854
4855  // Do the copy.
4856  move_long_ext(Z_R0, tmp2_reg, 0xb0); // Bypass cache.
4857  z_bru(done);                         // All done.
4858
4859  bind(MVC_template);             // Just some data (not more than 256 bytes).
4860  z_mvc(0, 0, dst_reg, 0, src_reg);
4861
4862  bind(doMVC);
4863
4864  if (VM_Version::has_ExecuteExtensions()) {
4865    add2reg(Z_R1, -1);
4866  } else {
4867    add2reg(tmp1_reg, -1, Z_R1);
4868    z_larl(Z_R1, MVC_template);
4869  }
4870
4871  if (VM_Version::has_Prefetch()) {
4872    z_pfd(1,  0,Z_R0,src_reg);
4873    z_pfd(2,  0,Z_R0,dst_reg);
4874    //    z_pfd(1,256,Z_R0,src_reg);    // Assume very short copy.
4875    //    z_pfd(2,256,Z_R0,dst_reg);
4876  }
4877
4878  if (VM_Version::has_ExecuteExtensions()) {
4879    z_exrl(Z_R1, MVC_template);
4880  } else {
4881    z_ex(tmp1_reg, 0, Z_R0, Z_R1);
4882  }
4883
4884  bind(done);
4885
4886  BLOCK_COMMENT("} CopyRawMemory_AlignedDisjoint");
4887
4888  int block_end = offset();
4889  return block_end - block_start;
4890}
4891
4892//------------------------------------------------------
4893//   Special String Intrinsics. Implementation
4894//------------------------------------------------------
4895
4896// Intrinsics for CompactStrings
4897
4898// Compress char[] to byte[]. odd_reg contains cnt. Kills dst. Early clobber: result
4899// The result is the number of characters copied before the first incompatible character was found.
4900// If tmp2 is provided and the compression fails, the compression stops exactly at this point and the result is precise.
4901//
4902// Note: Does not behave exactly like the package-private StringUTF16.compress Java implementation in case of failure:
4903// - Different number of characters may have been written to dead array (if tmp2 not provided).
4904// - Returns a number <cnt instead of 0. (Result gets compared with cnt.)
4905unsigned int MacroAssembler::string_compress(Register result, Register src, Register dst, Register odd_reg,
4906                                             Register even_reg, Register tmp, Register tmp2) {
4907  int block_start = offset();
4908  Label Lloop1, Lloop2, Lslow, Ldone;
4909  const Register addr2 = dst, ind1 = result, mask = tmp;
4910  const bool precise = (tmp2 != noreg);
4911
4912  BLOCK_COMMENT("string_compress {");
4913
4914  z_sll(odd_reg, 1);       // Number of bytes to read. (Must be a positive simm32.)
4915  clear_reg(ind1);         // Index to read.
4916  z_llilf(mask, 0xFF00FF00);
4917  z_ahi(odd_reg, -16);     // Last possible index for fast loop.
4918  z_brl(Lslow);
4919
4920  // ind1: index, even_reg: index increment, odd_reg: index limit
4921  z_iihf(mask, 0xFF00FF00);
4922  z_lhi(even_reg, 16);
4923
4924  bind(Lloop1); // 8 Characters per iteration.
4925  z_lg(Z_R0, Address(src, ind1));
4926  z_lg(Z_R1, Address(src, ind1, 8));
4927  if (precise) {
4928    if (VM_Version::has_DistinctOpnds()) {
4929      z_ogrk(tmp2, Z_R0, Z_R1);
4930    } else {
4931      z_lgr(tmp2, Z_R0);
4932      z_ogr(tmp2, Z_R1);
4933    }
4934    z_ngr(tmp2, mask);
4935    z_brne(Lslow);         // Failed fast case, retry slowly.
4936  }
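  // STCMH/STCM with mask 0b0101 store only the low byte of each halfword,
  // packing the 4 chars held in each 64-bit register into 4 bytes (8 chars total).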
4937  z_stcmh(Z_R0, 5, 0, addr2);
4938  z_stcm(Z_R0, 5, 2, addr2);
4939  if (!precise) { z_ogr(Z_R0, Z_R1); }
4940  z_stcmh(Z_R1, 5, 4, addr2);
4941  z_stcm(Z_R1, 5, 6, addr2);
4942  if (!precise) {
4943    z_ngr(Z_R0, mask);
4944    z_brne(Ldone);         // Failed (more than needed was written).
4945  }
4946  z_aghi(addr2, 8);
4947  z_brxle(ind1, even_reg, Lloop1);
4948
4949  bind(Lslow);
4950  // Compute index limit and skip if negative.
4951  z_ahi(odd_reg, 16-2);    // Last possible index for slow loop.
4952  z_lhi(even_reg, 2);
4953  z_cr(ind1, odd_reg);
4954  z_brh(Ldone);
4955
4956  bind(Lloop2); // 1 Character per iteration.
4957  z_llh(Z_R0, Address(src, ind1));
4958  z_tmll(Z_R0, 0xFF00);
4959  z_brnaz(Ldone);          // Failed slow case: Return number of written characters.
4960  z_stc(Z_R0, Address(addr2));
4961  z_aghi(addr2, 1);
4962  z_brxle(ind1, even_reg, Lloop2);
4963
4964  bind(Ldone);             // result = ind1 = 2*cnt
4965  z_srl(ind1, 1);
4966
4967  BLOCK_COMMENT("} string_compress");
4968
4969  return offset() - block_start;
4970}
4971
4972// Inflate byte[] to char[].
4973unsigned int MacroAssembler::string_inflate_trot(Register src, Register dst, Register cnt, Register tmp) {
4974  int block_start = offset();
4975
4976  BLOCK_COMMENT("string_inflate_trot {");
4977
4978  Register stop_char = Z_R0;
4979  Register table     = Z_R1;
4980  Register src_addr  = tmp;
4981
4982  assert_different_registers(Z_R0, Z_R1, tmp, src, dst, cnt);
4983  assert(dst->encoding()%2 == 0, "must be even reg");
4984  assert(cnt->encoding()%2 == 1, "must be odd reg");
4985  assert(cnt->encoding() - dst->encoding() == 1, "must be even/odd pair");
4986
4987  StubRoutines::zarch::generate_load_trot_table_addr(this, table);  // kills Z_R0 (if ASSERT)
4988  clear_reg(stop_char);  // Stop character. Not used here, but initialized to have a defined value.
4989  lgr_if_needed(src_addr, src);
4990  z_llgfr(cnt, cnt);     // # src characters, must be a positive simm32.
4991
4992  translate_ot(dst, src_addr, /* mask = */ 0x0001);
4993
4994  BLOCK_COMMENT("} string_inflate_trot");
4995
4996  return offset() - block_start;
4997}
4998
4999// Inflate byte[] to char[]. odd_reg contains cnt. Kills src.
5000unsigned int MacroAssembler::string_inflate(Register src, Register dst, Register odd_reg,
5001                                            Register even_reg, Register tmp) {
5002  int block_start = offset();
5003
5004  BLOCK_COMMENT("string_inflate {");
5005
5006  Label Lloop1, Lloop2, Lslow, Ldone;
5007  const Register addr1 = src, ind2 = tmp;
5008
5009  z_sll(odd_reg, 1);       // Number of bytes to write. (Must be a positive simm32.)
5010  clear_reg(ind2);         // Index to write.
5011  z_ahi(odd_reg, -16);     // Last possible index for fast loop.
5012  z_brl(Lslow);
5013
5014  // ind2: index, even_reg: index increment, odd_reg: index limit
5015  clear_reg(Z_R0);
5016  clear_reg(Z_R1);
5017  z_lhi(even_reg, 16);
5018
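  // ICMH/ICM with mask 0b0101 insert one byte into the low byte of each halfword.
  // With Z_R0/Z_R1 pre-cleared this widens 8 bytes into 8 chars per iteration.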
5019  bind(Lloop1); // 8 Characters per iteration.
5020  z_icmh(Z_R0, 5, 0, addr1);
5021  z_icmh(Z_R1, 5, 4, addr1);
5022  z_icm(Z_R0, 5, 2, addr1);
5023  z_icm(Z_R1, 5, 6, addr1);
5024  z_aghi(addr1, 8);
5025  z_stg(Z_R0, Address(dst, ind2));
5026  z_stg(Z_R1, Address(dst, ind2, 8));
5027  z_brxle(ind2, even_reg, Lloop1);
5028
5029  bind(Lslow);
5030  // Compute index limit and skip if negative.
5031  z_ahi(odd_reg, 16-2);    // Last possible index for slow loop.
5032  z_lhi(even_reg, 2);
5033  z_cr(ind2, odd_reg);
5034  z_brh(Ldone);
5035
5036  bind(Lloop2); // 1 Character per iteration.
5037  z_llc(Z_R0, Address(addr1));
5038  z_sth(Z_R0, Address(dst, ind2));
5039  z_aghi(addr1, 1);
5040  z_brxle(ind2, even_reg, Lloop2);
5041
5042  bind(Ldone);
5043
5044  BLOCK_COMMENT("} string_inflate");
5045
5046  return offset() - block_start;
5047}
5048
5049// Kills src.
5050unsigned int MacroAssembler::has_negatives(Register result, Register src, Register cnt,
5051                                           Register odd_reg, Register even_reg, Register tmp) {
5052  int block_start = offset();
5053  Label Lloop1, Lloop2, Lslow, Lnotfound, Ldone;
5054  const Register addr = src, mask = tmp;
5055
5056  BLOCK_COMMENT("has_negatives {");
5057
5058  z_llgfr(Z_R1, cnt);      // Number of bytes to read. (Must be a positive simm32.)
5059  z_llilf(mask, 0x80808080);
5060  z_lhi(result, 1);        // Assume true.
5061  // Last possible addr for fast loop.
5062  z_lay(odd_reg, -16, Z_R1, src);
5063  z_chi(cnt, 16);
5064  z_brl(Lslow);
5065
5066  // ind1: index, even_reg: index increment, odd_reg: index limit
5067  z_iihf(mask, 0x80808080);
5068  z_lghi(even_reg, 16);
5069
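  // OR 16 bytes together and AND with the 0x80... mask: a nonzero result means
  // at least one byte has its most significant bit set.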
5070  bind(Lloop1); // 16 bytes per iteration.
5071  z_lg(Z_R0, Address(addr));
5072  z_lg(Z_R1, Address(addr, 8));
5073  z_ogr(Z_R0, Z_R1);
5074  z_ngr(Z_R0, mask);
5075  z_brne(Ldone);           // If found return 1.
5076  z_brxlg(addr, even_reg, Lloop1);
5077
5078  bind(Lslow);
5079  z_aghi(odd_reg, 16-1);   // Last possible addr for slow loop.
5080  z_lghi(even_reg, 1);
5081  z_cgr(addr, odd_reg);
5082  z_brh(Lnotfound);
5083
5084  bind(Lloop2); // 1 byte per iteration.
5085  z_cli(Address(addr), 0x80);
5086  z_brnl(Ldone);           // If found return 1.
5087  z_brxlg(addr, even_reg, Lloop2);
5088
5089  bind(Lnotfound);
5090  z_lhi(result, 0);
5091
5092  bind(Ldone);
5093
5094  BLOCK_COMMENT("} has_negatives");
5095
5096  return offset() - block_start;
5097}
5098
5099// kill: cnt1, cnt2, odd_reg, even_reg; early clobber: result
5100unsigned int MacroAssembler::string_compare(Register str1, Register str2,
5101                                            Register cnt1, Register cnt2,
5102                                            Register odd_reg, Register even_reg, Register result, int ae) {
5103  int block_start = offset();
5104
5105  assert_different_registers(str1, cnt1, cnt2, odd_reg, even_reg, result);
5106  assert_different_registers(str2, cnt1, cnt2, odd_reg, even_reg, result);
5107
5108  // If strings are equal up to min length, return the length difference.
5109  const Register diff = result, // Pre-set result with length difference.
5110                 min  = cnt1,   // min number of bytes
5111                 tmp  = cnt2;
5112
5113  // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a)
5114  // we interchange str1 and str2 in the UL case and negate the result.
5115  // Like this, str1 is always latin1 encoded, except for the UU case.
5116// In addition, we need to zero-extend (equivalent to sign-extend here, as the sign bit is 0) when using 64 bit registers.
5117  const bool used_as_LU = (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL);
5118
5119  BLOCK_COMMENT("string_compare {");
5120
5121  if (used_as_LU) {
5122    z_srl(cnt2, 1);
5123  }
5124
5125  // See if the lengths are different, and calculate min in cnt1.
5126  // Save diff in case we need it for a tie-breaker.
5127
5128  // diff = cnt1 - cnt2
5129  if (VM_Version::has_DistinctOpnds()) {
5130    z_srk(diff, cnt1, cnt2);
5131  } else {
5132    z_lr(diff, cnt1);
5133    z_sr(diff, cnt2);
5134  }
5135  if (str1 != str2) {
5136    if (VM_Version::has_LoadStoreConditional()) {
5137      z_locr(min, cnt2, Assembler::bcondHigh);
5138    } else {
5139      Label Lskip;
5140      z_brl(Lskip);    // min ok if cnt1 < cnt2
5141      z_lr(min, cnt2); // min = cnt2
5142      bind(Lskip);
5143    }
5144  }
5145
5146  if (ae == StrIntrinsicNode::UU) {
5147    z_sra(diff, 1);
5148  }
5149  if (str1 != str2) {
5150    Label Ldone;
5151    if (used_as_LU) {
5152      // Loop which searches the first difference character by character.
5153      Label Lloop;
5154      const Register ind1 = Z_R1,
5155                     ind2 = min;
5156      int stride1 = 1, stride2 = 2; // See comment above.
5157
5158      // ind1: index, even_reg: index increment, odd_reg: index limit
5159      z_llilf(ind1, (unsigned int)(-stride1));
5160      z_lhi(even_reg, stride1);
5161      add2reg(odd_reg, -stride1, min);
5162      clear_reg(ind2); // kills min
5163
5164      bind(Lloop);
5165      z_brxh(ind1, even_reg, Ldone);
5166      z_llc(tmp, Address(str1, ind1));
5167      z_llh(Z_R0, Address(str2, ind2));
5168      z_ahi(ind2, stride2);
5169      z_sr(tmp, Z_R0);
5170      z_bre(Lloop);
5171
5172      z_lr(result, tmp);
5173
5174    } else {
5175      // Use clcle in fast loop (only for same encoding).
5176      z_lgr(Z_R0, str1);
5177      z_lgr(even_reg, str2);
5178      z_llgfr(Z_R1, min);
5179      z_llgfr(odd_reg, min);
5180
5181      if (ae == StrIntrinsicNode::LL) {
5182        compare_long_ext(Z_R0, even_reg, 0);
5183      } else {
5184        compare_long_uni(Z_R0, even_reg, 0);
5185      }
5186      z_bre(Ldone);
5187      z_lgr(Z_R1, Z_R0);
5188      if (ae == StrIntrinsicNode::LL) {
5189        z_llc(Z_R0, Address(even_reg));
5190        z_llc(result, Address(Z_R1));
5191      } else {
5192        z_llh(Z_R0, Address(even_reg));
5193        z_llh(result, Address(Z_R1));
5194      }
5195      z_sr(result, Z_R0);
5196    }
5197
5198    // Otherwise, return the difference between the first mismatched chars.
5199    bind(Ldone);
5200  }
5201
5202  if (ae == StrIntrinsicNode::UL) {
5203    z_lcr(result, result); // Negate result (see note above).
5204  }
5205
5206  BLOCK_COMMENT("} string_compare");
5207
5208  return offset() - block_start;
5209}
5210
5211unsigned int MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, Register limit,
5212                                          Register odd_reg, Register even_reg, Register result, bool is_byte) {
5213  int block_start = offset();
5214
5215  BLOCK_COMMENT("array_equals {");
5216
5217  assert_different_registers(ary1, limit, odd_reg, even_reg);
5218  assert_different_registers(ary2, limit, odd_reg, even_reg);
5219
5220  Label Ldone, Ldone_true, Ldone_false, Lclcle, CLC_template;
5221  int base_offset = 0;
5222
5223  if (ary1 != ary2) {
5224    if (is_array_equ) {
5225      base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);
5226
5227      // Return true if the same array.
5228      compareU64_and_branch(ary1, ary2, Assembler::bcondEqual, Ldone_true);
5229
5230      // Return false if one of them is NULL.
5231      compareU64_and_branch(ary1, (intptr_t)0, Assembler::bcondEqual, Ldone_false);
5232      compareU64_and_branch(ary2, (intptr_t)0, Assembler::bcondEqual, Ldone_false);
5233
5234      // Load the lengths of arrays.
5235      z_llgf(odd_reg, Address(ary1, arrayOopDesc::length_offset_in_bytes()));
5236
5237      // Return false if the two arrays are not equal length.
5238      z_c(odd_reg, Address(ary2, arrayOopDesc::length_offset_in_bytes()));
5239      z_brne(Ldone_false);
5240
5241      // string len in bytes (right operand)
5242      if (!is_byte) {
5243        z_chi(odd_reg, 128);
5244        z_sll(odd_reg, 1); // preserves flags
5245        z_brh(Lclcle);
5246      } else {
5247        compareU32_and_branch(odd_reg, (intptr_t)256, Assembler::bcondHigh, Lclcle);
5248      }
5249    } else {
5250      z_llgfr(odd_reg, limit); // Need to zero-extend prior to using the value.
5251      compareU32_and_branch(limit, (intptr_t)256, Assembler::bcondHigh, Lclcle);
5252    }
5253
5254
5255    // Use clc instruction for up to 256 bytes.
5256    {
5257      Register str1_reg = ary1,
5258          str2_reg = ary2;
5259      if (is_array_equ) {
5260        str1_reg = Z_R1;
5261        str2_reg = even_reg;
5262        add2reg(str1_reg, base_offset, ary1); // string addr (left operand)
5263        add2reg(str2_reg, base_offset, ary2); // string addr (right operand)
5264      }
5265      z_ahi(odd_reg, -1); // Clc uses decremented limit. Also compare result to 0.
5266      z_brl(Ldone_true);
5267      // Note: We could jump to the template if equal.
5268
5269      assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware");
5270      z_exrl(odd_reg, CLC_template);
5271      z_bre(Ldone_true);
5272      // fall through
5273
5274      bind(Ldone_false);
5275      clear_reg(result);
5276      z_bru(Ldone);
5277
5278      bind(CLC_template);
5279      z_clc(0, 0, str1_reg, 0, str2_reg);
5280    }
5281
5282    // Use clcle instruction.
5283    {
5284      bind(Lclcle);
5285      add2reg(even_reg, base_offset, ary2); // string addr (right operand)
5286      add2reg(Z_R0, base_offset, ary1);     // string addr (left operand)
5287
5288      z_lgr(Z_R1, odd_reg); // string len in bytes (left operand)
5289      if (is_byte) {
5290        compare_long_ext(Z_R0, even_reg, 0);
5291      } else {
5292        compare_long_uni(Z_R0, even_reg, 0);
5293      }
5294      z_lghi(result, 0); // Preserve flags.
5295      z_brne(Ldone);
5296    }
5297  }
5298  // fall through
5299
5300  bind(Ldone_true);
5301  z_lghi(result, 1); // All characters are equal.
5302  bind(Ldone);
5303
5304  BLOCK_COMMENT("} array_equals");
5305
5306  return offset() - block_start;
5307}
5308
5309// kill: haycnt, needlecnt, odd_reg, even_reg; early clobber: result
5310unsigned int MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
5311                                            Register needle, Register needlecnt, int needlecntval,
5312                                            Register odd_reg, Register even_reg, int ae) {
5313  int block_start = offset();
5314
5315  // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
5316  assert(ae != StrIntrinsicNode::LU, "Invalid encoding");
5317  const int h_csize = (ae == StrIntrinsicNode::LL) ? 1 : 2;
5318  const int n_csize = (ae == StrIntrinsicNode::UU) ? 2 : 1;
5319  Label L_needle1, L_Found, L_NotFound;
5320
5321  BLOCK_COMMENT("string_indexof {");
5322
5323  if (needle == haystack) {
5324    z_lhi(result, 0);
5325  } else {
5326
5327  // Load first character of needle (R0 used by search_string instructions).
5328  if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); }
5329
5330  // Compute last haystack addr to use if no match gets found.
5331  if (needlecnt != noreg) { // variable needlecnt
5332    z_ahi(needlecnt, -1); // Remaining characters after first one.
5333    z_sr(haycnt, needlecnt); // Compute index succeeding last element to compare.
5334    if (n_csize == 2) { z_sll(needlecnt, 1); } // In bytes.
5335  } else { // constant needlecnt
5336    assert((needlecntval & 0x7fff) == needlecntval, "must be positive simm16 immediate");
5337    // Compute index succeeding last element to compare.
5338    if (needlecntval != 1) { z_ahi(haycnt, 1 - needlecntval); }
5339  }
5340
5341  z_llgfr(haycnt, haycnt); // Clear high half.
5342  z_lgr(result, haystack); // Final result will be computed from needle start pointer.
5343  if (h_csize == 2) { z_sll(haycnt, 1); } // Scale to number of bytes.
5344  z_agr(haycnt, haystack); // Point to address succeeding last element (haystack+scale*(haycnt-needlecnt+1)).
5345
5346  if (h_csize != n_csize) {
5347    assert(ae == StrIntrinsicNode::UL, "Invalid encoding");
5348
5349    if (needlecnt != noreg || needlecntval != 1) {
5350      if (needlecnt != noreg) {
5351        compare32_and_branch(needlecnt, (intptr_t)0, Assembler::bcondEqual, L_needle1);
5352      }
5353
5354      // Main Loop: UL version (now we have at least 2 characters).
5355      Label L_OuterLoop, L_InnerLoop, L_Skip;
5356      bind(L_OuterLoop); // Search for 1st 2 characters.
5357      z_lgr(Z_R1, haycnt);
5358      MacroAssembler::search_string_uni(Z_R1, result);
5359      z_brc(Assembler::bcondNotFound, L_NotFound);
5360      z_lgr(result, Z_R1);
5361
5362      z_lghi(Z_R1, n_csize);
5363      z_lghi(even_reg, h_csize);
5364      bind(L_InnerLoop);
5365      z_llgc(odd_reg, Address(needle, Z_R1));
5366      z_ch(odd_reg, Address(result, even_reg));
5367      z_brne(L_Skip);
5368      if (needlecnt != noreg) { z_cr(Z_R1, needlecnt); } else { z_chi(Z_R1, needlecntval - 1); }
5369      z_brnl(L_Found);
5370      z_aghi(Z_R1, n_csize);
5371      z_aghi(even_reg, h_csize);
5372      z_bru(L_InnerLoop);
5373
5374      bind(L_Skip);
5375      z_aghi(result, h_csize); // This is the new address we want to use for comparing.
5376      z_bru(L_OuterLoop);
5377    }
5378
5379  } else {
5380    const intptr_t needle_bytes = (n_csize == 2) ? ((needlecntval - 1) << 1) : (needlecntval - 1);
5381    Label L_clcle;
5382
5383    if (needlecnt != noreg || (needlecntval != 1 && needle_bytes <= 256)) {
5384      if (needlecnt != noreg) {
5385        compare32_and_branch(needlecnt, 256, Assembler::bcondHigh, L_clcle);
5386        z_ahi(needlecnt, -1); // remaining bytes -1 (for CLC)
5387        z_brl(L_needle1);
5388      }
5389
5390      // Main Loop: clc version (now we have at least 2 characters).
5391      Label L_OuterLoop, CLC_template;
5392      bind(L_OuterLoop); // Search for 1st 2 characters.
5393      z_lgr(Z_R1, haycnt);
5394      if (h_csize == 1) {
5395        MacroAssembler::search_string(Z_R1, result);
5396      } else {
5397        MacroAssembler::search_string_uni(Z_R1, result);
5398      }
5399      z_brc(Assembler::bcondNotFound, L_NotFound);
5400      z_lgr(result, Z_R1);
5401
5402      if (needlecnt != noreg) {
5403        assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware");
5404        z_exrl(needlecnt, CLC_template);
5405      } else {
5406        z_clc(h_csize, needle_bytes -1, Z_R1, n_csize, needle);
5407      }
5408      z_bre(L_Found);
5409      z_aghi(result, h_csize); // This is the new address we want to use for comparing.
5410      z_bru(L_OuterLoop);
5411
5412      if (needlecnt != noreg) {
5413        bind(CLC_template);
5414        z_clc(h_csize, 0, Z_R1, n_csize, needle);
5415      }
5416    }
5417
5418    if (needlecnt != noreg || needle_bytes > 256) {
5419      bind(L_clcle);
5420
5421      // Main Loop: clcle version (now we have at least 256 bytes).
5422      Label L_OuterLoop, CLC_template;
5423      bind(L_OuterLoop); // Search for 1st 2 characters.
5424      z_lgr(Z_R1, haycnt);
5425      if (h_csize == 1) {
5426        MacroAssembler::search_string(Z_R1, result);
5427      } else {
5428        MacroAssembler::search_string_uni(Z_R1, result);
5429      }
5430      z_brc(Assembler::bcondNotFound, L_NotFound);
5431
5432      add2reg(Z_R0, n_csize, needle);
5433      add2reg(even_reg, h_csize, Z_R1);
5434      z_lgr(result, Z_R1);
5435      if (needlecnt != noreg) {
5436        z_llgfr(Z_R1, needlecnt); // needle len in bytes (left operand)
5437        z_llgfr(odd_reg, needlecnt);
5438      } else {
5439        load_const_optimized(Z_R1, needle_bytes);
5440        if (Immediate::is_simm16(needle_bytes)) { z_lghi(odd_reg, needle_bytes); } else { z_lgr(odd_reg, Z_R1); }
5441      }
5442      if (h_csize == 1) {
5443        compare_long_ext(Z_R0, even_reg, 0);
5444      } else {
5445        compare_long_uni(Z_R0, even_reg, 0);
5446      }
5447      z_bre(L_Found);
5448
5449      if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); } // Reload.
5450      z_aghi(result, h_csize); // This is the new address we want to use for comparing.
5451      z_bru(L_OuterLoop);
5452    }
5453  }
5454
5455  if (needlecnt != noreg || needlecntval == 1) {
5456    bind(L_needle1);
5457
5458    // Single needle character version.
5459    if (h_csize == 1) {
5460      MacroAssembler::search_string(haycnt, result);
5461    } else {
5462      MacroAssembler::search_string_uni(haycnt, result);
5463    }
5464    z_lgr(result, haycnt);
5465    z_brc(Assembler::bcondFound, L_Found);
5466  }
5467
5468  bind(L_NotFound);
5469  add2reg(result, -1, haystack); // Return -1.
5470
5471  bind(L_Found); // Return index (or -1 in fallthrough case).
5472  z_sgr(result, haystack);
5473  if (h_csize == 2) { z_srag(result, result, exact_log2(sizeof(jchar))); }
5474  }
5475  BLOCK_COMMENT("} string_indexof");
5476
5477  return offset() - block_start;
5478}
5479
5480// early clobber: result
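// Searches the haystack (haycnt characters, byte- or jchar-sized depending on is_byte) for a
// single character, taken from register 'needle' if given, otherwise from the immediate
// 'needleChar'. On return, 'result' holds the index of the first occurrence, or -1 if not found.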
5481unsigned int MacroAssembler::string_indexof_char(Register result, Register haystack, Register haycnt,
5482                                                 Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte) {
5483  int block_start = offset();
5484
5485  BLOCK_COMMENT("string_indexof_char {");
5486
5487  if (needle == haystack) {
5488    z_lhi(result, 0);
5489  } else {
5490
5491  Label Ldone;
5492
5493  z_llgfr(odd_reg, haycnt);  // Preset loop ctr/searchrange end.
5494  if (needle == noreg) {
5495    load_const_optimized(Z_R0, (unsigned long)needleChar);
5496  } else {
5497    if (is_byte) {
5498      z_llgcr(Z_R0, needle); // First (and only) needle char.
5499    } else {
5500      z_llghr(Z_R0, needle); // First (and only) needle char.
5501    }
5502  }
5503
5504  if (!is_byte) {
5505    z_agr(odd_reg, odd_reg); // Calc #bytes to be processed with SRSTU.
5506  }
5507
5508  z_lgr(even_reg, haystack); // haystack addr
5509  z_agr(odd_reg, haystack);  // First char after range end.
5510  z_lghi(result, -1);
5511
5512  if (is_byte) {
5513    MacroAssembler::search_string(odd_reg, even_reg);
5514  } else {
5515    MacroAssembler::search_string_uni(odd_reg, even_reg);
5516  }
5517  z_brc(Assembler::bcondNotFound, Ldone);
5518  if (is_byte) {
5519    if (VM_Version::has_DistinctOpnds()) {
5520      z_sgrk(result, odd_reg, haystack);
5521    } else {
5522      z_sgr(odd_reg, haystack);
5523      z_lgr(result, odd_reg);
5524    }
5525  } else {
5526    z_slgr(odd_reg, haystack);
5527    z_srlg(result, odd_reg, exact_log2(sizeof(jchar)));
5528  }
5529
5530  bind(Ldone);
5531  }
5532  BLOCK_COMMENT("} string_indexof_char");
5533
5534  return offset() - block_start;
5535}
5536
5537
5538//-------------------------------------------------
5539//   Constants (scalar and oop) in constant pool
5540//-------------------------------------------------
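//
// The constant pool (TOC) lives in the consts section of the code blob. The store_* functions
// below return the offset of the entry relative to consts()->start(), or -1 if no entry could
// be allocated. The load_* functions then emit a PC-relative access to that entry.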
5541
5542// Add a non-relocated constant to the CP.
5543int MacroAssembler::store_const_in_toc(AddressLiteral& val) {
5544  long    value  = val.value();
5545  address tocPos = long_constant(value);
5546
5547  if (tocPos != NULL) {
5548    int tocOffset = (int)(tocPos - code()->consts()->start());
5549    return tocOffset;
5550  }
  // long_constant() returned NULL, so no constant entry has been created.
  // In that case, we return a "fatal" offset, just in case subsequently
  // generated access code gets executed.
5554  return -1;
5555}
5556
5557// Returns the TOC offset where the address is stored.
5558// Add a relocated constant to the CP.
5559int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) {
5560  // Use RelocationHolder::none for the constant pool entry.
5561  // Otherwise we will end up with a failing NativeCall::verify(x),
5562  // where x is the address of the constant pool entry.
5563  address tocPos = address_constant((address)oop.value(), RelocationHolder::none);
5564
5565  if (tocPos != NULL) {
5566    int              tocOffset = (int)(tocPos - code()->consts()->start());
5567    RelocationHolder rsp = oop.rspec();
5568    Relocation      *rel = rsp.reloc();
5569
5570    // Store toc_offset in relocation, used by call_far_patchable.
5571    if ((relocInfo::relocType)rel->type() == relocInfo::runtime_call_w_cp_type) {
5572      ((runtime_call_w_cp_Relocation *)(rel))->set_constant_pool_offset(tocOffset);
5573    }
5574    // Relocate at the load's pc.
5575    relocate(rsp);
5576
5577    return tocOffset;
5578  }
  // address_constant() returned NULL, so no constant entry has been created.
  // In that case, we return a "fatal" offset, just in case subsequently
  // generated access code gets executed.
5582  return -1;
5583}
5584
5585bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
5586  int     tocOffset = store_const_in_toc(a);
5587  if (tocOffset == -1) return false;
5588  address tocPos    = tocOffset + code()->consts()->start();
5589  assert((address)code()->consts()->start() != NULL, "Please add CP address");
5590
5591  load_long_pcrelative(dst, tocPos);
5592  return true;
5593}
5594
5595bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
5596  int     tocOffset = store_oop_in_toc(a);
5597  if (tocOffset == -1) return false;
5598  address tocPos    = tocOffset + code()->consts()->start();
5599  assert((address)code()->consts()->start() != NULL, "Please add CP address");
5600
5601  load_addr_pcrelative(dst, tocPos);
5602  return true;
5603}
5604
5605// If the instruction sequence at the given pc is a load_const_from_toc
5606// sequence, return the value currently stored at the referenced position
5607// in the TOC.
5608intptr_t MacroAssembler::get_const_from_toc(address pc) {
5609
5610  assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
5611
5612  long    offset  = get_load_const_from_toc_offset(pc);
5613  address dataLoc = NULL;
5614  if (is_load_const_from_toc_pcrelative(pc)) {
5615    dataLoc = pc + offset;
5616  } else {
5617    CodeBlob* cb = CodeCache::find_blob_unsafe(pc);   // Else we get assertion if nmethod is zombie.
5618    assert(cb && cb->is_nmethod(), "sanity");
5619    nmethod* nm = (nmethod*)cb;
5620    dataLoc = nm->ctable_begin() + offset;
5621  }
5622  return *(intptr_t *)dataLoc;
5623}
5624
5625// If the instruction sequence at the given pc is a load_const_from_toc
5626// sequence, copy the passed-in new_data value into the referenced
5627// position in the TOC.
5628void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb) {
5629  assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
5630
5631  long    offset = MacroAssembler::get_load_const_from_toc_offset(pc);
5632  address dataLoc = NULL;
5633  if (is_load_const_from_toc_pcrelative(pc)) {
5634    dataLoc = pc+offset;
5635  } else {
5636    nmethod* nm = CodeCache::find_nmethod(pc);
5637    assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob");
5638    dataLoc = nm->ctable_begin() + offset;
5639  }
5640  if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary.
5641    *(unsigned long *)dataLoc = new_data;
5642  }
5643}
5644
// Dynamic TOC: This getter must only be called if "a" is the address of a
// load_const_from_toc site. Verify that by calling is_load_const_from_toc() beforehand.
// The offset may be +/- 2**32, hence the long return type.
5648long MacroAssembler::get_load_const_from_toc_offset(address a) {
5649  assert(is_load_const_from_toc_pcrelative(a), "expected pc relative load");
5650  //  expected code sequence:
5651  //    z_lgrl(t, simm32);    len = 6
5652  unsigned long inst;
5653  unsigned int  len = get_instruction(a, &inst);
5654  return get_pcrel_offset(inst);
5655}
5656
5657//**********************************************************************************
5658//  inspection of generated instruction sequences for a particular pattern
5659//**********************************************************************************
5660
5661bool MacroAssembler::is_load_const_from_toc_pcrelative(address a) {
5662#ifdef ASSERT
5663  unsigned long inst;
5664  unsigned int  len = get_instruction(a+2, &inst);
5665  if ((len == 6) && is_load_pcrelative_long(a) && is_call_pcrelative_long(inst)) {
5666    const int range = 128;
5667    Assembler::dump_code_range(tty, a, range, "instr(a) == z_lgrl && instr(a+2) == z_brasl");
5668    VM_Version::z_SIGSEGV();
5669  }
5670#endif
5671  // expected code sequence:
5672  //   z_lgrl(t, relAddr32);    len = 6
5673  //TODO: verify accessed data is in CP, if possible.
5674  return is_load_pcrelative_long(a);  // TODO: might be too general. Currently, only lgrl is used.
5675}
5676
5677bool MacroAssembler::is_load_const_from_toc_call(address a) {
5678  return is_load_const_from_toc(a) && is_call_byregister(a + load_const_from_toc_size());
5679}
5680
5681bool MacroAssembler::is_load_const_call(address a) {
5682  return is_load_const(a) && is_call_byregister(a + load_const_size());
5683}
5684
5685//-------------------------------------------------
//   Emitters for some really CISC instructions
5687//-------------------------------------------------
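//
// The instructions emitted below (MVCLE, CLCLE, SRST/SRSTU, the CPACF ops, CKSM, TRxx) are
// interruptible: the CPU may end them prematurely with CC 3 before all data has been processed.
// Each emitter therefore wraps its instruction in a retry loop that branches back as long as
// CC 3 (encoded as bcondOverflow) is set.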
5688
5689void MacroAssembler::move_long_ext(Register dst, Register src, unsigned int pad) {
5690  assert(dst->encoding()%2==0, "must be an even/odd register pair");
5691  assert(src->encoding()%2==0, "must be an even/odd register pair");
5692  assert(pad<256, "must be a padding BYTE");
5693
5694  Label retry;
5695  bind(retry);
5696  Assembler::z_mvcle(dst, src, pad);
5697  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5698}
5699
5700void MacroAssembler::compare_long_ext(Register left, Register right, unsigned int pad) {
5701  assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
5702  assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
5703  assert(pad<256, "must be a padding BYTE");
5704
5705  Label retry;
5706  bind(retry);
5707  Assembler::z_clcle(left, right, pad, Z_R0);
5708  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5709}
5710
5711void MacroAssembler::compare_long_uni(Register left, Register right, unsigned int pad) {
5712  assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
5713  assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
5714  assert(pad<=0xfff, "must be a padding HALFWORD");
5715  assert(VM_Version::has_ETF2(), "instruction must be available");
5716
5717  Label retry;
5718  bind(retry);
5719  Assembler::z_clclu(left, right, pad, Z_R0);
5720  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5721}
5722
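// SRST-based search: scans the storage range [start, end) for the search character held in
// Z_R0 (a single byte for SRST, a UTF-16 code unit for SRSTU). On a match, the condition code
// indicates "found" and 'end' is updated to the address of the matching character.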
5723void MacroAssembler::search_string(Register end, Register start) {
5724  assert(end->encoding() != 0, "end address must not be in R0");
5725  assert(start->encoding() != 0, "start address must not be in R0");
5726
5727  Label retry;
5728  bind(retry);
5729  Assembler::z_srst(end, start);
5730  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5731}
5732
5733void MacroAssembler::search_string_uni(Register end, Register start) {
5734  assert(end->encoding() != 0, "end address must not be in R0");
5735  assert(start->encoding() != 0, "start address must not be in R0");
5736  assert(VM_Version::has_ETF3(), "instruction must be available");
5737
5738  Label retry;
5739  bind(retry);
5740  Assembler::z_srstu(end, start);
5741  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5742}
5743
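// CPACF (message-security assist) emitters. For KMAC, KIMD, KLMD, KM, and KMC, the function
// code is taken from Z_R0 and the parameter block address from Z_R1 (implicit operands). The
// source buffer address/length is passed as an even/odd register pair, and KM/KMC additionally
// take an even destination-address register.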
5744void MacroAssembler::kmac(Register srcBuff) {
5745  assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
5746  assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
5747
5748  Label retry;
5749  bind(retry);
5750  Assembler::z_kmac(Z_R0, srcBuff);
5751  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5752}
5753
5754void MacroAssembler::kimd(Register srcBuff) {
5755  assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
5756  assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
5757
5758  Label retry;
5759  bind(retry);
5760  Assembler::z_kimd(Z_R0, srcBuff);
5761  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5762}
5763
5764void MacroAssembler::klmd(Register srcBuff) {
5765  assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
5766  assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
5767
5768  Label retry;
5769  bind(retry);
5770  Assembler::z_klmd(Z_R0, srcBuff);
5771  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5772}
5773
5774void MacroAssembler::km(Register dstBuff, Register srcBuff) {
  // dstBuff and srcBuff are allowed to be the same register (in-place encryption).
  // dstBuff and srcBuff storage must not overlap destructively, and neither may overlap the parameter block.
5777  assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
5778  assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
5779  assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
5780
5781  Label retry;
5782  bind(retry);
5783  Assembler::z_km(dstBuff, srcBuff);
5784  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5785}
5786
5787void MacroAssembler::kmc(Register dstBuff, Register srcBuff) {
  // dstBuff and srcBuff are allowed to be the same register (in-place encryption).
  // dstBuff and srcBuff storage must not overlap destructively, and neither may overlap the parameter block.
5790  assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
5791  assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
5792  assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
5793
5794  Label retry;
5795  bind(retry);
5796  Assembler::z_kmc(dstBuff, srcBuff);
5797  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5798}
5799
5800void MacroAssembler::cksm(Register crcBuff, Register srcBuff) {
5801  assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
5802
5803  Label retry;
5804  bind(retry);
5805  Assembler::z_cksm(crcBuff, srcBuff);
5806  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5807}
5808
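// TROO/TROT/TRTO/TRTT translate between 1-byte and 2-byte character encodings (one-to-one,
// one-to-two, two-to-one, two-to-two). The translation table address is taken from Z_R1 and
// the test character, which terminates the translation when encountered, from Z_R0.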
5809void MacroAssembler::translate_oo(Register r1, Register r2, uint m3) {
5810  assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
5811  assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
5812
5813  Label retry;
5814  bind(retry);
5815  Assembler::z_troo(r1, r2, m3);
5816  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5817}
5818
5819void MacroAssembler::translate_ot(Register r1, Register r2, uint m3) {
5820  assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
5821  assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
5822
5823  Label retry;
5824  bind(retry);
5825  Assembler::z_trot(r1, r2, m3);
5826  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5827}
5828
5829void MacroAssembler::translate_to(Register r1, Register r2, uint m3) {
5830  assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
5831  assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
5832
5833  Label retry;
5834  bind(retry);
5835  Assembler::z_trto(r1, r2, m3);
5836  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5837}
5838
5839void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) {
5840  assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
5841  assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
5842
5843  Label retry;
5844  bind(retry);
5845  Assembler::z_trtt(r1, r2, m3);
5846  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5847}
5848
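// Emits a safepoint poll: loads SafepointSynchronize::address_of_state() into scratch (Z_R1 if
// none is given) and branches to slow_path unless the state equals _not_synchronized.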
5849void MacroAssembler::generate_safepoint_check(Label& slow_path, Register scratch, bool may_relocate) {
5850  if (scratch == noreg) scratch = Z_R1;
5851  address Astate = SafepointSynchronize::address_of_state();
5852  BLOCK_COMMENT("safepoint check:");
5853
5854  if (may_relocate) {
5855    ptrdiff_t total_distance = Astate - this->pc();
5856    if (RelAddr::is_in_range_of_RelAddr32(total_distance)) {
5857      RelocationHolder rspec = external_word_Relocation::spec(Astate);
5858      (this)->relocate(rspec, relocInfo::pcrel_addr_format);
5859      load_absolute_address(scratch, Astate);
5860    } else {
5861      load_const_optimized(scratch, Astate);
5862    }
5863  } else {
5864    load_absolute_address(scratch, Astate);
5865  }
5866  z_cli(/*SafepointSynchronize::sz_state()*/4-1, scratch, SafepointSynchronize::_not_synchronized);
5867  z_brne(slow_path);
5868}
5869
5870
5871void MacroAssembler::generate_type_profiling(const Register Rdata,
5872                                             const Register Rreceiver_klass,
5873                                             const Register Rwanted_receiver_klass,
5874                                             const Register Rmatching_row,
5875                                             bool is_virtual_call) {
5876  const int row_size = in_bytes(ReceiverTypeData::receiver_offset(1)) -
5877                       in_bytes(ReceiverTypeData::receiver_offset(0));
5878  const int num_rows = ReceiverTypeData::row_limit();
5879  NearLabel found_free_row;
5880  NearLabel do_increment;
5881  NearLabel found_no_slot;
5882
5883  BLOCK_COMMENT("type profiling {");
5884
5885  // search for:
5886  //    a) The type given in Rwanted_receiver_klass.
5887  //    b) The *first* empty row.
5888
  // First search for a) only, ignoring b) entirely.
5890  // This is possible because
5891  //    wanted_receiver_class == receiver_class  &&  wanted_receiver_class == 0
5892  // is never true (receiver_class can't be zero).
5893  for (int row_num = 0; row_num < num_rows; row_num++) {
    // row_offset should be a well-behaved positive number. The generated code relies
    // on that for constant code size. add2reg can handle all row_offset values, but
    // would then have to vary the generated code size.
5897    int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
5898    assert(Displacement::is_shortDisp(row_offset), "Limitation of generated code");
5899
5900    // Is Rwanted_receiver_klass in this row?
5901    if (VM_Version::has_CompareBranch()) {
5902      z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
5903      // Rmatching_row = Rdata + row_offset;
5904      add2reg(Rmatching_row, row_offset, Rdata);
5905      // if (*row_recv == (intptr_t) receiver_klass) goto fill_existing_slot;
5906      compare64_and_branch(Rwanted_receiver_klass, Rreceiver_klass, Assembler::bcondEqual, do_increment);
5907    } else {
5908      add2reg(Rmatching_row, row_offset, Rdata);
5909      z_cg(Rreceiver_klass, row_offset, Z_R0, Rdata);
5910      z_bre(do_increment);
5911    }
5912  }
5913
5914  // Now that we did not find a match, let's search for b).
5915
  // We could save the first calculation of Rmatching_row if we would search for a) in reverse order.
5917  // We would then end up here with Rmatching_row containing the value for row_num == 0.
5918  // We would not see much benefit, if any at all, because the CPU can schedule
5919  // two instructions together with a branch anyway.
5920  for (int row_num = 0; row_num < num_rows; row_num++) {
5921    int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
5922
5923    // Has this row a zero receiver_klass, i.e. is it empty?
5924    if (VM_Version::has_CompareBranch()) {
5925      z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
5926      // Rmatching_row = Rdata + row_offset
5927      add2reg(Rmatching_row, row_offset, Rdata);
5928      // if (*row_recv == (intptr_t) 0) goto found_free_row
5929      compare64_and_branch(Rwanted_receiver_klass, (intptr_t)0, Assembler::bcondEqual, found_free_row);
5930    } else {
5931      add2reg(Rmatching_row, row_offset, Rdata);
5932      load_and_test_long(Rwanted_receiver_klass, Address(Rdata, row_offset));
5933      z_bre(found_free_row);  // zero -> Found a free row.
5934    }
5935  }
5936
5937  // No match, no empty row found.
5938  // Increment total counter to indicate polymorphic case.
5939  if (is_virtual_call) {
5940    add2mem_64(Address(Rdata, CounterData::count_offset()), 1, Rmatching_row);
5941  }
5942  z_bru(found_no_slot);
5943
5944  // Here we found an empty row, but we have not found Rwanted_receiver_klass.
5945  // Rmatching_row holds the address to the first empty row.
5946  bind(found_free_row);
5947  // Store receiver_klass into empty slot.
5948  z_stg(Rreceiver_klass, 0, Z_R0, Rmatching_row);
5949
5950  // Increment the counter of Rmatching_row.
5951  bind(do_increment);
5952  ByteSize counter_offset = ReceiverTypeData::receiver_count_offset(0) - ReceiverTypeData::receiver_offset(0);
5953  add2mem_64(Address(Rmatching_row, counter_offset), 1, Rdata);
5954
5955  bind(found_no_slot);
5956
5957  BLOCK_COMMENT("} type profiling");
5958}
5959
5960//---------------------------------------
5961// Helpers for Intrinsic Emitters
5962//---------------------------------------
5963
5964/**
5965 * uint32_t crc;
5966 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
5967 */
5968void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) {
5969  assert_different_registers(crc, table, tmp);
5970  assert_different_registers(val, table);
5971  if (crc == val) {      // Must rotate first to use the unmodified value.
5972    rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
5973    z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
5974  } else {
5975    z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
5976    rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
5977  }
5978  z_x(crc, Address(table, tmp, 0));
5979}
5980
5981/**
5982 * uint32_t crc;
5983 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
5984 */
5985void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
5986  fold_byte_crc32(crc, crc, table, tmp);
5987}
5988
5989/**
5990 * Emits code to update CRC-32 with a byte value according to constants in table.
5991 *
5992 * @param [in,out]crc Register containing the crc.
5993 * @param [in]val     Register containing the byte to fold into the CRC.
5994 * @param [in]table   Register containing the table of crc constants.
5995 *
5996 * uint32_t crc;
5997 * val = crc_table[(val ^ crc) & 0xFF];
5998 * crc = val ^ (crc >> 8);
5999 */
6000void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
6001  z_xr(val, crc);
6002  fold_byte_crc32(crc, val, table, val);
6003}
6004
6005
6006/**
6007 * @param crc   register containing existing CRC (32-bit)
6008 * @param buf   register pointing to input byte buffer (byte*)
6009 * @param len   register containing number of bytes
6010 * @param table register pointing to CRC table
6011 */
6012void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) {
6013  assert_different_registers(crc, buf, len, table, data);
6014
6015  Label L_mainLoop, L_done;
6016  const int mainLoop_stepping = 1;
6017
6018  // Process all bytes in a single-byte loop.
6019  z_ltr(len, len);
6020  z_brnh(L_done);
6021
6022  bind(L_mainLoop);
6023    z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
6024    add2reg(buf, mainLoop_stepping);        // Advance buffer position.
6025    update_byte_crc32(crc, data, table);
6026    z_brct(len, L_mainLoop);                // Iterate.
6027
6028  bind(L_done);
6029}
6030
6031/**
6032 * Emits code to update CRC-32 with a 4-byte value according to constants in table.
6033 * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c.
6034 *
6035 */
6036void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
6037                                        Register t0,  Register t1,  Register t2,    Register t3) {
6038  // This is what we implement (the DOBIG4 part):
6039  //
6040  // #define DOBIG4 c ^= *++buf4; \
6041  //         c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
6042  //             crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
6043  // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
6044  // Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.
6045  const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
6046  const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
6047  const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
6048  const int ix3 = 7*(4*CRC32_COLUMN_SIZE);
6049
6050  // XOR crc with next four bytes of buffer.
6051  lgr_if_needed(t0, crc);
6052  z_x(t0, Address(buf, bufDisp));
6053  if (bufInc != 0) {
6054    add2reg(buf, bufInc);
6055  }
6056
6057  // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices.
6058  rotate_then_insert(t3, t0, 56-2, 63-2, 2,    true);  // ((c >>  0) & 0xff) << 2
6059  rotate_then_insert(t2, t0, 56-2, 63-2, 2-8,  true);  // ((c >>  8) & 0xff) << 2
6060  rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true);  // ((c >> 16) & 0xff) << 2
6061  rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true);  // ((c >> 24) & 0xff) << 2
6062
6063  // XOR indexed table values to calculate updated crc.
6064  z_ly(t2, Address(table, t2, (intptr_t)ix1));
6065  z_ly(t0, Address(table, t0, (intptr_t)ix3));
6066  z_xy(t2, Address(table, t3, (intptr_t)ix0));
6067  z_xy(t0, Address(table, t1, (intptr_t)ix2));
6068  z_xr(t0, t2);           // Now t0 contains the updated CRC value.
6069  lgr_if_needed(crc, t0);
6070}
6071
6072/**
6073 * @param crc   register containing existing CRC (32-bit)
6074 * @param buf   register pointing to input byte buffer (byte*)
6075 * @param len   register containing number of bytes
6076 * @param table register pointing to CRC table
6077 *
 * Uses Z_R10..Z_R13 as work registers. They must be saved/restored by the caller!
6079 */
6080void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
6081                                        Register t0,  Register t1,  Register t2,  Register t3,
6082                                        bool invertCRC) {
6083  assert_different_registers(crc, buf, len, table);
6084
6085  Label L_mainLoop, L_tail;
6086  Register  data = t0;
6087  Register  ctr  = Z_R0;
6088  const int mainLoop_stepping = 8;
6089  const int tailLoop_stepping = 1;
6090  const int log_stepping      = exact_log2(mainLoop_stepping);
6091
6092  // Don't test for len <= 0 here. This pathological case should not occur anyway.
6093  // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
  // The situation itself is detected and handled correctly by the conditional branch
  // following the shift of len (z_srag/z_brnh) and by the length test in the tail byte loop.
6096
6097  if (invertCRC) {
6098    not_(crc, noreg, false);           // 1s complement of crc
6099  }
6100
6101#if 0
6102  {
6103    // Pre-mainLoop alignment did not show any positive effect on performance.
6104    // We leave the code in for reference. Maybe the vector instructions in z13 depend on alignment.
6105
6106    z_cghi(len, mainLoop_stepping);    // Alignment is useless for short data streams.
6107    z_brnh(L_tail);
6108
6109    // Align buf to word (4-byte) boundary.
6110    z_lcr(ctr, buf);
6111    rotate_then_insert(ctr, ctr, 62, 63, 0, true); // TODO: should set cc
6112    z_sgfr(len, ctr);                  // Remaining len after alignment.
6113
6114    update_byteLoop_crc32(crc, buf, ctr, table, data);
6115  }
6116#endif
6117
6118  // Check for short (<mainLoop_stepping bytes) buffer.
6119  z_srag(ctr, len, log_stepping);
6120  z_brnh(L_tail);
6121
  z_lrvr(crc, crc);          // Reverse byte order because we are dealing with big-endian data.
6123  rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
6124
6125  BIND(L_mainLoop);
6126    update_1word_crc32(crc, buf, table, 0, 0, crc, t1, t2, t3);
6127    update_1word_crc32(crc, buf, table, 4, mainLoop_stepping, crc, t1, t2, t3);
6128    z_brct(ctr, L_mainLoop); // Iterate.
6129
  z_lrvr(crc, crc);          // Restore the original byte order.
6131
6132  // Process last few (<8) bytes of buffer.
6133  BIND(L_tail);
6134  update_byteLoop_crc32(crc, buf, len, table, data);
6135
6136  if (invertCRC) {
6137    not_(crc, noreg, false);           // 1s complement of crc
6138  }
6139}
6140
6141/**
6142 * @param crc   register containing existing CRC (32-bit)
6143 * @param buf   register pointing to input byte buffer (byte*)
6144 * @param len   register containing number of bytes
6145 * @param table register pointing to CRC table
6146 *
 * Uses Z_R10..Z_R13 as work registers. They must be saved/restored by the caller!
6148 */
6149void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
6150                                        Register t0,  Register t1,  Register t2,  Register t3,
6151                                        bool invertCRC) {
6152  assert_different_registers(crc, buf, len, table);
6153
6154  Label L_mainLoop, L_tail;
6155  Register  data = t0;
6156  Register  ctr  = Z_R0;
6157  const int mainLoop_stepping = 4;
6158  const int log_stepping      = exact_log2(mainLoop_stepping);
6159
6160  // Don't test for len <= 0 here. This pathological case should not occur anyway.
6161  // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
  // The situation itself is detected and handled correctly by the conditional branch
  // following the shift of len (z_srag/z_brnh) and by the length test in the tail byte loop.
6164
6165  if (invertCRC) {
6166    not_(crc, noreg, false);           // 1s complement of crc
6167  }
6168
6169  // Check for short (<4 bytes) buffer.
6170  z_srag(ctr, len, log_stepping);
6171  z_brnh(L_tail);
6172
  z_lrvr(crc, crc);          // Reverse byte order because we are dealing with big-endian data.
6174  rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
6175
6176  BIND(L_mainLoop);
6177    update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
6178    z_brct(ctr, L_mainLoop); // Iterate.
6179
  z_lrvr(crc, crc);          // Restore the original byte order.
6181
  // Process last few (<4) bytes of buffer.
6183  BIND(L_tail);
6184  update_byteLoop_crc32(crc, buf, len, table, data);
6185
6186  if (invertCRC) {
6187    not_(crc, noreg, false);           // 1s complement of crc
6188  }
6189}
6190
6191/**
6192 * @param crc   register containing existing CRC (32-bit)
6193 * @param buf   register pointing to input byte buffer (byte*)
6194 * @param len   register containing number of bytes
6195 * @param table register pointing to CRC table
6196 */
6197void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
6198                                        Register t0,  Register t1,  Register t2,  Register t3,
6199                                        bool invertCRC) {
6200  assert_different_registers(crc, buf, len, table);
6201  Register data = t0;
6202
6203  if (invertCRC) {
6204    not_(crc, noreg, false);           // 1s complement of crc
6205  }
6206
6207  update_byteLoop_crc32(crc, buf, len, table, data);
6208
6209  if (invertCRC) {
6210    not_(crc, noreg, false);           // 1s complement of crc
6211  }
6212}
6213
6214void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
6215                                             bool invertCRC) {
6216  assert_different_registers(crc, buf, len, table, tmp);
6217
6218  if (invertCRC) {
6219    not_(crc, noreg, false);           // 1s complement of crc
6220  }
6221
6222  z_llgc(tmp, Address(buf, (intptr_t)0));  // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
6223  update_byte_crc32(crc, tmp, table);
6224
6225  if (invertCRC) {
6226    not_(crc, noreg, false);           // 1s complement of crc
6227  }
6228}
6229
6230void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table,
6231                                                bool invertCRC) {
6232  assert_different_registers(crc, val, table);
6233
6234  if (invertCRC) {
6235    not_(crc, noreg, false);           // 1s complement of crc
6236  }
6237
6238  update_byte_crc32(crc, val, table);
6239
6240  if (invertCRC) {
6241    not_(crc, noreg, false);           // 1s complement of crc
6242  }
6243}
6244
6245//
6246// Code for BigInteger::multiplyToLen() intrinsic.
6247//
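// The emitters below implement the same schoolbook multiplication as the Java fallback code:
// the int[] magnitudes are processed two 32-bit words (one 64-bit limb) at a time, with MLGR
// producing the 128-bit limb product and ALGR/ALCGR propagating the carries.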
6248
// dest_lo += src1 + src2
// dest_hi += carry1 + carry2 (the carries produced by the two additions above)
// Z_R7 is destroyed!
6252void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo,
6253                                     Register src1, Register src2) {
6254  clear_reg(Z_R7);
6255  z_algr(dest_lo, src1);
6256  z_alcgr(dest_hi, Z_R7);
6257  z_algr(dest_lo, src2);
6258  z_alcgr(dest_hi, Z_R7);
6259}
6260
6261// Multiply 64 bit by 64 bit first loop.
6262void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart,
6263                                           Register x_xstart,
6264                                           Register y, Register y_idx,
6265                                           Register z,
6266                                           Register carry,
6267                                           Register product,
6268                                           Register idx, Register kdx) {
6269  // jlong carry, x[], y[], z[];
6270  // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
6271  //   huge_128 product = y[idx] * x[xstart] + carry;
6272  //   z[kdx] = (jlong)product;
6273  //   carry  = (jlong)(product >>> 64);
6274  // }
6275  // z[xstart] = carry;
6276
6277  Label L_first_loop, L_first_loop_exit;
6278  Label L_one_x, L_one_y, L_multiply;
6279
6280  z_aghi(xstart, -1);
6281  z_brl(L_one_x);   // Special case: length of x is 1.
6282
6283  // Load next two integers of x.
6284  z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
6285  mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
6286
6287
6288  bind(L_first_loop);
6289
6290  z_aghi(idx, -1);
6291  z_brl(L_first_loop_exit);
6292  z_aghi(idx, -1);
6293  z_brl(L_one_y);
6294
6295  // Load next two integers of y.
6296  z_sllg(Z_R1_scratch, idx, LogBytesPerInt);
6297  mem2reg_opt(y_idx, Address(y, Z_R1_scratch, 0));
6298
6299
6300  bind(L_multiply);
6301
6302  Register multiplicand = product->successor();
6303  Register product_low = multiplicand;
6304
6305  lgr_if_needed(multiplicand, x_xstart);
6306  z_mlgr(product, y_idx);     // multiplicand * y_idx -> product::multiplicand
6307  clear_reg(Z_R7);
6308  z_algr(product_low, carry); // Add carry to result.
6309  z_alcgr(product, Z_R7);     // Add carry of the last addition.
6310  add2reg(kdx, -2);
6311
6312  // Store result.
6313  z_sllg(Z_R7, kdx, LogBytesPerInt);
6314  reg2mem_opt(product_low, Address(z, Z_R7, 0));
6315  lgr_if_needed(carry, product);
6316  z_bru(L_first_loop);
6317
6318
6319  bind(L_one_y); // Load one 32 bit portion of y as (0,value).
6320
6321  clear_reg(y_idx);
6322  mem2reg_opt(y_idx, Address(y, (intptr_t) 0), false);
6323  z_bru(L_multiply);
6324
6325
6326  bind(L_one_x); // Load one 32 bit portion of x as (0,value).
6327
6328  clear_reg(x_xstart);
6329  mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
6330  z_bru(L_first_loop);
6331
6332  bind(L_first_loop_exit);
6333}
6334
6335// Multiply 64 bit by 64 bit and add 128 bit.
6336void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y,
6337                                            Register z,
6338                                            Register yz_idx, Register idx,
6339                                            Register carry, Register product,
6340                                            int offset) {
6341  // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
6342  // z[kdx] = (jlong)product;
6343
6344  Register multiplicand = product->successor();
6345  Register product_low = multiplicand;
6346
6347  z_sllg(Z_R7, idx, LogBytesPerInt);
6348  mem2reg_opt(yz_idx, Address(y, Z_R7, offset));
6349
6350  lgr_if_needed(multiplicand, x_xstart);
6351  z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
6352  mem2reg_opt(yz_idx, Address(z, Z_R7, offset));
6353
6354  add2_with_carry(product, product_low, carry, yz_idx);
6355
6356  z_sllg(Z_R7, idx, LogBytesPerInt);
6357  reg2mem_opt(product_low, Address(z, Z_R7, offset));
6358
6359}
6360
6361// Multiply 128 bit by 128 bit. Unrolled inner loop.
6362void MacroAssembler::multiply_128_x_128_loop(Register x_xstart,
6363                                             Register y, Register z,
6364                                             Register yz_idx, Register idx,
6365                                             Register jdx,
6366                                             Register carry, Register product,
6367                                             Register carry2) {
6368  // jlong carry, x[], y[], z[];
6369  // int kdx = ystart+1;
6370  // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
6371  //   huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
6372  //   z[kdx+idx+1] = (jlong)product;
6373  //   jlong carry2 = (jlong)(product >>> 64);
6374  //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
6375  //   z[kdx+idx] = (jlong)product;
6376  //   carry = (jlong)(product >>> 64);
6377  // }
6378  // idx += 2;
6379  // if (idx > 0) {
6380  //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
6381  //   z[kdx+idx] = (jlong)product;
6382  //   carry = (jlong)(product >>> 64);
6383  // }
6384
6385  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
6386
6387  // scale the index
6388  lgr_if_needed(jdx, idx);
6389  and_imm(jdx, 0xfffffffffffffffcL);
6390  rshift(jdx, 2);
6391
6392
6393  bind(L_third_loop);
6394
6395  z_aghi(jdx, -1);
6396  z_brl(L_third_loop_exit);
6397  add2reg(idx, -4);
6398
6399  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
6400  lgr_if_needed(carry2, product);
6401
6402  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
6403  lgr_if_needed(carry, product);
6404  z_bru(L_third_loop);
6405
6406
6407  bind(L_third_loop_exit);  // Handle any left-over operand parts.
6408
6409  and_imm(idx, 0x3);
6410  z_brz(L_post_third_loop_done);
6411
6412  Label L_check_1;
6413
6414  z_aghi(idx, -2);
6415  z_brl(L_check_1);
6416
6417  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
6418  lgr_if_needed(carry, product);
6419
6420
6421  bind(L_check_1);
6422
6423  add2reg(idx, 0x2);
6424  and_imm(idx, 0x1);
6425  z_aghi(idx, -1);
6426  z_brl(L_post_third_loop_done);
6427
6428  Register   multiplicand = product->successor();
6429  Register   product_low = multiplicand;
6430
6431  z_sllg(Z_R7, idx, LogBytesPerInt);
6432  clear_reg(yz_idx);
6433  mem2reg_opt(yz_idx, Address(y, Z_R7, 0), false);
6434  lgr_if_needed(multiplicand, x_xstart);
6435  z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
6436  clear_reg(yz_idx);
6437  mem2reg_opt(yz_idx, Address(z, Z_R7, 0), false);
6438
6439  add2_with_carry(product, product_low, yz_idx, carry);
6440
6441  z_sllg(Z_R7, idx, LogBytesPerInt);
6442  reg2mem_opt(product_low, Address(z, Z_R7, 0), false);
6443  rshift(product_low, 32);
6444
6445  lshift(product, 32);
6446  z_ogr(product_low, product);
6447  lgr_if_needed(carry, product_low);
6448
6449  bind(L_post_third_loop_done);
6450}
6451
6452void MacroAssembler::multiply_to_len(Register x, Register xlen,
6453                                     Register y, Register ylen,
6454                                     Register z,
6455                                     Register tmp1, Register tmp2,
6456                                     Register tmp3, Register tmp4,
6457                                     Register tmp5) {
6458  ShortBranchVerifier sbv(this);
6459
6460  assert_different_registers(x, xlen, y, ylen, z,
6461                             tmp1, tmp2, tmp3, tmp4, tmp5, Z_R1_scratch, Z_R7);
6462  assert_different_registers(x, xlen, y, ylen, z,
6463                             tmp1, tmp2, tmp3, tmp4, tmp5, Z_R8);
6464
6465  z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
6466
  // In OpenJDK, the zlen argument is stored as a 32-bit value in its stack slot.
6468  Address zlen(Z_SP, _z_abi(remaining_cargs));  // Int in long on big endian.
6469
6470  const Register idx = tmp1;
6471  const Register kdx = tmp2;
6472  const Register xstart = tmp3;
6473
6474  const Register y_idx = tmp4;
6475  const Register carry = tmp5;
6476  const Register product  = Z_R0_scratch;
6477  const Register x_xstart = Z_R8;
6478
6479  // First Loop.
6480  //
6481  //   final static long LONG_MASK = 0xffffffffL;
6482  //   int xstart = xlen - 1;
6483  //   int ystart = ylen - 1;
6484  //   long carry = 0;
  //   for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
6486  //     long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
6487  //     z[kdx] = (int)product;
6488  //     carry = product >>> 32;
6489  //   }
6490  //   z[xstart] = (int)carry;
6491  //
6492
6493  lgr_if_needed(idx, ylen);  // idx = ylen
6494  z_llgf(kdx, zlen);         // C2 does not respect int to long conversion for stub calls, thus load zero-extended.
6495  clear_reg(carry);          // carry = 0
6496
6497  Label L_done;
6498
6499  lgr_if_needed(xstart, xlen);
6500  z_aghi(xstart, -1);
6501  z_brl(L_done);
6502
6503  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
6504
6505  NearLabel L_second_loop;
6506  compare64_and_branch(kdx, RegisterOrConstant((intptr_t) 0), bcondEqual, L_second_loop);
6507
6508  NearLabel L_carry;
6509  z_aghi(kdx, -1);
6510  z_brz(L_carry);
6511
6512  // Store lower 32 bits of carry.
6513  z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
6514  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
6515  rshift(carry, 32);
6516  z_aghi(kdx, -1);
6517
6518
6519  bind(L_carry);
6520
6521  // Store upper 32 bits of carry.
6522  z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
6523  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
6524
6525  // Second and third (nested) loops.
6526  //
6527  // for (int i = xstart-1; i >= 0; i--) { // Second loop
6528  //   carry = 0;
6529  //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
6530  //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
6531  //                    (z[k] & LONG_MASK) + carry;
6532  //     z[k] = (int)product;
6533  //     carry = product >>> 32;
6534  //   }
6535  //   z[i] = (int)carry;
6536  // }
6537  //
  // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = x_xstart
6539
6540  const Register jdx = tmp1;
6541
6542  bind(L_second_loop);
6543
6544  clear_reg(carry);           // carry = 0;
6545  lgr_if_needed(jdx, ylen);   // j = ystart+1
6546
6547  z_aghi(xstart, -1);         // i = xstart-1;
6548  z_brl(L_done);
6549
6550  // Use free slots in the current stackframe instead of push/pop.
6551  Address zsave(Z_SP, _z_abi(carg_1));
6552  reg2mem_opt(z, zsave);
6553
6554
6555  Label L_last_x;
6556
6557  z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
6558  load_address(z, Address(z, Z_R1_scratch, 4)); // z = z + k - j
6559  z_aghi(xstart, -1);                           // i = xstart-1;
6560  z_brl(L_last_x);
6561
6562  z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
6563  mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
6564
6565
6566  Label L_third_loop_prologue;
6567
6568  bind(L_third_loop_prologue);
6569
6570  Address xsave(Z_SP, _z_abi(carg_2));
6571  Address xlensave(Z_SP, _z_abi(carg_3));
6572  Address ylensave(Z_SP, _z_abi(carg_4));
6573
6574  reg2mem_opt(x, xsave);
6575  reg2mem_opt(xstart, xlensave);
6576  reg2mem_opt(ylen, ylensave);
6577
6578
6579  multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
6580
6581  mem2reg_opt(z, zsave);
6582  mem2reg_opt(x, xsave);
6583  mem2reg_opt(xlen, xlensave);   // This is the decrement of the loop counter!
6584  mem2reg_opt(ylen, ylensave);
6585
6586  add2reg(tmp3, 1, xlen);
6587  z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
6588  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
6589  z_aghi(tmp3, -1);
6590  z_brl(L_done);
6591
6592  rshift(carry, 32);
6593  z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
6594  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
6595  z_bru(L_second_loop);
6596
  // The following, infrequently executed code is moved out of the loops.
6598  bind(L_last_x);
6599
6600  clear_reg(x_xstart);
6601  mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
6602  z_bru(L_third_loop_prologue);
6603
6604  bind(L_done);
6605
6606  z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
6607}
6608
6609#ifndef PRODUCT
6610// Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
6611void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
6612  Label ok;
6613  if (check_equal) {
6614    z_bre(ok);
6615  } else {
6616    z_brne(ok);
6617  }
6618  stop(msg, id);
6619  bind(ok);
6620}
6621
6622// Assert if CC indicates "low".
6623void MacroAssembler::asm_assert_low(const char *msg, int id) {
6624  Label ok;
6625  z_brnl(ok);
6626  stop(msg, id);
6627  bind(ok);
6628}
6629
6630// Assert if CC indicates "high".
6631void MacroAssembler::asm_assert_high(const char *msg, int id) {
6632  Label ok;
6633  z_brnh(ok);
6634  stop(msg, id);
6635  bind(ok);
6636}
6637
6638// Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false)
6639// generate non-relocatable code.
6640void MacroAssembler::asm_assert_static(bool check_equal, const char *msg, int id) {
6641  Label ok;
6642  if (check_equal) { z_bre(ok); }
6643  else             { z_brne(ok); }
6644  stop_static(msg, id);
6645  bind(ok);
6646}
6647
6648void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
6649                                          Register mem_base, const char* msg, int id) {
6650  switch (size) {
6651    case 4:
6652      load_and_test_int(Z_R0, Address(mem_base, mem_offset));
6653      break;
6654    case 8:
6655      load_and_test_long(Z_R0,  Address(mem_base, mem_offset));
6656      break;
6657    default:
6658      ShouldNotReachHere();
6659  }
6660  if (allow_relocation) { asm_assert(check_equal, msg, id); }
6661  else                  { asm_assert_static(check_equal, msg, id); }
6662}
6663
6664// Check the condition
6665//   expected_size == FP - SP
6666// after transformation:
6667//   expected_size - FP + SP == 0
6668// Destroys Register expected_size if no tmp register is passed.
6669void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) {
  if (tmp == noreg) {
    tmp = expected_size;
  } else if (tmp != expected_size) {
    z_lgr(tmp, expected_size);
  }
  z_algr(tmp, Z_SP);
  z_slg(tmp, 0, Z_R0, Z_SP);
  asm_assert_eq(msg, id);
}
6681#endif // !PRODUCT
6682
6683void MacroAssembler::verify_thread() {
6684  if (VerifyThread) {
6685    unimplemented("", 117);
6686  }
6687}
6688
6689// Plausibility check for oops.
6690void MacroAssembler::verify_oop(Register oop, const char* msg) {
6691  if (!VerifyOops) return;
6692
6693  BLOCK_COMMENT("verify_oop {");
6694  Register tmp = Z_R0;
6695  unsigned int nbytes_save = 5*BytesPerWord;
6696  address entry = StubRoutines::verify_oop_subroutine_entry_address();
6697
6698  save_return_pc();
6699  push_frame_abi160(nbytes_save);
6700  z_stmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP);
6701
6702  z_lgr(Z_ARG2, oop);
6703  load_const(Z_ARG1, (address) msg);
6704  load_const(Z_R1, entry);
6705  z_lg(Z_R1, 0, Z_R1);
6706  call_c(Z_R1);
6707
6708  z_lmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP);
6709  pop_frame();
6710  restore_return_pc();
6711
6712  BLOCK_COMMENT("} verify_oop ");
6713}
6714
6715const char* MacroAssembler::stop_types[] = {
6716  "stop",
6717  "untested",
6718  "unimplemented",
6719  "shouldnotreachhere"
6720};
6721
6722static void stop_on_request(const char* tp, const char* msg) {
6723  tty->print("Z assembly code requires stop: (%s) %s\n", tp, msg);
6724  guarantee(false, "Z assembly code requires stop: %s", msg);
6725}
6726
6727void MacroAssembler::stop(int type, const char* msg, int id) {
6728  BLOCK_COMMENT(err_msg("stop: %s {", msg));
6729
6730  // Setup arguments.
6731  load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
6732  load_const(Z_ARG2, (void*) msg);
6733  get_PC(Z_R14);     // Following code pushes a frame without entering a new function. Use current pc as return address.
6734  save_return_pc();  // Saves return pc Z_R14.
6735  push_frame_abi160(0);
6736  call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
6737  // The plain disassembler does not recognize illtrap. It instead displays
  // a 32-bit value. Issuing two illtraps ensures that the disassembler finds
6739  // the proper beginning of the next instruction.
6740  z_illtrap(); // Illegal instruction.
6741  z_illtrap(); // Illegal instruction.
6742
6743  BLOCK_COMMENT(" } stop");
6744}
6745
6746// Special version of stop() for code size reduction.
6747// Reuses the previously generated call sequence, if any.
6748// Generates the call sequence on its own, if necessary.
6749// Note: This code will work only in non-relocatable code!
6750//       The relative address of the data elements (arg1, arg2) must not change.
//       The reentry point must not move relative to its users. This prerequisite
//       holds for "hand-written" code, provided all chained calls are in the same code blob.
6753//       Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe.
6754address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) {
6755  BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg));
6756
6757  // Setup arguments.
6758  if (allow_relocation) {
6759    // Relocatable version (for comparison purposes). Remove after some time.
6760    load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
6761    load_const(Z_ARG2, (void*) msg);
6762  } else {
6763    load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]);
6764    load_absolute_address(Z_ARG2, (address)msg);
6765  }
6766  if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) {
6767    BLOCK_COMMENT("branch to reentry point:");
6768    z_brc(bcondAlways, reentry);
6769  } else {
6770    BLOCK_COMMENT("reentry point:");
6771    reentry = pc();      // Re-entry point for subsequent stop calls.
6772    save_return_pc();    // Saves return pc Z_R14.
6773    push_frame_abi160(0);
6774    if (allow_relocation) {
6775      reentry = NULL;    // Prevent reentry if code relocation is allowed.
6776      call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
6777    } else {
6778      call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
6779    }
6780    z_illtrap(); // Illegal instruction as emergency stop, should the above call return.
6781  }
6782  BLOCK_COMMENT(" } stop_chain");
6783
6784  return reentry;
6785}
6786
6787// Special version of stop() for code size reduction.
6788// Assumes constant relative addresses for data and runtime call.
6789void MacroAssembler::stop_static(int type, const char* msg, int id) {
6790  stop_chain(NULL, type, msg, id, false);
6791}
6792
6793void MacroAssembler::stop_subroutine() {
6794  unimplemented("stop_subroutine", 710);
6795}
6796
// Prints msg to stdout from within generated code.
6798void MacroAssembler::warn(const char* msg) {
6799  RegisterSaver::save_live_registers(this, RegisterSaver::all_registers, Z_R14);
6800  load_absolute_address(Z_R1, (address) warning);
6801  load_absolute_address(Z_ARG1, (address) msg);
6802  (void) call(Z_R1);
6803  RegisterSaver::restore_live_registers(this, RegisterSaver::all_registers);
6804}
6805
6806#ifndef PRODUCT
6807
6808// Write pattern 0x0101010101010101 in region [low-before, high+after].
6809void MacroAssembler::zap_from_to(Register low, Register high, Register val, Register addr, int before, int after) {
6810  if (!ZapEmptyStackFields) return;
6811  BLOCK_COMMENT("zap memory region {");
6812  load_const_optimized(val, 0x0101010101010101);
6813  int size = before + after;
6814  if (low == high && size < 5 && size > 0) {
6815    int offset = -before*BytesPerWord;
6816    for (int i = 0; i < size; ++i) {
6817      z_stg(val, Address(low, offset));
6818      offset +=(1*BytesPerWord);
6819    }
6820  } else {
6821    add2reg(addr, -before*BytesPerWord, low);
6822    if (after) {
6823#ifdef ASSERT
6824      jlong check = after * BytesPerWord;
6825      assert(Immediate::is_simm32(check) && Immediate::is_simm32(-check), "value not encodable !");
6826#endif
6827      add2reg(high, after * BytesPerWord);
6828    }
6829    NearLabel loop;
6830    bind(loop);
6831    z_stg(val, Address(addr));
6832    add2reg(addr, 8);
6833    compare64_and_branch(addr, high, bcondNotHigh, loop);
6834    if (after) {
6835      add2reg(high, -after * BytesPerWord);
6836    }
6837  }
6838  BLOCK_COMMENT("} zap memory region");
6839}
6840#endif // !PRODUCT
6841
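// SkipIfEqual: the constructor emits a test of the bool flag at flag_addr and a branch over the
// subsequently generated code if the flag equals 'value'; the destructor binds the branch target
// label.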
6842SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) {
6843  _masm = masm;
6844  _masm->load_absolute_address(_rscratch, (address)flag_addr);
6845  _masm->load_and_test_int(_rscratch, Address(_rscratch));
6846  if (value) {
6847    _masm->z_brne(_label); // Skip if true, i.e. != 0.
6848  } else {
6849    _masm->z_bre(_label);  // Skip if false, i.e. == 0.
6850  }
6851}
6852
6853SkipIfEqual::~SkipIfEqual() {
6854  _masm->bind(_label);
6855}
6856