// c1_Runtime1_x86.cpp revision 7331:110ec5963eb1
/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif


// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;
#ifdef _LP64
  // At a method handle call, the stack may not be properly aligned
  // when returning with an exception.
  align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id);
#endif

#ifdef _LP64
  mov(c_rarg0, thread);
  set_num_rt_args(0); // Nothing on stack
#else
  set_num_rt_args(1 + args_size);

  // push java thread (becomes first argument of C function)
  get_thread(thread);
  push(thread);
#endif // _LP64

  int call_offset;
  if (!align_stack) {
    set_last_Java_frame(thread, noreg, rbp, NULL);
  } else {
    address the_pc = pc();
    call_offset = offset();
    set_last_Java_frame(thread, noreg, rbp, the_pc);
    andptr(rsp, -(StackAlignmentInBytes));    // Align stack
  }

  // do the call
  call(RuntimeAddress(entry));
  if (!align_stack) {
    call_offset = offset();
  }
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread(rax);
    cmpptr(thread, rax);
    jcc(Assembler::equal, L);
    int3();
    stop("StubAssembler::call_RT: rdi not callee saved?");
    bind(L);
  }
  pop(rax);
#endif
  reset_last_Java_frame(thread, true, align_stack);

  // discard thread and arguments
  NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));

  // check for pending exceptions
  { Label L;
    cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler
    movptr(rax, Address(thread, Thread::pending_exception_offset()));
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    }
    if (metadata_result->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    }
    if (frame_size() == no_frame_size) {
      leave();
      jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, thread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, thread);
  }
  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
#ifdef _LP64
  mov(c_rarg1, arg1);
#else
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
#ifdef _LP64
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      xchgq(arg1, arg2);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
#else
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
#ifdef _LP64
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    push(arg3);
    push(arg2);
    push(arg1);
    pop(c_rarg1);
    pop(c_rarg2);
    pop(c_rarg3);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
#else
  push(arg3);
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 3);
}
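
// Illustrative usage (editor's sketch, not part of the original source): stub
// generators later in this file invoke these overloads with a C1 runtime entry
// and up to three register arguments; the returned offset marks the GC-safe
// point of the embedded call, e.g.:
//
//   int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
//   oop_maps->add_gc_map(call_offset, map);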


// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};


#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  // rbp + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...

  __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}
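
// Worked example (editor's note): load_argument(0, rbx) reads the word at
// [rbp + 2 * BytesPerWord], i.e. the first slot above the saved rbp and the
// return address; that is [rbp + 16] on 64-bit and [rbp + 8] on 32-bit.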


StubFrame::~StubFrame() {
  __ leave();
  __ ret(0);
}
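
// Illustrative usage (editor's sketch): StubFrame is a RAII helper, so a stub
// body built inside its scope gets enter() up front and leave()/ret(0) on
// scope exit, as the cases in generate_code_for below do:
//
//   { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
//     // ... emit stub body, e.g. f.load_argument(0, rbx) for a stack arg ...
//   } // ~StubFrame emits leave() and ret(0)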

#undef __


// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note that users of this frame may well have arguments to some runtime call
// on the stack while these values are live. These positions neglect those
// arguments, but the code in save_live_registers takes the argument count
// into account.
//
#ifdef _LP64
  #define SLOT2(x) x,
  #define SLOT_PER_WORD 2
#else
  #define SLOT2(x)
  #define SLOT_PER_WORD 1
#endif // _LP64

enum reg_save_layout {
  // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
  // happen and will assert if the stack size we create is misaligned
#ifdef _LP64
  align_dummy_0, align_dummy_1,
#endif // _LP64
#ifdef _WIN64
  // Windows always allocates space for its argument registers (see
  // frame::arg_reg_save_area_bytes).
  arg_reg_save_1, arg_reg_save_1H,                                                          // 0, 4
  arg_reg_save_2, arg_reg_save_2H,                                                          // 8, 12
  arg_reg_save_3, arg_reg_save_3H,                                                          // 16, 20
  arg_reg_save_4, arg_reg_save_4H,                                                          // 24, 28
#endif // _WIN64
  xmm_regs_as_doubles_off,                                                                  // 32
  float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots,  // 160
  fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,          // 224
  // fpu_state_end_off is exclusive
  fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),                // 352
  marker = fpu_state_end_off, SLOT2(markerH)                                                // 352, 356
  extra_space_offset,                                                                       // 360
#ifdef _LP64
  r15_off = extra_space_offset, r15H_off,                                                   // 360, 364
  r14_off, r14H_off,                                                                        // 368, 372
  r13_off, r13H_off,                                                                        // 376, 380
  r12_off, r12H_off,                                                                        // 384, 388
  r11_off, r11H_off,                                                                        // 392, 396
  r10_off, r10H_off,                                                                        // 400, 404
  r9_off, r9H_off,                                                                          // 408, 412
  r8_off, r8H_off,                                                                          // 416, 420
  rdi_off, rdiH_off,                                                                        // 424, 428
#else
  rdi_off = extra_space_offset,
#endif // _LP64
  rsi_off, SLOT2(rsiH_off)                                                                  // 432, 436
  rbp_off, SLOT2(rbpH_off)                                                                  // 440, 444
  rsp_off, SLOT2(rspH_off)                                                                  // 448, 452
  rbx_off, SLOT2(rbxH_off)                                                                  // 456, 460
  rdx_off, SLOT2(rdxH_off)                                                                  // 464, 468
  rcx_off, SLOT2(rcxH_off)                                                                  // 472, 476
  rax_off, SLOT2(raxH_off)                                                                  // 480, 484
  saved_rbp_off, SLOT2(saved_rbpH_off)                                                      // 488, 492
  return_off, SLOT2(returnH_off)                                                            // 496, 500
  reg_save_frame_size   // As noted: neglects any parameters to runtime                     // 504
};
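
// Editor's note on SLOT2/SLOT_PER_WORD (illustrative): on 64-bit every
// register takes two 32-bit stack slots, so SLOT2 emits the high-half slot
// name, e.g.
//
//   rax_off, SLOT2(raxH_off)
//
// expands to "rax_off, raxH_off," under _LP64 and to just "rax_off,"
// otherwise. The byte offsets in the trailing comments assume the 64-bit
// (Win64) expansion.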



// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them and on P4
// saving FPU registers which don't contain anything appears
// expensive.  The deopt blob is the only thing which needs to
// describe FPU registers.  In all other cases it should be sufficient
// to simply save their current value.

static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
                                bool save_fpu_registers = true) {

  // In 64bit all the args are in regs so there are no additional stack slots
  LP64_ONLY(num_rt_args = 0);
  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
  int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);

  // record saved value locations in an OopMap
  // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
  OopMap* map = new OopMap(frame_size_in_slots, 0);
  map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args),  r8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args),  r9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());

  // This is stupid but needed.
  map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());

  map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args),  r8->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args),  r9->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
#endif // _LP64

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      int fpu_off = float_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg fpu_name_0 = FrameMap::fpu_regname(n);
        map->set_callee_saved(VMRegImpl::stack2reg(fpu_off +     num_rt_args), fpu_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
        }
        fpu_off += 2;
      }
      assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
    }

    if (UseSSE >= 2) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off +     num_rt_args), xmm_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
        }
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");

    } else if (UseSSE == 1) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off +     num_rt_args), xmm_name_0);
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
    }
  }

  return map;
}
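
// Editor's note (illustrative): each set_callee_saved entry above says "after
// the runtime call, the saved value of this register lives at this stack
// slot". On 32-bit, num_rt_args shifts every slot index by the arguments
// pushed for the call (e.g. with num_rt_args == 1 for the thread, rax is
// described at slot rax_off + 1); on 64-bit num_rt_args is forced to 0, so
// the slot indices match the reg_save_layout enum directly.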

static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ pusha();         // integer registers

  // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
  // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");

  __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);

#ifdef ASSERT
  __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      // save FPU stack
      __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
      __ fwait();

#ifdef ASSERT
      Label ok;
      __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ jccb(Assembler::equal, ok);
      __ stop("corrupted control word detected");
      __ bind(ok);
#endif

      // Reset the control word to guard against exceptions being unmasked
      // since fstp_d can cause FPU stack underflow exceptions.  Write it
      // into the on-stack copy and then reload that to make sure that the
      // current and future values are correct.
      __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));

      // Save the FPU registers in de-opt-able form
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE >= 2) {
      // save XMM registers
      // XMM registers can contain float or double values, but this is not known here,
      // so always save them as doubles.
      // Note that float values are _not_ converted automatically, so for float values
      // the second word contains only garbage data.
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
#ifdef _LP64
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64), xmm8);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72), xmm9);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80), xmm10);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88), xmm11);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96), xmm12);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
#endif // _LP64
    } else if (UseSSE == 1) {
      // save XMM registers as float because double is not supported without SSE2
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
    }
  }

  // FPU stack must be empty now
  __ verify_FPU(0, "save_live_registers");

  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}
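
// Illustrative pairing (editor's sketch): callers bracket a runtime call with
// save_live_registers / restore_live_registers and attach the returned OopMap
// at the call offset, as the counter_overflow case below does:
//
//   OopMap* map = save_live_registers(sasm, 3);
//   int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
//   oop_maps->add_gc_map(call_offset, map);
//   restore_live_registers(sasm);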


static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    if (UseSSE >= 2) {
      // restore XMM registers
      __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
#ifdef _LP64
      __ movdbl(xmm8, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64));
      __ movdbl(xmm9, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72));
      __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80));
      __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88));
      __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96));
      __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
      __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
      __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
#endif // _LP64
    } else if (UseSSE == 1) {
      // restore XMM registers
      __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE < 2) {
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
    } else {
      // check that FPU stack is really empty
      __ verify_FPU(0, "restore_live_registers");
    }

  } else {
    // check that FPU stack is really empty
    __ verify_FPU(0, "restore_live_registers");
  }

#ifdef ASSERT
  {
    Label ok;
    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
    __ jcc(Assembler::equal, ok);
    __ stop("bad offsets in frame");
    __ bind(ok);
  }
#endif // ASSERT

  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}


static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");

  restore_fpu(sasm, restore_fpu_registers);
  __ popa();
}


static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers_except_rax");

  restore_fpu(sasm, restore_fpu_registers);

#ifdef _LP64
  __ movptr(r15, Address(rsp, 0));
  __ movptr(r14, Address(rsp, wordSize));
  __ movptr(r13, Address(rsp, 2 * wordSize));
  __ movptr(r12, Address(rsp, 3 * wordSize));
  __ movptr(r11, Address(rsp, 4 * wordSize));
  __ movptr(r10, Address(rsp, 5 * wordSize));
  __ movptr(r9,  Address(rsp, 6 * wordSize));
  __ movptr(r8,  Address(rsp, 7 * wordSize));
  __ movptr(rdi, Address(rsp, 8 * wordSize));
  __ movptr(rsi, Address(rsp, 9 * wordSize));
  __ movptr(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  __ movptr(rbx, Address(rsp, 12 * wordSize));
  __ movptr(rdx, Address(rsp, 13 * wordSize));
  __ movptr(rcx, Address(rsp, 14 * wordSize));

  __ addptr(rsp, 16 * wordSize);
#else

  __ pop(rdi);
  __ pop(rsi);
  __ pop(rbp);
  __ pop(rbx); // skip this value
  __ pop(rbx);
  __ pop(rdx);
  __ pop(rcx);
  __ addptr(rsp, BytesPerWord);
#endif // _LP64
}


void Runtime1::initialize_pd() {
  // nothing to do
}


// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // preserve all registers
  int num_rt_args = has_argument ? 2 : 1;
  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

  // now all registers are saved and can be used freely
  // verify that no old value is used accidentally
  __ invalidate_registers(true, true, true, true, true, true);

  // registers used by this stub
  const Register temp_reg = rbx;

  // load argument for exception that is passed as an argument into the stub
  if (has_argument) {
#ifdef _LP64
    __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
#else
    __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
    __ push(temp_reg);
#endif // _LP64
  }
  int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ stop("should not reach here");

  return oop_maps;
}
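
// Illustrative usage (editor's sketch): the throw_* cases in generate_code_for
// below wrap this helper in a StubFrame, passing the Runtime1 entry that
// creates and posts the exception:
//
//   { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
//     oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
//   }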


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = rax;
  const Register exception_pc  = rdx;
  // other registers used in this stub
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into RAX
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // load issuing PC (the return address for this stub) into rdx
    __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ movptr(Address(thread, JavaThread::vm_result_offset()),   NULL_WORD);
    __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (RAX) and
    // exception pc (RDX) are dead.
    const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
    break;
  }
  default:  ShouldNotReachHere();
  }

#ifdef TIERED
  // C2 can leave the fpu stack dirty
  if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // TIERED

  // verify that only rax and rdx are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that rax contains a valid exception
  __ verify_not_null_oop(exception_oop);

  // load address of JavaThread object for thread-local data
  NOT_LP64(__ get_thread(thread);)

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);

  // patch throwing pc into return address (has bci & oop map)
  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // rax: handler address
  //      will be the deopt blob if the nmethod was deoptimized while we looked up
  //      the handler, regardless of whether a handler existed in the nmethod.

  // only rax is valid at this time; all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address; this stub will directly return to the exception handler
  __ movptr(Address(rbp, 1*BytesPerWord), rax);

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id == handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
    // since we do a leave anyway.

    // Pop the return address since we are possibly changing SP (restoring from BP).
    __ leave();
    __ pop(rcx);

    // Restore SP from BP if the exception PC is a method handle call site.
    NOT_LP64(__ get_thread(thread);)
    __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
    __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
    __ jmp(rcx);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = rax;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
  // other registers used in this stub
  const Register exception_pc = rdx;
  const Register handler_addr = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // verify that only rax is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  NOT_LP64(__ get_thread(thread);)
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

  // save exception_oop in callee-saved register to preserve it during runtime calls
  __ verify_not_null_oop(exception_oop);
  __ movptr(exception_oop_callee_saved, exception_oop);

  NOT_LP64(__ get_thread(thread);)
  // Get return address (is on top of stack after leave).
  __ movptr(exception_pc, Address(rsp, 0));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
  // rax: exception handler address of the caller

  // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ movptr(handler_addr, rax);

  // Restore exception oop to RAX (required convention of exception handler).
  __ movptr(exception_oop, exception_oop_callee_saved);

  // verify that there is really a valid exception in rax
  __ verify_not_null_oop(exception_oop);

  // get throwing pc (= return address).
  // rdx has been destroyed by the call, so it must be set again
  // the pop is also necessary to simulate the effect of a ret(0)
  __ pop(exception_pc);

  // Restore SP from BP if the exception PC is a method handle call site.
  NOT_LP64(__ get_thread(thread);)
  __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // rax: exception oop
  // rdx: throwing pc
  // rbx: exception handler
  __ jmp(handler_addr);
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime arguments here because it is difficult to
  // distinguish each RT-call.
  // Note: this number also affects the RT-call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  const int num_rt_args = 2;  // thread + dummy

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

#ifdef _LP64
  const Register thread = r15_thread;
  // No need to worry about dummy
  __ mov(c_rarg0, thread);
#else
  __ push(rax); // push dummy

  const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
  // push java thread (becomes first argument of C function)
  __ get_thread(thread);
  __ push(thread);
#endif // _LP64
  __ set_last_Java_frame(thread, noreg, rbp, NULL);
  // do the call
  __ call(RuntimeAddress(target));
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  __ push(rax);
  { Label L;
    __ get_thread(rax);
    __ cmpptr(thread, rax);
    __ jcc(Assembler::equal, L);
    __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
    __ bind(L);
  }
  __ pop(rax);
#endif
  __ reset_last_Java_frame(thread, true, false);
#ifndef _LP64
  __ pop(rcx); // discard thread arg
  __ pop(rcx); // discard dummy
#endif // _LP64

  // check for pending exceptions
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler

    __ testptr(rax, rax);                                   // have we deoptimized?
    __ jump_cc(Assembler::equal,
               RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // check that there is really a valid exception
    __ verify_not_null_oop(rax);

    // load throwing pc: this is the return address of the stub
    __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);

    restore_live_registers(sasm);

    __ leave();
    __ addptr(rsp, BytesPerWord);  // remove return address from stack

    // Forward the exception directly to the deopt blob. We must not blow any
    // registers and must leave the throwing pc on the stack. A patch may
    // have values live in registers, so use the entry point with the
    // exception in tls.
    __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ testptr(rax, rax);                                 // have we deoptimized?
  __ jcc(Assembler::equal, cont);                       // no

  // Will reexecute. The proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and jump
  // to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(0);

  return oop_maps;
}


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = rdx; // Incoming
        Register obj   = rax; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = rcx;
          Register t1       = rbx;
          Register t2       = rsi;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ push(rdi);
          __ push(rbx);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
            __ jcc(Assembler::notEqual, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
            __ jcc(Assembler::lessEqual, not_ok);
            __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
            __ jcc(Assembler::zero, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi

          __ bind(retry_tlab);

          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(try_eden);
          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(thread, obj_size, 0);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(slow_path);
          __ pop(rbx);
          __ pop(rdi);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new instance
      }

      break;

    case counter_overflow_id:
      {
        Register bci = rax, method = rbx;
        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        // Retrieve bci
        __ movl(bci, Address(rbp, 2*BytesPerWord));
        // And a pointer to the Method*
        __ movptr(method, Address(rbp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length   = rbx; // Incoming
        Register klass    = rdx; // Incoming
        Register obj      = rax; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ movl(t0, Address(klass, Klass::layout_helper_offset()));
          __ sarl(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmpl(t0, tag);
          __ jcc(Assembler::equal, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Register arr_size = rsi;
          Register t1       = rcx;  // must be rcx for use as shift count
          Register t2       = rdi;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
          __ jcc(Assembler::above, slow_path);

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi

          __ bind(retry_tlab);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
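
          // Worked example (editor's note, hypothetical numbers): for a length
          // of 10 with layout_helper encoding log2(element size) == 2 and a
          // 16-byte header, the code above computes
          // arr_size = (10 << 2) + 16 = 56, then rounds up to the object
          // alignment (56 is already 8-byte aligned).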

          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(try_eden);
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
          __ incr_allocated_bytes(thread, arr_size, 0);

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // rax: klass
        // rbx: rank
        // rcx: address of 1st dimension
        OopMap* map = save_live_registers(sasm, 4);
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax: new multi array
        __ verify_oop(rax);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime so the arguments
        // will be placed in C ABI locations

#ifdef _LP64
        __ verify_oop(c_rarg0);
        __ mov(rax, c_rarg0);
#else
        // The object is passed on the stack and we haven't pushed a
        // frame yet so it's one word away from the top of stack.
        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
#endif // _LP64

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = rsi;
        __ load_klass(t, rax);
        __ movl(t, Address(t, Klass::access_flags_offset()));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
        __ ret(0);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(0);
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;
1364
1365    case throw_incompatible_class_change_error_id:
1366      { StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments);
1367        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
1368      }
1369      break;
1370
1371    case slow_subtype_check_id:
1372      {
1373        // Typical calling sequence:
1374        // __ push(klass_RInfo);  // object klass or other subclass
1375        // __ push(sup_k_RInfo);  // array element klass or other superclass
1376        // __ call(slow_subtype_check);
1377        // Note that the subclass is pushed first, and is therefore deepest.
1378        // Previous versions of this code reversed the names 'sub' and 'super'.
1379        // This was operationally harmless but made the code unreadable.
1380        enum layout {
1381          rax_off, SLOT2(raxH_off)
1382          rcx_off, SLOT2(rcxH_off)
1383          rsi_off, SLOT2(rsiH_off)
1384          rdi_off, SLOT2(rdiH_off)
1385          // saved_rbp_off, SLOT2(saved_rbpH_off)
1386          return_off, SLOT2(returnH_off)
1387          sup_k_off, SLOT2(sup_kH_off)
1388          klass_off, SLOT2(klassH_off)
1389          framesize,
1390          result_off = klass_off  // deepest argument is also the return value
1391        };
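        // Stack picture implied by the enum, after the four pushes below (one
        // 32-bit VMReg slot each; SLOT2 adds the high half on LP64) -- a
        // sketch, with offsets growing away from rsp:
        //   rsp + rax_off    : saved rax (pushed last)
        //   rsp + rcx_off    : saved rcx
        //   rsp + rsi_off    : saved rsi
        //   rsp + rdi_off    : saved rdi
        //   rsp + return_off : return address
        //   rsp + sup_k_off  : superclass (pushed second by the caller)
        //   rsp + klass_off  : subclass   (pushed first, deepest; doubles as result_off)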
1392
1393        __ set_info("slow_subtype_check", dont_gc_arguments);
1394        __ push(rdi);
1395        __ push(rsi);
1396        __ push(rcx);
1397        __ push(rax);
1398
1399        // This is called by pushing the args, not via the C ABI
1400        __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
1401        __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass
1402
1403        Label miss;
1404        __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);
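        // check_klass_subtype_slow_path branches to 'miss' on failure; passing
        // NULL as the success label means it simply falls through on success,
        // which is why the success code can follow directly below.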
1405
1406        // fallthrough on success:
1407        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
1408        __ pop(rax);
1409        __ pop(rcx);
1410        __ pop(rsi);
1411        __ pop(rdi);
1412        __ ret(0);
1413
1414        __ bind(miss);
1415        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
1416        __ pop(rax);
1417        __ pop(rcx);
1418        __ pop(rsi);
1419        __ pop(rdi);
1420        __ ret(0);
1421      }
1422      break;
1423
1424    case monitorenter_nofpu_id:
1425      save_fpu_registers = false;
1426      // fall through
1427    case monitorenter_id:
1428      {
1429        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
1430        OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);
1431
1432        // Called with store_parameter, not via the C ABI
1433
1434        f.load_argument(1, rax); // rax: object
1435        f.load_argument(0, rbx); // rbx: lock address
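        // i.e. the compiled caller stored the arguments into outgoing stack
        // slots, roughly:
        //   store_parameter(lock_reg, 0);
        //   store_parameter(obj_reg,  1);
        //   call(RuntimeAddress(Runtime1::entry_for(monitorenter_id)));
        // (a sketch of the convention; see LIR_Assembler::store_parameter)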
1436
1437        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);
1438
1439        oop_maps = new OopMapSet();
1440        oop_maps->add_gc_map(call_offset, map);
1441        restore_live_registers(sasm, save_fpu_registers);
1442      }
1443      break;
1444
1445    case monitorexit_nofpu_id:
1446      save_fpu_registers = false;
1447      // fall through
1448    case monitorexit_id:
1449      {
1450        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
1451        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
1452
1453        // Called with store_parameter, not via the C ABI
1454
1455        f.load_argument(0, rax); // rax: lock address
1456
1457        // note: really a leaf routine, but we must set up last_Java_sp
1458        //       => use call_RT for now (speed can be improved by
1459        //       doing the last_Java_sp setup manually)
1460        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax);
1461
1462        oop_maps = new OopMapSet();
1463        oop_maps->add_gc_map(call_offset, map);
1464        restore_live_registers(sasm, save_fpu_registers);
1465      }
1466      break;
1467
1468    case deoptimize_id:
1469      {
1470        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
1471        const int num_rt_args = 2;  // thread, trap_request
1472        OopMap* oop_map = save_live_registers(sasm, num_rt_args);
1473        f.load_argument(0, rax); // trap_request
1474        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), rax);
1475        oop_maps = new OopMapSet();
1476        oop_maps->add_gc_map(call_offset, oop_map);
1477        restore_live_registers(sasm);
1478        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
1479        assert(deopt_blob != NULL, "deoptimization blob must have been created");
1480        __ leave();
1481        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
1482      }
1483      break;
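    // Note that the deoptimize stub never returns to its caller: after the
    // runtime call records the trap request, it tail-jumps into the shared
    // deoptimization blob, which unpacks the compiled frame and re-executes
    // the trapping bytecode in the interpreter.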
1484
1485    case access_field_patching_id:
1486      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
1487        // we should set up register map
1488        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
1489      }
1490      break;
1491
1492    case load_klass_patching_id:
1493      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
1494        // we should set up register map
1495        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
1496      }
1497      break;
1498
1499    case load_mirror_patching_id:
1500      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
1501        // we should set up register map
1502        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
1503      }
1504      break;
1505
1506    case load_appendix_patching_id:
1507      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
1508        // we should set up register map
1509        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
1510      }
1511      break;
1512
1513    case dtrace_object_alloc_id:
1514      { // rax: object
1515        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
1516        // we can't GC here, so skip the oopmap, but make sure that all
1517        // the live registers get saved.
1518        save_live_registers(sasm, 1);
1519
1520        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
1521        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
1522        NOT_LP64(__ pop(rax));
1523
1524        restore_live_registers(sasm);
1525      }
1526      break;
1527
1528    case fpu2long_stub_id:
1529      {
1530        // rax and rdx are destroyed, but that is fine since the result is returned in them
1531        // preserve rsi and rcx
1532        __ push(rsi);
1533        __ push(rcx);
1534        LP64_ONLY(__ push(rdx);)
1535
1536        // check for NaN
1537        Label return0, do_return, return_min_jlong, do_convert;
1538
1539        Address value_high_word(rsp, wordSize + 4);
1540        Address value_low_word(rsp, wordSize);
1541        Address result_high_word(rsp, 3*wordSize + 4);
1542        Address result_low_word(rsp, 3*wordSize);
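        // IEEE-754 double layout (high word): sign(1) | exponent(11, mask
        // 0x7ff00000) | mantissa high bits(20, mask 0xfffff).  An all-ones
        // exponent with a non-zero mantissa is a NaN; the checks below route
        // NaN to return0, since Java defines (long)NaN == 0.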
1543
1544        __ subptr(rsp, 32);                    // more than enough on 32bit
1545        __ fst_d(value_low_word);
1546        __ movl(rax, value_high_word);
1547        __ andl(rax, 0x7ff00000);
1548        __ cmpl(rax, 0x7ff00000);
1549        __ jcc(Assembler::notEqual, do_convert);
1550        __ movl(rax, value_high_word);
1551        __ andl(rax, 0xfffff);
1552        __ orl(rax, value_low_word);
1553        __ jcc(Assembler::notZero, return0);
1554
1555        __ bind(do_convert);
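        // Set the x87 rounding-control field (bits 11-10 of the control word,
        // mask 0xc00) to 11b = round toward zero, i.e. the truncation required
        // by Java's (long) cast; convert with fistp, then restore the saved
        // control word.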
1556        __ fnstcw(Address(rsp, 0));
1557        __ movzwl(rax, Address(rsp, 0));
1558        __ orl(rax, 0xc00);
1559        __ movw(Address(rsp, 2), rax);
1560        __ fldcw(Address(rsp, 2));
1561        __ fwait();
1562        __ fistp_d(result_low_word);
1563        __ fldcw(Address(rsp, 0));
1564        __ fwait();
1565        // This gets the entire long in rax on 64bit
1566        __ movptr(rax, result_low_word);
1567        // testing of high bits
1568        __ movl(rdx, result_high_word);
1569        __ mov(rcx, rax);
1570        // This XOR with zero appears to be a no-op; the flags actually tested below come from the following OR (the result == min_jlong check)
1571        __ xorl(rcx, 0x0);
1572        __ movl(rsi, 0x80000000);
1573        __ xorl(rsi, rdx);
1574        __ orl(rcx, rsi);
1575        __ jcc(Assembler::notEqual, do_return);
1576        __ fldz();
1577        __ fcomp_d(value_low_word);
1578        __ fnstsw_ax();
1579#ifdef _LP64
1580        __ testl(rax, 0x4100);  // ZF & CF == 0
1581        __ jcc(Assembler::equal, return_min_jlong);
1582#else
1583        __ sahf();
1584        __ jcc(Assembler::above, return_min_jlong);
1585#endif // _LP64
1586        // return max_jlong
1587#ifndef _LP64
1588        __ movl(rdx, 0x7fffffff);
1589        __ movl(rax, 0xffffffff);
1590#else
1591        __ mov64(rax, CONST64(0x7fffffffffffffff));
1592#endif // _LP64
1593        __ jmp(do_return);
1594
1595        __ bind(return_min_jlong);
1596#ifndef _LP64
1597        __ movl(rdx, 0x80000000);
1598        __ xorl(rax, rax);
1599#else
1600        __ mov64(rax, UCONST64(0x8000000000000000));
1601#endif // _LP64
1602        __ jmp(do_return);
1603
1604        __ bind(return0);
1605        __ fpop();
1606#ifndef _LP64
1607        __ xorptr(rdx,rdx);
1608        __ xorptr(rax,rax);
1609#else
1610        __ xorptr(rax, rax);
1611#endif // _LP64
1612
1613        __ bind(do_return);
1614        __ addptr(rsp, 32);
1615        LP64_ONLY(__ pop(rdx);)
1616        __ pop(rcx);
1617        __ pop(rsi);
1618        __ ret(0);
1619      }
1620      break;
1621
1622#if INCLUDE_ALL_GCS
1623    case g1_pre_barrier_slow_id:
1624      {
1625        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
1626        // arg0 : previous value of memory
1627
1628        BarrierSet* bs = Universe::heap()->barrier_set();
1629        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
1630          __ movptr(rax, (int)id);
1631          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1632          __ should_not_reach_here();
1633          break;
1634        }
1635        __ push(rax);
1636        __ push(rdx);
1637
1638        const Register pre_val = rax;
1639        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1640        const Register tmp = rdx;
1641
1642        NOT_LP64(__ get_thread(thread);)
1643
1644        Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1645                                             PtrQueue::byte_offset_of_active()));
1646
1647        Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1648                                             PtrQueue::byte_offset_of_index()));
1649        Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1650                                        PtrQueue::byte_offset_of_buf()));
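        // The SATB queue index counts down, in bytes, from the buffer
        // capacity; an index of zero means the thread-local buffer is full and
        // the pre-value must be handed to the runtime instead (the 'runtime'
        // path below).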
1651
1652
1653        Label done;
1654        Label runtime;
1655
1656        // Can we store the original value in the thread's buffer?
1657
1658#ifdef _LP64
1659        __ movslq(tmp, queue_index);
1660        __ cmpq(tmp, 0);
1661#else
1662        __ cmpl(queue_index, 0);
1663#endif
1664        __ jcc(Assembler::equal, runtime);
1665#ifdef _LP64
1666        __ subq(tmp, wordSize);
1667        __ movl(queue_index, tmp);
1668        __ addq(tmp, buffer);
1669#else
1670        __ subl(queue_index, wordSize);
1671        __ movl(tmp, buffer);
1672        __ addl(tmp, queue_index);
1673#endif
1674
1675        // prev_val (rax)
1676        f.load_argument(0, pre_val);
1677        __ movptr(Address(tmp, 0), pre_val);
1678        __ jmp(done);
1679
1680        __ bind(runtime);
1681        __ push(rcx);
1682#ifdef _LP64
1683        __ push(r8);
1684        __ push(r9);
1685        __ push(r10);
1686        __ push(r11);
1687#  ifndef _WIN64
1688        __ push(rdi);
1689        __ push(rsi);
1690#  endif
1691#endif
1692        // load the pre-value
1693        f.load_argument(0, rcx);
1694        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
1695#ifdef _LP64
1696#  ifndef _WIN64
1697        __ pop(rsi);
1698        __ pop(rdi);
1699#  endif
1700        __ pop(r11);
1701        __ pop(r10);
1702        __ pop(r9);
1703        __ pop(r8);
1704#endif
1705        __ pop(rcx);
1706        __ bind(done);
1707
1708        __ pop(rdx);
1709        __ pop(rax);
1710      }
1711      break;
1712
1713    case g1_post_barrier_slow_id:
1714      {
1715        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
1716
1717
1718        // arg0: store_address
1719        Address store_addr(rbp, 2*BytesPerWord);
1720
1721        BarrierSet* bs = Universe::heap()->barrier_set();
1722        CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1723        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1724
1725        Label done;
1726        Label runtime;
1727
1728        // At this point we know new_value is non-NULL and that the store crosses regions.
1729        // Must check to see if card is already dirty
1730
1731        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1732
1733        Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1734                                             PtrQueue::byte_offset_of_index()));
1735        Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1736                                        PtrQueue::byte_offset_of_buf()));
1737
1738        __ push(rax);
1739        __ push(rcx);
1740
1741        const Register cardtable = rax;
1742        const Register card_addr = rcx;
1743
1744        f.load_argument(0, card_addr);
1745        __ shrptr(card_addr, CardTableModRefBS::card_shift);
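        // card address = byte_map_base + (store_addr >> card_shift): each
        // 2^card_shift-byte span of the heap (512 bytes for the usual shift of
        // 9) maps to a single byte in the card table.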
1746        // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
1747        // a valid address and therefore is not properly handled by the relocation code.
1748        __ movptr(cardtable, (intptr_t)ct->byte_map_base);
1749        __ addptr(card_addr, cardtable);
1750
1751        NOT_LP64(__ get_thread(thread);)
1752
1753        __ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
1754        __ jcc(Assembler::equal, done);
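        // Not a young card: make the earlier reference store globally visible
        // before re-reading the card.  The StoreLoad fence below orders the
        // oop store ahead of the dirty-card re-check, as the G1 card-marking
        // protocol requires.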
1755
1756        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
1757        __ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
1758        __ jcc(Assembler::equal, done);
1759
1760        // Storing a region-crossing non-NULL value and the card is clean:
1761        // dirty the card and log it.
1762
1763        __ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
1764
1765        __ cmpl(queue_index, 0);
1766        __ jcc(Assembler::equal, runtime);
1767        __ subl(queue_index, wordSize);
1768
1769        const Register buffer_addr = rbx;
1770        __ push(rbx);
1771
1772        __ movptr(buffer_addr, buffer);
1773
1774#ifdef _LP64
1775        __ movslq(rscratch1, queue_index);
1776        __ addptr(buffer_addr, rscratch1);
1777#else
1778        __ addptr(buffer_addr, queue_index);
1779#endif
1780        __ movptr(Address(buffer_addr, 0), card_addr);
1781
1782        __ pop(rbx);
1783        __ jmp(done);
1784
1785        __ bind(runtime);
1786        __ push(rdx);
1787#ifdef _LP64
1788        __ push(r8);
1789        __ push(r9);
1790        __ push(r10);
1791        __ push(r11);
1792#  ifndef _WIN64
1793        __ push(rdi);
1794        __ push(rsi);
1795#  endif
1796#endif
1797        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
1798#ifdef _LP64
1799#  ifndef _WIN64
1800        __ pop(rsi);
1801        __ pop(rdi);
1802#  endif
1803        __ pop(r11);
1804        __ pop(r10);
1805        __ pop(r9);
1806        __ pop(r8);
1807#endif
1808        __ pop(rdx);
1809        __ bind(done);
1810
1811        __ pop(rcx);
1812        __ pop(rax);
1813
1814      }
1815      break;
1816#endif // INCLUDE_ALL_GCS
1817
1818    case predicate_failed_trap_id:
1819      {
1820        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
1821
1822        OopMap* map = save_live_registers(sasm, 1);
1823
1824        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
1825        oop_maps = new OopMapSet();
1826        oop_maps->add_gc_map(call_offset, map);
1827        restore_live_registers(sasm);
1828        __ leave();
1829        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
1830        assert(deopt_blob != NULL, "deoptimization blob must have been created");
1831
1832        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
1833      }
1834      break;
1835
1836    default:
1837      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
1838        __ movptr(rax, (int)id);
1839        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1840        __ should_not_reach_here();
1841      }
1842      break;
1843  }
1844  return oop_maps;
1845}
1846
1847#undef __
1848
1849const char *Runtime1::pd_name_for_address(address entry) {
1850  return "<unknown function>";
1851}
1852