// c1_Runtime1_sparc.cpp, revision 6760:22b98ab2a69f
/*
 * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_sparc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
  // for sparc, changing the number of arguments doesn't change
  // anything about the frame size, so we'll always lie and claim that
  // we are only passing 1 argument.
  set_num_rt_args(1);

  assert_not_delayed();
  // bang stack before going to runtime
  set(-os::vm_page_size() + STACK_BIAS, G3_scratch);
  st(G0, SP, G3_scratch);
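  // (the store above touches the page just below SP; if the stack is about
  // to overflow we take the fault here, while the frame is still walkable,
  // rather than somewhere inside the runtime call)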

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  set_last_Java_frame(SP, noreg);
  if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(L7_thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread) {
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  } else {
    delayed()->nop();             // (thread already passed)
  }
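  // (whichever instruction follows the call above sits in its delay slot and
  // executes before control reaches the callee; that is how O0 gets loaded
  // "during" the call when VerifyThread is off)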
  int call_offset = offset();  // offset of return address
  restore_thread(L7_thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    ld_ptr(exception_addr, Gtemp);
    br_null_short(Gtemp, pt, L);
    Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
    st_ptr(G0, vm_result_addr);
    Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
    st_ptr(G0, vm_result_addr_2);

    if (frame_size() == no_frame_size) {
      // we use O7 linkage so that forward_exception_entry has the issuing PC
      call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
      delayed()->restore();
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      AddressLiteral exc(Runtime1::entry_for(Runtime1::forward_exception_id));
      jump_to(exc, G4);
      delayed()->nop();
    }
    bind(L);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  } else {
    // be a little paranoid and clear the result
    Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
    st_ptr(G0, vm_result_addr);
  }

  // get second result if there is one and reset the value in the thread
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result);
  } else {
    // be a little paranoid and clear the result
    Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
    st_ptr(G0, vm_result_addr_2);
  }

  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  mov(arg2, O2); assert(arg2 != O1, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  mov(arg2, O2); assert(arg2 != O1,               "smashed argument");
  mov(arg3, O3); assert(arg3 != O1 && arg3 != O2, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->
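// (standard HotSpot stub-generator shorthand: every "__ foo(...)" below
// expands to "sasm->foo(...)")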

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int reg_save_size_in_words;
static int frame_size_in_bytes = -1;

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
         "mismatch in calculation");
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }
  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) {
  assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
         "mismatch in calculation");
  __ save_frame_c1(frame_size_in_bytes);

  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint))

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ st_ptr(r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ stf(FloatRegisterImpl::S, r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
    }
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      __ ld_ptr(SP, (cpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
    }
  }

  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      __ ldf(FloatRegisterImpl::S, SP, (fpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
    }
  }
}


void Runtime1::initialize_pd() {
  // compute word offsets from SP at which live (non-windowed) registers are captured by stub routines
  //
  // A stub routine will have a frame that is at least large enough to hold
  // a register window save area (obviously) and the volatile g registers
  // and floating registers. A user of save_live_registers can have a frame
  // that has more scratch area in it (although typically they will use L-regs).
  // In that case the frame will look like this (stack growing down)
  //
  // FP -> |             |
  //       | scratch mem |
  //       |   "      "  |
  //       ---------------
  //       | float regs  |
  //       |   "    "    |
  //       ---------------
  //       | G regs      |
  //       | "  "        |
  //       ---------------
  //       | abi reg.    |
  //       | window save |
  //       | area        |
  // SP -> ---------------
  //
  int i;
  int sp_offset = round_to(frame::register_save_words, 2); // start doubleword aligned

  // only G int registers are saved explicitly; others are found in register windows
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      cpu_reg_save_offsets[i] = sp_offset;
      sp_offset++;
    }
  }

  // all float registers are saved explicitly
  assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here");
  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset++;
  }
  reg_save_size_in_words = sp_offset - frame::memory_parameter_word_sp_offset;
  // this should match assembler::total_frame_size_in_bytes, which
  // isn't callable from this context.  It's checked by an assert when
  // it's used though.
  frame_size_in_bytes = align_size_up(sp_offset * wordSize, 8);
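  // (worked example, assuming frame::register_save_words == 16 on 64-bit
  //  SPARC: sp_offset = 16 + 4 G-regs + 32 float regs = 52 words, giving
  //  frame_size_in_bytes = align_size_up(52 * 8, 8) = 416; illustrative only)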
}


OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, G4);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}


OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, address target,
                                        Register arg1, Register arg2, Register arg3) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (arg1 == noreg) {
    call_offset = __ call_RT(result, noreg, target);
  } else if (arg2 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1);
  } else if (arg3 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2);
  } else {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2, arg3);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);
  restore_live_registers(sasm);

  __ ret();
  __ delayed()->restore();

  return oop_maps;
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);

  // call the runtime patching routine; it returns non-zero if the nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // re-execute the patched instruction or, if the nmethod was deoptimized, return to the
  // deoptimization handler entry that will cause re-execution of the current bytecode
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  Label no_deopt;
  __ br_null_short(O0, Assembler::pt, no_deopt);

  // return to the deoptimization handler entry for unpacking and re-execution;
  // if we simply returned, we'd deopt as if any call we patched had just
  // returned.

  restore_live_registers(sasm);

  AddressLiteral dest(deopt_blob->unpack_with_reexecution());
  __ jump_to(dest, O0);
  __ delayed()->restore();

  __ bind(no_deopt);
  restore_live_registers(sasm);
  __ ret();
  __ delayed()->restore();

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  OopMapSet* oop_maps = NULL;
  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // stub code & info for the different stubs
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register G5_klass = G5; // Incoming
        Register O0_obj   = O0; // Outgoing

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register G1_obj_size = G1;
          Register G3_t1 = G3;
          Register G4_t2 = G4;
          assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);

          // Push a frame since we may do dtrace notification for the
          // allocation which requires calling out and we don't want
          // to stomp the real return address.
          __ save_frame(0);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ldub(G5_klass, in_bytes(InstanceKlass::init_state_offset()), G3_t1);
            __ cmp(G3_t1, InstanceKlass::fully_initialized);
            __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
            __ delayed()->nop();
          }
#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
            // make sure it's an instance (LH > 0)
            __ cmp_and_br_short(G1_obj_size, 0, Assembler::lessEqual, Assembler::pn, not_ok);
            __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
            __ br(Assembler::zero, false, Assembler::pn, ok);
            __ delayed()->nop();
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT
          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass

          __ bind(retry_tlab);

          // get the instance size
          __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);

          __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);

          __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
          __ verify_oop(O0_obj);
          __ mov(O0, I0);
          __ ret();
          __ delayed()->restore();

          __ bind(try_eden);
          // get the instance size
          __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
          __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
          __ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2);

          __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
          __ verify_oop(O0_obj);
          __ mov(O0, I0);
          __ ret();
          __ delayed()->restore();

          __ bind(slow_path);

          // pop this frame so generate_stub_call can push its own
          __ restore();
        }

        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_instance), G5_klass);
        // I0 -> O0: new instance
      }
      break;

    case counter_overflow_id:
      // G4 contains bci, G5 contains method
      oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4, G5);
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register G5_klass  = G5; // Incoming
        Register G4_length = G4; // Incoming
        Register O0_obj    = O0; // Outgoing

        Address klass_lh(G5_klass, Klass::layout_helper_offset());
        assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
        assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
        // Use this offset to pick out an individual byte of the layout_helper:
        const int klass_lh_header_size_offset = ((BytesPerInt - 1)  // 3 - 2 selects byte {0,1,0,0}
                                                 - Klass::_lh_header_size_shift / BitsPerByte);

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register G3_t1 = G3;
          __ ld(klass_lh, G3_t1);
          __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmp_and_brx_short(G3_t1, tag, Assembler::equal, Assembler::pt, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register G1_arr_size = G1;
          Register G3_t1 = G3;
          Register O1_t2 = O1;
          assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2);

          // check that array length is small enough for fast path
          __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
          __ cmp(G4_length, G3_t1);
          __ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path);
          __ delayed()->nop();

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass

          __ bind(retry_tlab);

          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ ld(klass_lh, G3_t1);
          __ sll(G4_length, G3_t1, G1_arr_size);
          __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
          __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
          __ add(G1_arr_size, G3_t1, G1_arr_size);
          __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);  // align up
          __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);
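          // (add-mask-then-clear-mask is the usual round-up-to-alignment idiom:
          //  it rounds G1_arr_size up to the next MinObjAlignmentInBytes boundary)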

          __ tlab_allocate(O0_obj, G1_arr_size, 0, G3_t1, slow_path);  // preserves G1_arr_size

          __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
          __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
          __ sub(G1_arr_size, G3_t1, O1_t2);  // body length
          __ add(O0_obj, G3_t1, G3_t1);       // body start
          __ initialize_body(G3_t1, O1_t2);
          __ verify_oop(O0_obj);
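          // (return with retl, a leaf return through O7: unlike the
          //  new_instance fast path above, this path never pushed a frame,
          //  so there is no register window to restore)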
          __ retl();
          __ delayed()->nop();

          __ bind(try_eden);
          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ ld(klass_lh, G3_t1);
          __ sll(G4_length, G3_t1, G1_arr_size);
          __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
          __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
          __ add(G1_arr_size, G3_t1, G1_arr_size);
          __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);
          __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);

          __ eden_allocate(O0_obj, G1_arr_size, 0, G3_t1, O1_t2, slow_path);  // preserves G1_arr_size
          __ incr_allocated_bytes(G1_arr_size, G3_t1, O1_t2);

          __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
          __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
          __ sub(G1_arr_size, G3_t1, O1_t2);  // body length
          __ add(O0_obj, G3_t1, G3_t1);       // body start
          __ initialize_body(G3_t1, O1_t2);
          __ verify_oop(O0_obj);
          __ retl();
          __ delayed()->nop();

          __ bind(slow_path);
        }

        if (id == new_type_array_id) {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length);
        } else {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_object_array), G5_klass, G4_length);
        }
        // I0 -> O0: new array
      }
      break;

    case new_multi_array_id:
      { // O0: klass
        // O1: rank
        // O2: address of 1st dimension
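        // (the arguments arrive in O0..O2; generate_stub_call pushes a frame
        //  via save_live_registers, after which they are visible to the stub
        //  as I0..I2, which is why the call below passes I registers)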
        __ set_info("new_multi_array", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_multi_array), I0, I1, I2);
        // I0 -> O0: new multi array
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = O1;
        __ load_klass(O0, t);
        __ ld(t, in_bytes(Klass::access_flags_offset()), t);
        __ set(JVM_ACC_HAS_FINALIZER, G3);
        __ andcc(G3, t, G0);
        __ br(Assembler::notZero, false, Assembler::pt, register_finalizer);
        __ delayed()->nop();

        // do a leaf return
        __ retl();
        __ delayed()->nop();

        __ bind(register_finalizer);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), I0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_id:
      { __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        // O0: exception
        // I7: address of call to this method

        __ set_info("unwind_exception", dont_gc_arguments);
        __ mov(Oexception, Oexception->after_save());
        __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());

        __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());
        __ verify_not_null_oop(Oexception->after_save());

        // Restore SP from L7 if the exception PC is a method handle call site.
        __ mov(O0, G5);  // Save the target address.
        __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
        __ tst(L0);  // Condition codes are preserved over the restore.
        __ restore();

        __ jmp(G5, 0);
        __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);  // Restore SP if required.
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        // G4: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      { // Support for uint StubRoutines::partial_subtype_check( Klass sub, Klass super );
        // Arguments:
        //
        //      ret  : G3
        //      sub  : G3, argument, destroyed
        //      super: G1, argument, not changed
        //      raddr: O7, blown by call
        Label miss;

        __ save_frame(0);               // Blow no registers!

        __ check_klass_subtype_slow_path(G3, G1, L0, L1, L2, L4, NULL, &miss);

        __ mov(1, G3);
        __ ret();                       // Result in G3 is 'true'
        __ delayed()->restore();        // free copy or add can go here

        __ bind(miss);
        __ mov(0, G3);
        __ ret();                       // Result in G3 is 'false'
        __ delayed()->restore();        // free copy or add can go here
      }
      break;

    case monitorenter_nofpu_id:
    case monitorenter_id:
      { // G4: object
        // G5: lock address
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), G4, G5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      { // G4: lock address
        // note: really a leaf routine but must set up last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), G4);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), G4);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ jump_to(dest, O0);
        __ delayed()->restore();
      }
      break;

    case access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case dtrace_object_alloc_id:
      { // O0: object
        __ set_info("dtrace_object_alloc", dont_gc_arguments);
        // we can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm);

        __ save_thread(L7_thread_cache);
        __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
                relocInfo::runtime_call_type);
        __ delayed()->mov(I0, O0);
        __ restore_thread(L7_thread_cache);

        restore_live_registers(sasm);
        __ ret();
        __ delayed()->restore();
      }
      break;

#if INCLUDE_ALL_GCS
    case g1_pre_barrier_slow_id:
      { // G4: previous value of memory
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);

        Register pre_val = G4;
        Register tmp  = G1_scratch;
        Register tmp2 = G3_scratch;

        Label refill, restart;
        int satb_q_index_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   PtrQueue::byte_offset_of_index());
        int satb_q_buf_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   PtrQueue::byte_offset_of_buf());

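        // (the SATB queue is filled from high addresses toward low ones:
        //  _index is the byte offset of the next free slot, so index == 0
        //  means the buffer is full and must be handed off for processing)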
        __ bind(restart);
        // Load the index into the SATB buffer. PtrQueue::_index is a
        // size_t so ld_ptr is appropriate
        __ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);

        // index == 0?
        __ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pn, refill);

        __ ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
        __ sub(tmp, oopSize, tmp);

        __ st_ptr(pre_val, tmp2, tmp);  // [_buf + index] := pre_val
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);

        __ bind(refill);
        __ save_frame(0);

        __ mov(pre_val, L0);
        __ mov(tmp,     L1);
        __ mov(tmp2,    L2);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         SATBMarkQueueSet::handle_zero_index_for_thread),
                        G2_thread);

        __ mov(L0, pre_val);
        __ mov(L1, tmp);
        __ mov(L2, tmp2);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;

    case g1_post_barrier_slow_id:
      {
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        Register addr = G4;
        Register cardtable = G5;
        Register tmp  = G1_scratch;
        Register tmp2 = G3_scratch;
        jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;

        Label not_already_dirty, restart, refill, young_card;

#ifdef _LP64
        __ srlx(addr, CardTableModRefBS::card_shift, addr);
#else
        __ srl(addr, CardTableModRefBS::card_shift, addr);
#endif
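        // (addr is now a card index: the card table is a byte array with one
        //  byte per 2^card_shift bytes of heap; adding byte_map_base below
        //  turns the index into the address of the card byte)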

        AddressLiteral rs(byte_map_base);
        __ set(rs, cardtable);         // cardtable := <card table base>
        __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]

        __ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);

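        // (StoreLoad barrier: make sure the reference store that dirtied this
        //  card is globally visible before re-reading the card below, so the
        //  filter does not race with a concurrent refinement thread that may
        //  have cleaned the card in the meantime)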
        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
        __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]

        assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
        __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);

        __ bind(young_card);
        // We didn't take the branch, so we're already dirty: return.
        // Use return-from-leaf
        __ retl();
        __ delayed()->nop();

        // Not dirty.
        __ bind(not_already_dirty);

        // Get cardtable + tmp into a reg by itself
        __ add(addr, cardtable, tmp2);

        // First, dirty it.
        __ stb(G0, tmp2, 0);  // [cardPtr] := 0  (i.e., dirty).

        Register tmp3 = cardtable;
        Register tmp4 = tmp;

        // these registers are now dead
        addr = cardtable = tmp = noreg;

        int dirty_card_q_index_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   PtrQueue::byte_offset_of_index());
        int dirty_card_q_buf_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   PtrQueue::byte_offset_of_buf());

        __ bind(restart);

        // Get the index into the update buffer. PtrQueue::_index is
        // a size_t so ld_ptr is appropriate here.
        __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);

        // index == 0?
        __ cmp_and_brx_short(tmp3, G0, Assembler::equal, Assembler::pn, refill);

        __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
        __ sub(tmp3, oopSize, tmp3);

        __ st_ptr(tmp2, tmp4, tmp3);  // [_buf + index] := <address_of_card>
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);

        __ bind(refill);
        __ save_frame(0);

        __ mov(tmp2, L0);
        __ mov(tmp3, L1);
        __ mov(tmp4, L2);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         DirtyCardQueueSet::handle_zero_index_for_thread),
                        G2_thread);

        __ mov(L0, tmp2);
        __ mov(L1, tmp3);
        __ mov(L2, tmp4);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;
#endif // INCLUDE_ALL_GCS

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        restore_live_registers(sasm);

        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ jump_to(dest, O0);
        __ delayed()->restore();
      }
      break;

    default:
      { __ set_info("unimplemented entry", dont_gc_arguments);
        __ save_frame(0);
        __ set((int)id, O1);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), O1);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, true);

    // transfer the pending exception to the exception_oop
    __ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception);
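    // (the load through Oexception into G0 below acts as an implicit null
    //  check: it faults if the pending exception oop is NULL)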
    __ ld_ptr(Oexception, 0, G0);
    __ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset()));
    __ add(I7, frame::pc_return_offset, Oissuing_pc);
    break;
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm);
    __ mov(Oexception->after_save(),  Oexception);
    __ mov(Oissuing_pc->after_save(), Oissuing_pc);
    break;
  case handle_exception_from_callee_id:
    // At this point all registers except exception oop (Oexception)
    // and exception pc (Oissuing_pc) are dead.
    oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
    sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
    __ save_frame_c1(frame_size_in_bytes);
    __ mov(Oexception->after_save(),  Oexception);
    __ mov(Oissuing_pc->after_save(), Oissuing_pc);
    break;
  default:  ShouldNotReachHere();
  }

  __ verify_not_null_oop(Oexception);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  Register scratch = I7;  // We can use I7 here because it's overwritten later anyway.
  __ ld_ptr(Address(G2_thread, JavaThread::exception_oop_offset()), scratch);
  __ br_null(scratch, false, Assembler::pt, oop_empty);
  __ delayed()->nop();
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ ld_ptr(Address(G2_thread, JavaThread::exception_pc_offset()), scratch);
  __ br_null(scratch, false, Assembler::pt, pc_empty);
  __ delayed()->nop();
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save the exception and issuing pc in the thread
  __ st_ptr(Oexception,  G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  // use the throwing pc as the return address to lookup (has bci & oop map)
  __ mov(Oissuing_pc, I7);
  __ sub(I7, frame::pc_return_offset, I7);
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Note: if nmethod has been deoptimized then regardless of
  // whether it had a handler or not we will deoptimize
  // by entering the deopt blob with a pending exception.

  // Restore the registers that were saved at the beginning, remove
  // the frame and jump to the exception handler.
  switch (id) {
  case forward_exception_id:
  case handle_exception_id:
    restore_live_registers(sasm);
    __ jmp(O0, 0);
    __ delayed()->restore();
    break;
  case handle_exception_from_callee_id:
    // Restore SP from L7 if the exception PC is a method handle call site.
    __ mov(O0, G5);  // Save the target address.
    __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
    __ tst(L0);  // Condition codes are preserved over the restore.
    __ restore();

    __ jmp(G5, 0);  // jump to the exception handler
    __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);  // Restore SP if required.
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}


#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}