c1_Runtime1_sparc.cpp revision 0:a61af66fc99e
/*
 * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_c1_Runtime1_sparc.cpp.incl"

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry_point, int number_of_arguments) {
  // On SPARC, changing the number of arguments doesn't change anything
  // about the frame size, so we always lie and claim that we are only
  // passing one argument.
  set_num_rt_args(1);

  assert_not_delayed();
  // bang stack before going to runtime
  set(-os::vm_page_size() + STACK_BIAS, G3_scratch);
  st(G0, SP, G3_scratch);
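  // The store above touches the page just below SP (note the STACK_BIAS
  // adjustment on 64-bit), so a stack overflow faults here, before we enter
  // the runtime, rather than at some arbitrary point inside it.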

  // debugging support
  assert(number_of_arguments >= 0   , "cannot have negative number of arguments");

  set_last_Java_frame(SP, noreg);
  if (VerifyThread)  mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(L7_thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread) {
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  } else {
    delayed()->nop();             // (thread already passed)
  }
  int call_offset = offset();  // offset of return address
  restore_thread(L7_thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions
  { Label L;
    Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
    ld_ptr(exception_addr, Gtemp);
    br_null(Gtemp, false, pt, L);
    delayed()->nop();
    Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
    st_ptr(G0, vm_result_addr);
    Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset()));
    st_ptr(G0, vm_result_addr_2);

    if (frame_size() == no_frame_size) {
      // we use O7 linkage so that forward_exception_entry has the issuing PC
      call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
      delayed()->restore();
    } else if (_stub_id == Runtime1::forward_exception_id) {
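      // a runtime call made on behalf of the forward-exception stub itself
      // must never leave a pending exception behind, or we would loop right
      // back into this stub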
      should_not_reach_here();
    } else {
      Address exc(G4, Runtime1::entry_for(Runtime1::forward_exception_id));
      jump_to(exc, 0);
      delayed()->nop();
    }
    bind(L);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result1->is_valid()) {
    get_vm_result  (oop_result1);
  } else {
    // be a little paranoid and clear the result
    Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
    st_ptr(G0, vm_result_addr);
  }

  if (oop_result2->is_valid()) {
    get_vm_result_2(oop_result2);
  } else {
    // be a little paranoid and clear the result
    Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset()));
    st_ptr(G0, vm_result_addr_2);
  }

  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  return call_RT(oop_result1, oop_result2, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  mov(arg2, O2); assert(arg2 != O1, "smashed argument");
  return call_RT(oop_result1, oop_result2, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
  // O0 is reserved for the thread
  mov(arg1, O1);
  mov(arg2, O2); assert(arg2 != O1,               "smashed argument");
  mov(arg3, O3); assert(arg3 != O1 && arg3 != O2, "smashed argument");
  return call_RT(oop_result1, oop_result2, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int reg_save_size_in_words;
static int frame_size_in_bytes = -1;

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
         " mismatch in calculation");
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
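  // OopMap offsets are expressed in 32-bit (jint) stack slots, not in words
  // (see the note in save_live_registers below)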
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }
  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) {
  assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
         " mismatch in calculation");
  __ save_frame_c1(frame_size_in_bytes);
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);

  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint))

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ st_ptr(r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ stf(FloatRegisterImpl::S, r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
    }
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      __ ld_ptr(SP, (cpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
    }
  }

  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      __ ldf(FloatRegisterImpl::S, SP, (fpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
    }
  }
}


void Runtime1::initialize_pd() {
  // compute word offsets from SP at which live (non-windowed) registers are captured by stub routines
  //
  // A stub routine will have a frame that is at least large enough to hold
  // a register window save area (obviously) and the volatile G registers
  // and floating registers. A user of save_live_registers can have a frame
  // that has more scratch area in it (although typically they will use L-regs).
  // In that case the frame will look like this (stack growing down)
  //
  // FP -> |             |
  //       | scratch mem |
  //       |   "      "  |
  //       ---------------
  //       | float regs  |
  //       |   "    "    |
  //       ---------------
  //       | G regs      |
  //       | "  "        |
  //       ---------------
  //       | abi reg.    |
  //       | window save |
  //       | area        |
  // SP -> ---------------
  //
  int i;
  int sp_offset = round_to(frame::register_save_words, 2); // start doubleword aligned

  // only G int registers are saved explicitly; others are found in register windows
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      cpu_reg_save_offsets[i] = sp_offset;
      sp_offset++;
    }
  }

  // all float registers are saved explicitly
  assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here");
  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset++;
  }
  reg_save_size_in_words = sp_offset - frame::memory_parameter_word_sp_offset;
  // This should match assembler::total_frame_size_in_bytes, which
  // isn't callable from this context.  It's checked by an assert when
  // it's used, though.
  frame_size_in_bytes = align_size_up(sp_offset * wordSize, 8);
}


OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, G4);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}

OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, address target,
                                        Register arg1, Register arg2, Register arg3) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (arg1 == noreg) {
    call_offset = __ call_RT(result, noreg, target);
  } else if (arg2 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1);
  } else if (arg3 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2);
  } else {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2, arg3);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);
  restore_live_registers(sasm);

  __ ret();
  __ delayed()->restore();

  return oop_maps;
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);

  // call the runtime patching routine; it returns non-zero if the nmethod got deopted
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // re-execute the patched instruction or, if the nmethod was deoptimized, return to the
  // deoptimization handler entry that will cause re-execution of the current bytecode
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  Label no_deopt;
  __ tst(O0);
  __ brx(Assembler::equal, false, Assembler::pt, no_deopt);
  __ delayed()->nop();

  // Return to the deoptimization handler entry for unpacking and re-execution.
  // If we simply returned, we'd deopt as if any call we patched had just
  // returned.

  restore_live_registers(sasm);
  __ restore();
  __ br(Assembler::always, false, Assembler::pt, deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
  __ delayed()->nop();

  __ bind(no_deopt);
  restore_live_registers(sasm);
  __ ret();
  __ delayed()->restore();

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  OopMapSet* oop_maps = NULL;
  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // stub code & info for the different stubs
  switch (id) {
    case forward_exception_id:
      {
        // We're handling an exception in the context of a compiled
        // frame.  The registers have been saved in the standard
        // places.  Perform an exception lookup in the caller and
        // dispatch to the handler if found.  Otherwise unwind and
        // dispatch to the caller's exception handler.

        oop_maps = new OopMapSet();
        OopMap* oop_map = generate_oop_map(sasm, true);

        // transfer the pending exception to the exception_oop
        __ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception);
        __ ld_ptr(Oexception, 0, G0);
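        // loading through the exception oop into G0 discards the result; it
        // serves only as a cheap null check on the pending exception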
        __ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset()));
        __ add(I7, frame::pc_return_offset, Oissuing_pc);
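        // I7 holds the address of the call instruction; on SPARC the actual
        // return pc is that address plus pc_return_offset (call + delay slot)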

        generate_handle_exception(sasm, oop_maps, oop_map);
        __ should_not_reach_here();
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register G5_klass = G5; // Incoming
        Register O0_obj   = O0; // Outgoing

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register G1_obj_size = G1;
          Register G3_t1 = G3;
          Register G4_t2 = G4;
          assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);

          // Push a frame since we may do dtrace notification for the
          // allocation which requires calling out and we don't want
          // to stomp the real return address.
          __ save_frame(0);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1);
            __ cmp(G3_t1, instanceKlass::fully_initialized);
            __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
            __ delayed()->nop();
          }
#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size);
            __ cmp(G1_obj_size, 0);  // make sure it's an instance (LH > 0)
            __ br(Assembler::lessEqual, false, Assembler::pn, not_ok);
            __ delayed()->nop();
            __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
            __ br(Assembler::zero, false, Assembler::pn, ok);
            __ delayed()->nop();
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT
          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass

          __ bind(retry_tlab);

          // get the instance size
          __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
          __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);
          __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
          __ verify_oop(O0_obj);
          __ mov(O0, I0);
          __ ret();
          __ delayed()->restore();

          __ bind(try_eden);
          // get the instance size
          __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
          __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
          __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
          __ verify_oop(O0_obj);
          __ mov(O0, I0);
          __ ret();
          __ delayed()->restore();

          __ bind(slow_path);

          // pop this frame so generate_stub_call can push its own
          __ restore();
        }

        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_instance), G5_klass);
        // I0 -> O0: new instance
      }
      break;

#ifdef TIERED
    case counter_overflow_id:
      // G4 contains bci
      oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4);
      break;
#endif // TIERED

    case new_type_array_id:
    case new_object_array_id:
      {
        Register G5_klass  = G5; // Incoming
        Register G4_length = G4; // Incoming
        Register O0_obj    = O0; // Outgoing

        Address klass_lh(G5_klass, 0, ((klassOopDesc::header_size() * HeapWordSize)
                                       + Klass::layout_helper_offset_in_bytes()));
        assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
        assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
        // Use this offset to pick out an individual byte of the layout_helper:
        const int klass_lh_header_size_offset = ((BytesPerInt - 1)  // 3 - 2 selects byte {0,1,0,0}
                                                 - Klass::_lh_header_size_shift / BitsPerByte);
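        // e.g. with _lh_header_size_shift == 2 * BitsPerByte this is
        // 3 - 2 == 1, i.e. byte 1 of the 32-bit layout_helper word
        // (SPARC is big-endian, so byte 0 is the most significant byte)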

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register G3_t1 = G3;
          __ ld(klass_lh, G3_t1);
          __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmp(G3_t1, tag);
          __ brx(Assembler::equal, false, Assembler::pt, ok);
          __ delayed()->nop();
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register G1_arr_size = G1;
          Register G3_t1 = G3;
          Register O1_t2 = O1;
          assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2);

          // check that array length is small enough for fast path
          __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
          __ cmp(G4_length, G3_t1);
          __ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path);
          __ delayed()->nop();

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass

          __ bind(retry_tlab);

          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ ld(klass_lh, G3_t1);
          __ sll(G4_length, G3_t1, G1_arr_size);
          __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
          __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
          __ add(G1_arr_size, G3_t1, G1_arr_size);
          __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);  // align up
          __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);
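          // the two instructions above are the usual round-up idiom:
          // size = (size + mask) & ~mask, with mask == MinObjAlignmentInBytes - 1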

          __ tlab_allocate(O0_obj, G1_arr_size, 0, G3_t1, slow_path);  // preserves G1_arr_size

          __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
          __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
          __ sub(G1_arr_size, G3_t1, O1_t2);  // body length
          __ add(O0_obj, G3_t1, G3_t1);       // body start
          __ initialize_body(G3_t1, O1_t2);
          __ verify_oop(O0_obj);
          __ retl();
          __ delayed()->nop();

          __ bind(try_eden);
          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ ld(klass_lh, G3_t1);
          __ sll(G4_length, G3_t1, G1_arr_size);
          __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
          __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
          __ add(G1_arr_size, G3_t1, G1_arr_size);
          __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);  // align up
          __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);

          __ eden_allocate(O0_obj, G1_arr_size, 0, G3_t1, O1_t2, slow_path);  // preserves G1_arr_size

          __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
          __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
          __ sub(G1_arr_size, G3_t1, O1_t2);  // body length
          __ add(O0_obj, G3_t1, G3_t1);       // body start
          __ initialize_body(G3_t1, O1_t2);
          __ verify_oop(O0_obj);
          __ retl();
          __ delayed()->nop();

          __ bind(slow_path);
        }

        if (id == new_type_array_id) {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length);
        } else {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_object_array), G5_klass, G4_length);
        }
        // I0 -> O0: new array
      }
      break;

    case new_multi_array_id:
      { // O0: klass
        // O1: rank
        // O2: address of 1st dimension
        __ set_info("new_multi_array", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_multi_array), I0, I1, I2);
        // I0 -> O0: new multi array
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = O1;
        __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), t);
        __ ld(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), t);
        __ set(JVM_ACC_HAS_FINALIZER, G3);
        __ andcc(G3, t, G0);
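        // and into G0 purely to set the condition codes; the result itself
        // is discarded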
        __ br(Assembler::notZero, false, Assembler::pt, register_finalizer);
        __ delayed()->nop();

        // do a leaf return
        __ retl();
        __ delayed()->nop();

        __ bind(register_finalizer);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), I0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        // make a frame and preserve the caller's caller-save registers

        oop_maps = new OopMapSet();
        OopMap* oop_map = save_live_registers(sasm);
        __ mov(Oexception->after_save(),  Oexception);
        __ mov(Oissuing_pc->after_save(), Oissuing_pc);
        generate_handle_exception(sasm, oop_maps, oop_map);
      }
      break;

    case unwind_exception_id:
      {
        // O0: exception
        // I7: address of call to this method

        __ set_info("unwind_exception", dont_gc_arguments);
        __ mov(Oexception, Oexception->after_save());
        __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());

        __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        Oissuing_pc->after_save());
        __ verify_not_null_oop(Oexception->after_save());
        __ jmp(O0, 0);
        __ delayed()->restore();
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), false);
      }
      break;

    case throw_class_cast_exception_id:
      {
        // G4: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
        // Arguments :
        //
        //      ret  : G3
        //      sub  : G3, argument, destroyed
        //      super: G1, argument, not changed
        //      raddr: O7, blown by call
        Label loop, miss;

        __ save_frame(0);               // Blow no registers!

        __ ld_ptr( G3, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes(), L3 );
        __ lduw(L3,arrayOopDesc::length_offset_in_bytes(),L0); // length in L0
        __ add(L3,arrayOopDesc::base_offset_in_bytes(T_OBJECT),L1); // ptr into array
        __ clr(L4);                     // Index
        // Load a little early; will load 1 off the end of the array.
        // Ok for now; revisit if we have other uses of this routine.
        __ ld_ptr(L1,0,L2);             // Will load a little early

        // The scan loop
        __ bind(loop);
        __ add(L1,wordSize,L1);         // Bump by OOP size
        __ cmp(L4,L0);
        __ br(Assembler::equal,false,Assembler::pn,miss);
        __ delayed()->inc(L4);          // Bump index
        __ subcc(L2,G1,L3);             // Check for match; zero in L3 for a hit
        __ brx( Assembler::notEqual, false, Assembler::pt, loop );
        __ delayed()->ld_ptr(L1,0,L2);  // Will load a little early

        // Got a hit; report success; set cache
        __ st_ptr( G1, G3, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );

        __ mov(1, G3);
        __ ret();                       // Result in G3 is ok; flags set
        __ delayed()->restore();        // free copy or add can go here

        __ bind(miss);
        __ mov(0, G3);
        __ ret();                       // Result in G3 is ok; flags set
        __ delayed()->restore();        // free copy or add can go here
      }
      break;

    case monitorenter_nofpu_id:
    case monitorenter_id:
      { // G4: object
        // G5: lock address
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), G4, G5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      { // G4: lock address
        // note: really a leaf routine but must set up last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), G4);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case jvmti_exception_throw_id:
      { // Oexception: exception
        __ set_info("jvmti_exception_throw", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), I0);
      }
      break;

    case dtrace_object_alloc_id:
      { // O0: object
        __ set_info("dtrace_object_alloc", dont_gc_arguments);
        // we can't GC here, so skip the oopmap, but make sure that all
        // the live registers get saved.
        save_live_registers(sasm);

        __ save_thread(L7_thread_cache);
        __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
                relocInfo::runtime_call_type);
        __ delayed()->mov(I0, O0);
        __ restore_thread(L7_thread_cache);

        restore_live_registers(sasm);
        __ ret();
        __ delayed()->restore();
      }
      break;

    default:
      { __ set_info("unimplemented entry", dont_gc_arguments);
        __ save_frame(0);
        __ set((int)id, O1);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), O1);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}


void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_maps, OopMap* oop_map, bool) {
  Label no_deopt;
  Label no_handler;

  __ verify_not_null_oop(Oexception);

  // save the exception and issuing pc in the thread
  __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  // save the real return address and use the throwing pc as the return address to lookup (has bci & oop map)
  __ mov(I7, L0);
  __ mov(Oissuing_pc, I7);
  __ sub(I7, frame::pc_return_offset, I7);
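  // call_RT will record I7 + pc_return_offset as the return pc, which is now
  // exactly the throwing pc, so the oop map and handler lookup are done
  // against the original exception site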
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));

  // Note: if the nmethod has been deoptimized then regardless of
  // whether it had a handler or not we will deoptimize
  // by entering the deopt blob with a pending exception.

  __ tst(O0);
  __ br(Assembler::zero, false, Assembler::pn, no_handler);
  __ delayed()->nop();

  // restore the registers that were saved at the beginning and jump to the exception handler.
  restore_live_registers(sasm);

  __ jmp(O0, 0);
  __ delayed()->restore();

  __ bind(no_handler);
  __ mov(L0, I7); // restore return address

  // restore exception oop
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception->after_save());
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));

  __ restore();

  Address exc(G4, Runtime1::entry_for(Runtime1::unwind_exception_id));
  __ jump_to(exc, 0);
  __ delayed()->nop();


  oop_maps->add_gc_map(call_offset, oop_map);
}


#undef __

#define __ masm->
899