// templateTable_sparc.cpp revision 113:ba764ed4b6f2
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_templateTable_sparc.cpp.incl"

#ifndef CC_INTERP
#define __ _masm->


//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}


//----------------------------------------------------------------------------------------------------
// Condition conversion
Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
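
// Note (illustrative): the conditional-branch templates below compare and
// then branch on the *inverted* condition (see if_0cmp et al., which pass
// ccNot(cc) to InterpreterMacroAssembler::if_cmp), so the taken-branch code
// can fall through; e.g. ccNot(TemplateTable::less) yields
// Assembler::greaterEqual.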

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines


Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address( Lbcp, 0, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register Rbyte_code,
                                   Register Rscratch,
                                   bool load_bc_into_scratch /*=true*/) {
  // With sharing on, may need to test methodOop flag.
  if (!RewriteBytecodes) return;
  if (load_bc_into_scratch) __ set(bc, Rbyte_code);
  Label patch_done;
  if (JvmtiExport::can_post_breakpoint()) {
    Label fast_patch;
    __ ldub(at_bcp(0), Rscratch);
    __ cmp(Rscratch, Bytecodes::_breakpoint);
    __ br(Assembler::notEqual, false, Assembler::pt, fast_patch);
    __ delayed()->nop();  // don't bother to hoist the stb here
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, Rbyte_code);
    __ ba(false, patch_done);
    __ delayed()->nop();
    __ bind(fast_patch);
  }
#ifdef ASSERT
  Bytecodes::Code orig_bytecode =  Bytecodes::java_code(bc);
  Label okay;
  __ ldub(at_bcp(0), Rscratch);
  __ cmp(Rscratch, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, okay);
  __ delayed()->cmp(Rscratch, Rbyte_code);
  __ br(Assembler::equal, false, Assembler::pt, okay);
  __ delayed()->nop();
  __ stop("Rewriting wrong bytecode location");
  __ bind(okay);
#endif
  __ stb(Rbyte_code, at_bcp(0));
  __ bind(patch_done);
}
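
// Example (illustrative): once the interpreter has executed a rewritable
// bytecode and chosen its fast variant, patch_bytecode overwrites the opcode
// at Lbcp (e.g. aload_0 becomes _fast_aload_0), so subsequent executions
// dispatch directly to the fast template.  The ASSERT block above guards
// against patching a location that no longer holds the expected bytecode.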

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
#ifdef _LP64
  __ set(value, Otos_l);
#else
  __ set(value, Otos_l2);
  __ clr( Otos_l1);
#endif
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
   case 2:  p = &two;   break;
  }
  Address a(G3_scratch, (address)p);
  __ sethi(a);
  __ ldf(FloatRegisterImpl::S, a, Ftos_f);
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
  }
  Address a(G3_scratch, (address)p);
  __ sethi(a);
  __ ldf(FloatRegisterImpl::D, a, Ftos_d);
}


// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb( at_bcp(1), Otos_i );
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);
  __ cmp(O2, JVM_CONSTANT_UnresolvedString);    // unresolved string? If so, must resolve
  __ brx(Assembler::equal, true, Assembler::pt, call_ldc);
  __ delayed()->nop();

  __ cmp(O2, JVM_CONSTANT_UnresolvedClass);     // unresolved class? If so, must resolve
  __ brx(Assembler::equal, true, Assembler::pt, call_ldc);
  __ delayed()->nop();

  __ cmp(O2, JVM_CONSTANT_UnresolvedClassInError);     // unresolved class in error state
  __ brx(Assembler::equal, true, Assembler::pn, call_ldc);
  __ delayed()->nop();

  __ cmp(O2, JVM_CONSTANT_Class);      // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(notClass);
 // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(notInt);
 // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ ld_ptr(O0, O1, Otos_i);
  __ verify_oop(Otos_i);
  __ push(atos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(notString);
 // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label retry, resolved, Long, exit;

  __ bind(retry);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp(O2, JVM_CONSTANT_Double);
  __ brx(Assembler::notEqual, false, Assembler::pt, Long);
  __ delayed()->nop();
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also, constantPoolOopDesc::header_size() is 20, which makes it very
  // difficult to double-align a double in the constant pool.  SG, 11/7/97
#ifdef _LP64
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
#else
  FloatRegister f = Ftos_d;
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
         f->successor());
#endif
  __ push(dtos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(Long);
#ifdef _LP64
  __ ldx(G3_scratch, base_offset, Otos_l);
#else
  __ ld(G3_scratch, base_offset, Otos_l);
  __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
#endif
  __ push(ltos);

  __ bind(exit);
}


void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub( at_bcp(offset), reg );
}


void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to fast_iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // this is an iload pair.
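    // Example (illustrative): for "iload x; iload y; istore z", the second
    // iload is rewritten to _fast_iload first; on a later execution the
    // first iload sees _fast_iload as its successor and becomes _fast_iload2.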
    __ cmp(G3_scratch, (int)Bytecodes::_iload);
    __ br(Assembler::equal, false, Assembler::pn, done);
    __ delayed()->nop();

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i );
  __ verify_oop(Otos_i);
}


void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
  __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ verify_oop(Otos_i);
}


void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  debug_only(__ verify_local_tag(frame::TagValue, Llocals, Otos_i, n));
  __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  debug_only(__ verify_local_tag(frame::TagCategory2, Llocals, Otos_l, n));
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  debug_only(__ verify_local_tag(frame::TagValue, Llocals, G3_scratch, n));
  __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n),     Ftos_f );
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  debug_only(__ verify_local_tag(frame::TagCategory2, Llocals, G3_scratch, n));
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  debug_only(__ verify_local_tag(frame::TagReference, Llocals, Otos_i, n));
  __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::aload_0() {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0, which doesn't do the pair check anymore.
  //
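  // Example (illustrative): "aload_0; getfield #f" for an int field is
  // eventually rewritten to _fast_iaccess_0, folding the pair into one
  // fast bytecode.
  //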
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmp(G3_scratch, (int)Bytecodes::_getfield);
    __ br(Assembler::equal, false, Assembler::pn, done);
    __ delayed()->nop();

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ set(Bytecodes::_fast_aload_0, G4_scratch);

    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
    __ bind(done);
  } else {
    aload(0);
  }
}


void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  // astore tos can also be a returnAddress, so load and store the tag too
  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
  __ inc(Lesp, Interpreter::stackElementSize());
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr( G3_scratch, Otos_i, Otos_l2 );
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  // astore tos can also be a returnAddress, so load and store the tag too
  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
  __ inc(Lesp, Interpreter::stackElementSize());
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr( G3_scratch, Otos_i, Otos_l2 );
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2); // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2); // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2); // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}


void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);         // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);     // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null( Otos_i, false, Assembler::pn, is_null );
  __ delayed()->nop();

  __ load_klass(O3, O4); // get array klass
  __ load_klass(Otos_i, O5); // get value klass

  // do fast instanceof cache test

  __ ld_ptr(O4,     sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes(),  O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i:    value
  // O1:        addr - offset
  // O2:        index
  // O3:        array
  // O4:        array element klass
  // O5:        value klass

  // Generate a fast subtype check.  Branch to store_ok if no
  // failure.  Throw if failure.
  __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );

  // Store is OK.
  __ bind(store_ok);
  __ store_heap_oop(Otos_i, O1, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  // Quote from rememberedSet.hpp: For objArrays, the precise card
  // corresponding to the pointer store is dirtied so we don't need to
  // scavenge the entire array.
  Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  __ add(element, O1);              // address the element precisely
  __ store_check(G3_scratch, O1);
  __ ba(false, done);
  __ delayed()->inc(Lesp, 3 * Interpreter::stackElementSize()); // adj sp (pops array, index and value)

  __ bind(is_null);
  __ store_heap_oop(Otos_i, element);
  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3 * Interpreter::stackElementSize());     // adj sp (pops array, index and value)
  __ bind(done);
}


void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}


void TemplateTable::sastore() {
  // %%%%% Factor across platforms
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ tag_local(frame::TagValue, Llocals, Otos_i, n);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ tag_local(frame::TagCategory2, Llocals, Otos_l, n);
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ tag_local(frame::TagValue, Llocals, Otos_l, n);
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ tag_local(frame::TagCategory2, Llocals, Otos_l, n);
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  // astore tos can also be a returnAddress, so load and store the tag too
  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
  __ inc(Lesp, Interpreter::stackElementSize());
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr( n, Otos_i, Otos_l2 );
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize());
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize());
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
  __ push_ptr(Otos_i, Otos_l2);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr_and_tag(1, G3_scratch, G4_scratch);   // get a
  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);         // get b
  __ store_ptr_and_tag(1, Otos_l1, Otos_l2);        // put b
  __ store_ptr_and_tag(0, G3_scratch, G4_scratch);  // put a - like swap
  __ push_ptr(Otos_l1, Otos_l2);                    // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr_and_tag(0, G3_scratch, G4_scratch);    // get c
  __ push_ptr(G3_scratch, G4_scratch);               // push c with tag
  // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr_and_tag(3, Otos_l1, Otos_l2);          // get a
  __ store_ptr_and_tag(3, G3_scratch, G4_scratch);   // put c at 3
  // stack: ..., c, b, c, c  (a in reg)
  __ load_ptr_and_tag(2, G3_scratch, G4_scratch);    // get b
  __ store_ptr_and_tag(2, Otos_l1, Otos_l2);         // put a at 2
  // stack: ..., c, a, c, c  (b in reg)
  __ store_ptr_and_tag(1, G3_scratch, G4_scratch);   // put b at 1
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr_and_tag(1, G3_scratch, G4_scratch);     // get a
  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);           // get b
  __ push_ptr(G3_scratch, G4_scratch);                // push a
  __ push_ptr(Otos_l1, Otos_l2);                      // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr_and_tag(1, Lscratch, G1_scratch);       // get b
  __ load_ptr_and_tag(2, Otos_l1, Otos_l2);           // get a
  __ store_ptr_and_tag(2, Lscratch, G1_scratch);      // put b at a
  // stack: ..., b, b, c
  __ load_ptr_and_tag(0, G3_scratch, G4_scratch);     // get c
  __ store_ptr_and_tag(1, G3_scratch, G4_scratch);    // put c at b
  // stack: ..., b, c, c
  __ store_ptr_and_tag(0, Otos_l1, Otos_l2);          // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch, G1_scratch);                  // push b
  __ push_ptr(G3_scratch, G4_scratch);                // push c
  // stack: ..., b, c, a, b, c
}


// The spec says that these types can be a mixture of category 1 (one-word)
// types and/or category 2 types (longs and doubles).
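// Example: with two longs on the stack, occupying the slot pairs (a,b) and
// (c,d), the generic four-slot shuffle below yields ..., c, d, a, b, c, d,
// i.e. value2, value1, value2 in long terms.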
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr_and_tag(1, Lscratch, G1_scratch);       // get c
  __ load_ptr_and_tag(3, Otos_l1, Otos_l2);           // get a
  __ store_ptr_and_tag(3, Lscratch, G1_scratch);      // put c at 3
  __ store_ptr_and_tag(1, Otos_l1, Otos_l2);          // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr_and_tag(2, G3_scratch, G4_scratch);     // get b
  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);           // get d
  __ store_ptr_and_tag(0, G3_scratch, G4_scratch);    // put b at 0
  __ store_ptr_and_tag(2, Otos_l1, Otos_l2);          // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch, G1_scratch);                  // push c
  __ push_ptr(Otos_l1, Otos_l2);                      // push d
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr_and_tag(1, G3_scratch, G4_scratch);     // get a
  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);           // get b
  __ store_ptr_and_tag(0, G3_scratch, G4_scratch);    // put b
  __ store_ptr_and_tag(1, Otos_l1, Otos_l2);          // put a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
   case  add:  __  add(O1, Otos_i, Otos_i);  break;
   case  sub:  __  sub(O1, Otos_i, Otos_i);  break;
     // %%%%% Mul may not exist: better to call .mul?
   case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
   case _and:  __ and3(O1, Otos_i, Otos_i);  break;
   case  _or:  __  or3(O1, Otos_i, Otos_i);  break;
   case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
   case  shl:  __  sll(O1, Otos_i, Otos_i);  break;
   case  shr:  __  sra(O1, Otos_i, Otos_i);  break;
   case ushr:  __  srl(O1, Otos_i, Otos_i);  break;
   default: ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
#ifdef _LP64
   case  add:  __ add(O2, Otos_l, Otos_l);  break;
   case  sub:  __ sub(O2, Otos_l, Otos_l);  break;
   case _and:  __ and3( O2, Otos_l, Otos_l);  break;
   case  _or:  __  or3( O2, Otos_l, Otos_l);  break;
   case _xor:  __ xor3( O2, Otos_l, Otos_l);  break;
#else
   case  add:  __ addcc(O3, Otos_l2, Otos_l2);  __ addc(O2, Otos_l1, Otos_l1);  break;
   case  sub:  __ subcc(O3, Otos_l2, Otos_l2);  __ subc(O2, Otos_l1, Otos_l1);  break;
   case _and:  __ and3( O3, Otos_l2, Otos_l2);  __ and3( O2, Otos_l1, Otos_l1);  break;
   case  _or:  __  or3( O3, Otos_l2, Otos_l2);  __  or3( O2, Otos_l1, Otos_l1);  break;
   case _xor:  __ xor3( O3, Otos_l2, Otos_l2);  __ xor3( O2, Otos_l1, Otos_l1);  break;
#endif
   default: ShouldNotReachHere();
  }
}


void TemplateTable::idiv() {
  // %%%%% Later: for SPARC/V7 call .sdiv library routine,
  // %%%%% use ldsw...sdivx on pure V9 ABI.  64-bit safe.

  transition(itos, itos);
  __ pop_i(O1); // get 1st op

  // Y supplies the upper 32 bits of the 64-bit dividend for sdiv;
  // set it to 0 or all ones, i.e. the sign extension of O1
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
  Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

  Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );

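  // Special case: per the JVM spec, min_int / -1 must yield min_int (the
  // only overflowing case), so bypass sdiv for it.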
  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
#ifdef _LP64
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
#else
  __ delayed()->set(min_int, G4_scratch);
#endif
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i);   // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i); // note: irem uses O1 after this instruction!
  __ bind(done);
}


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2); // save divisor
  idiv();                               // %%%% Hack: exploits fact that idiv leaves dividend in O1
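  // remainder = dividend - (dividend / divisor) * divisor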
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}


void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
#ifdef _LP64
  __ mulx(Otos_l, O2, Otos_l);
#else
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
#endif
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
#endif
}


void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx (Otos_l2, Otos_l, Otos_l2);
  __ sub  (O2, Otos_l2, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
#endif
}


void TemplateTable::lshl() {
  transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2);                          // shift value in O2, O3
#ifdef _LP64
  __ sllx(O2, Otos_i, Otos_l);
#else
  __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::lshr() {
  transition(itos, ltos); // %%%% see lshl comment

  __ pop_l(O2);                          // shift value in O2, O3
#ifdef _LP64
  __ srax(O2, Otos_i, Otos_l);
#else
  __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::lushr() {
  transition(itos, ltos); // %%%% see lshl comment

  __ pop_l(O2);                          // shift value in O2, O3
#ifdef _LP64
  __ srlx(O2, Otos_i, Otos_l);
#else
  __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
   case  add:  __  pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  sub:  __  pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  mul:  __  pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  div:  __  pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  rem:
     assert(Ftos_f == F0, "just checking");
#ifdef _LP64
     // LP64 calling conventions use F1, F3 for passing 2 floats
     __ pop_f(F1);
     __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
#else
     __ pop_i(O0);
     __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
     __ ld( __ d_tmp, O1 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
     assert( Ftos_f == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
   case  add:  __  pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  sub:  __  pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  mul:  __  pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  div:  __  pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  rem:
#ifdef _LP64
     // Pass arguments in D0, D2
     __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
     __ pop_d( F0 );
#else
     // Pass arguments in O0O1, O2O3
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O2 );
     __ pop_d(Ftos_f);
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O0 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
     assert( Ftos_d == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
#ifdef _LP64
  __ sub(G0, Otos_l, Otos_l);
#else
  __ lneg(Otos_l1, Otos_l2);
#endif
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f);
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  // v8 has fnegd if source and dest are the same
  __ fneg(FloatRegisterImpl::D, Ftos_f);
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2);  // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, Interpreter::value_offset_in_bytes());    // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp( 4,  O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, Interpreter::value_offset_in_bytes());    // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::convert() {
// %%%%% Factor this first part across platforms
  #ifdef ASSERT
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_in = itos; break;
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_l2d: tos_in = ltos; break;
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_f2d: tos_in = ftos; break;
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_d2l: // fall through
      case Bytecodes::_d2f: tos_in = dtos; break;
      default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_out = itos; break;
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_d2l: tos_out = ltos; break;
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_d2f: tos_out = ftos; break;
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_l2d: // fall through
      case Bytecodes::_f2d: tos_out = dtos; break;
      default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  #endif


  // Conversion
  Label done;
  switch (bytecode()) {
   case Bytecodes::_i2l:
#ifdef _LP64
    // Sign extend the 32 bits
    __ sra ( Otos_i, 0, Otos_l );
#else
    __ addcc(Otos_i, 0, Otos_l2);
    __ br(Assembler::greaterEqual, true, Assembler::pt, done);
    __ delayed()->clr(Otos_l1);
    __ set(~0, Otos_l1);
#endif
    break;

   case Bytecodes::_i2f:
    __ st(Otos_i, __ d_tmp );
    __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
    break;

   case Bytecodes::_i2d:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
    break;

   case Bytecodes::_i2b:
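    // sign-extend the low byte: e.g. 0x000000ff -> 0xffffffff (-1)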
    __ sll(Otos_i, 24, Otos_i);
    __ sra(Otos_i, 24, Otos_i);
    break;

   case Bytecodes::_i2c:
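    // zero-extend the low 16 bits to char: e.g. 0xffff8041 -> 0x00008041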
    __ sll(Otos_i, 16, Otos_i);
    __ srl(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_i2s:
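    // sign-extend the low 16 bits to short: e.g. 0x00008000 -> 0xffff8000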
    __ sll(Otos_i, 16, Otos_i);
    __ sra(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_l2i:
#ifndef _LP64
    __ mov(Otos_l2, Otos_i);
#else
    // Sign-extend into the high 32 bits
    __ sra(Otos_l, 0, Otos_i);
#endif
    break;

   case Bytecodes::_l2f:
   case Bytecodes::_l2d:
    __ st_long(Otos_l, __ d_tmp);
    __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

    if (VM_Version::v9_instructions_work()) {
      if (bytecode() == Bytecodes::_l2f) {
        __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
      } else {
        __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
      }
    } else {
      __ call_VM_leaf(
        Lscratch,
        bytecode() == Bytecodes::_l2f
          ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
          : CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
      );
    }
    break;

  case Bytecodes::_f2i:  {
      Label isNaN;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
      // According to the v8 manual, you have to have a non-fp instruction
      // between fcmp and fb.
      if (!VM_Version::v9_instructions_work()) {
        __ nop();
      }
      __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
      __ delayed()->clr(Otos_i);                                     // NaN
      __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
      __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
      __ ld(__ d_tmp, Otos_i);
      __ bind(isNaN);
    }
    break;

   case Bytecodes::_f2l:
    // must uncache tos
    __ push_f();
#ifdef _LP64
    __ pop_f(F1);
#else
    __ pop_i(O0);
#endif
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    break;

   case Bytecodes::_f2d:
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
    break;

   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
    // must uncache tos
    __ push_d();
#ifdef _LP64
    // LP64 calling conventions pass first double arg in D0
    __ pop_d( Ftos_d );
#else
    __ pop_i( O0 );
    __ pop_i( O1 );
#endif
    __ call_VM_leaf(Lscratch,
        bytecode() == Bytecodes::_d2i
          ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
          : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    break;

    case Bytecodes::_d2f:
    if (VM_Version::v9_instructions_work()) {
      __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
    }
    else {
      // must uncache tos
      __ push_d();
      __ pop_i(O0);
      __ pop_i(O1);
      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
    }
    break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}


void TemplateTable::lcmp() {
  transition(ltos, itos);

#ifdef _LP64
  __ pop_l(O1); // pop off value 1, value 2 is in O0
  __ lcmp( O1, Otos_l, Otos_i );
#else
  __ pop_l(O2); // cmp O2,3 to O0,1
  __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
#endif
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {

  if (is_float) __ pop_f(F2);
  else          __ pop_d(F2);

  assert(Ftos_f == F0  &&  Ftos_d == F0,  "alias checking:");

  __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_oop(Lmethod);
  __ verify_thread();

  const Register O2_bumped_count = O2;
  __ profile_taken_branch(G3_scratch, O2_bumped_count);

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide)  __ get_4_byte_integer_at_bcp( 1,  G4_scratch, O1_disp,                                    InterpreterMacroAssembler::set_CC);
  else          __ get_2_byte_integer_at_bcp( 1,  G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if( is_jsr ) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()) - (is_wide ? 5 : 3), Otos_i);
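    // Otos_i = current bci + jsr length (3, or 5 if wide), i.e. the bci of
    // the bytecode following the jsr, which becomes the return address.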

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i, G0); // push ptr sized thing plus 0 for tag.
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register O0_cur_bcp = O0;
  __ mov( Lbcp, O0_cur_bcp );

  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if ( increment_invocation_counter_for_backward_branches ) {
    Label Lforward;
    // check branch direction
    __ br( Assembler::positive, false,  Assembler::pn, Lforward );
    // Bump bytecode pointer by displacement (take the branch)
    __ delayed()->add( O1_disp, Lbcp, Lbcp );     // add to bc addr

    // Update Backedge branch separately from invocations
    const Register G4_invoke_ctr = G4;
    __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
    if (ProfileInterpreter) {
      __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward);
      if (UseOnStackReplacement) {
        __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
      }
    } else {
      if (UseOnStackReplacement) {
        __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
      }
    }

    __ bind(Lforward);
  } else
    // Bump bytecode pointer by displacement (take the branch)
    __ add( O1_disp, Lbcp, Lbcp );  // add to bc addr

  // continue with bytecode @ target
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only
  __ dispatch_next(vtos);
}


// Note: the Condition argument here is a TemplateTable::Condition;
// its scope is limited to this class.

void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp( Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  __ tst(Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  __ pop_ptr(O1);
  __ verify_oop(O1);
  __ verify_oop(Otos_i);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef _LP64
#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result.  The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing.  Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
  { Label zzz;
    __ set(65536, G3_scratch);
    __ cmp(Otos_i, G3_scratch);
    __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
    __ delayed()->nop();
    __ stop("BCI is in the wrong register half?");
    __ bind(zzz);
  }
#endif
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::tableswitch() {
  transition(itos, vtos);
  Label default_case, continue_execution;

  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
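  // O1 = align_down(Lbcp + 4, BytesPerInt) == align_up(Lbcp + 1, BytesPerInt):
  // skips the opcode plus 0-3 pad bytes to the word-aligned default offset.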
  // load lo, hi
  __ ld(O1, 1 * BytesPerInt, O2);       // low bound
  __ ld(O1, 2 * BytesPerInt, O3);       // high bound
#ifdef _LP64
  // Sign extend the 32 bits
  __ sra ( Otos_i, 0, Otos_i );
#endif /* _LP64 */

  // check against lo & hi
  __ cmp( Otos_i, O2);
  __ br( Assembler::less, false, Assembler::pn, default_case);
  __ delayed()->cmp( Otos_i, O3 );
  __ br( Assembler::greater, false, Assembler::pn, default_case);
  // lookup dispatch offset
  __ delayed()->sub(Otos_i, O2, O2);
  __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  __ sll(O2, LogBytesPerInt, O2);
  __ add(O2, 3 * BytesPerInt, O2);
  __ ba(false, continue_execution);
  __ delayed()->ld(O1, O2, O2);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(O3);
  __ ld(O1, 0, O2); // get default offset
  // continue execution
  __ bind(continue_execution);
  __ add(Lbcp, O2, Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // set counter
  __ ld(O1, BytesPerInt, O2);
  __ sll(O2, LogBytesPerInt + 1, O2); // in word-pairs
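  // O2 = npairs * 2 * BytesPerInt: each pair is a match word plus an offset word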
  __ add(O1, 2 * BytesPerInt, O3); // set first pair addr
  __ ba(false, loop_entry);
  __ delayed()->add(O3, O2, O2); // counter now points past last pair

  // table search
  __ bind(loop);
  __ cmp(O4, Otos_i);
  __ br(Assembler::equal, true, Assembler::pn, found);
  __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4
  __ inc(O3, 2 * BytesPerInt);

  __ bind(loop_entry);
  __ cmp(O2, O3);
  __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
  __ delayed()->ld(O3, 0, O4);

  // default case
  __ ld(O1, 0, O4); // get default offset
  if (ProfileInterpreter) {
    __ profile_switch_default(O3);
    __ ba(false, continue_execution);
    __ delayed()->nop();
  }

  // entry found -> get offset
  __ bind(found);
  if (ProfileInterpreter) {
    __ sub(O3, O1, O3);
    __ sub(O3, 2*BytesPerInt, O3);
    __ srl(O3, LogBytesPerInt + 1, O3); // in word-pairs
    __ profile_switch_case(O3, O1, O2, G3_scratch);

    __ bind(continue_execution);
  }
  __ add(Lbcp, O4, Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
1722  //     // where a stands for the array and assuming that the (nonexistent)
1723  //     // element a[n] is infinitely big.
1724  //     int h = (i + j) >> 1;
1725  //     // i < h < j
1726  //     if (key < array[h].fast_match()) {
1727  //       j = h;
1728  //     } else {
1729  //       i = h;
1730  //     }
1731  //   }
1732  //   // R: a[i] <= key < a[i+1] or Q
1733  //   // (i.e., if key is within array, i is the correct index)
1734  //   return i;
1735  // }
1736
1737  // register allocation
1738  assert(Otos_i == O0, "alias checking");
1739  const Register Rkey     = Otos_i;                    // already set (tosca)
1740  const Register Rarray   = O1;
1741  const Register Ri       = O2;
1742  const Register Rj       = O3;
1743  const Register Rh       = O4;
1744  const Register Rscratch = O5;
1745
1746  const int log_entry_size = 3;
1747  const int entry_size = 1 << log_entry_size;
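  // (each LookupswitchPair is two 4-byte ints, match and offset, so
  //  entry_size == 8 and log_entry_size == 3)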
1748
1749  Label found;
1750  // Find Array start
1751  __ add(Lbcp, 3 * BytesPerInt, Rarray);
1752  __ and3(Rarray, -BytesPerInt, Rarray);
1753  // initialize i; j is loaded in the branch delay slot below
1754  __ clr( Ri );
1755
1756  // and start
1757  Label entry;
1758  __ ba(false, entry);
1759  __ delayed()->ld( Rarray, -BytesPerInt, Rj);
1760  // (Rj is already in the native byte-ordering.)
1761
1762  // binary search loop
1763  { Label loop;
1764    __ bind( loop );
1765    // int h = (i + j) >> 1;
1766    __ sra( Rh, 1, Rh );
1767    // if (key < array[h].fast_match()) {
1768    //   j = h;
1769    // } else {
1770    //   i = h;
1771    // }
1772    __ sll( Rh, log_entry_size, Rscratch );
1773    __ ld( Rarray, Rscratch, Rscratch );
1774    // (Rscratch is already in the native byte-ordering.)
1775    __ cmp( Rkey, Rscratch );
1776    if ( VM_Version::v9_instructions_work() ) {
1777      __ movcc( Assembler::less,         false, Assembler::icc, Rh, Rj );  // j = h if (key <  array[h].fast_match())
1778      __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri );  // i = h if (key >= array[h].fast_match())
1779    }
1780    else {
1781      Label end_of_if;
1782      __ br( Assembler::less, true, Assembler::pt, end_of_if );
1783      __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
1784      __ mov( Rh, Ri );            // else i = h
1785      __ bind(end_of_if);          // }
1786    }
1787
1788    // while (i+1 < j)
1789    __ bind( entry );
1790    __ add( Ri, 1, Rscratch );
1791    __ cmp(Rscratch, Rj);
1792    __ br( Assembler::less, true, Assembler::pt, loop );
1793    __ delayed()->add( Ri, Rj, Rh ); // start h = i + j; the >> 1 is done at the loop head
1794  }
1795
1796  // end of binary search, result index is i (must check again!)
1797  Label default_case;
1798  Label continue_execution;
1799  if (ProfileInterpreter) {
1800    __ mov( Ri, Rh );              // save index i for profiling
1801  }
1802  __ sll( Ri, log_entry_size, Ri );
1803  __ ld( Rarray, Ri, Rscratch );
1804  // (Rscratch is already in the native byte-ordering.)
1805  __ cmp( Rkey, Rscratch );
1806  __ br( Assembler::notEqual, true, Assembler::pn, default_case );
1807  __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j
1808
1809  // entry found -> j = offset
1810  __ inc( Ri, BytesPerInt );
1811  __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
1812  __ ld( Rarray, Ri, Rj );
1813  // (Rj is already in the native byte-ordering.)
1814
1815  if (ProfileInterpreter) {
1816    __ ba(false, continue_execution);
1817    __ delayed()->nop();
1818  }
1819
1820  __ bind(default_case); // fall through (if not profiling)
1821  __ profile_switch_default(Ri);
1822
1823  __ bind(continue_execution);
1824  __ add( Lbcp, Rj, Lbcp );
1825  __ dispatch_next( vtos );
1826}
1827
1828
1829void TemplateTable::_return(TosState state) {
1830  transition(state, state);
1831  assert(_desc->calls_vm(), "inconsistent calls_vm information");
1832
1833  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
1834    assert(state == vtos, "only valid state");
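    // Conceptual effect (a sketch; the checks emitted below are the real code):
    //   if (receiver->klass()->access_flags() & JVM_ACC_HAS_FINALIZER)
    //     InterpreterRuntime::register_finalizer(receiver);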
1835    __ mov(G0, G3_scratch);
1836    __ access_local_ptr(G3_scratch, Otos_i);
1837    __ load_klass(Otos_i, O2);
1838    __ set(JVM_ACC_HAS_FINALIZER, G3);
1839    __ ld(O2, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), O2);
1840    __ andcc(G3, O2, G0);
1841    Label skip_register_finalizer;
1842    __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
1843    __ delayed()->nop();
1844
1845    // Call out to do finalizer registration
1846    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);
1847
1848    __ bind(skip_register_finalizer);
1849  }
1850
1851  __ remove_activation(state, /* throw_monitor_exception */ true);
1852
1853  // The caller's SP was adjusted upon method entry to accommodate
1854  // the callee's non-argument locals. Undo that adjustment.
1855  __ ret();                             // return to caller
1856  __ delayed()->restore(I5_savedSP, G0, SP);
1857}
1858
1859
1860// ----------------------------------------------------------------------------
1861// Volatile variables demand their effects be made known to all CPUs in
1862// order.  Store buffers on most chips allow reads & writes to reorder; the
1863// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1864// memory barrier (i.e., it's not sufficient that the interpreter does not
1865// reorder volatile references, the hardware also must not reorder them).
1866//
1867// According to the new Java Memory Model (JMM):
1868// (1) All volatiles are serialized wrt to each other.
1869// ALSO reads & writes act as acquire & release, so:
1870// (2) A read cannot let unrelated NON-volatile memory refs that happen after
1871// the read float up to before the read.  It's OK for non-volatile memory refs
1872// that happen before the volatile read to float down below it.
1873// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1874// that happen BEFORE the write float down to after the write.  It's OK for
1875// non-volatile memory refs that happen after the volatile write to float up
1876// before it.
1877//
1878// We only put in barriers around volatile refs (they are expensive), not
1879// _between_ memory refs (that would require us to track the flavor of the
1880// previous memory refs).  Requirements (2) and (3) require some barriers
1881// before volatile stores and after volatile loads.  These nearly cover
1882// requirement (1) but miss the volatile-store-volatile-load case.  This final
1883// case is placed after volatile-stores although it could just as well go
1884// before volatile-loads.
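// A sketch of the resulting placement (assuming TSO, where only the
// trailing StoreLoad is a real barrier and the others are no-ops):
//
//   volatile load:   ld  [addr], r
//                    membar #LoadLoad | #LoadStore    // elided on TSO
//
//   volatile store:  membar #LoadStore | #StoreStore  // elided on TSO
//                    st  r, [addr]
//                    membar #StoreLoad                // emitted on TSO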
1885void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
1886  // Helper to emit a memory barrier; callers perform the is-volatile test
1887  // All current sparc implementations run in TSO, needing only StoreLoad
1888  if ((order_constraint & Assembler::StoreLoad) == 0) return;
1889  __ membar( order_constraint );
1890}
1891
1892// ----------------------------------------------------------------------------
1893void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
1894  assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
1895  // Depends on cpCacheOop layout!
1896  const int shift_count = (1 + byte_no)*BitsPerByte;
1897  Label resolved;
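  // Packing of the cp cache entry's indices word assumed here (a sketch;
  // cpCacheOop.hpp is authoritative):
  //
  //   bits [15: 0]  constant pool index
  //   bits [23:16]  bytecode_1  (byte_no == 1)
  //   bits [31:24]  bytecode_2  (byte_no == 2)
  //
  // so (indices >> (1 + byte_no) * 8) & 0xFF recovers the bytecode that
  // was recorded when the entry was resolved.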
1898
1899  __ get_cache_and_index_at_bcp(Rcache, index, 1);
1900  __ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() +
1901                                        ConstantPoolCacheEntry::indices_offset())), Lbyte_code);
1902
1903  __ srl(  Lbyte_code, shift_count, Lbyte_code );
1904  __ and3( Lbyte_code,        0xFF, Lbyte_code );
1905  __ cmp(  Lbyte_code, (int)bytecode());
1906  __ br(   Assembler::equal, false, Assembler::pt, resolved);
1907  __ delayed()->set((int)bytecode(), O1);
1908
1909  address entry;
1910  switch (bytecode()) {
1911    case Bytecodes::_getstatic      : // fall through
1912    case Bytecodes::_putstatic      : // fall through
1913    case Bytecodes::_getfield       : // fall through
1914    case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
1915    case Bytecodes::_invokevirtual  : // fall through
1916    case Bytecodes::_invokespecial  : // fall through
1917    case Bytecodes::_invokestatic   : // fall through
1918    case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);  break;
1919    default                         : ShouldNotReachHere();                                 break;
1920  }
1921  // first time invocation - must resolve first
1922  __ call_VM(noreg, entry, O1);
1923  // Update registers with resolved info
1924  __ get_cache_and_index_at_bcp(Rcache, index, 1);
1925  __ bind(resolved);
1926}
1927
1928void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
1929                                               Register Rmethod,
1930                                               Register Ritable_index,
1931                                               Register Rflags,
1932                                               bool is_invokevirtual,
1933                                               bool is_invokevfinal) {
1934  // Uses both G3_scratch and G4_scratch
1935  Register Rcache = G3_scratch;
1936  Register Rscratch = G4_scratch;
1937  assert_different_registers(Rcache, Rmethod, Ritable_index);
1938
1939  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
1940
1941  // determine constant pool cache field offsets
1942  const int method_offset = in_bytes(
1943    cp_base_offset +
1944      (is_invokevirtual
1945       ? ConstantPoolCacheEntry::f2_offset()
1946       : ConstantPoolCacheEntry::f1_offset()
1947      )
1948    );
1949  const int flags_offset = in_bytes(cp_base_offset +
1950                                    ConstantPoolCacheEntry::flags_offset());
1951  // access constant pool cache fields
1952  const int index_offset = in_bytes(cp_base_offset +
1953                                    ConstantPoolCacheEntry::f2_offset());
1954
1955  if (is_invokevfinal) {
1956    __ get_cache_and_index_at_bcp(Rcache, Rscratch, 1);
1957  } else {
1958    resolve_cache_and_index(byte_no, Rcache, Rscratch);
1959  }
1960
1961  __ ld_ptr(Address(Rcache, 0, method_offset), Rmethod);
1962  if (Ritable_index != noreg) {
1963    __ ld_ptr(Address(Rcache, 0, index_offset), Ritable_index);
1964  }
1965  __ ld_ptr(Address(Rcache, 0, flags_offset),  Rflags);
1966}
1967
1968// The Rcache register must be set before the call
1969void TemplateTable::load_field_cp_cache_entry(Register Robj,
1970                                              Register Rcache,
1971                                              Register index,
1972                                              Register Roffset,
1973                                              Register Rflags,
1974                                              bool is_static) {
1975  assert_different_registers(Rcache, Rflags, Roffset);
1976
1977  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
1978
1979  __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
1980                             ConstantPoolCacheEntry::flags_offset())), Rflags);
1981  __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
1982                             ConstantPoolCacheEntry::f2_offset())), Roffset);
1983  if (is_static) {
1984    __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
1985                             ConstantPoolCacheEntry::f1_offset())), Robj);
1986  }
1987}
1988
1989// The Rcache and index registers are expected to be set before the call.
1990// Their correct values are preserved across the call.
1991void TemplateTable::jvmti_post_field_access(Register Rcache,
1992                                            Register index,
1993                                            bool is_static,
1994                                            bool has_tos) {
1995  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
1996
1997  if (JvmtiExport::can_post_field_access()) {
1998    // Check to see if a field access watch has been set before we take
1999    // the time to call into the VM.
2000    Label Label1;
2001    assert_different_registers(Rcache, index, G1_scratch);
2002    Address get_field_access_count_addr(G1_scratch,
2003                                        (address)JvmtiExport::get_field_access_count_addr(),
2004                                        relocInfo::none);
2005    __ load_contents(get_field_access_count_addr, G1_scratch);
2006    __ tst(G1_scratch);
2007    __ br(Assembler::zero, false, Assembler::pt, Label1);
2008    __ delayed()->nop();
2009
2010    __ add(Rcache, in_bytes(cp_base_offset), Rcache);
2011
2012    if (is_static) {
2013      __ clr(Otos_i);
2014    } else {
2015      if (has_tos) {
2016        // save object pointer before call_VM() clobbers it
2017        __ mov(Otos_i, Lscratch);
2018      } else {
2019        // Load top of stack (do not pop the value off the stack)
2020        __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
2021      }
2022      __ verify_oop(Otos_i);
2023    }
2024    // Otos_i: object pointer or NULL if static
2025    // Rcache: cache entry pointer
2026    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2027               Otos_i, Rcache);
2028    if (!is_static && has_tos) {
2029      __ mov(Lscratch, Otos_i);  // restore object pointer
2030      __ verify_oop(Otos_i);
2031    }
2032    __ get_cache_and_index_at_bcp(Rcache, index, 1);
2033    __ bind(Label1);
2034  }
2035}
2036
2037void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2038  transition(vtos, vtos);
2039
2040  Register Rcache = G3_scratch;
2041  Register index  = G4_scratch;
2042  Register Rclass = Rcache;
2043  Register Roffset= G4_scratch;
2044  Register Rflags = G1_scratch;
2045  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2046
2047  resolve_cache_and_index(byte_no, Rcache, index);
2048  jvmti_post_field_access(Rcache, index, is_static, false);
2049  load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2050
2051  if (!is_static) {
2052    pop_and_check_object(Rclass);
2053  } else {
2054    __ verify_oop(Rclass);
2055  }
2056
2057  Label exit;
2058
2059  Assembler::Membar_mask_bits membar_bits =
2060    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2061
2062  if (__ membar_has_effect(membar_bits)) {
2063    // Get volatile flag
2064    __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2065    __ and3(Rflags, Lscratch, Lscratch);
2066  }
2067
2068  Label checkVolatile;
2069
2070  // compute field type
2071  Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj;
2072  __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
2073  // Make sure we don't need to mask Rflags for tosBits after the above shift
2074  ConstantPoolCacheEntry::verify_tosBits();
2075
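  // The type dispatch below is a chain of compare/branch pairs in which
  // each branch's delay slot pre-loads the next comparison, e.g. (sketch):
  //   cmp(Rflags, atos); br(notEqual, notObj); delayed()->cmp(Rflags, itos);
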
2076  // Check atos before itos for getstatic, more likely (in Queens at least)
2077  __ cmp(Rflags, atos);
2078  __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2079  __ delayed() ->cmp(Rflags, itos);
2080
2081  // atos
2082  __ load_heap_oop(Rclass, Roffset, Otos_i);
2083  __ verify_oop(Otos_i);
2084  __ push(atos);
2085  if (!is_static) {
2086    patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
2087  }
2088  __ ba(false, checkVolatile);
2089  __ delayed()->tst(Lscratch);
2090
2091  __ bind(notObj);
2092
2093  // cmp(Rflags, itos);
2094  __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2095  __ delayed() ->cmp(Rflags, ltos);
2096
2097  // itos
2098  __ ld(Rclass, Roffset, Otos_i);
2099  __ push(itos);
2100  if (!is_static) {
2101    patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
2102  }
2103  __ ba(false, checkVolatile);
2104  __ delayed()->tst(Lscratch);
2105
2106  __ bind(notInt);
2107
2108  // cmp(Rflags, ltos);
2109  __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2110  __ delayed() ->cmp(Rflags, btos);
2111
2112  // ltos
2113  // load must be atomic
2114  __ ld_long(Rclass, Roffset, Otos_l);
2115  __ push(ltos);
2116  if (!is_static) {
2117    patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
2118  }
2119  __ ba(false, checkVolatile);
2120  __ delayed()->tst(Lscratch);
2121
2122  __ bind(notLong);
2123
2124  // cmp(Rflags, btos);
2125  __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2126  __ delayed() ->cmp(Rflags, ctos);
2127
2128  // btos
2129  __ ldsb(Rclass, Roffset, Otos_i);
2130  __ push(itos);
2131  if (!is_static) {
2132    patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
2133  }
2134  __ ba(false, checkVolatile);
2135  __ delayed()->tst(Lscratch);
2136
2137  __ bind(notByte);
2138
2139  // cmp(Rflags, ctos);
2140  __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2141  __ delayed() ->cmp(Rflags, stos);
2142
2143  // ctos
2144  __ lduh(Rclass, Roffset, Otos_i);
2145  __ push(itos);
2146  if (!is_static) {
2147    patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
2148  }
2149  __ ba(false, checkVolatile);
2150  __ delayed()->tst(Lscratch);
2151
2152  __ bind(notChar);
2153
2154  // cmp(Rflags, stos);
2155  __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2156  __ delayed() ->cmp(Rflags, ftos);
2157
2158  // stos
2159  __ ldsh(Rclass, Roffset, Otos_i);
2160  __ push(itos);
2161  if (!is_static) {
2162    patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
2163  }
2164  __ ba(false, checkVolatile);
2165  __ delayed()->tst(Lscratch);
2166
2167  __ bind(notShort);
2168
2169
2170  // cmp(Rflags, ftos);
2171  __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
2172  __ delayed() ->tst(Lscratch);
2173
2174  // ftos
2175  __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
2176  __ push(ftos);
2177  if (!is_static) {
2178    patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
2179  }
2180  __ ba(false, checkVolatile);
2181  __ delayed()->tst(Lscratch);
2182
2183  __ bind(notFloat);
2184
2185
2186  // dtos
2187  __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
2188  __ push(dtos);
2189  if (!is_static) {
2190    patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
2191  }
2192
2193  __ bind(checkVolatile);
2194  if (__ membar_has_effect(membar_bits)) {
2195    // __ tst(Lscratch); executed in delay slot
2196    __ br(Assembler::zero, false, Assembler::pt, exit);
2197    __ delayed()->nop();
2198    volatile_barrier(membar_bits);
2199  }
2200
2201  __ bind(exit);
2202}
2203
2204
2205void TemplateTable::getfield(int byte_no) {
2206  getfield_or_static(byte_no, false);
2207}
2208
2209void TemplateTable::getstatic(int byte_no) {
2210  getfield_or_static(byte_no, true);
2211}
2212
2213
2214void TemplateTable::fast_accessfield(TosState state) {
2215  transition(atos, state);
2216  Register Rcache  = G3_scratch;
2217  Register index   = G4_scratch;
2218  Register Roffset = G4_scratch;
2219  Register Rflags  = Rcache;
2220  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2221
2222  __ get_cache_and_index_at_bcp(Rcache, index, 1);
2223  jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);
2224
2225  __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())), Roffset);
2226
2227  __ null_check(Otos_i);
2228  __ verify_oop(Otos_i);
2229
2230  Label exit;
2231
2232  Assembler::Membar_mask_bits membar_bits =
2233    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2234  if (__ membar_has_effect(membar_bits)) {
2235    // Get volatile flag
2236    __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())), Rflags);
2237    __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2238  }
2239
2240  switch (bytecode()) {
2241    case Bytecodes::_fast_bgetfield:
2242      __ ldsb(Otos_i, Roffset, Otos_i);
2243      break;
2244    case Bytecodes::_fast_cgetfield:
2245      __ lduh(Otos_i, Roffset, Otos_i);
2246      break;
2247    case Bytecodes::_fast_sgetfield:
2248      __ ldsh(Otos_i, Roffset, Otos_i);
2249      break;
2250    case Bytecodes::_fast_igetfield:
2251      __ ld(Otos_i, Roffset, Otos_i);
2252      break;
2253    case Bytecodes::_fast_lgetfield:
2254      __ ld_long(Otos_i, Roffset, Otos_l);
2255      break;
2256    case Bytecodes::_fast_fgetfield:
2257      __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
2258      break;
2259    case Bytecodes::_fast_dgetfield:
2260      __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
2261      break;
2262    case Bytecodes::_fast_agetfield:
2263      __ load_heap_oop(Otos_i, Roffset, Otos_i);
2264      break;
2265    default:
2266      ShouldNotReachHere();
2267  }
2268
2269  if (__ membar_has_effect(membar_bits)) {
2270    __ btst(Lscratch, Rflags);
2271    __ br(Assembler::zero, false, Assembler::pt, exit);
2272    __ delayed()->nop();
2273    volatile_barrier(membar_bits);
2274    __ bind(exit);
2275  }
2276
2277  if (state == atos) {
2278    __ verify_oop(Otos_i);    // does not blow flags!
2279  }
2280}
2281
2282void TemplateTable::jvmti_post_fast_field_mod() {
2283  if (JvmtiExport::can_post_field_modification()) {
2284    // Check to see if a field modification watch has been set before we take
2285    // the time to call into the VM.
2286    Label done;
2287    Address get_field_modification_count_addr(G4_scratch, (address)JvmtiExport::get_field_modification_count_addr(), relocInfo::none);
2288    __ load_contents(get_field_modification_count_addr, G4_scratch);
2289    __ tst(G4_scratch);
2290    __ br(Assembler::zero, false, Assembler::pt, done);
2291    __ delayed()->nop();
2292    __ pop_ptr(G4_scratch);     // copy the object pointer from tos
2293    __ verify_oop(G4_scratch);
2294    __ push_ptr(G4_scratch);    // put the object pointer back on tos
2295    __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
2296    // Save tos values before call_VM() clobbers them. Since we have
2297    // to do it for every data type, we use the saved values as the
2298    // jvalue object.
2299    switch (bytecode()) {  // save tos values before call_VM() clobbers them
2300    case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
2301    case Bytecodes::_fast_bputfield: // fall through
2302    case Bytecodes::_fast_sputfield: // fall through
2303    case Bytecodes::_fast_cputfield: // fall through
2304    case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
2305    case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
2306    case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
2307    // get words in right order for use as jvalue object
2308    case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
2309    }
2310    // setup pointer to jvalue object
2311    __ mov(Lesp, G3_scratch);  __ inc(G3_scratch, wordSize);
2312    // G4_scratch:  object pointer
2313    // G1_scratch: cache entry pointer
2314    // G3_scratch: jvalue object on the stack
2315    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
2316    switch (bytecode()) {             // restore tos values
2317    case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
2318    case Bytecodes::_fast_bputfield: // fall through
2319    case Bytecodes::_fast_sputfield: // fall through
2320    case Bytecodes::_fast_cputfield: // fall through
2321    case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
2322    case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
2323    case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
2324    case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
2325    }
2326    __ bind(done);
2327  }
2328}
2329
2330// The Rcache and index registers are expected to be set before the call.
2331// The function may destroy various registers, just not Rcache and index.
2332void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
2333  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2334
2335  if (JvmtiExport::can_post_field_modification()) {
2336    // Check to see if a field modification watch has been set before we take
2337    // the time to call into the VM.
2338    Label Label1;
2339    assert_different_registers(Rcache, index, G1_scratch);
2340    Address get_field_modification_count_addr(G1_scratch,
2341                                              (address)JvmtiExport::get_field_modification_count_addr(),
2342                                              relocInfo::none);
2343    __ load_contents(get_field_modification_count_addr, G1_scratch);
2344    __ tst(G1_scratch);
2345    __ br(Assembler::zero, false, Assembler::pt, Label1);
2346    __ delayed()->nop();
2347
2348    // The Rcache and index registers have already been set.
2349    // This call could therefore be eliminated, but then the Rcache and
2350    // index registers would have to be used consistently below.
2351    __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);
2352
2353    __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
2354    if (is_static) {
2355      // Life is simple.  Null out the object pointer.
2356      __ clr(G4_scratch);
2357    } else {
2358      Register Rflags = G1_scratch;
2359      // Life is harder. The stack holds the value on top, followed by the
2360      // object.  We don't know the size of the value, though; it could be
2361      // one or two words depending on its type. As a result, we must find
2362      // the type to determine where the object is.
2363
2364      Label two_word, valsizeknown;
2365      __ ld_ptr(Address(G1_scratch, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())), Rflags);
2366      __ mov(Lesp, G4_scratch);
2367      __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
2368      // Make sure we don't need to mask Rflags for tosBits after the above shift
2369      ConstantPoolCacheEntry::verify_tosBits();
2370      __ cmp(Rflags, ltos);
2371      __ br(Assembler::equal, false, Assembler::pt, two_word);
2372      __ delayed()->cmp(Rflags, dtos);
2373      __ br(Assembler::equal, false, Assembler::pt, two_word);
2374      __ delayed()->nop();
2375      __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
2376      __ br(Assembler::always, false, Assembler::pt, valsizeknown);
2377      __ delayed()->nop();
2378      __ bind(two_word);
2379
2380      __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));
2381
2382      __ bind(valsizeknown);
2383      // setup object pointer
2384      __ ld_ptr(G4_scratch, 0, G4_scratch);
2385      __ verify_oop(G4_scratch);
2386    }
2387    // setup pointer to jvalue object
2388    __ mov(Lesp, G1_scratch);  __ inc(G1_scratch, wordSize);
2389    // G4_scratch:  object pointer or NULL if static
2390    // G3_scratch: cache entry pointer
2391    // G1_scratch: jvalue object on the stack
2392    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2393               G4_scratch, G3_scratch, G1_scratch);
2394    __ get_cache_and_index_at_bcp(Rcache, index, 1);
2395    __ bind(Label1);
2396  }
2397}
2398
2399void TemplateTable::pop_and_check_object(Register r) {
2400  __ pop_ptr(r);
2401  __ null_check(r);  // for field access must check obj.
2402  __ verify_oop(r);
2403}
2404
2405void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2406  transition(vtos, vtos);
2407  Register Rcache = G3_scratch;
2408  Register index  = G4_scratch;
2409  Register Rclass = Rcache;
2410  Register Roffset= G4_scratch;
2411  Register Rflags = G1_scratch;
2412  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2413
2414  resolve_cache_and_index(byte_no, Rcache, index);
2415  jvmti_post_field_mod(Rcache, index, is_static);
2416  load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2417
2418  Assembler::Membar_mask_bits read_bits =
2419    Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2420  Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
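  // For a volatile store the fences bracket the store itself (a sketch):
  //   membar(LoadStore|StoreStore);  st ...;  membar(StoreLoad);
  // On TSO only the trailing StoreLoad has any effect, so
  // membar_has_effect() lets the leading barrier be elided.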
2421
2422  Label notVolatile, checkVolatile, exit;
2423  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2424    __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2425    __ and3(Rflags, Lscratch, Lscratch);
2426
2427    if (__ membar_has_effect(read_bits)) {
2428      __ tst(Lscratch);
2429      __ br(Assembler::zero, false, Assembler::pt, notVolatile);
2430      __ delayed()->nop();
2431      volatile_barrier(read_bits);
2432      __ bind(notVolatile);
2433    }
2434  }
2435
2436  __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
2437  // Make sure we don't need to mask Rflags for tosBits after the above shift
2438  ConstantPoolCacheEntry::verify_tosBits();
2439
2440  // compute field type
2441  Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat;
2442
2443  if (is_static) {
2444    // putstatic with object type most likely, check that first
2445    __ cmp(Rflags, atos );
2446    __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2447    __ delayed() ->cmp(Rflags, itos );
2448
2449    // atos
2450    __ pop_ptr();
2451    __ verify_oop(Otos_i);
2452    __ store_heap_oop(Otos_i, Rclass, Roffset);
2453    __ store_check(G1_scratch, Rclass, Roffset);
2454    __ ba(false, checkVolatile);
2455    __ delayed()->tst(Lscratch);
2456
2457    __ bind(notObj);
2458
2459    // cmp(Rflags, itos );
2460    __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2461    __ delayed() ->cmp(Rflags, btos );
2462
2463    // itos
2464    __ pop_i();
2465    __ st(Otos_i, Rclass, Roffset);
2466    __ ba(false, checkVolatile);
2467    __ delayed()->tst(Lscratch);
2468
2469    __ bind(notInt);
2470
2471  } else {
2472    // putfield with int type most likely, check that first
2473    __ cmp(Rflags, itos );
2474    __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2475    __ delayed() ->cmp(Rflags, atos );
2476
2477    // itos
2478    __ pop_i();
2479    pop_and_check_object(Rclass);
2480    __ st(Otos_i, Rclass, Roffset);
2481    patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch);
2482    __ ba(false, checkVolatile);
2483    __ delayed()->tst(Lscratch);
2484
2485    __ bind(notInt);
2486    // cmp(Rflags, atos );
2487    __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2488    __ delayed() ->cmp(Rflags, btos );
2489
2490    // atos
2491    __ pop_ptr();
2492    pop_and_check_object(Rclass);
2493    __ verify_oop(Otos_i);
2494    __ store_heap_oop(Otos_i, Rclass, Roffset);
2495    __ store_check(G1_scratch, Rclass, Roffset);
2496    patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch);
2497    __ ba(false, checkVolatile);
2498    __ delayed()->tst(Lscratch);
2499
2500    __ bind(notObj);
2501  }
2502
2503  // cmp(Rflags, btos );
2504  __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2505  __ delayed() ->cmp(Rflags, ltos );
2506
2507  // btos
2508  __ pop_i();
2509  if (!is_static) pop_and_check_object(Rclass);
2510  __ stb(Otos_i, Rclass, Roffset);
2511  if (!is_static) {
2512    patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch);
2513  }
2514  __ ba(false, checkVolatile);
2515  __ delayed()->tst(Lscratch);
2516
2517  __ bind(notByte);
2518
2519  // cmp(Rflags, ltos );
2520  __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2521  __ delayed() ->cmp(Rflags, ctos );
2522
2523  // ltos
2524  __ pop_l();
2525  if (!is_static) pop_and_check_object(Rclass);
2526  __ st_long(Otos_l, Rclass, Roffset);
2527  if (!is_static) {
2528    patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch);
2529  }
2530  __ ba(false, checkVolatile);
2531  __ delayed()->tst(Lscratch);
2532
2533  __ bind(notLong);
2534
2535  // cmp(Rflags, ctos );
2536  __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2537  __ delayed() ->cmp(Rflags, stos );
2538
2539  // ctos (char)
2540  __ pop_i();
2541  if (!is_static) pop_and_check_object(Rclass);
2542  __ sth(Otos_i, Rclass, Roffset);
2543  if (!is_static) {
2544    patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch);
2545  }
2546  __ ba(false, checkVolatile);
2547  __ delayed()->tst(Lscratch);
2548
2549  __ bind(notChar);
2550  // cmp(Rflags, stos );
2551  __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2552  __ delayed() ->cmp(Rflags, ftos );
2553
2554  // stos (short)
2555  __ pop_i();
2556  if (!is_static) pop_and_check_object(Rclass);
2557  __ sth(Otos_i, Rclass, Roffset);
2558  if (!is_static) {
2559    patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch);
2560  }
2561  __ ba(false, checkVolatile);
2562  __ delayed()->tst(Lscratch);
2563
2564  __ bind(notShort);
2565  // cmp(Rflags, ftos );
2566  __ br(Assembler::notZero, false, Assembler::pt, notFloat);
2567  __ delayed()->nop();
2568
2569  // ftos
2570  __ pop_f();
2571  if (!is_static) pop_and_check_object(Rclass);
2572  __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2573  if (!is_static) {
2574    patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch);
2575  }
2576  __ ba(false, checkVolatile);
2577  __ delayed()->tst(Lscratch);
2578
2579  __ bind(notFloat);
2580
2581  // dtos
2582  __ pop_d();
2583  if (!is_static) pop_and_check_object(Rclass);
2584  __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2585  if (!is_static) {
2586    patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch);
2587  }
2588
2589  __ bind(checkVolatile);
2590  __ tst(Lscratch);
2591
2592  if (__ membar_has_effect(write_bits)) {
2593    // __ tst(Lscratch); in delay slot
2594    __ br(Assembler::zero, false, Assembler::pt, exit);
2595    __ delayed()->nop();
2596    volatile_barrier(Assembler::StoreLoad);
2597    __ bind(exit);
2598  }
2599}
2600
2601void TemplateTable::fast_storefield(TosState state) {
2602  transition(state, vtos);
2603  Register Rcache = G3_scratch;
2604  Register Rclass = Rcache;
2605  Register Roffset= G4_scratch;
2606  Register Rflags = G1_scratch;
2607  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2608
2609  jvmti_post_fast_field_mod();
2610
2611  __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);
2612
2613  Assembler::Membar_mask_bits read_bits =
2614    Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2615  Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
2616
2617  Label notVolatile, checkVolatile, exit;
2618  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2619    __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
2620                             ConstantPoolCacheEntry::flags_offset())), Rflags);
2621    __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2622    __ and3(Rflags, Lscratch, Lscratch);
2623    if (__ membar_has_effect(read_bits)) {
2624      __ tst(Lscratch);
2625      __ br(Assembler::zero, false, Assembler::pt, notVolatile);
2626      __ delayed()->nop();
2627      volatile_barrier(read_bits);
2628      __ bind(notVolatile);
2629    }
2630  }
2631
2632  __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
2633                             ConstantPoolCacheEntry::f2_offset())), Roffset);
2634  pop_and_check_object(Rclass);
2635
2636  switch (bytecode()) {
2637    case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
2638    case Bytecodes::_fast_cputfield: /* fall through */
2639    case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
2640    case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset);  break;
2641    case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
2642    case Bytecodes::_fast_fputfield:
2643      __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2644      break;
2645    case Bytecodes::_fast_dputfield:
2646      __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2647      break;
2648    case Bytecodes::_fast_aputfield:
2649      __ store_heap_oop(Otos_i, Rclass, Roffset);
2650      __ store_check(G1_scratch, Rclass, Roffset);
2651      break;
2652    default:
2653      ShouldNotReachHere();
2654  }
2655
2656  if (__ membar_has_effect(write_bits)) {
2657    __ tst(Lscratch);
2658    __ br(Assembler::zero, false, Assembler::pt, exit);
2659    __ delayed()->nop();
2660    volatile_barrier(Assembler::StoreLoad);
2661    __ bind(exit);
2662  }
2663}
2664
2665
2666void TemplateTable::putfield(int byte_no) {
2667  putfield_or_static(byte_no, false);
2668}
2669
2670void TemplateTable::putstatic(int byte_no) {
2671  putfield_or_static(byte_no, true);
2672}
2673
2674
2675void TemplateTable::fast_xaccess(TosState state) {
2676  transition(vtos, state);
2677  Register Rcache = G3_scratch;
2678  Register Roffset = G4_scratch;
2679  Register Rflags  = G4_scratch;
2680  Register Rreceiver = Lscratch;
2681
2682  __ ld_ptr(Llocals, Interpreter::value_offset_in_bytes(), Rreceiver);
2683
2684  // access constant pool cache (entry is already resolved)
2685  __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
2686  __ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())), Roffset);
2687  __ add(Lbcp, 1, Lbcp);       // needed to report exception at the correct bcp
2688
2689  __ verify_oop(Rreceiver);
2690  __ null_check(Rreceiver);
2691  if (state == atos) {
2692    __ load_heap_oop(Rreceiver, Roffset, Otos_i);
2693  } else if (state == itos) {
2694    __ ld (Rreceiver, Roffset, Otos_i) ;
2695  } else if (state == ftos) {
2696    __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
2697  } else {
2698    ShouldNotReachHere();
2699  }
2700
2701  Assembler::Membar_mask_bits membar_bits =
2702    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2703  if (__ membar_has_effect(membar_bits)) {
2704
2705    // Get is_volatile value in Rflags and check if membar is needed
2706    __ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset())), Rflags);
2707
2708    // Test volatile
2709    Label notVolatile;
2710    __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2711    __ btst(Rflags, Lscratch);
2712    __ br(Assembler::zero, false, Assembler::pt, notVolatile);
2713    __ delayed()->nop();
2714    volatile_barrier(membar_bits);
2715    __ bind(notVolatile);
2716  }
2717
2718  __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
2719  __ sub(Lbcp, 1, Lbcp);
2720}
2721
2722//----------------------------------------------------------------------------------------------------
2723// Calls
2724
2725void TemplateTable::count_calls(Register method, Register temp) {
2726  // implemented elsewhere
2727  ShouldNotReachHere();
2728}
2729
2730void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
2731  Register Rtemp = G4_scratch;
2732  Register Rcall = Rindex;
2733  assert_different_registers(Rcall, G5_method, Gargs, Rret);
2734
2735  // get target methodOop & entry point
2736  const int base = instanceKlass::vtable_start_offset() * wordSize;
2737  if (vtableEntry::size() % 3 == 0) {
2738    // scale the vtable index by 12:
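    // (the trick, sketched for 4-byte words: a 3-word entry is 12 bytes,
    //  and index*12 == index*4 + index*8, i.e. two shifts and an add
    //  instead of a general multiply)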
2739    int one_third = vtableEntry::size() / 3;
2740    __ sll(Rindex, exact_log2(one_third * 1 * wordSize), Rtemp);
2741    __ sll(Rindex, exact_log2(one_third * 2 * wordSize), Rindex);
2742    __ add(Rindex, Rtemp, Rindex);
2743  } else {
2744    // scale the vtable index by 8:
2745    __ sll(Rindex, exact_log2(vtableEntry::size() * wordSize), Rindex);
2746  }
2747
2748  __ add(Rrecv, Rindex, Rrecv);
2749  __ ld_ptr(Rrecv, base + vtableEntry::method_offset_in_bytes(), G5_method);
2750
2751  __ call_from_interpreter(Rcall, Gargs, Rret);
2752}
2753
2754void TemplateTable::invokevirtual(int byte_no) {
2755  transition(vtos, vtos);
2756
2757  Register Rscratch = G3_scratch;
2758  Register Rtemp = G4_scratch;
2759  Register Rret = Lscratch;
2760  Register Rrecv = G5_method;
2761  Label notFinal;
2762
2763  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true);
2764  __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2765
2766  // Check for vfinal
2767  __ set((1 << ConstantPoolCacheEntry::vfinalMethod), G4_scratch);
2768  __ btst(Rret, G4_scratch);
2769  __ br(Assembler::zero, false, Assembler::pt, notFinal);
2770  __ delayed()->and3(Rret, 0xFF, G4_scratch);      // gets number of parameters
2771
2772  patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
2773
2774  invokevfinal_helper(Rscratch, Rret);
2775
2776  __ bind(notFinal);
2777
2778  __ mov(G5_method, Rscratch);  // better scratch register
2779  __ load_receiver(G4_scratch, O0);  // gets receiverOop
2780  // receiver is in O0
2781  __ verify_oop(O0);
2782
2783  // get return address
2784  Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table());
2785  __ load_address(table);
2786  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);          // get return type
2787  // Make sure we don't need to mask Rret for tosBits after the above shift
2788  ConstantPoolCacheEntry::verify_tosBits();
2789  __ sll(Rret,  LogBytesPerWord, Rret);
2790  __ ld_ptr(Rtemp, Rret, Rret);         // get return address
2791
2792  // get receiver klass
2793  __ null_check(O0, oopDesc::klass_offset_in_bytes());
2794  __ load_klass(O0, Rrecv);
2795  __ verify_oop(Rrecv);
2796
2797  __ profile_virtual_call(Rrecv, O4);
2798
2799  generate_vtable_call(Rrecv, Rscratch, Rret);
2800}
2801
2802void TemplateTable::fast_invokevfinal(int byte_no) {
2803  transition(vtos, vtos);
2804
2805  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
2806                             /*is_invokevfinal*/true);
2807  __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2808  invokevfinal_helper(G3_scratch, Lscratch);
2809}
2810
2811void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
2812  Register Rtemp = G4_scratch;
2813
2814  __ verify_oop(G5_method);
2815
2816  // Load receiver from stack slot
2817  __ lduh(Address(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())), G4_scratch);
2818  __ load_receiver(G4_scratch, O0);
2819
2820  // receiver NULL check
2821  __ null_check(O0);
2822
2823  __ profile_final_call(O4);
2824
2825  // get return address
2826  Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table());
2827  __ load_address(table);
2828  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);          // get return type
2829  // Make sure we don't need to mask Rret for tosBits after the above shift
2830  ConstantPoolCacheEntry::verify_tosBits();
2831  __ sll(Rret,  LogBytesPerWord, Rret);
2832  __ ld_ptr(Rtemp, Rret, Rret);         // get return address
2833
2834
2835  // do the call
2836  __ call_from_interpreter(Rscratch, Gargs, Rret);
2837}
2838
2839void TemplateTable::invokespecial(int byte_no) {
2840  transition(vtos, vtos);
2841
2842  Register Rscratch = G3_scratch;
2843  Register Rtemp = G4_scratch;
2844  Register Rret = Lscratch;
2845
2846  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, false);
2847  __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2848
2849  __ verify_oop(G5_method);
2850
2851  __ lduh(Address(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())), G4_scratch);
2852  __ load_receiver(G4_scratch, O0);
2853
2854  // receiver NULL check
2855  __ null_check(O0);
2856
2857  __ profile_call(O4);
2858
2859  // get return address
2860  Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table());
2861  __ load_address(table);
2862  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);          // get return type
2863  // Make sure we don't need to mask Rret for tosBits after the above shift
2864  ConstantPoolCacheEntry::verify_tosBits();
2865  __ sll(Rret,  LogBytesPerWord, Rret);
2866  __ ld_ptr(Rtemp, Rret, Rret);         // get return address
2867
2868  // do the call
2869  __ call_from_interpreter(Rscratch, Gargs, Rret);
2870}
2871
2872void TemplateTable::invokestatic(int byte_no) {
2873  transition(vtos, vtos);
2874
2875  Register Rscratch = G3_scratch;
2876  Register Rtemp = G4_scratch;
2877  Register Rret = Lscratch;
2878
2879  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, false);
2880  __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2881
2882  __ verify_oop(G5_method);
2883
2884  __ profile_call(O4);
2885
2886  // get return address
2887  Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table());
2888  __ load_address(table);
2889  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);          // get return type
2890  // Make sure we don't need to mask Rret for tosBits after the above shift
2891  ConstantPoolCacheEntry::verify_tosBits();
2892  __ sll(Rret,  LogBytesPerWord, Rret);
2893  __ ld_ptr(Rtemp, Rret, Rret);         // get return address
2894
2895  // do the call
2896  __ call_from_interpreter(Rscratch, Gargs, Rret);
2897}
2898
2899
2900void TemplateTable::invokeinterface_object_method(Register RklassOop,
2901                                                  Register Rcall,
2902                                                  Register Rret,
2903                                                  Register Rflags) {
2904  Register Rscratch = G4_scratch;
2905  Register Rindex = Lscratch;
2906
2907  assert_different_registers(Rscratch, Rindex, Rret);
2908
2909  Label notFinal;
2910
2911  // Check for vfinal
2912  __ set((1 << ConstantPoolCacheEntry::vfinalMethod), Rscratch);
2913  __ btst(Rflags, Rscratch);
2914  __ br(Assembler::zero, false, Assembler::pt, notFinal);
2915  __ delayed()->nop();
2916
2917  __ profile_final_call(O4);
2918
2919  // do the call - the index (f2) contains the methodOop
2920  assert_different_registers(G5_method, Gargs, Rcall);
2921  __ mov(Rindex, G5_method);
2922  __ call_from_interpreter(Rcall, Gargs, Rret);
2923  __ bind(notFinal);
2924
2925  __ profile_virtual_call(RklassOop, O4);
2926  generate_vtable_call(RklassOop, Rindex, Rret);
2927}
2928
2929
2930void TemplateTable::invokeinterface(int byte_no) {
2931  transition(vtos, vtos);
2932
2933  Register Rscratch = G4_scratch;
2934  Register Rret = G3_scratch;
2935  Register Rindex = Lscratch;
2936  Register Rinterface = G1_scratch;
2937  Register RklassOop = G5_method;
2938  Register Rflags = O1;
2939  assert_different_registers(Rscratch, G5_method);
2940
2941  load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, false);
2942  __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2943
2944  // get receiver
2945  __ and3(Rflags, 0xFF, Rscratch);       // gets number of parameters
2946  __ load_receiver(Rscratch, O0);
2947  __ verify_oop(O0);
2948
2949  __ mov(Rflags, Rret);
2950
2951  // get return address
2952  Address table(Rscratch, (address)Interpreter::return_5_addrs_by_index_table());
2953  __ load_address(table);
2954  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);          // get return type
2955  // Make sure we don't need to mask Rret for tosBits after the above shift
2956  ConstantPoolCacheEntry::verify_tosBits();
2957  __ sll(Rret,  LogBytesPerWord, Rret);
2958  __ ld_ptr(Rscratch, Rret, Rret);      // get return address
2959
2960  // get receiver klass
2961  __ null_check(O0, oopDesc::klass_offset_in_bytes());
2962  __ load_klass(O0, RklassOop);
2963  __ verify_oop(RklassOop);
2964
2965  // Special case of invokeinterface called for virtual method of
2966  // java.lang.Object.  See cpCacheOop.cpp for details.
2967  // This code isn't produced by javac, but could be produced by
2968  // another compliant java compiler.
2969  Label notMethod;
2970  __ set((1 << ConstantPoolCacheEntry::methodInterface), Rscratch);
2971  __ btst(Rflags, Rscratch);
2972  __ br(Assembler::zero, false, Assembler::pt, notMethod);
2973  __ delayed()->nop();
2974
2975  invokeinterface_object_method(RklassOop, Rinterface, Rret, Rflags);
2976
2977  __ bind(notMethod);
2978
2979  __ profile_virtual_call(RklassOop, O4);
2980
2981  //
2982  // find entry point to call
2983  //
2984
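  // itable layout following the vtable (a sketch; klassItable is
  // authoritative):
  //
  //   itableOffsetEntry { klassOop interface; int offset; } ...  // scanned below
  //   itableMethodEntry { methodOop method; } ...   // per-interface method tables
  //
  // We scan the offset entries for Rinterface, then use the matching
  // offset to index that interface's method table with Rindex.
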
2985  // compute start of first itableOffsetEntry (which is at end of vtable)
2986  const int base = instanceKlass::vtable_start_offset() * wordSize;
2987  Label search;
2988  Register Rtemp = Rflags;
2989
2990  __ ld(Address(RklassOop, 0, instanceKlass::vtable_length_offset() * wordSize), Rtemp);
2991  if (align_object_offset(1) > 1) {
2992    __ round_to(Rtemp, align_object_offset(1));
2993  }
2994  __ sll(Rtemp, LogBytesPerWord, Rtemp);   // Rscratch *= 4;
2995  if (Assembler::is_simm13(base)) {
2996    __ add(Rtemp, base, Rtemp);
2997  } else {
2998    __ set(base, Rscratch);
2999    __ add(Rscratch, Rtemp, Rtemp);
3000  }
3001  __ add(RklassOop, Rtemp, Rscratch);
3002
3003  __ bind(search);
3004
3005  __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
3006  {
3007    Label ok;
3008
3009    // Check that entry is non-null.  Null entries are probably a bytecode
3010    // problem.  If the interface isn't implemented by the receiver class,
3011    // the VM should throw IncompatibleClassChangeError.  linkResolver checks
3012    // this too but that's only if the entry isn't already resolved, so we
3013    // need to check again.
3014    __ br_notnull( Rtemp, false, Assembler::pt, ok);
3015    __ delayed()->nop();
3016    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3017    __ should_not_reach_here();
3018    __ bind(ok);
3019    __ verify_oop(Rtemp);
3020  }
3021
3022  __ verify_oop(Rinterface);
3023
3024  __ cmp(Rinterface, Rtemp);
3025  __ brx(Assembler::notEqual, true, Assembler::pn, search);
3026  __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);
3027
3028  // entry found and Rscratch points to it
3029  __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);
3030
3031  assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
3032  __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex);       // Rindex *= entry size in bytes
3033  __ add(Rscratch, Rindex, Rscratch);
3034  __ ld_ptr(RklassOop, Rscratch, G5_method);
3035
3036  // Check for abstract method error.
3037  {
3038    Label ok;
3039    __ tst(G5_method);
3040    __ brx(Assembler::notZero, false, Assembler::pt, ok);
3041    __ delayed()->nop();
3042    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3043    __ should_not_reach_here();
3044    __ bind(ok);
3045  }
3046
3047  Register Rcall = Rinterface;
3048  assert_different_registers(Rcall, G5_method, Gargs, Rret);
3049
3050  __ verify_oop(G5_method);
3051  __ call_from_interpreter(Rcall, Gargs, Rret);
3052
3053}
3054
3055
3056//----------------------------------------------------------------------------------------------------
3057// Allocation
3058
3059void TemplateTable::_new() {
3060  transition(vtos, atos);
3061
3062  Label slow_case;
3063  Label done;
3064  Label initialize_header;
3065  Label initialize_object;  // including clearing the fields
3066
3067  Register RallocatedObject = Otos_i;
3068  Register RinstanceKlass = O1;
3069  Register Roffset = O3;
3070  Register Rscratch = O4;
3071
3072  __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3073  __ get_cpool_and_tags(Rscratch, G3_scratch);
3074  // make sure the class we're about to instantiate has been resolved
3075  __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
3076  __ ldub(G3_scratch, Roffset, G3_scratch);
3077  __ cmp(G3_scratch, JVM_CONSTANT_Class);
3078  __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3079  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3080
3081  //__ sll(Roffset, LogBytesPerWord, Roffset);        // executed in delay slot
3082  __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
3083  __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
3084
3085  // make sure klass is fully initialized:
3086  __ ld(RinstanceKlass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_scratch);
3087  __ cmp(G3_scratch, instanceKlass::fully_initialized);
3088  __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3089  __ delayed()->ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
3090
3091  // get instance_size in instanceKlass (already aligned)
3092  //__ ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
3093
3094  // make sure klass has no finalizer and is not abstract, an interface, or java/lang/Class
3095  __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
3096  __ br(Assembler::notZero, false, Assembler::pn, slow_case);
3097  __ delayed()->nop();
3098
3099  // allocate the instance
3100  // 1) Try to allocate in the TLAB
3101  // 2) if that fails and the TLAB is not yet full enough to discard, allocate in the shared Eden
3102  // 3) if the above fails (or is not applicable), go to a slow case
3103  // (creates a new TLAB, etc.)
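  // The TLAB fast path is bump-pointer allocation (a C-like sketch):
  //   HeapWord* new_top = tlab_top + obj_size;
  //   if (new_top <= tlab_end) { obj = tlab_top; tlab_top = new_top; }
  //   else { /* try the shared eden, or take the slow path */ }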
3104
3105  const bool allow_shared_alloc =
3106    Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3107
3108  if(UseTLAB) {
3109    Register RoldTopValue = RallocatedObject;
3110    Register RtopAddr = G3_scratch, RtlabWasteLimitValue = G3_scratch;
3111    Register RnewTopValue = G1_scratch;
3112    Register RendValue = Rscratch;
3113    Register RfreeValue = RnewTopValue;
3114
3115    // check if we can allocate in the TLAB
3116    __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RallocatedObject
3117    __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
3118    __ add(RoldTopValue, Roffset, RnewTopValue);
3119
3120    // if there is enough space, we do not CAS and do not clear
3121    __ cmp(RnewTopValue, RendValue);
3122    if(ZeroTLAB) {
3123      // the fields have already been cleared
3124      __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
3125    } else {
3126      // initialize both the header and fields
3127      __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
3128    }
3129    __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3130
3131    if (allow_shared_alloc) {
3132      // Check if tlab should be discarded (refill_waste_limit >= free)
3133      __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
3134      __ sub(RendValue, RoldTopValue, RfreeValue);
3135#ifdef _LP64
3136      __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
3137#else
3138      __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
3139#endif
3140      __ cmp(RtlabWasteLimitValue, RfreeValue);
3141      __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, slow_case); // tlab waste is small
3142      __ delayed()->nop();
3143
3144      // increment waste limit to prevent getting stuck on this slow path
3145      __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
3146      __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3147    } else {
3148      // No allocation in the shared eden.
3149      __ br(Assembler::always, false, Assembler::pt, slow_case);
3150      __ delayed()->nop();
3151    }
3152  }
3153
3154  // Allocation in the shared Eden
3155  if (allow_shared_alloc) {
3156    Register RoldTopValue = G1_scratch;
3157    Register RtopAddr = G3_scratch;
3158    Register RnewTopValue = RallocatedObject;
3159    Register RendValue = Rscratch;
3160
3161    __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
3162
3163    Label retry;
3164    __ bind(retry);
3165    __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
3166    __ ld_ptr(RendValue, 0, RendValue);
3167    __ ld_ptr(RtopAddr, 0, RoldTopValue);
3168    __ add(RoldTopValue, Roffset, RnewTopValue);
3169
3170    // RnewTopValue contains the top address after the new object
3171    // has been allocated.
3172    __ cmp(RnewTopValue, RendValue);
3173    __ brx(Assembler::greaterUnsigned, false, Assembler::pn, slow_case);
3174    __ delayed()->nop();
3175
3176    __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
3177      VM_Version::v9_instructions_work() ? NULL :
3178      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3179
3180    // if someone beat us on the allocation, try again, otherwise continue
3181    __ cmp(RoldTopValue, RnewTopValue);
3182    __ brx(Assembler::notEqual, false, Assembler::pn, retry);
3183    __ delayed()->nop();
3184  }
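  // The retry loop above is the usual CAS bump-pointer idiom; roughly
  // (illustrative pseudocode):
  //
  //   do {
  //     old_top = *heap_top_addr;
  //     new_top = old_top + size;
  //     if (new_top > *heap_end_addr) goto slow_case;     // Eden exhausted
  //   } while (CAS(heap_top_addr, old_top, new_top) != old_top);
  //   obj = old_top;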
3185
3186  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3187    // clear object fields
3188    __ bind(initialize_object);
3189    __ deccc(Roffset, sizeof(oopDesc));
3190    __ br(Assembler::zero, false, Assembler::pt, initialize_header);
3191    __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);
3192
3193    // initialize remaining object fields
3194    { Label loop;
3195      __ subcc(Roffset, wordSize, Roffset);
3196      __ bind(loop);
3197      //__ subcc(Roffset, wordSize, Roffset);      // executed above loop or in delay slot
3198      __ st_ptr(G0, G3_scratch, Roffset);
3199      __ br(Assembler::notEqual, false, Assembler::pt, loop);
3200      __ delayed()->subcc(Roffset, wordSize, Roffset);
3201    }
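    // The loop above amounts to (illustrative):
    //   for (off = field_bytes - wordSize; ; off -= wordSize) {
    //     *(intptr_t*)(fields_base + off) = 0;
    //     if (off == 0) break;
    //   }
    // i.e. the field area is zeroed one word at a time, last word first.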
3202    __ br(Assembler::always, false, Assembler::pt, initialize_header);
3203    __ delayed()->nop();
3204  }
3205
3206  // slow case
3207  __ bind(slow_case);
3208  __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
3209  __ get_constant_pool(O1);
3210
3211  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);
3212
3213  __ ba(false, done);
3214  __ delayed()->nop();
3215
3216  // Initialize the header: mark, klass
3217  __ bind(initialize_header);
3218
3219  if (UseBiasedLocking) {
3220    __ ld_ptr(RinstanceKlass, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), G4_scratch);
3221  } else {
3222    __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
3223  }
3224  __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes());       // mark
3225  __ store_klass(RinstanceKlass, RallocatedObject); // klass
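  // i.e., roughly: obj->set_mark(header); obj->set_klass(klass);  where the
  // header is either the klass's prototype header (biased locking) or the
  // default prototype mark word.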
3226
3227  {
3228    SkipIfEqual skip_if(
3229      _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
3230    // Trigger dtrace event
3231    __ push(atos);
3232    __ call_VM_leaf(noreg,
3233       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
3234    __ pop(atos);
3235  }
3236
3237  // continue
3238  __ bind(done);
3239}
3240
3241
3242
3243void TemplateTable::newarray() {
3244  transition(itos, atos);
3245  __ ldub(Lbcp, 1, O1);
3246     call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
3247}
3248
3249
3250void TemplateTable::anewarray() {
3251  transition(itos, atos);
3252  __ get_constant_pool(O1);
3253  __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
3254     call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
3255}
3256
3257
3258void TemplateTable::arraylength() {
3259  transition(atos, itos);
3260  Label ok;
3261  __ verify_oop(Otos_i);
3262  __ tst(Otos_i);
3263  __ throw_if_not_1_x( Assembler::notZero, ok );
3264  __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
3265  __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3266}
3267
3268
3269void TemplateTable::checkcast() {
3270  transition(atos, atos);
3271  Label done, is_null, quicked, cast_ok, resolved;
3272  Register Roffset = G1_scratch;
3273  Register RobjKlass = O5;
3274  Register RspecifiedKlass = O4;
3275
3276  // Check for casting a NULL
3277  __ br_null(Otos_i, false, Assembler::pn, is_null);
3278  __ delayed()->nop();
3279
3280  // Get value klass in RobjKlass
3281  __ load_klass(Otos_i, RobjKlass); // get value klass
3282
3283  // Get constant pool index
3284  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3285
3286  // See if the checkcast has been quickened
3287  __ get_cpool_and_tags(Lscratch, G3_scratch);
3288  __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
3289  __ ldub(G3_scratch, Roffset, G3_scratch);
3290  __ cmp(G3_scratch, JVM_CONSTANT_Class);
3291  __ br(Assembler::equal, true, Assembler::pt, quicked);
3292  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3293
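  // Conceptually, the tag test above selects between the two paths below
  // (illustrative names):
  //
  //   if (tags[index] == JVM_CONSTANT_Class)
  //     klass = cpool_entry_at(index);   // quickened: klass already resolved
  //   else
  //     klass = quicken_io_cc();         // let the VM resolve it first
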
3294  __ push_ptr(); // save receiver for result, and for GC
3295  call_VM(RspecifiedKlass, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3296  __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3297
3298  __ br(Assembler::always, false, Assembler::pt, resolved);
3299  __ delayed()->nop();
3300
3301  // Extract target class from constant pool
3302  __ bind(quicked);
3303  __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
3304  __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3305  __ bind(resolved);
3306  __ load_klass(Otos_i, RobjKlass); // get value klass
3307
3308  // Generate a fast subtype check.  Branch to cast_ok if no
3309  // failure.  Throw exception if failure.
3310  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );
3311
3312  // Not a subtype; so must throw exception
3313  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );
3314
3315  __ bind(cast_ok);
3316
3317  if (ProfileInterpreter) {
3318    __ ba(false, done);
3319    __ delayed()->nop();
3320  }
3321  __ bind(is_null);
3322  __ profile_null_seen(G3_scratch);
3323  __ bind(done);
3324}
3325
3326
3327void TemplateTable::instanceof() {
3328  Label done, is_null, quicked, resolved;
3329  transition(atos, itos);
3330  Register Roffset = G1_scratch;
3331  Register RobjKlass = O5;
3332  Register RspecifiedKlass = O4;
3333
3334  // Check for a NULL object
3335  __ br_null(Otos_i, false, Assembler::pt, is_null);
3336  __ delayed()->nop();
3337
3338  // Get value klass in RobjKlass
3339  __ load_klass(Otos_i, RobjKlass); // get value klass
3340
3341  // Get constant pool index
3342  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3343
3344  // See if the instanceof has been quickened
3345  __ get_cpool_and_tags(Lscratch, G3_scratch);
3346  __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
3347  __ ldub(G3_scratch, Roffset, G3_scratch);
3348  __ cmp(G3_scratch, JVM_CONSTANT_Class);
3349  __ br(Assembler::equal, true, Assembler::pt, quicked);
3350  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3351
3352  __ push_ptr(); // save receiver for result, and for GC
3353  call_VM(RspecifiedKlass, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3354  __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3355
3356  __ br(Assembler::always, false, Assembler::pt, resolved);
3357  __ delayed()->nop();
3358
3359
3360  // Extract target class from constant pool
3361  __ bind(quicked);
3362  __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
3363  __ get_constant_pool(Lscratch);
3364  __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3365  __ bind(resolved);
3366  __ load_klass(Otos_i, RobjKlass); // get value klass
3367
3368  // Generate a fast subtype check.  Branch to done if no
3369  // failure.  Return 0 on failure.
3370  __ or3(G0, 1, Otos_i);      // set result assuming quick tests succeed
3371  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
3372  // Not a subtype; return 0;
3373  __ clr( Otos_i );
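  // Net effect (illustrative): Otos_i = is_subtype(obj_klass, spec_klass) ? 1 : 0;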
3374
3375  if (ProfileInterpreter) {
3376    __ ba(false, done);
3377    __ delayed()->nop();
3378  }
3379  __ bind(is_null);
3380  __ profile_null_seen(G3_scratch);
3381  __ bind(done);
3382}
3383
3384void TemplateTable::_breakpoint() {
3385
3386   // Note: We get here even if we are single stepping;
3387   // jbug insists on setting breakpoints at every bytecode
3388   // even if we are in single-step mode.
3389
3390   transition(vtos, vtos);
3391   // get the unpatched byte code
3392   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
3393   __ mov(O0, Lbyte_code);
3394
3395   // post the breakpoint event
3396   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);
3397
3398   // complete the execution of original bytecode
3399   __ dispatch_normal(vtos);
3400}
3401
3402
3403//----------------------------------------------------------------------------------------------------
3404// Exceptions
3405
3406void TemplateTable::athrow() {
3407  transition(atos, vtos);
3408
3409  // This works because the exception is cached in Otos_i, which is the same
3410  // as O0, which in turn is what throw_exception_entry expects.
3411  assert(Otos_i == Oexception, "see explanation above");
3412
3413  __ verify_oop(Otos_i);
3414  __ null_check(Otos_i);
3415  __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
3416}
3417
3418
3419//----------------------------------------------------------------------------------------------------
3420// Synchronization
3421
3422
3423// See frame_sparc.hpp for monitor block layout.
3424// Monitor elements are dynamically allocated by growing stack as needed.
3425
3426void TemplateTable::monitorenter() {
3427  transition(atos, vtos);
3428  __ verify_oop(Otos_i);
3429  // Try to acquire a lock on the object
3430  // Repeat until we succeed (i.e., until
3431  // monitorenter returns true).
3432
3433  {   Label ok;
3434    __ tst(Otos_i);
3435    __ throw_if_not_1_x( Assembler::notZero,  ok);
3436    __ delayed()->mov(Otos_i, Lscratch); // save obj
3437    __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3438  }
3439
3440  assert(O0 == Otos_i, "Be sure where the object to lock is");
3441
3442  // find a free slot in the monitor block
3443
3444
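  // The scan generated below is, in C-like pseudocode (illustrative names):
  //
  //   BasicObjectLock* free_slot = NULL;
  //   for (cur = Lmonitors; cur <= top_most_monitor; cur = next_entry(cur)) {
  //     if (cur->obj() == NULL) free_slot = cur;  // remember an unused slot
  //     if (cur->obj() == obj)  break;            // entry for this object found
  //   }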
3445  // initialize entry pointer
3446  __ clr(O1); // points to free slot or NULL
3447
3448  {
3449    Label entry, loop, exit;
3450    __ add( __ top_most_monitor(), O2 ); // last one to check
3451    __ ba( false, entry );
3452    __ delayed()->mov( Lmonitors, O3 ); // first one to check
3453
3454
3455    __ bind( loop );
3456
3457    __ verify_oop(O4);          // verify each monitor's oop
3458    __ tst(O4); // is this entry unused?
3459    if (VM_Version::v9_instructions_work())
3460      __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
3461    else {
3462      Label L;
3463      __ br( Assembler::zero, true, Assembler::pn, L );
3464      __ delayed()->mov(O3, O1); // remember this slot if it is unused
3465      __ bind(L);
3466    }
3467
3468    __ cmp(O4, O0); // check if current entry is for same object
3469    __ brx( Assembler::equal, false, Assembler::pn, exit );
3470    __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one
3471
3472    __ bind( entry );
3473
3474    __ cmp( O3, O2 );
3475    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3476    __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);
3477
3478    __ bind( exit );
3479  }
3480
3481  { Label allocated;
3482
3483    // found free slot?
3484    __ br_notnull(O1, false, Assembler::pn, allocated);
3485    __ delayed()->nop();
3486
3487    __ add_monitor_to_stack( false, O2, O3 );
3488    __ mov(Lmonitors, O1);
3489
3490    __ bind(allocated);
3491  }
3492
3493  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
3494  // The object has already been popped from the stack, so the expression stack looks correct.
3495  __ inc(Lbcp);
3496
3497  __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
3498  __ lock_object(O1, O0);
3499
3500  // check if there's enough space on the stack for the monitors after locking
3501  __ generate_stack_overflow_check(0);
3502
3503  // The bcp has already been incremented. Just need to dispatch to next instruction.
3504  __ dispatch_next(vtos);
3505}
3506
3507
3508void TemplateTable::monitorexit() {
3509  transition(atos, vtos);
3510  __ verify_oop(Otos_i);
3511  __ tst(Otos_i);
3512  __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );
3513
3514  assert(O0 == Otos_i, "just checking");
3515
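  // Same kind of linear scan as in monitorenter(), but here we look only for
  // the entry whose obj field matches; roughly (illustrative):
  //
  //   for (cur = Lmonitors; cur <= top_most_monitor; cur = next_entry(cur))
  //     if (cur->obj() == obj) { unlock_object(cur); return; }
  //   throw_illegal_monitor_state_exception();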
3516  { Label entry, loop, found;
3517    __ add( __ top_most_monitor(), O2 ); // last one to check
3518    __ ba(false, entry );
3519    // Use Lscratch to hold the monitor element to check; start with the most recent monitor entry.
3520    // By using a local, it survives the call to the C routine.
3521    __ delayed()->mov( Lmonitors, Lscratch );
3522
3523    __ bind( loop );
3524
3525    __ verify_oop(O4);          // verify each monitor's oop
3526    __ cmp(O4, O0); // check if current entry is for desired object
3527    __ brx( Assembler::equal, true, Assembler::pt, found );
3528    __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit
3529
3530    __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next
3531
3532    __ bind( entry );
3533
3534    __ cmp( Lscratch, O2 );
3535    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3536    __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);
3537
3538    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3539    __ should_not_reach_here();
3540
3541    __ bind(found);
3542  }
3543  __ unlock_object(O1);
3544}
3545
3546
3547//----------------------------------------------------------------------------------------------------
3548// Wide instructions
3549
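// The code generated here boils down to an indirect jump through the
// wide-variant dispatch table, i.e. roughly (illustrative):
//   goto Interpreter::_wentry_point[*(Lbcp + 1)];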
3550void TemplateTable::wide() {
3551  transition(vtos, vtos);
3552  __ ldub(Lbcp, 1, G3_scratch);  // get next bc
3553  __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
3554  Address ep(G4_scratch, (address)Interpreter::_wentry_point);
3555  __ load_address(ep);
3556  __ ld_ptr(ep.base(), G3_scratch, G3_scratch);
3557  __ jmp(G3_scratch, G0);
3558  __ delayed()->nop();
3559  // Note: the Lbcp increment step is part of the individual wide bytecode implementations
3560}
3561
3562
3563//----------------------------------------------------------------------------------------------------
3564// Multi arrays
3565
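// The 'ndims' dimension words sit on the expression stack, with Lesp pointing
// just past the last one; roughly (illustrative):
//   int   ndims     = bcp[3];
//   jint* first_dim = Lesp + ndims;          // Lesp points past last_dim
//   Otos_i = InterpreterRuntime::multianewarray(first_dim);
//   Lesp  += ndims;                          // pop the dimensions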
3566void TemplateTable::multianewarray() {
3567  transition(vtos, atos);
3568     // put ndims * stack-element-size into Lscratch
3569  __ ldub( Lbcp,     3,               Lscratch);
3570  __ sll(  Lscratch, Interpreter::logStackElementSize(), Lscratch);
3571     // Lesp points past last_dim, so set O1 to the first_dim address
3572  __ add(  Lesp,     Lscratch,        O1);
3573     call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
3574  __ add(  Lesp,     Lscratch,        Lesp); // pop all dimensions off the stack
3575}
3576#endif /* !CC_INTERP */
3577