1/*
2 * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "gc/shared/barrierSet.inline.hpp"
27#include "gc/shared/cardTableModRefBS.inline.hpp"
28#include "gc/shared/collectedHeap.hpp"
29#include "interp_masm_arm.hpp"
30#include "interpreter/interpreter.hpp"
31#include "interpreter/interpreterRuntime.hpp"
32#include "logging/log.hpp"
33#include "oops/arrayOop.hpp"
34#include "oops/markOop.hpp"
35#include "oops/method.hpp"
36#include "oops/methodData.hpp"
37#include "prims/jvmtiExport.hpp"
38#include "prims/jvmtiThreadState.hpp"
39#include "runtime/basicLock.hpp"
40#include "runtime/biasedLocking.hpp"
41#include "runtime/sharedRuntime.hpp"
42
43#if INCLUDE_ALL_GCS
44#include "gc/g1/g1CollectedHeap.inline.hpp"
45#include "gc/g1/g1SATBCardTableModRefBS.hpp"
46#include "gc/g1/heapRegion.hpp"
47#endif // INCLUDE_ALL_GCS
48
49//--------------------------------------------------------------------
50// Implementation of InterpreterMacroAssembler
51
52
53
54
55InterpreterMacroAssembler::InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {
56}
57
58void InterpreterMacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
59#if defined(ASSERT) && !defined(AARCH64)
60  // Ensure that last_sp is not filled.
61  { Label L;
62    ldr(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
63    cbz(Rtemp, L);
64    stop("InterpreterMacroAssembler::call_VM_helper: last_sp != NULL");
65    bind(L);
66  }
67#endif // ASSERT && !AARCH64
68
69  // Rbcp must be saved/restored since it may change due to GC.
70  save_bcp();
71
72#ifdef AARCH64
73  check_no_cached_stack_top(Rtemp);
74  save_stack_top();
75  check_extended_sp(Rtemp);
76  cut_sp_before_call();
77#endif // AARCH64
78
79  // super call
80  MacroAssembler::call_VM_helper(oop_result, entry_point, number_of_arguments, check_exceptions);
81
82#ifdef AARCH64
83  // Restore SP to extended SP
84  restore_sp_after_call(Rtemp);
85  check_stack_top();
86  clear_cached_stack_top();
87#endif // AARCH64
88
89  // Restore interpreter specific registers.
90  restore_bcp();
91  restore_method();
92}
93
94void InterpreterMacroAssembler::jump_to_entry(address entry) {
95  assert(entry, "Entry must have been generated by now");
96  b(entry);
97}
98
99void InterpreterMacroAssembler::check_and_handle_popframe() {
100  if (can_pop_frame()) {
101    Label L;
102    const Register popframe_cond = R2_tmp;
103
104    // Initiate popframe handling only if it is not already being processed.  If the flag
105    // has the popframe_processing bit set, it means that this code is called *during* popframe
106    // handling - we don't want to reenter.
107
108    ldr_s32(popframe_cond, Address(Rthread, JavaThread::popframe_condition_offset()));
109    tbz(popframe_cond, exact_log2(JavaThread::popframe_pending_bit), L);
110    tbnz(popframe_cond, exact_log2(JavaThread::popframe_processing_bit), L);
111
112    // Call Interpreter::remove_activation_preserving_args_entry() to get the
113    // address of the same-named entrypoint in the generated interpreter code.
114    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
115
116    // Call indirectly to avoid generation ordering problem.
117    jump(R0);
118
119    bind(L);
120  }
121}
122
123
124// Blows R2, Rtemp. Sets TOS cached value.
125void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
126  const Register thread_state = R2_tmp;
127
128  ldr(thread_state, Address(Rthread, JavaThread::jvmti_thread_state_offset()));
129
130  const Address tos_addr(thread_state, JvmtiThreadState::earlyret_tos_offset());
131  const Address oop_addr(thread_state, JvmtiThreadState::earlyret_oop_offset());
132  const Address val_addr(thread_state, JvmtiThreadState::earlyret_value_offset());
133#ifndef AARCH64
134  const Address val_addr_hi(thread_state, JvmtiThreadState::earlyret_value_offset()
135                             + in_ByteSize(wordSize));
136#endif // !AARCH64
137
138  Register zero = zero_register(Rtemp);
139
140  switch (state) {
141    case atos: ldr(R0_tos, oop_addr);
142               str(zero, oop_addr);
143               interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
144               break;
145
146#ifdef AARCH64
147    case ltos: ldr(R0_tos, val_addr);              break;
148#else
149    case ltos: ldr(R1_tos_hi, val_addr_hi);        // fall through
150#endif // AARCH64
151    case btos:                                     // fall through
152    case ztos:                                     // fall through
153    case ctos:                                     // fall through
154    case stos:                                     // fall through
155    case itos: ldr_s32(R0_tos, val_addr);          break;
156#ifdef __SOFTFP__
157    case dtos: ldr(R1_tos_hi, val_addr_hi);        // fall through
158    case ftos: ldr(R0_tos, val_addr);              break;
159#else
160    case ftos: ldr_float (S0_tos, val_addr);       break;
161    case dtos: ldr_double(D0_tos, val_addr);       break;
162#endif // __SOFTFP__
163    case vtos: /* nothing to do */                 break;
164    default  : ShouldNotReachHere();
165  }
166  // Clean up tos value in the thread object
167  str(zero, val_addr);
168#ifndef AARCH64
169  str(zero, val_addr_hi);
170#endif // !AARCH64
171
172  mov(Rtemp, (int) ilgl);
173  str_32(Rtemp, tos_addr);
174}
175
176
177// Blows R2, Rtemp.
178void InterpreterMacroAssembler::check_and_handle_earlyret() {
179  if (can_force_early_return()) {
180    Label L;
181    const Register thread_state = R2_tmp;
182
183    ldr(thread_state, Address(Rthread, JavaThread::jvmti_thread_state_offset()));
184    cbz(thread_state, L); // if (thread->jvmti_thread_state() == NULL) exit;
185
186    // Initiate earlyret handling only if it is not already being processed.
187    // If the flag has the earlyret_processing bit set, it means that this code
188    // is called *during* earlyret handling - we don't want to reenter.
189
190    ldr_s32(Rtemp, Address(thread_state, JvmtiThreadState::earlyret_state_offset()));
191    cmp(Rtemp, JvmtiThreadState::earlyret_pending);
192    b(L, ne);
193
194    // Call Interpreter::remove_activation_early_entry() to get the address of the
195    // same-named entrypoint in the generated interpreter code.
196
197    ldr_s32(R0, Address(thread_state, JvmtiThreadState::earlyret_tos_offset()));
198    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), R0);
199
200    jump(R0);
201
202    bind(L);
203  }
204}
205
206
207// Sets reg. Blows Rtemp.
208void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
209  assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
210  assert(reg != Rtemp, "should be different registers");
211
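  // Raw bytecode operands are big-endian: the byte at bcp_offset is the high
  // byte of the u2 index assembled below.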
212  ldrb(Rtemp, Address(Rbcp, bcp_offset));
213  ldrb(reg, Address(Rbcp, bcp_offset+1));
214  orr(reg, reg, AsmOperand(Rtemp, lsl, BitsPerByte));
215}
216
217void InterpreterMacroAssembler::get_index_at_bcp(Register index, int bcp_offset, Register tmp_reg, size_t index_size) {
218  assert_different_registers(index, tmp_reg);
219  if (index_size == sizeof(u2)) {
220    // load bytes of index separately to avoid unaligned access
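    // Note: unlike get_unsigned_2_byte_index_at_bcp(), this reads a rewritten index
    // stored in native byte order, so the byte at bcp_offset ends up as the low byte.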
221    ldrb(index, Address(Rbcp, bcp_offset+1));
222    ldrb(tmp_reg, Address(Rbcp, bcp_offset));
223    orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
224  } else if (index_size == sizeof(u4)) {
225    // TODO-AARCH64: consider using unaligned access here
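    // Assemble the 4-byte native-order index one byte at a time; the byte at
    // bcp_offset ends up as the least significant byte of the result.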
226    ldrb(index, Address(Rbcp, bcp_offset+3));
227    ldrb(tmp_reg, Address(Rbcp, bcp_offset+2));
228    orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
229    ldrb(tmp_reg, Address(Rbcp, bcp_offset+1));
230    orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
231    ldrb(tmp_reg, Address(Rbcp, bcp_offset));
232    orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
233    // Check if the secondary index definition is still ~x, otherwise
234    // we have to change the following assembler code to calculate the
235    // plain index.
236    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
237    mvn_32(index, index);  // convert to plain index
238  } else if (index_size == sizeof(u1)) {
239    ldrb(index, Address(Rbcp, bcp_offset));
240  } else {
241    ShouldNotReachHere();
242  }
243}
244
245// Sets cache, index.
246void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size) {
247  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
248  assert_different_registers(cache, index);
249
250  get_index_at_bcp(index, bcp_offset, cache, index_size);
251
252  // load constant pool cache pointer
253  ldr(cache, Address(FP, frame::interpreter_frame_cache_offset * wordSize));
254
255  // convert from field index to ConstantPoolCacheEntry index
256  assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
257  // TODO-AARCH64 merge this shift with shift "add(..., Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord))" after this method is called
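  // The scaling by 4 here, combined with the callers' later scaling by wordSize,
  // addresses ConstantPoolCacheEntry-sized (4*wordSize) slots.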
258  logical_shift_left(index, index, 2);
259}
260
261// Sets cache, index, bytecode.
262void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size) {
263  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
264  // Caution: index and bytecode can be the same register.
265  add(bytecode, cache, AsmOperand(index, lsl, LogBytesPerWord));
266#ifdef AARCH64
267  add(bytecode, bytecode, (1 + byte_no) + in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
268  ldarb(bytecode, bytecode);
269#else
270  ldrb(bytecode, Address(bytecode, (1 + byte_no) + in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset())));
271  TemplateTable::volatile_barrier(MacroAssembler::LoadLoad, noreg, true);
272#endif // AARCH64
273}
274
275// Sets cache. Blows reg_tmp.
276void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register reg_tmp, int bcp_offset, size_t index_size) {
277  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
278  assert_different_registers(cache, reg_tmp);
279
280  get_index_at_bcp(reg_tmp, bcp_offset, cache, index_size);
281
282  // load constant pool cache pointer
283  ldr(cache, Address(FP, frame::interpreter_frame_cache_offset * wordSize));
284
285  // skip past the header
286  add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
287  // convert from field index to ConstantPoolCacheEntry index
288  // and from word offset to byte offset
289  assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
290  add(cache, cache, AsmOperand(reg_tmp, lsl, 2 + LogBytesPerWord));
291}
292
293// Load object from cpool->resolved_references(index)
294void InterpreterMacroAssembler::load_resolved_reference_at_index(
295                                           Register result, Register index) {
296  assert_different_registers(result, index);
297  get_constant_pool(result);
298
299  Register cache = result;
300  // load pointer for resolved_references[] objArray
301  ldr(cache, Address(result, ConstantPool::cache_offset_in_bytes()));
302  ldr(cache, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
303  resolve_oop_handle(cache);
304  // Add in the index
305  // convert from field index to resolved_references() index and from
306  // word index to byte offset. Since this is a Java object, it can be compressed.
307  add(cache, cache, AsmOperand(index, lsl, LogBytesPerHeapOop));
308  load_heap_oop(result, Address(cache, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
309}
310
311void InterpreterMacroAssembler::load_resolved_klass_at_offset(
312                                           Register Rcpool, Register Rindex, Register Rklass) {
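  // A resolved klass entry in the constant pool keeps the resolved_klass_index in its
  // low 16 bits (the name index sits in the upper half), so the halfword load below
  // (little-endian) yields the index into cpool->_resolved_klasses.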
313  add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
314  ldrh(Rtemp, Address(Rtemp, sizeof(ConstantPool))); // Rtemp = resolved_klass_index
315  ldr(Rklass, Address(Rcpool,  ConstantPool::resolved_klasses_offset_in_bytes())); // Rklass = cpool->_resolved_klasses
316  add(Rklass, Rklass, AsmOperand(Rtemp, lsl, LogBytesPerWord));
317  ldr(Rklass, Address(Rklass, Array<Klass*>::base_offset_in_bytes()));
318}
319
320// Generate a subtype check: branch to not_subtype if sub_klass is
321// not a subtype of super_klass.
322// Profiling code for the subtype check failure (profile_typecheck_failed)
323// should be explicitly generated by the caller in the not_subtype case.
324// Blows Rtemp, tmp1, tmp2.
325void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
326                                                  Register Rsuper_klass,
327                                                  Label &not_subtype,
328                                                  Register tmp1,
329                                                  Register tmp2) {
330
331  assert_different_registers(Rsub_klass, Rsuper_klass, tmp1, tmp2, Rtemp);
332  Label ok_is_subtype, loop, update_cache;
333
334  const Register super_check_offset = tmp1;
335  const Register cached_super = tmp2;
336
337  // Profile the not-null value's klass.
338  profile_typecheck(tmp1, Rsub_klass);
339
340  // Load the super-klass's check offset into super_check_offset
341  ldr_u32(super_check_offset, Address(Rsuper_klass, Klass::super_check_offset_offset()));
342
343  // Check for self
344  cmp(Rsub_klass, Rsuper_klass);
345
346  // Load from the sub-klass's super-class display list, or a 1-word cache of
347  // the secondary superclass list, or a failing value with a sentinel offset
348  // if the super-klass is an interface or exceptionally deep in the Java
349  // hierarchy and we have to scan the secondary superclass list the hard way.
350  // See if we get an immediate positive hit
351  ldr(cached_super, Address(Rsub_klass, super_check_offset));
352
353  cond_cmp(Rsuper_klass, cached_super, ne);
354  b(ok_is_subtype, eq);
355
356  // Check for immediate negative hit
357  cmp(super_check_offset, in_bytes(Klass::secondary_super_cache_offset()));
358  b(not_subtype, ne);
359
360  // Now do a linear scan of the secondary super-klass chain.
361  const Register supers_arr = tmp1;
362  const Register supers_cnt = tmp2;
363  const Register cur_super  = Rtemp;
364
365  // Load objArrayOop of secondary supers.
366  ldr(supers_arr, Address(Rsub_klass, Klass::secondary_supers_offset()));
367
368  ldr_u32(supers_cnt, Address(supers_arr, Array<Klass*>::length_offset_in_bytes())); // Load the array length
369#ifdef AARCH64
370  cbz(supers_cnt, not_subtype);
371  add(supers_arr, supers_arr, Array<Klass*>::base_offset_in_bytes());
372#else
373  cmp(supers_cnt, 0);
374
375  // Skip to the start of array elements and prefetch the first super-klass.
376  ldr(cur_super, Address(supers_arr, Array<Klass*>::base_offset_in_bytes(), pre_indexed), ne);
377  b(not_subtype, eq);
378#endif // AARCH64
379
380  bind(loop);
381
382#ifdef AARCH64
383  ldr(cur_super, Address(supers_arr, wordSize, post_indexed));
384#endif // AARCH64
385
386  cmp(cur_super, Rsuper_klass);
387  b(update_cache, eq);
388
389  subs(supers_cnt, supers_cnt, 1);
390
391#ifndef AARCH64
392  ldr(cur_super, Address(supers_arr, wordSize, pre_indexed), ne);
393#endif // !AARCH64
394
395  b(loop, ne);
396
397  b(not_subtype);
398
399  bind(update_cache);
400  // Must be equal but missed in cache.  Update cache.
401  str(Rsuper_klass, Address(Rsub_klass, Klass::secondary_super_cache_offset()));
402
403  bind(ok_is_subtype);
404}
405
406
407// The 1st part of the store check.
408// Sets card_table_base register.
409void InterpreterMacroAssembler::store_check_part1(Register card_table_base) {
410  // Check barrier set type (should be card table) and element size
411  BarrierSet* bs = Universe::heap()->barrier_set();
412  assert(bs->kind() == BarrierSet::CardTableForRS ||
413         bs->kind() == BarrierSet::CardTableExtension,
414         "Wrong barrier set kind");
415
416  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
417  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "Adjust store check code");
418
419  // Load card table base address.
420
421  /* Performance note.
422
423     There is an alternative way of loading card table base address
424     from thread descriptor, which may look more efficient:
425
426     ldr(card_table_base, Address(Rthread, JavaThread::card_table_base_offset()));
427
428     However, performance measurements of microbenchmarks and SPECjvm98
429     showed that loading the card table base from the thread descriptor is
430     7-18% slower than loading a literal embedded in the code.
431     Possible cause is a cache miss (card table base address resides in a
432     rarely accessed area of thread descriptor).
433  */
434  // TODO-AARCH64 Investigate if mov_slow is faster than ldr from Rthread on AArch64
435  mov_address(card_table_base, (address)ct->byte_map_base, symbolic_Relocation::card_table_reference);
436}
437
438// The 2nd part of the store check.
439void InterpreterMacroAssembler::store_check_part2(Register obj, Register card_table_base, Register tmp) {
440  assert_different_registers(obj, card_table_base, tmp);
441
442  assert(CardTableModRefBS::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
443#ifdef AARCH64
444  add(card_table_base, card_table_base, AsmOperand(obj, lsr, CardTableModRefBS::card_shift));
445  Address card_table_addr(card_table_base);
446#else
447  Address card_table_addr(card_table_base, obj, lsr, CardTableModRefBS::card_shift);
448#endif
449
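  // card_table_addr now addresses byte_map_base[obj >> card_shift], i.e. the card
  // covering obj (with the default card table, card_shift is 9, i.e. 512-byte cards);
  // set_card() marks it dirty by storing dirty_card_val() == 0 there.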
450  if (UseCondCardMark) {
451    if (UseConcMarkSweepGC) {
452      membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), noreg);
453    }
454    Label already_dirty;
455
456    ldrb(tmp, card_table_addr);
457    cbz(tmp, already_dirty);
458
459    set_card(card_table_base, card_table_addr, tmp);
460    bind(already_dirty);
461
462  } else {
463    if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
464      membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore), noreg);
465    }
466    set_card(card_table_base, card_table_addr, tmp);
467  }
468}
469
470void InterpreterMacroAssembler::set_card(Register card_table_base, Address card_table_addr, Register tmp) {
471#ifdef AARCH64
472  strb(ZR, card_table_addr);
473#else
474  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
475  if ((((uintptr_t)ct->byte_map_base & 0xff) == 0)) {
476    // Card table is aligned so the lowest byte of the table address base is zero.
477    // This works only if the code is not saved for later use, possibly
478    // in a context where the base would no longer be aligned.
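    // strb stores only the low byte of card_table_base, which is zero here, so this
    // writes dirty_card_val() (0) without needing a separate zero register.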
479    strb(card_table_base, card_table_addr);
480  } else {
481    mov(tmp, 0);
482    strb(tmp, card_table_addr);
483  }
484#endif // AARCH64
485}
486
487//////////////////////////////////////////////////////////////////////////////////
488
489
490// Java Expression Stack
491
492void InterpreterMacroAssembler::pop_ptr(Register r) {
493  assert(r != Rstack_top, "unpredictable instruction");
494  ldr(r, Address(Rstack_top, wordSize, post_indexed));
495}
496
497void InterpreterMacroAssembler::pop_i(Register r) {
498  assert(r != Rstack_top, "unpredictable instruction");
499  ldr_s32(r, Address(Rstack_top, wordSize, post_indexed));
500  zap_high_non_significant_bits(r);
501}
502
503#ifdef AARCH64
504void InterpreterMacroAssembler::pop_l(Register r) {
505  assert(r != Rstack_top, "unpredictable instruction");
506  ldr(r, Address(Rstack_top, 2*wordSize, post_indexed));
507}
508#else
509void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
510  assert_different_registers(lo, hi);
511  assert(lo < hi, "lo must be < hi");
512  pop(RegisterSet(lo) | RegisterSet(hi));
513}
514#endif // AARCH64
515
516void InterpreterMacroAssembler::pop_f(FloatRegister fd) {
517#ifdef AARCH64
518  ldr_s(fd, Address(Rstack_top, wordSize, post_indexed));
519#else
520  fpops(fd);
521#endif // AARCH64
522}
523
524void InterpreterMacroAssembler::pop_d(FloatRegister fd) {
525#ifdef AARCH64
526  ldr_d(fd, Address(Rstack_top, 2*wordSize, post_indexed));
527#else
528  fpopd(fd);
529#endif // AARCH64
530}
531
532
533// Transition vtos -> state. Blows R0, R1. Sets TOS cached value.
534void InterpreterMacroAssembler::pop(TosState state) {
535  switch (state) {
536    case atos: pop_ptr(R0_tos);                              break;
537    case btos:                                               // fall through
538    case ztos:                                               // fall through
539    case ctos:                                               // fall through
540    case stos:                                               // fall through
541    case itos: pop_i(R0_tos);                                break;
542#ifdef AARCH64
543    case ltos: pop_l(R0_tos);                                break;
544#else
545    case ltos: pop_l(R0_tos_lo, R1_tos_hi);                  break;
546#endif // AARCH64
547#ifdef __SOFTFP__
548    case ftos: pop_i(R0_tos);                                break;
549    case dtos: pop_l(R0_tos_lo, R1_tos_hi);                  break;
550#else
551    case ftos: pop_f(S0_tos);                                break;
552    case dtos: pop_d(D0_tos);                                break;
553#endif // __SOFTFP__
554    case vtos: /* nothing to do */                           break;
555    default  : ShouldNotReachHere();
556  }
557  interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
558}
559
560void InterpreterMacroAssembler::push_ptr(Register r) {
561  assert(r != Rstack_top, "unpredictable instruction");
562  str(r, Address(Rstack_top, -wordSize, pre_indexed));
563  check_stack_top_on_expansion();
564}
565
566void InterpreterMacroAssembler::push_i(Register r) {
567  assert(r != Rstack_top, "unpredictable instruction");
568  str_32(r, Address(Rstack_top, -wordSize, pre_indexed));
569  check_stack_top_on_expansion();
570}
571
572#ifdef AARCH64
573void InterpreterMacroAssembler::push_l(Register r) {
574  assert(r != Rstack_top, "unpredictable instruction");
575  stp(r, ZR, Address(Rstack_top, -2*wordSize, pre_indexed));
576  check_stack_top_on_expansion();
577}
578#else
579void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
580  assert_different_registers(lo, hi);
581  assert(lo < hi, "lo must be < hi");
582  push(RegisterSet(lo) | RegisterSet(hi));
583}
584#endif // AARCH64
585
586void InterpreterMacroAssembler::push_f() {
587#ifdef AARCH64
588  str_s(S0_tos, Address(Rstack_top, -wordSize, pre_indexed));
589  check_stack_top_on_expansion();
590#else
591  fpushs(S0_tos);
592#endif // AARCH64
593}
594
595void InterpreterMacroAssembler::push_d() {
596#ifdef AARCH64
597  str_d(D0_tos, Address(Rstack_top, -2*wordSize, pre_indexed));
598  check_stack_top_on_expansion();
599#else
600  fpushd(D0_tos);
601#endif // AARCH64
602}
603
604// Transition state -> vtos. Blows Rtemp.
605void InterpreterMacroAssembler::push(TosState state) {
606  interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
607  switch (state) {
608    case atos: push_ptr(R0_tos);                              break;
609    case btos:                                                // fall through
610    case ztos:                                                // fall through
611    case ctos:                                                // fall through
612    case stos:                                                // fall through
613    case itos: push_i(R0_tos);                                break;
614#ifdef AARCH64
615    case ltos: push_l(R0_tos);                                break;
616#else
617    case ltos: push_l(R0_tos_lo, R1_tos_hi);                  break;
618#endif // AARCH64
619#ifdef __SOFTFP__
620    case ftos: push_i(R0_tos);                                break;
621    case dtos: push_l(R0_tos_lo, R1_tos_hi);                  break;
622#else
623    case ftos: push_f();                                      break;
624    case dtos: push_d();                                      break;
625#endif // __SOFTFP__
626    case vtos: /* nothing to do */                            break;
627    default  : ShouldNotReachHere();
628  }
629}
630
631
632#ifndef AARCH64
633
634// Converts return value in R0/R1 (interpreter calling conventions) to TOS cached value.
635void InterpreterMacroAssembler::convert_retval_to_tos(TosState state) {
636#if (!defined __SOFTFP__ && !defined __ABI_HARD__)
637  // According to interpreter calling conventions, result is returned in R0/R1,
638  // but templates expect ftos in S0, and dtos in D0.
639  if (state == ftos) {
640    fmsr(S0_tos, R0);
641  } else if (state == dtos) {
642    fmdrr(D0_tos, R0, R1);
643  }
644#endif // !__SOFTFP__ && !__ABI_HARD__
645}
646
647// Converts TOS cached value to return value in R0/R1 (according to interpreter calling conventions).
648void InterpreterMacroAssembler::convert_tos_to_retval(TosState state) {
649#if (!defined __SOFTFP__ && !defined __ABI_HARD__)
650  // According to interpreter calling conventions, result is returned in R0/R1,
651  // so ftos (S0) and dtos (D0) are moved to R0/R1.
652  if (state == ftos) {
653    fmrs(R0, S0_tos);
654  } else if (state == dtos) {
655    fmrrd(R0, R1, D0_tos);
656  }
657#endif // !__SOFTFP__ && !__ABI_HARD__
658}
659
660#endif // !AARCH64
661
662
663// Helpers for swap and dup
664void InterpreterMacroAssembler::load_ptr(int n, Register val) {
665  ldr(val, Address(Rstack_top, Interpreter::expr_offset_in_bytes(n)));
666}
667
668void InterpreterMacroAssembler::store_ptr(int n, Register val) {
669  str(val, Address(Rstack_top, Interpreter::expr_offset_in_bytes(n)));
670}
671
672
673void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
674#ifdef AARCH64
675  check_no_cached_stack_top(Rtemp);
676  save_stack_top();
677  cut_sp_before_call();
678  mov(Rparams, Rstack_top);
679#endif // AARCH64
680
681  // set sender sp
682  mov(Rsender_sp, SP);
683
684#ifndef AARCH64
685  // record last_sp
686  str(Rsender_sp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
687#endif // !AARCH64
688}
689
690// Jump to the from_interpreted entry of a call unless single stepping is possible
691// in this thread, in which case we must call the i2i entry.
692void InterpreterMacroAssembler::jump_from_interpreted(Register method) {
693  assert_different_registers(method, Rtemp);
694
695  prepare_to_jump_from_interpreted();
696
697  if (can_post_interpreter_events()) {
698    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
699    // compiled code in threads for which the event is enabled.  Check here for
700    // interp_only_mode if these events CAN be enabled.
701
702    ldr_s32(Rtemp, Address(Rthread, JavaThread::interp_only_mode_offset()));
703#ifdef AARCH64
704    {
705      Label not_interp_only_mode;
706
707      cbz(Rtemp, not_interp_only_mode);
708      indirect_jump(Address(method, Method::interpreter_entry_offset()), Rtemp);
709
710      bind(not_interp_only_mode);
711    }
712#else
713    cmp(Rtemp, 0);
714    ldr(PC, Address(method, Method::interpreter_entry_offset()), ne);
715#endif // AARCH64
716  }
717
718  indirect_jump(Address(method, Method::from_interpreted_offset()), Rtemp);
719}
720
721
722void InterpreterMacroAssembler::restore_dispatch() {
723  mov_slow(RdispatchTable, (address)Interpreter::dispatch_table(vtos));
724}
725
726
727// The following two routines provide a hook so that an implementation
728// can schedule the dispatch in two parts.
729void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
730  // Nothing ARM-specific to be done here.
731}
732
733void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
734  dispatch_next(state, step);
735}
736
737void InterpreterMacroAssembler::dispatch_base(TosState state,
738                                              DispatchTableMode table_mode,
739                                              bool verifyoop) {
740  if (VerifyActivationFrameSize) {
741    Label L;
742#ifdef AARCH64
743    mov(Rtemp, SP);
744    sub(Rtemp, FP, Rtemp);
745#else
746    sub(Rtemp, FP, SP);
747#endif // AARCH64
748    int min_frame_size = (frame::link_offset - frame::interpreter_frame_initial_sp_offset) * wordSize;
749    cmp(Rtemp, min_frame_size);
750    b(L, ge);
751    stop("broken stack frame");
752    bind(L);
753  }
754
755  if (verifyoop) {
756    interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
757  }
758
759  if((state == itos) || (state == btos) || (state == ztos) || (state == ctos) || (state == stos)) {
760    zap_high_non_significant_bits(R0_tos);
761  }
762
763#ifdef ASSERT
764  Label L;
765  mov_slow(Rtemp, (address)Interpreter::dispatch_table(vtos));
766  cmp(Rtemp, RdispatchTable);
767  b(L, eq);
768  stop("invalid RdispatchTable");
769  bind(L);
770#endif
771
772  if (table_mode == DispatchDefault) {
773    if (state == vtos) {
774      indirect_jump(Address::indexed_ptr(RdispatchTable, R3_bytecode), Rtemp);
775    } else {
776#ifdef AARCH64
777      sub(Rtemp, R3_bytecode, (Interpreter::distance_from_dispatch_table(vtos) -
778                           Interpreter::distance_from_dispatch_table(state)));
779      indirect_jump(Address::indexed_ptr(RdispatchTable, Rtemp), Rtemp);
780#else
781      // on 32-bit ARM this method is faster than the one above.
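      // The per-state dispatch tables are laid out contiguously (see
      // Interpreter::distance_from_dispatch_table), so the table for 'state' can be
      // derived from the cached vtos table pointer by a constant offset instead of
      // materializing a new table address.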
782      sub(Rtemp, RdispatchTable, (Interpreter::distance_from_dispatch_table(vtos) -
783                           Interpreter::distance_from_dispatch_table(state)) * wordSize);
784      indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
785#endif
786    }
787  } else {
788    assert(table_mode == DispatchNormal, "invalid dispatch table mode");
789    address table = (address) Interpreter::normal_table(state);
790    mov_slow(Rtemp, table);
791    indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
792  }
793
794  nop(); // to avoid filling CPU pipeline with invalid instructions
795  nop();
796}
797
798void InterpreterMacroAssembler::dispatch_only(TosState state) {
799  dispatch_base(state, DispatchDefault);
800}
801
802
803void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
804  dispatch_base(state, DispatchNormal);
805}
806
807void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
808  dispatch_base(state, DispatchNormal, false);
809}
810
811void InterpreterMacroAssembler::dispatch_next(TosState state, int step) {
812  // load next bytecode and advance Rbcp
813  ldrb(R3_bytecode, Address(Rbcp, step, pre_indexed));
814  dispatch_base(state, DispatchDefault);
815}
816
817void InterpreterMacroAssembler::narrow(Register result) {
818  // mask integer result to narrower return type.
819  const Register Rtmp = R2;
820
821  // get method type
822  ldr(Rtmp, Address(Rmethod, Method::const_offset()));
823  ldrb(Rtmp, Address(Rtmp, ConstMethod::result_type_offset()));
824
825  Label notBool, notByte, notChar, done;
826  cmp(Rtmp, T_INT);
827  b(done, eq);
828
829  cmp(Rtmp, T_BOOLEAN);
830  b(notBool, ne);
831  and_32(result, result, 1);
832  b(done);
833
834  bind(notBool);
835  cmp(Rtmp, T_BYTE);
836  b(notByte, ne);
837  sign_extend(result, result, 8);
838  b(done);
839
840  bind(notByte);
841  cmp(Rtmp, T_CHAR);
842  b(notChar, ne);
843  zero_extend(result, result, 16);
844  b(done);
845
846  bind(notChar);
847  // cmp(Rtmp, T_SHORT);
848  // b(done, ne);
849  sign_extend(result, result, 16);
850
851  // Nothing to do
852  bind(done);
853}
854
855// remove activation
856//
857// Unlock the receiver if this is a synchronized method.
858// Unlock any Java monitors from synchronized blocks.
859// Remove the activation from the stack.
860//
861// If there are locked Java monitors
862//    If throw_monitor_exception
863//       throws IllegalMonitorStateException
864//    Else if install_monitor_exception
865//       installs IllegalMonitorStateException
866//    Else
867//       no error processing
868void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_addr,
869                                                  bool throw_monitor_exception,
870                                                  bool install_monitor_exception,
871                                                  bool notify_jvmdi) {
872  Label unlock, unlocked, no_unlock;
873
874  // Note: Registers R0, R1, S0 and D0 (TOS cached value) may be in use for the result.
875
876  const Address do_not_unlock_if_synchronized(Rthread,
877                         JavaThread::do_not_unlock_if_synchronized_offset());
878
879  const Register Rflag = R2;
880  const Register Raccess_flags = R3;
881
882  restore_method();
883
884  ldrb(Rflag, do_not_unlock_if_synchronized);
885
886  // get method access flags
887  ldr_u32(Raccess_flags, Address(Rmethod, Method::access_flags_offset()));
888
889  strb(zero_register(Rtemp), do_not_unlock_if_synchronized); // reset the flag
890
891  // check if method is synchronized
892
893  tbz(Raccess_flags, JVM_ACC_SYNCHRONIZED_BIT, unlocked);
894
895  // Don't unlock anything if the _do_not_unlock_if_synchronized flag is set.
896  cbnz(Rflag, no_unlock);
897
898  // unlock monitor
899  push(state);                                   // save result
900
901  // The BasicObjectLock will be first in the list, since this is a synchronized method. However, we need
902  // to check that the object has not been unlocked by an explicit monitorexit bytecode.
903
904  const Register Rmonitor = R1;                  // fixed in unlock_object()
905  const Register Robj = R2;
906
907  // address of first monitor
908  sub(Rmonitor, FP, - frame::interpreter_frame_monitor_block_bottom_offset * wordSize + (int)sizeof(BasicObjectLock));
909
910  ldr(Robj, Address(Rmonitor, BasicObjectLock::obj_offset_in_bytes()));
911  cbnz(Robj, unlock);
912
913  pop(state);
914
915  if (throw_monitor_exception) {
916    // Entry already unlocked, need to throw exception
917    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
918    should_not_reach_here();
919  } else {
920    // Monitor already unlocked during a stack unroll.
921    // If requested, install an illegal_monitor_state_exception.
922    // Continue with stack unrolling.
923    if (install_monitor_exception) {
924      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
925    }
926    b(unlocked);
927  }
928
929
930  // Exception case for the check that all monitors are unlocked.
931  const Register Rcur = R2;
932  Label restart_check_monitors_unlocked, exception_monitor_is_still_locked;
933
934  bind(exception_monitor_is_still_locked);
935  // Monitor entry is still locked, need to throw exception.
936  // Rcur: monitor entry.
937
938  if (throw_monitor_exception) {
939    // Throw exception
940    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
941    should_not_reach_here();
942  } else {
943    // Stack unrolling. Unlock object and install illegal_monitor_exception
944    // Unlock does not block, so don't have to worry about the frame
945
946    push(state);
947    mov(R1, Rcur);
948    unlock_object(R1);
949
950    if (install_monitor_exception) {
951      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
952    }
953
954    pop(state);
955    b(restart_check_monitors_unlocked);
956  }
957
958  bind(unlock);
959  unlock_object(Rmonitor);
960  pop(state);
961
962  // Check for block-structured locking (i.e., that all locked objects have been unlocked).
963  bind(unlocked);
964
965  // Check that all monitors are unlocked
966  {
967    Label loop;
968
969    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
970    const Register Rbottom = R3;
971    const Register Rcur_obj = Rtemp;
972
973    bind(restart_check_monitors_unlocked);
974
975    ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
976                                 // points to current entry, starting with top-most entry
977    sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
978                                 // points to word before bottom of monitor block
979
980    cmp(Rcur, Rbottom);          // check if there are no monitors
981#ifndef AARCH64
982    ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
983                                 // prefetch monitor's object
984#endif // !AARCH64
985    b(no_unlock, eq);
986
987    bind(loop);
988#ifdef AARCH64
989    ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
990#endif // AARCH64
991    // check if current entry is used
992    cbnz(Rcur_obj, exception_monitor_is_still_locked);
993
994    add(Rcur, Rcur, entry_size);      // otherwise advance to next entry
995    cmp(Rcur, Rbottom);               // check if bottom reached
996#ifndef AARCH64
997    ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
998                                      // prefetch monitor's object
999#endif // !AARCH64
1000    b(loop, ne);                      // if not at bottom then check this entry
1001  }
1002
1003  bind(no_unlock);
1004
1005  // jvmti support
1006  if (notify_jvmdi) {
1007    notify_method_exit(state, NotifyJVMTI);     // preserve TOSCA
1008  } else {
1009    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
1010  }
1011
1012  // remove activation
1013#ifdef AARCH64
1014  ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
1015  ldp(FP, LR, Address(FP));
1016  mov(SP, Rtemp);
1017#else
1018  mov(Rtemp, FP);
1019  ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
1020  ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));
1021#endif
1022
1023  if (ret_addr != LR) {
1024    mov(ret_addr, LR);
1025  }
1026}
1027
1028
1029// At certain points in the method invocation the monitor of
1030// synchronized methods hasn't been entered yet.
1031// To correctly handle exceptions at these points, we set the thread local
1032// variable _do_not_unlock_if_synchronized to true. The remove_activation will
1033// check this flag.
1034void InterpreterMacroAssembler::set_do_not_unlock_if_synchronized(bool flag, Register tmp) {
1035  const Address do_not_unlock_if_synchronized(Rthread,
1036                         JavaThread::do_not_unlock_if_synchronized_offset());
1037  if (flag) {
1038    mov(tmp, 1);
1039    strb(tmp, do_not_unlock_if_synchronized);
1040  } else {
1041    strb(zero_register(tmp), do_not_unlock_if_synchronized);
1042  }
1043}
1044
1045// Lock object
1046//
1047// Argument: R1 : Points to BasicObjectLock to be used for locking.
1048// Must be initialized with object to lock.
1049// Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR. Calls VM.
1050void InterpreterMacroAssembler::lock_object(Register Rlock) {
1051  assert(Rlock == R1, "the second argument");
1052
1053  if (UseHeavyMonitors) {
1054    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
1055  } else {
1056    Label done;
1057
1058    const Register Robj = R2;
1059    const Register Rmark = R3;
1060    assert_different_registers(Robj, Rmark, Rlock, R0, Rtemp);
1061
1062    const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
1063    const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
1064    const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();
1065
1066    Label already_locked, slow_case;
1067
1068    // Load object pointer
1069    ldr(Robj, Address(Rlock, obj_offset));
1070
1071    if (UseBiasedLocking) {
1072      biased_locking_enter(Robj, Rmark/*scratched*/, R0, false, Rtemp, done, slow_case);
1073    }
1074
1075#ifdef AARCH64
1076    assert(oopDesc::mark_offset_in_bytes() == 0, "must be");
1077    ldr(Rmark, Robj);
1078
1079    // Test if object is already locked
1080    assert(markOopDesc::unlocked_value == 1, "adjust this code");
1081    tbz(Rmark, exact_log2(markOopDesc::unlocked_value), already_locked);
1082
1083#else // AARCH64
1084
1085    // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
1086    // That would be acceptable, as either the CAS or the slow case path is taken in that case.
1087    // The exception is when the object is locked by the calling thread; then the recursive test will pass (guaranteed, as
1088    // loads are satisfied from a store queue if performed on the same processor).
1089
1090    assert(oopDesc::mark_offset_in_bytes() == 0, "must be");
1091    ldr(Rmark, Address(Robj, oopDesc::mark_offset_in_bytes()));
1092
1093    // Test if object is already locked
1094    tst(Rmark, markOopDesc::unlocked_value);
1095    b(already_locked, eq);
1096
1097#endif // !AARCH64
1098    // Save old object->mark() into BasicLock's displaced header
1099    str(Rmark, Address(Rlock, mark_offset));
1100
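    // Try to install a pointer to the BasicLock into the object's mark word;
    // on failure, cas_for_lock_acquire() branches to slow_case.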
1101    cas_for_lock_acquire(Rmark, Rlock, Robj, Rtemp, slow_case);
1102
1103#ifndef PRODUCT
1104    if (PrintBiasedLockingStatistics) {
1105      cond_atomic_inc32(al, BiasedLocking::fast_path_entry_count_addr());
1106    }
1107#endif //!PRODUCT
1108
1109    b(done);
1110
1111    // If we got here, the object is locked by either the calling thread or another thread.
1112    bind(already_locked);
1113    // Handling of locked objects: recursive locks and slow case.
1114
1115    // Fast check for recursive lock.
1116    //
1117    // Can apply the optimization only if this is a stack lock
1118    // allocated in this thread. For efficiency, we can focus on
1119    // recently allocated stack locks (instead of reading the stack
1120    // base and checking whether 'mark' points inside the current
1121    // thread stack):
1122    //  1) (mark & 3) == 0
1123    //  2) SP <= mark < SP + os::pagesize()
1124    //
1125    // Warning: SP + os::pagesize can overflow the stack base. We must
1126    // neither apply the optimization for an inflated lock allocated
1127    // just above the thread stack (this is why condition 1 matters)
1128    // nor apply the optimization if the stack lock is inside the stack
1129    // of another thread. The latter is avoided even in case of overflow
1130    // because we have guard pages at the end of all stacks. Hence, if
1131    // we go over the stack base and hit the stack of another thread,
1132    // this should not be in a writeable area that could contain a
1133    // stack lock allocated by that thread. As a consequence, a stack
1134    // lock less than page size away from SP is guaranteed to be
1135    // owned by the current thread.
1136    //
1137    // Note: assuming SP is aligned, we can check the low bits of
1138    // (mark-SP) instead of the low bits of mark. In that case,
1139    // assuming page size is a power of 2, we can merge the two
1140    // conditions into a single test:
1141    // => ((mark - SP) & (3 - os::pagesize())) == 0
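    //
    // Worked example (hypothetical 32-bit values, 4 KB pages): with SP = 0x00100000
    // and a stack lock at mark = 0x00100010, (mark - SP) = 0x10 and
    // 0x10 & (3 - 0x1000) = 0x10 & 0xfffff003 = 0, so the recursive case is taken.
    // A misaligned mark, or one more than a page away from SP, leaves a bit set
    // and we fall through to the slow case below.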
1142
1143#ifdef AARCH64
1144    // Use the single check since the immediate is OK for AARCH64
1145    sub(R0, Rmark, Rstack_top);
1146    intptr_t mask = ((intptr_t)3) - ((intptr_t)os::vm_page_size());
1147    Assembler::LogicalImmediate imm(mask, false);
1148    ands(R0, R0, imm);
1149
1150    // For recursive case store 0 into lock record.
1151    // It is harmless to store it unconditionally, as the lock record contains only a garbage
1152    // value in its _displaced_header field at this point.
1153    str(ZR, Address(Rlock, mark_offset));
1154
1155#else // AARCH64
1156    // (3 - os::pagesize()) cannot be encoded as an ARM immediate operand.
1157    // Check independently the low bits and the distance to SP.
1158    // -1- test low 2 bits
1159    movs(R0, AsmOperand(Rmark, lsl, 30));
1160    // -2- test (mark - SP) if the low two bits are 0
1161    sub(R0, Rmark, SP, eq);
1162    movs(R0, AsmOperand(R0, lsr, exact_log2(os::vm_page_size())), eq);
1163    // If still 'eq' then recursive locking OK: store 0 into lock record
1164    str(R0, Address(Rlock, mark_offset), eq);
1165
1166#endif // AARCH64
1167
1168#ifndef PRODUCT
1169    if (PrintBiasedLockingStatistics) {
1170      cond_atomic_inc32(eq, BiasedLocking::fast_path_entry_count_addr());
1171    }
1172#endif // !PRODUCT
1173
1174    b(done, eq);
1175
1176    bind(slow_case);
1177
1178    // Call the runtime routine for slow case
1179    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
1180
1181    bind(done);
1182  }
1183}
1184
1185
1186// Unlocks an object. Used in monitorexit bytecode and remove_activation.
1187//
1188// Argument: R1: Points to BasicObjectLock structure for lock
1189// Throws an IllegalMonitorStateException if the object is not locked by the current thread.
1190// Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR. Calls VM.
1191void InterpreterMacroAssembler::unlock_object(Register Rlock) {
1192  assert(Rlock == R1, "the second argument");
1193
1194  if (UseHeavyMonitors) {
1195    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);
1196  } else {
1197    Label done, slow_case;
1198
1199    const Register Robj = R2;
1200    const Register Rmark = R3;
1201    const Register Rresult = R0;
1202    assert_different_registers(Robj, Rmark, Rlock, R0, Rtemp);
1203
1204    const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
1205    const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
1206    const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();
1207
1208    const Register Rzero = zero_register(Rtemp);
1209
1210    // Load oop into Robj
1211    ldr(Robj, Address(Rlock, obj_offset));
1212
1213    // Free entry
1214    str(Rzero, Address(Rlock, obj_offset));
1215
1216    if (UseBiasedLocking) {
1217      biased_locking_exit(Robj, Rmark, done);
1218    }
1219
1220    // Load the old header from BasicLock structure
1221    ldr(Rmark, Address(Rlock, mark_offset));
1222
1223    // Test for recursion (zero mark in BasicLock)
1224    cbz(Rmark, done);
1225
1226    bool allow_fallthrough_on_failure = true;
1227
1228    cas_for_lock_release(Rlock, Rmark, Robj, Rtemp, slow_case, allow_fallthrough_on_failure);
1229
1230    b(done, eq);
1231
1232    bind(slow_case);
1233
1234    // Call the runtime routine for slow case.
1235    str(Robj, Address(Rlock, obj_offset)); // restore obj
1236    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);
1237
1238    bind(done);
1239  }
1240}
1241
1242
1243// Test the method data pointer (mdp). If it is null, continue at the specified label.
1244void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) {
1245  assert(ProfileInterpreter, "must be profiling interpreter");
1246  ldr(mdp, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
1247  cbz(mdp, zero_continue);
1248}
1249
1250
1251// Set the method data pointer for the current bcp.
1252// Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
1253void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1254  assert(ProfileInterpreter, "must be profiling interpreter");
1255  Label set_mdp;
1256
1257  // Test MDO to avoid the call if it is NULL.
1258  ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
1259  cbz(Rtemp, set_mdp);
1260
1261  mov(R0, Rmethod);
1262  mov(R1, Rbcp);
1263  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), R0, R1);
1264  // R0/W0: mdi
1265
1266  // The MDO is guaranteed to be non-zero here; we checked for it before the call.
1267  ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
1268  add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()));
1269  add_ptr_scaled_int32(Rtemp, Rtemp, R0, 0);
1270
1271  bind(set_mdp);
1272  str(Rtemp, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
1273}
1274
1275
1276void InterpreterMacroAssembler::verify_method_data_pointer() {
1277  assert(ProfileInterpreter, "must be profiling interpreter");
1278#ifdef ASSERT
1279  Label verify_continue;
1280  save_caller_save_registers();
1281
1282  const Register Rmdp = R2;
1283  test_method_data_pointer(Rmdp, verify_continue); // If mdp is zero, continue
1284
1285  // If the mdp is valid, it will point to a DataLayout header which is
1286  // consistent with the bcp.  The converse is highly probable also.
1287
1288  ldrh(R3, Address(Rmdp, DataLayout::bci_offset()));
1289  ldr(Rtemp, Address(Rmethod, Method::const_offset()));
1290  add(R3, R3, Rtemp);
1291  add(R3, R3, in_bytes(ConstMethod::codes_offset()));
1292  cmp(R3, Rbcp);
1293  b(verify_continue, eq);
1294
1295  mov(R0, Rmethod);
1296  mov(R1, Rbcp);
1297  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), R0, R1, Rmdp);
1298
1299  bind(verify_continue);
1300  restore_caller_save_registers();
1301#endif // ASSERT
1302}
1303
1304
1305void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, int offset, Register value) {
1306  assert(ProfileInterpreter, "must be profiling interpreter");
1307  assert_different_registers(mdp_in, value);
1308  str(value, Address(mdp_in, offset));
1309}
1310
1311
1312// Increments mdp data. Sets bumped_count register to adjusted counter.
1313void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
1314                                                      int offset,
1315                                                      Register bumped_count,
1316                                                      bool decrement) {
1317  assert(ProfileInterpreter, "must be profiling interpreter");
1318
1319  // Counter address
1320  Address data(mdp_in, offset);
1321  assert_different_registers(mdp_in, bumped_count);
1322
1323  increment_mdp_data_at(data, bumped_count, decrement);
1324}
1325
1326void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in, int flag_byte_constant) {
1327  assert_different_registers(mdp_in, Rtemp);
1328  assert(ProfileInterpreter, "must be profiling interpreter");
1329  assert((0 < flag_byte_constant) && (flag_byte_constant < (1 << BitsPerByte)), "flag mask is out of range");
1330
1331  // Set the flag
1332  ldrb(Rtemp, Address(mdp_in, in_bytes(DataLayout::flags_offset())));
1333  orr(Rtemp, Rtemp, (unsigned)flag_byte_constant);
1334  strb(Rtemp, Address(mdp_in, in_bytes(DataLayout::flags_offset())));
1335}
1336
1337
1338// Increments mdp data. Sets bumped_count register to adjusted counter.
1339void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
1340                                                      Register bumped_count,
1341                                                      bool decrement) {
1342  assert(ProfileInterpreter, "must be profiling interpreter");
1343
1344  ldr(bumped_count, data);
1345  if (decrement) {
1346    // Decrement the register. Set condition codes.
1347    subs(bumped_count, bumped_count, DataLayout::counter_increment);
1348    // Avoid overflow.
1349#ifdef AARCH64
1350    assert(DataLayout::counter_increment == 1, "required for cinc");
1351    cinc(bumped_count, bumped_count, pl);
1352#else
1353    add(bumped_count, bumped_count, DataLayout::counter_increment, pl);
1354#endif // AARCH64
1355  } else {
1356    // Increment the register. Set condition codes.
1357    adds(bumped_count, bumped_count, DataLayout::counter_increment);
1358    // Avoid overflow.
1359#ifdef AARCH64
1360    assert(DataLayout::counter_increment == 1, "required for cinv");
1361    cinv(bumped_count, bumped_count, mi); // inverts 0x80..00 back to 0x7f..ff
1362#else
1363    sub(bumped_count, bumped_count, DataLayout::counter_increment, mi);
1364#endif // AARCH64
1365  }
1366  str(bumped_count, data);
1367}
1368
1369
1370void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
1371                                                 int offset,
1372                                                 Register value,
1373                                                 Register test_value_out,
1374                                                 Label& not_equal_continue) {
1375  assert(ProfileInterpreter, "must be profiling interpreter");
1376  assert_different_registers(mdp_in, test_value_out, value);
1377
1378  ldr(test_value_out, Address(mdp_in, offset));
1379  cmp(test_value_out, value);
1380
1381  b(not_equal_continue, ne);
1382}
1383
1384
1385void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, int offset_of_disp, Register reg_temp) {
1386  assert(ProfileInterpreter, "must be profiling interpreter");
1387  assert_different_registers(mdp_in, reg_temp);
1388
1389  ldr(reg_temp, Address(mdp_in, offset_of_disp));
1390  add(mdp_in, mdp_in, reg_temp);
1391  str(mdp_in, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
1392}
1393
1394
1395void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, Register reg_offset, Register reg_tmp) {
1396  assert(ProfileInterpreter, "must be profiling interpreter");
1397  assert_different_registers(mdp_in, reg_offset, reg_tmp);
1398
1399  ldr(reg_tmp, Address(mdp_in, reg_offset));
1400  add(mdp_in, mdp_in, reg_tmp);
1401  str(mdp_in, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
1402}
1403
1404
1405void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, int constant) {
1406  assert(ProfileInterpreter, "must be profiling interpreter");
1407  add(mdp_in, mdp_in, constant);
1408  str(mdp_in, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
1409}
1410
1411
1412// Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
1413void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
1414  assert(ProfileInterpreter, "must be profiling interpreter");
1415  assert_different_registers(return_bci, R0, R1, R2, R3, Rtemp);
1416
1417  mov(R1, return_bci);
1418  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), R1);
1419}
1420
1421
1422// Sets mdp, bumped_count registers, blows Rtemp.
1423void InterpreterMacroAssembler::profile_taken_branch(Register mdp, Register bumped_count) {
1424  assert_different_registers(mdp, bumped_count);
1425
1426  if (ProfileInterpreter) {
1427    Label profile_continue;
1428
1429    // If no method data exists, go to profile_continue.
1430    // Otherwise, assign to mdp
1431    test_method_data_pointer(mdp, profile_continue);
1432
1433    // We are taking a branch. Increment the taken count.
1434    increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()), bumped_count);
1435
1436    // The method data pointer needs to be updated to reflect the new target.
1437    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()), Rtemp);
1438
1439    bind (profile_continue);
1440  }
1441}
1442
1443
// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  assert_different_registers(mdp, Rtemp);

  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We did not take the branch. Increment the not-taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()), Rtemp);

    // The method data pointer needs to be updated to correspond to the next bytecode.
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));

    bind (profile_continue);
  }
}

// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_call(Register mdp) {
  assert_different_registers(mdp, Rtemp);

  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), Rtemp);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));

    bind (profile_continue);
  }
}


// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_final_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), Rtemp);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));

    bind (profile_continue);
  }
}


// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_virtual_call(Register mdp, Register receiver, bool receiver_can_be_null) {
  assert_different_registers(mdp, receiver, Rtemp);

  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      cbnz(receiver, not_null);
      // We are making a call.  Increment the count for null receiver.
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), Rtemp);
      b(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, mdp, Rtemp, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}

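// Walks the receiver rows of a ReceiverTypeData/VirtualCallData cell: if the receiver
// matches a recorded klass, that row's counter is incremented; otherwise the first
// empty (null) row is claimed for it. The recursion on start_row unrolls the search
// into straight-line code for all TypeProfileWidth rows.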
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register mdp,
                                        Register reg_tmp,
                                        int start_row, Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0)
    return;

  assert_different_registers(receiver, mdp, reg_tmp);

  int last_row = VirtualCallData::row_limit() - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the receiver and for null.
  // Take any of three different outcomes:
  //   1. found receiver => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;

    // See if the receiver is receiver[n].
    int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));

    test_mdp_data_at(mdp, recvr_offset, receiver, reg_tmp, next_test);

    // The receiver is receiver[n].  Increment count[n].
    int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
    increment_mdp_data_at(mdp, count_offset, reg_tmp);
    b(done);

    bind(next_test);
    // reg_tmp now contains the receiver from the CallData.

    if (row == start_row) {
      Label found_null;
      // Failed the equality check on receiver[n]...  Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (is_virtual_call) {
          cbz(reg_tmp, found_null);
          // Receiver did not match any saved receiver and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), reg_tmp);
          b(done);
          bind(found_null);
        } else {
          cbnz(reg_tmp, done);
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      cbz(reg_tmp, found_null);

      // Put all the "Case 3" tests here.
      record_klass_in_profile_helper(receiver, mdp, reg_tmp, start_row + 1, done, is_virtual_call);

      // Found a null.  Keep searching for a matching receiver,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching receiver, but we
  // observed that receiver[start_row] is NULL.

  // Fill in the receiver field and increment the count.
  int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
  set_mdp_data_at(mdp, recvr_offset, receiver);
  int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
  mov(reg_tmp, DataLayout::counter_increment);
  set_mdp_data_at(mdp, count_offset, reg_tmp);
  if (start_row > 0) {
    b(done);
  }
}

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp,
                                                        Register reg_tmp,
                                                        bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  assert_different_registers(receiver, mdp, reg_tmp);

  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg_tmp, 0, done, is_virtual_call);

  bind (done);
}

// Sets mdp, blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
void InterpreterMacroAssembler::profile_ret(Register mdp, Register return_bci) {
  assert_different_registers(mdp, return_bci, Rtemp, R0, R1, R2, R3);

  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), Rtemp);

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(mdp, in_bytes(RetData::bci_offset(row)), return_bci,
                       Rtemp, next_test);

      // return_bci is equal to bci[n].  Increment the count.
      increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)), Rtemp);

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(mdp, in_bytes(RetData::bci_displacement_offset(row)), Rtemp);
      b(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(return_bci);

    bind(profile_continue);
  }
}


// Sets mdp.
void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind (profile_continue);
  }
}


// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) {
  assert_different_registers(mdp, Rtemp);

  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter.  We expect to see zero or small negatives.
    increment_mdp_data_at(mdp, count_offset, Rtemp, true);

    bind (profile_continue);
  }
}


// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass)
{
  assert_different_registers(mdp, klass, Rtemp);

  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, Rtemp, false);
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}

// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
  assert_different_registers(mdp, Rtemp);

  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the default case count
    increment_mdp_data_at(mdp, in_bytes(MultiBranchData::default_count_offset()), Rtemp);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp, in_bytes(MultiBranchData::default_displacement_offset()), Rtemp);

    bind(profile_continue);
  }
}


// Sets mdp. Blows reg_tmp1, reg_tmp2. Index could be the same as reg_tmp2.
void InterpreterMacroAssembler::profile_switch_case(Register mdp, Register index, Register reg_tmp1, Register reg_tmp2) {
  assert_different_registers(mdp, reg_tmp1, reg_tmp2);
  assert_different_registers(mdp, reg_tmp1, index);

  if (ProfileInterpreter) {
    Label profile_continue;

    const int count_offset = in_bytes(MultiBranchData::case_array_offset()) +
                              in_bytes(MultiBranchData::relative_count_offset());

    const int displacement_offset = in_bytes(MultiBranchData::case_array_offset()) +
                              in_bytes(MultiBranchData::relative_displacement_offset());

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Build the base (index * per_case_size_in_bytes())
    logical_shift_left(reg_tmp1, index, exact_log2(in_bytes(MultiBranchData::per_case_size())));

    // Update the case count
    add(reg_tmp1, reg_tmp1, count_offset);
    increment_mdp_data_at(Address(mdp, reg_tmp1), reg_tmp2);

    // The method data pointer needs to be updated.
    add(reg_tmp1, reg_tmp1, displacement_offset - count_offset);
    update_mdp_by_offset(mdp, reg_tmp1, reg_tmp2);

    bind (profile_continue);
  }
}


void InterpreterMacroAssembler::byteswap_u32(Register r, Register rtmp1, Register rtmp2) {
#ifdef AARCH64
  rev_w(r, r);
#else
  if (VM_Version::supports_rev()) {
    rev(r, r);
  } else {
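    // No REV instruction available: swap the bytes with the classic EOR/ROR
    // sequence. With r = [A B C D] (most significant byte first):
    //   rtmp1 = r ^ ROR(r, 16)              -> [A^C B^D A^C B^D]
    //   rtmp1 = (rtmp1 LSR 8) & ~0x0000ff00 -> [ 0  A^C  0  A^C]
    //   r     = rtmp1 ^ ROR(r, 8)           -> [ D   C   B   A ]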
    eor(rtmp1, r, AsmOperand(r, ror, 16));
    mvn(rtmp2, 0x0000ff00);
    andr(rtmp1, rtmp2, AsmOperand(rtmp1, lsr, 8));
    eor(r, rtmp1, AsmOperand(r, ror, 8));
  }
#endif // AARCH64
}


void InterpreterMacroAssembler::inc_global_counter(address address_of_counter, int offset, Register tmp1, Register tmp2, bool avoid_overflow) {
  const intx addr = (intx) (address_of_counter + offset);

  assert ((addr & 0x3) == 0, "address of counter should be aligned");
  const intx offset_mask = right_n_bits(AARCH64_ONLY(12 + 2) NOT_AARCH64(12));

  const address base = (address) (addr & ~offset_mask);
  const int offs = (int) (addr & offset_mask);

  const Register addr_base = tmp1;
  const Register val = tmp2;

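  // Materialize the word-aligned base address once with mov_slow; the remaining
  // low bits of the counter address fit the immediate offset of the ldr/str below.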
  mov_slow(addr_base, base);
  ldr_s32(val, Address(addr_base, offs));

  if (avoid_overflow) {
    adds_32(val, val, 1);
#ifdef AARCH64
    Label L;
    b(L, mi);
    str_32(val, Address(addr_base, offs));
    bind(L);
#else
    str(val, Address(addr_base, offs), pl);
#endif // AARCH64
  } else {
    add_32(val, val, 1);
    str_32(val, Address(addr_base, offs));
  }
}

void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char *file, int line) {
  if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop", file, line); }
}

// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   InterpreterRuntime::post_method_entry();
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_entry(method, receiver);
// }
// if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
//   SharedRuntime::rc_trace_method_entry(method, receiver);
// }

void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
  // track stack depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (can_post_interpreter_events()) {
    Label L;

    ldr_s32(Rtemp, Address(Rthread, JavaThread::interp_only_mode_offset()));
    cbz(Rtemp, L);

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));

    bind(L);
  }

  // Note: Disable DTrace runtime check for now to eliminate overhead on each method entry
  if (DTraceMethodProbes) {
    Label Lcontinue;

    ldrb_global(Rtemp, (address)&DTraceMethodProbes);
    cbz(Rtemp, Lcontinue);

    mov(R0, Rthread);
    mov(R1, Rmethod);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), R0, R1);

    bind(Lcontinue);
  }
  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    mov(R0, Rthread);
    mov(R1, Rmethod);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
                 R0, R1);
  }
}


void InterpreterMacroAssembler::notify_method_exit(
                 TosState state, NotifyMethodExitMode mode,
                 bool native, Register result_lo, Register result_hi, FloatRegister result_fp) {
  // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
  // track stack depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (mode == NotifyJVMTI && can_post_interpreter_events()) {
    Label L;
    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit. If this
    // is changed then the interpreter_frame_result implementation will
    // need to be updated too.

    ldr_s32(Rtemp, Address(Rthread, JavaThread::interp_only_mode_offset()));
    cbz(Rtemp, L);

    if (native) {
      // For the C++ and template interpreter, push both result registers on the
      // stack when in a native method; we don't know the state.
      // On AArch64 result registers are stored into the frame at known locations.
      // See frame::interpreter_frame_result for code that gets the result values from here.
      assert(result_lo != noreg, "result registers should be defined");

#ifdef AARCH64
      assert(result_hi == noreg, "result_hi is not used on AArch64");
      assert(result_fp != fnoreg, "FP result register must be defined");

      str_d(result_fp, Address(FP, frame::interpreter_frame_fp_saved_result_offset * wordSize));
      str(result_lo, Address(FP, frame::interpreter_frame_gp_saved_result_offset * wordSize));
#else
      assert(result_hi != noreg, "result registers should be defined");

#ifdef __ABI_HARD__
      assert(result_fp != fnoreg, "FP result register must be defined");
      sub(SP, SP, 2 * wordSize);
      fstd(result_fp, Address(SP));
#endif // __ABI_HARD__

      push(RegisterSet(result_lo) | RegisterSet(result_hi));
#endif // AARCH64

      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));

#ifdef AARCH64
      ldr_d(result_fp, Address(FP, frame::interpreter_frame_fp_saved_result_offset * wordSize));
      ldr(result_lo, Address(FP, frame::interpreter_frame_gp_saved_result_offset * wordSize));
#else
      pop(RegisterSet(result_lo) | RegisterSet(result_hi));
#ifdef __ABI_HARD__
      fldd(result_fp, Address(SP));
      add(SP, SP, 2 * wordSize);
#endif // __ABI_HARD__
#endif // AARCH64

    } else {
      // For the template interpreter, the value on tos is the size of the
      // state. (c++ interpreter calls jvmti somewhere else).
      push(state);
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
      pop(state);
    }

    bind(L);
  }

  // Note: Disable DTrace runtime check for now to eliminate overhead on each method exit
  if (DTraceMethodProbes) {
    Label Lcontinue;

    ldrb_global(Rtemp, (address)&DTraceMethodProbes);
    cbz(Rtemp, Lcontinue);

    push(state);

    mov(R0, Rthread);
    mov(R1, Rmethod);

    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), R0, R1);

    pop(state);

    bind(Lcontinue);
  }
}


#ifndef PRODUCT

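// Debug-only helper: prints "<msg>: FP=<fp>, SP=<sp>" through the C library printf,
// preserving the caller-save registers around the call.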
void InterpreterMacroAssembler::trace_state(const char* msg) {
  int push_size = save_caller_save_registers();

  Label Lcontinue;
  InlinedString Lmsg0("%s: FP=" INTPTR_FORMAT ", SP=" INTPTR_FORMAT "\n");
  InlinedString Lmsg(msg);
  InlinedAddress Lprintf((address)printf);

  ldr_literal(R0, Lmsg0);
  ldr_literal(R1, Lmsg);
  mov(R2, FP);
  add(R3, SP, push_size);  // original SP (without saved registers)
  ldr_literal(Rtemp, Lprintf);
  call(Rtemp);

  b(Lcontinue);

  bind_literal(Lmsg0);
  bind_literal(Lmsg);
  bind_literal(Lprintf);

  bind(Lcontinue);

  restore_caller_save_registers();
}

#endif

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask_addr,
                                                        Register scratch, Register scratch2,
                                                        AsmCondition cond, Label* where) {
  // caution: scratch2 and base address of counter_addr can be the same
  assert_different_registers(scratch, scratch2);
  ldr_u32(scratch, counter_addr);
  add(scratch, scratch, increment);
  str_32(scratch, counter_addr);

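  // Apply the mask and set the condition flags; the conditional branch below consumes them.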
#ifdef AARCH64
  ldr_u32(scratch2, mask_addr);
  ands_w(ZR, scratch, scratch2);
#else
  ldr(scratch2, mask_addr);
  andrs(scratch, scratch, scratch2);
#endif // AARCH64
  b(*where, cond);
}

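// Loads the MethodCounters of 'method' into Rcounters, asking the runtime to allocate
// them on first use; branches to 'skip' if the allocation failed (OutOfMemory).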
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register Rcounters,
                                                    Label& skip,
                                                    bool saveRegs,
                                                    Register reg1,
                                                    Register reg2,
                                                    Register reg3) {
  const Address method_counters(method, Method::method_counters_offset());
  Label has_counters;

  ldr(Rcounters, method_counters);
  cbnz(Rcounters, has_counters);

  if (saveRegs) {
    // Save and restore in-use caller-saved registers since they will be trashed by call_VM.
    assert(reg1 != noreg, "must specify reg1");
    assert(reg2 != noreg, "must specify reg2");
#ifdef AARCH64
    assert(reg3 != noreg, "must specify reg3");
    stp(reg1, reg2, Address(Rstack_top, -2*wordSize, pre_indexed));
    stp(reg3, ZR, Address(Rstack_top, -2*wordSize, pre_indexed));
#else
    assert(reg3 == noreg, "must not specify reg3");
    push(RegisterSet(reg1) | RegisterSet(reg2));
#endif
  }

  mov(R1, method);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), R1);

  if (saveRegs) {
#ifdef AARCH64
    ldp(reg3, ZR, Address(Rstack_top, 2*wordSize, post_indexed));
    ldp(reg1, reg2, Address(Rstack_top, 2*wordSize, post_indexed));
#else
    pop(RegisterSet(reg1) | RegisterSet(reg2));
#endif
  }

  ldr(Rcounters, method_counters);
  cbz(Rcounters, skip); // No MethodCounters created, OutOfMemory

  bind(has_counters);
}