macroAssembler_ppc.hpp revision 11374:3fb9a97eb099
/*
 * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
#define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP

#include "asm/assembler.hpp"
#include "runtime/rtmLocking.hpp"
#include "utilities/macros.hpp"

// MacroAssembler extends Assembler by a few frequently used macros.

class ciTypeArray;

class MacroAssembler: public Assembler {
 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  //
  // Optimized instruction emitters
  //

  inline static int largeoffset_si16_si16_hi(int si31) { return (si31 + (1<<15)) >> 16; }
  inline static int largeoffset_si16_si16_lo(int si31) { return si31 - (((si31 + (1<<15)) >> 16) << 16); }
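  // These helpers split a 31-bit offset si31 into a high and a low 16-bit part
  // such that (hi16 << 16) + (int16_t)lo16 == si31. Adding (1<<15) before the
  // shift compensates for the sign extension of the low half. Worked example
  // (illustration only):
  //   si31 = 0x1234ABCD
  //   hi16 = (0x1234ABCD + 0x8000) >> 16  =  0x1235
  //   lo16 = 0x1234ABCD - (0x1235 << 16)  = -0x5433
  //   (0x1235 << 16) + (-0x5433)          =  0x1234ABCD
  // This matches instruction pairs like addis/addi, whose displacements are
  // sign-extended 16-bit immediates.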

  // load d = *[a+si31]
  // Emits several instructions if the offset is not encodable in one instruction.
  void ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop);
  void ld_largeoffset          (Register d, int si31, Register a, int emit_filler_nop);
  inline static bool is_ld_largeoffset(address a);
  inline static int get_ld_largeoffset_offset(address a);

  inline void round_to(Register r, int modulus);

  // Load/store with type given by parameter.
  void load_sized_value( Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes, bool is_signed);
  void store_sized_value(Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes);

  // Move register if destination register and source register are different.
  inline void mr_if_needed(Register rd, Register rs);
  inline void fmr_if_needed(FloatRegister rd, FloatRegister rs);
  // This is dedicated for emitting scheduled mach nodes. For better
  // readability of the ad file I put it here.
  // Endgroups are not needed if
  //  - the scheduler is off
  //  - the scheduler found that there is a natural group end, in that
  //    case it reduced the size of the instruction used in the test
  //    yielding 'needed'.
  inline void endgroup_if_needed(bool needed);

  // Memory barriers.
  inline void membar(int bits);
  inline void release();
  inline void acquire();
  inline void fence();

  // nop padding
  void align(int modulus, int max = 252, int rem = 0);

  //
  // Constants, loading constants, TOC support
  //

  // Address of the global TOC.
  inline static address global_toc();
  // Offset of given address to the global TOC.
  inline static int offset_to_global_toc(const address addr);

  // Address of TOC of the current method.
  inline address method_toc();
  // Offset of given address to TOC of the current method.
  inline int offset_to_method_toc(const address addr);

  // Global TOC.
  void calculate_address_from_global_toc(Register dst, address addr,
                                         bool hi16 = true, bool lo16 = true,
                                         bool add_relocation = true, bool emit_dummy_addr = false);
  inline void calculate_address_from_global_toc_hi16only(Register dst, address addr) {
    calculate_address_from_global_toc(dst, addr, true, false);
  };
  inline void calculate_address_from_global_toc_lo16only(Register dst, address addr) {
    calculate_address_from_global_toc(dst, addr, false, true);
  };
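  // The full sequence emitted by calculate_address_from_global_toc is roughly
  // (sketch; exact register usage is defined in the implementation):
  //   addis dst, R29_TOC, (addr - global_toc())@ha
  //   addi  dst, dst,     (addr - global_toc())@l
  // The hi16/lo16 flags allow the two halves to be emitted separately, e.g.
  // by the scheduler.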

  inline static bool is_calculate_address_from_global_toc_at(address a, address bound);
  static int patch_calculate_address_from_global_toc_at(address a, address addr, address bound);
  static address get_address_of_calculate_address_from_global_toc_at(address a, address addr);

#ifdef _LP64
  // Patch narrow oop constant.
  inline static bool is_set_narrow_oop(address a, address bound);
  static int patch_set_narrow_oop(address a, address bound, narrowOop data);
  static narrowOop get_narrow_oop(address a, address bound);
#endif

  inline static bool is_load_const_at(address a);

  // Emits an oop const to the constant pool, loads the constant, and
  // sets a relocation info with address current_pc.
  // Returns true if successful.
  bool load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc, bool fixed_size = false);

  static bool is_load_const_from_method_toc_at(address a);
  static int get_offset_of_load_const_from_method_toc_at(address a);

  // Get the 64 bit constant from a `load_const' sequence.
  static long get_const(address load_const);

  // Patch the 64 bit constant of a `load_const' sequence. This is a
  // low level procedure. It neither flushes the instruction cache nor
  // is it atomic.
  static void patch_const(address load_const, long x);
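  // A `load_const' sequence materializes a 64-bit constant in five
  // instructions, roughly (sketch):
  //   lis  d, hi16(upper half)
  //   ori  d, d, lo16(upper half)
  //   sldi d, d, 32
  //   oris d, d, hi16(lower half)
  //   ori  d, d, lo16(lower half)
  // get_const and patch_const read resp. rewrite the four 16-bit immediates
  // of such a sequence.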

  // Metadata in code that we have to keep track of.
  AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
  AddressLiteral constant_metadata_address(Metadata* obj); // find_index
  // Oops used directly in compiled code are stored in the constant pool,
  // and loaded from there.
  // Allocate new entry for oop in constant pool. Generate relocation.
  AddressLiteral allocate_oop_address(jobject obj);
  // Find oop obj in constant pool. Return relocation with its index.
  AddressLiteral constant_oop_address(jobject obj);

  // Find oop in constant pool and emit instructions to load it.
  // Uses constant_oop_address.
  inline void set_oop_constant(jobject obj, Register d);
  // Same as load_address.
  inline void set_oop         (AddressLiteral obj_addr, Register d);

  // Read runtime constant:  Issue load if constant not yet established,
  // else use real constant.
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  //
  // branch, jump
  //

  inline void pd_patch_instruction(address branch, address target);
  NOT_PRODUCT(static void pd_print_patched_instruction(address branch);)

  // Conditional far branch for destinations encodable in 24+2 bits.
  // Same interface as bc, i.e. no inverse boint-field.
  enum {
    bc_far_optimize_not         = 0,
    bc_far_optimize_on_relocate = 1
  };
  // optimize: flag for telling the conditional far branch to optimize
  //           itself when relocated.
  void bc_far(int boint, int biint, Label& dest, int optimize);
  void bc_far_optimized(int boint, int biint, Label& dest); // 1 or 2 instructions
  // Relocation of conditional far branches.
  static bool    is_bc_far_at(address instruction_addr);
  static address get_dest_of_bc_far_at(address instruction_addr);
  static void    set_dest_of_bc_far_at(address instruction_addr, address dest);
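  // Depending on how far away the destination is, a bc_far fills its two
  // instruction slots in different layouts, e.g. a short conditional branch
  // plus padding, or an inverted conditional branch jumping over an
  // unconditional one (sketch; the variant checks below identify the exact
  // form that was emitted).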
 private:
  static bool inline is_bc_far_variant1_at(address instruction_addr);
  static bool inline is_bc_far_variant2_at(address instruction_addr);
  static bool inline is_bc_far_variant3_at(address instruction_addr);
 public:

  // Convenience bc_far versions.
  inline void blt_far(ConditionRegister crx, Label& L, int optimize);
  inline void bgt_far(ConditionRegister crx, Label& L, int optimize);
  inline void beq_far(ConditionRegister crx, Label& L, int optimize);
  inline void bso_far(ConditionRegister crx, Label& L, int optimize);
  inline void bge_far(ConditionRegister crx, Label& L, int optimize);
  inline void ble_far(ConditionRegister crx, Label& L, int optimize);
  inline void bne_far(ConditionRegister crx, Label& L, int optimize);
  inline void bns_far(ConditionRegister crx, Label& L, int optimize);

  // Emit, identify, and patch a NOT mt-safe patchable 64 bit absolute call/jump.
 private:
  enum {
    bxx64_patchable_instruction_count = (2/*load_codecache_const*/ + 3/*load_const*/ + 1/*mtctr*/ + 1/*bctrl*/),
    bxx64_patchable_size              = bxx64_patchable_instruction_count * BytesPerInstWord,
    bxx64_patchable_ret_addr_offset   = bxx64_patchable_size
  };
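  // The patchable sequence materializes the 64-bit target address in a
  // register and branches through CTR, roughly (sketch):
  //   <load target: 5 instructions, or 2 TOC-relative ones plus filler nops>
  //   mtctr(R12);
  //   bctrl();    // bctr() for the jump variant
  // All variants fill the same number of instruction slots, so the return
  // address offset is constant and the sequence can be repatched in place.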
  void bxx64_patchable(address target, relocInfo::relocType rt, bool link);
  static bool is_bxx64_patchable_at(            address instruction_addr, bool link);
  // Does the instruction use a pc-relative encoding of the destination?
  static bool is_bxx64_patchable_pcrelative_at( address instruction_addr, bool link);
  static bool is_bxx64_patchable_variant1_at(   address instruction_addr, bool link);
  // Load destination relative to global toc.
  static bool is_bxx64_patchable_variant1b_at(  address instruction_addr, bool link);
  static bool is_bxx64_patchable_variant2_at(   address instruction_addr, bool link);
  static void set_dest_of_bxx64_patchable_at(   address instruction_addr, address target, bool link);
  static address get_dest_of_bxx64_patchable_at(address instruction_addr, bool link);

 public:
  // call
  enum {
    bl64_patchable_instruction_count = bxx64_patchable_instruction_count,
    bl64_patchable_size              = bxx64_patchable_size,
    bl64_patchable_ret_addr_offset   = bxx64_patchable_ret_addr_offset
  };
  inline void bl64_patchable(address target, relocInfo::relocType rt) {
    bxx64_patchable(target, rt, /*link=*/true);
  }
  inline static bool is_bl64_patchable_at(address instruction_addr) {
    return is_bxx64_patchable_at(instruction_addr, /*link=*/true);
  }
  inline static bool is_bl64_patchable_pcrelative_at(address instruction_addr) {
    return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/true);
  }
  inline static void set_dest_of_bl64_patchable_at(address instruction_addr, address target) {
    set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/true);
  }
  inline static address get_dest_of_bl64_patchable_at(address instruction_addr) {
    return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/true);
  }
  // jump
  enum {
    b64_patchable_instruction_count = bxx64_patchable_instruction_count,
    b64_patchable_size              = bxx64_patchable_size,
  };
  inline void b64_patchable(address target, relocInfo::relocType rt) {
    bxx64_patchable(target, rt, /*link=*/false);
  }
  inline static bool is_b64_patchable_at(address instruction_addr) {
    return is_bxx64_patchable_at(instruction_addr, /*link=*/false);
  }
  inline static bool is_b64_patchable_pcrelative_at(address instruction_addr) {
    return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/false);
  }
  inline static void set_dest_of_b64_patchable_at(address instruction_addr, address target) {
    set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/false);
  }
  inline static address get_dest_of_b64_patchable_at(address instruction_addr) {
    return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/false);
  }

  //
  // Support for frame handling
  //

  // some ABI-related functions
  void save_nonvolatile_gprs(   Register dst_base, int offset);
  void restore_nonvolatile_gprs(Register src_base, int offset);
  enum { num_volatile_regs = 11 + 14 }; // GPR + FPR
  void save_volatile_gprs(   Register dst_base, int offset);
  void restore_volatile_gprs(Register src_base, int offset);
  void save_LR_CR(   Register tmp);     // tmp contains LR on return.
  void restore_LR_CR(Register tmp);

  // Get current PC using bl-next-instruction trick.
  address get_PC_trash_LR(Register result);

  // Resize current frame, either relative to the current SP or to an absolute address.
  void resize_frame(Register offset, Register tmp);
  void resize_frame(int      offset, Register tmp);
  void resize_frame_absolute(Register addr, Register tmp1, Register tmp2);

  // Push a frame of size bytes.
  void push_frame(Register bytes, Register tmp);

  // Push a frame of size `bytes'. No abi space provided.
  void push_frame(unsigned int bytes, Register tmp);

  // Push a frame of size `bytes' plus abi_reg_args on top.
  void push_frame_reg_args(unsigned int bytes, Register tmp);

  // Set up a new C frame with a spill area for non-volatile GPRs and additional
  // space for local variables.
  void push_frame_reg_args_nonvolatiles(unsigned int bytes, Register tmp);

  // Pop current C frame.
  void pop_frame();
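  // Typical shape of a C call using these helpers (illustration only, not
  // from the original sources; register choices are exemplary):
  //   save_LR_CR(R0);                        // save LR/CR in caller's frame
  //   push_frame_reg_args(0, R11_scratch1);  // new frame incl. C argument area
  //   // ... set up arguments, call_c(...) ...
  //   pop_frame();
  //   restore_LR_CR(R0);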

  //
  // Calls
  //

 private:
  address _last_calls_return_pc;

#if defined(ABI_ELFv2)
  // Generic version of a call to C function.
  // Updates and returns _last_calls_return_pc.
  address branch_to(Register function_entry, bool and_link);
#else
  // Generic version of a call to C function via a function descriptor
  // with variable support for C calling conventions (TOC, ENV, etc.).
  // Updates and returns _last_calls_return_pc.
  address branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
                    bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee);
#endif

 public:

  // Get the pc where the last call will return to. Returns _last_calls_return_pc.
  inline address last_calls_return_pc();

#if defined(ABI_ELFv2)
  // Call a C function and use full C calling conventions.
  // Updates and returns _last_calls_return_pc.
  address call_c(Register function_entry);
  // For tail calls: only branch, don't link, so callee returns to caller of this function.
  address call_c_and_return_to_caller(Register function_entry);
  address call_c(address function_entry, relocInfo::relocType rt);
#else
  // Call a C function via a function descriptor and use full C
  // calling conventions. Updates and returns _last_calls_return_pc.
  address call_c(Register function_descriptor);
  // For tail calls: only branch, don't link, so callee returns to caller of this function.
  address call_c_and_return_to_caller(Register function_descriptor);
  address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt);
  address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt,
                           Register toc);
#endif

 protected:

  // It is imperative that all calls into the VM are handled via the
  // call_VM macros. They make sure that the stack linkage is set up
  // correctly. call_VM's correspond to ENTRY/ENTRY_X entry points
  // while call_VM_leaf's correspond to LEAF entry points.
  //
  // This is the base routine called by the different versions of
  // call_VM. The interpreter may customize this version by overriding
  // it for its purposes (e.g., to save/restore additional registers
  // when doing a VM call).
  //
  // If no last_java_sp is specified (noreg) then SP will be used instead.
  virtual void call_VM_base(
    // where an oop-result ends up if any; use noreg otherwise
    Register        oop_result,
    // to set up last_Java_frame in stubs; use noreg otherwise
    Register        last_java_sp,
    // the entry point
    address         entry_point,
    // flag which indicates if exception should be checked
    bool            check_exception = true
  );

  // Support for VM calls. This is the base routine called by the
  // different versions of call_VM_leaf. The interpreter may customize
  // this version by overriding it for its purposes (e.g., to
  // save/restore additional registers when doing a VM call).
  void call_VM_leaf_base(address entry_point);

 public:
  // Call into the VM.
  // Passes the thread pointer (in R3_ARG1) as a prepended argument.
  // Makes sure oop return values are visible to the GC.
  void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void call_VM_leaf(address entry_point);
  void call_VM_leaf(address entry_point, Register arg_1);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
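  // Example (sketch, not from the original sources): call a runtime entry
  // with one argument and an oop result delivered in R3_RET, where
  // SomeRuntime::entry is a placeholder for an actual runtime function:
  //   call_VM(R3_RET, CAST_FROM_FN_PTR(address, SomeRuntime::entry), R4_ARG2);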

  // Call a stub function via a function descriptor, but don't save
  // TOC before call, don't set up TOC and ENV for the call, and don't
  // restore TOC after call. Updates and returns _last_calls_return_pc.
  inline address call_stub(Register function_entry);
  inline void call_stub_and_return_to(Register function_entry, Register return_pc);

  //
  // Java utilities
  //

  // Read from the polling page; its address is already in a register.
  inline void load_from_polling_page(Register polling_page_address, int offset = 0);
  // Check whether instruction is a read access to the polling page
  // which was emitted by load_from_polling_page(..).
  static bool is_load_from_polling_page(int instruction, void* ucontext/*may be NULL*/,
                                        address* polling_address_ptr = NULL);

  // Check whether instruction is a write access to the memory
  // serialization page realized by one of the instructions stw, stwu,
  // stwx, or stwux.
  static bool is_memory_serialization(int instruction, JavaThread* thread, void* ucontext);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  // Stack overflow checking
  void bang_stack_with_offset(int offset);

  // If instruction is a stack bang of the form ld, stdu, or
  // stdux, return the banged address. Otherwise, return 0.
  static address get_stack_bang_address(int instruction, void* ucontext);

  // Check for reserved stack access in method being exited. If the reserved
  // stack area was accessed, protect it again and throw StackOverflowError.
  void reserved_stack_check(Register return_pc);

  // Atomics
  // CmpxchgX sets condition register to cmpX(current, compare).
  // (flag == ne) => (dest_current_value != compare_value), (!swapped)
  // (flag == eq) => (dest_current_value == compare_value), ( swapped)
  static inline bool cmpxchgx_hint_acquire_lock()  { return true; }
  // The stxcx will probably not be succeeded by a releasing store.
  static inline bool cmpxchgx_hint_release_lock()  { return false; }
  static inline bool cmpxchgx_hint_atomic_update() { return false; }

  // Cmpxchg semantics
  enum {
    MemBarNone = 0,
    MemBarRel  = 1,
    MemBarAcq  = 2,
    MemBarFenceAfter = 4 // use powers of 2
  };
  void cmpxchgw(ConditionRegister flag,
                Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
                int semantics, bool cmpxchgx_hint = false,
                Register int_flag_success = noreg, bool contention_hint = false, bool weak = false);
  void cmpxchgd(ConditionRegister flag,
                Register dest_current_value, RegisterOrConstant compare_value, Register exchange_value,
                Register addr_base, int semantics, bool cmpxchgx_hint = false,
                Register int_flag_success = noreg, Label* failed = NULL, bool contention_hint = false, bool weak = false);
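  // Example (sketch, not from the original sources): a CAS with acquire
  // semantics, as used when taking a lock (register names are placeholders):
  //   cmpxchgd(CCR0, Rcurrent, Rcompare, Rexchange, Raddr,
  //            MacroAssembler::MemBarAcq, cmpxchgx_hint_acquire_lock());
  // On success the 'equal' bit of CCR0 is set; on failure Rcurrent holds the
  // value found in memory.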

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register temp_reg, Register temp2_reg,
                               Label& no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp1_reg and temp2_reg.
  // If super_check_offset is not -1, temp2_reg is not used and can be noreg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp1_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path = NULL, // default fall through
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg can be noreg, if no temps are available.
  // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
  // Updates the sub's secondary super cache as necessary.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp1_reg,
                                     Register temp2_reg,
                                     Label* L_success = NULL,
                                     Register result_reg = noreg);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp1_reg,
                           Register temp2_reg,
                           Label& L_success);
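  // The combined version corresponds roughly to the following wiring (sketch):
  //   Label L_failure;
  //   check_klass_subtype_fast_path(sub, super, tmp1, tmp2, &L_success, &L_failure);
  //   check_klass_subtype_slow_path(sub, super, tmp1, tmp2, &L_success);
  //   bind(L_failure);  // fall through on failure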

  // Method handle support (JSR 292).
  void check_method_handle_type(Register mtype_reg, Register mh_reg, Register temp_reg, Label& wrong_method_type);

  RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, Register temp_reg, int extra_slot_offset = 0);

  // Biased locking support
  // Upon entry, obj_reg must contain the target object, and mark_reg
  // must contain the target object's header.
  // Destroys mark_reg if an attempt is made to bias an anonymously
  // biased lock. In this case a failure will go either to the slow
  // case or fall through with the notEqual condition code set with
  // the expectation that the slow case in the runtime will be called.
  // In the fall-through case where the CAS-based lock is done,
  // mark_reg is not destroyed.
  void biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, Register mark_reg, Register temp_reg,
                            Register temp2_reg, Label& done, Label* slow_case = NULL);
  // Upon entry, the base register of mark_addr must contain the oop.
  // Destroys temp_reg.
  void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done);

  // allocation (for C1)
  void eden_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
  void incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register t1, Register t2);

  enum { trampoline_stub_size = 6 * 4 };
  address emit_trampoline_stub(int destination_toc_offset, int insts_call_instruction_offset, Register Rtoc = noreg);
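  // A trampoline stub extends the reach of a relative call to the full
  // address space by loading the destination from the constant pool and
  // branching through CTR, roughly (sketch):
  //   <load target address from the TOC into R12>
  //   mtctr(R12);
  //   bctr();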

  void atomic_inc_ptr(Register addr, Register result, int simm16 = 1);
  void atomic_ori_int(Register addr, Register result, int uimm16);

#if INCLUDE_RTM_OPT
  void rtm_counters_update(Register abort_status, Register rtm_counters);
  void branch_on_random_using_tb(Register tmp, int count, Label& brLabel);
  void rtm_abort_ratio_calculation(Register rtm_counters_reg, RTMLockingCounters* rtm_counters,
                                   Metadata* method_data);
  void rtm_profiling(Register abort_status_Reg, Register temp_Reg,
                     RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
  void rtm_retry_lock_on_abort(Register retry_count, Register abort_status,
                               Label& retryLabel, Label* checkRetry = NULL);
  void rtm_retry_lock_on_busy(Register retry_count, Register owner_addr, Label& retryLabel);
  void rtm_stack_locking(ConditionRegister flag, Register obj, Register mark_word, Register tmp,
                         Register retry_on_abort_count,
                         RTMLockingCounters* stack_rtm_counters,
                         Metadata* method_data, bool profile_rtm,
                         Label& DONE_LABEL, Label& IsInflated);
  void rtm_inflated_locking(ConditionRegister flag, Register obj, Register mark_word, Register box,
                            Register retry_on_busy_count, Register retry_on_abort_count,
                            RTMLockingCounters* rtm_counters,
                            Metadata* method_data, bool profile_rtm,
                            Label& DONE_LABEL);
#endif

  void compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
                                 Register tmp1, Register tmp2, Register tmp3,
                                 bool try_bias = UseBiasedLocking,
                                 RTMLockingCounters* rtm_counters = NULL,
                                 RTMLockingCounters* stack_rtm_counters = NULL,
                                 Metadata* method_data = NULL,
                                 bool use_rtm = false, bool profile_rtm = false);

  void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
                                   Register tmp1, Register tmp2, Register tmp3,
                                   bool try_bias = UseBiasedLocking, bool use_rtm = false);

  // Support for serializing memory accesses between threads
  void serialize_memory(Register thread, Register tmp1, Register tmp2);

  // GC barrier support.
  void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp);
  void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj);

#if INCLUDE_ALL_GCS
  // General G1 pre-barrier generator.
  void g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
                            Register Rtmp1, Register Rtmp2, bool needs_frame = false);
  // General G1 post-barrier generator.
  void g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1,
                             Register Rtmp2, Register Rtmp3, Label *filtered_ext = NULL);
#endif

  // Support for managing the JavaThread pointer (i.e., the reference to
  // thread-local information).

  // Support for last Java frame (but use call_VM instead where possible):
  // access R16_thread->last_Java_sp.
  void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
  void reset_last_Java_frame(void);
  void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);

  // Read vm result from thread: oop_result = R16_thread->result;
  void get_vm_result  (Register oop_result);
  void get_vm_result_2(Register metadata_result);

  static bool needs_explicit_null_check(intptr_t offset);

  // Trap-instruction-based checks.
  // Range checks can be distinguished from zero checks: range checks test
  // 32 bits (tw, twi), while zero checks test all 64 bits (td, tdi).
  inline void trap_null_check(Register a, trap_to_bits cmp = traptoEqual);
  static bool is_trap_null_check(int x) {
    return is_tdi(x, traptoEqual,               -1/*any reg*/, 0) ||
           is_tdi(x, traptoGreaterThanUnsigned, -1/*any reg*/, 0);
  }
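  // trap_null_check(a) emits e.g. a 'tdi' that traps iff a == 0. The SIGTRAP
  // raised by such an instruction is decoded via these is_trap_* predicates
  // and turned into the corresponding Java exception (sketch of the
  // mechanism; see the platform signal handler for details).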

  inline void trap_zombie_not_entrant();
  static bool is_trap_zombie_not_entrant(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 1); }

  inline void trap_should_not_reach_here();
  static bool is_trap_should_not_reach_here(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 2); }

  inline void trap_ic_miss_check(Register a, Register b);
  static bool is_trap_ic_miss_check(int x) {
    return is_td(x, traptoGreaterThanUnsigned | traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/);
  }

  // Implicit or explicit null check, jumps to static address exception_entry.
  inline void null_check_throw(Register a, int offset, Register temp_reg, address exception_entry);
  inline void null_check(Register a, int offset, Label *Lis_null); // implicit only if Lis_null not provided

  // Load heap oop and decompress. Loaded oop may not be null.
  // Specify tmp to save one cycle.
  inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg,
                                     Register tmp = noreg);
  // Store heap oop and compress. The (uncompressed) oop to be stored may not be null.
  // Specify tmp register if d should not be changed.
  inline void store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1,
                                      Register tmp = noreg);

  // Null allowed.
  inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg, Label *is_null = NULL);

  // Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
  // src == d allowed.
  inline Register encode_heap_oop_not_null(Register d, Register src = noreg);
  inline Register decode_heap_oop_not_null(Register d, Register src = noreg);

  // Null allowed.
  inline Register encode_heap_oop(Register d, Register src); // Prefer null check in GC barrier!
  inline void decode_heap_oop(Register d);

  // Load/Store klass oop from klass field. Compress.
  void load_klass(Register dst, Register src);
  void store_klass(Register dst_oop, Register klass, Register tmp = R0);
  void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.

  void load_mirror(Register mirror, Register method);

  static int instr_size_for_decode_klass_not_null();
  void decode_klass_not_null(Register dst, Register src = noreg);
  Register encode_klass_not_null(Register dst, Register src = noreg);

  // SIGTRAP-based range checks for arrays.
  inline void trap_range_check_l(Register a, Register b);
  inline void trap_range_check_l(Register a, int si16);
  static bool is_trap_range_check_l(int x) {
    return (is_tw (x, traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
            is_twi(x, traptoLessThanUnsigned, -1/*any reg*/)                  );
  }
  inline void trap_range_check_le(Register a, int si16);
  static bool is_trap_range_check_le(int x) {
    return is_twi(x, traptoEqual | traptoLessThanUnsigned, -1/*any reg*/);
  }
  inline void trap_range_check_g(Register a, int si16);
  static bool is_trap_range_check_g(int x) {
    return is_twi(x, traptoGreaterThanUnsigned, -1/*any reg*/);
  }
  inline void trap_range_check_ge(Register a, Register b);
  inline void trap_range_check_ge(Register a, int si16);
  static bool is_trap_range_check_ge(int x) {
    return (is_tw (x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
            is_twi(x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/)                  );
  }
  static bool is_trap_range_check(int x) {
    return is_trap_range_check_l(x) || is_trap_range_check_le(x) ||
           is_trap_range_check_g(x) || is_trap_range_check_ge(x);
  }
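  // Example (sketch): an array bounds check can be emitted as
  //   trap_range_check_ge(Rindex, Rlength);  // tw: trap iff index >= length
  // using an unsigned comparison, which also catches negative indices.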

  void clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp = R0);

#ifdef COMPILER2
  // Intrinsics for CompactStrings
  // Compress char[] to byte[] by compressing 16 bytes at once.
  void string_compress_16(Register src, Register dst, Register cnt,
                          Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5,
                          Label& Lfailure);

  // Compress char[] to byte[]. cnt must be positive int.
  void string_compress(Register src, Register dst, Register cnt, Register tmp, Label& Lfailure);

  // Inflate byte[] to char[] by inflating 16 bytes at once.
  void string_inflate_16(Register src, Register dst, Register cnt,
                         Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);

  // Inflate byte[] to char[]. cnt must be positive int.
  void string_inflate(Register src, Register dst, Register cnt, Register tmp);

  void string_compare(Register str1, Register str2, Register cnt1, Register cnt2,
                      Register tmp1, Register result, int ae);

  void array_equals(bool is_array_equ, Register ary1, Register ary2,
                    Register limit, Register tmp1, Register result, bool is_byte);

  void string_indexof(Register result, Register haystack, Register haycnt,
                      Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
                      Register tmp1, Register tmp2, Register tmp3, Register tmp4, int ae);

  void string_indexof_char(Register result, Register haystack, Register haycnt,
                           Register needle, jchar needleChar, Register tmp1, Register tmp2, bool is_byte);

  void has_negatives(Register src, Register cnt, Register result, Register tmp1, Register tmp2);

  // Intrinsics for non-CompactStrings
  // Needle of length 1.
  void string_indexof_1(Register result, Register haystack, Register haycnt,
                        Register needle, jchar needleChar,
                        Register tmp1, Register tmp2);
  // General indexof, optionally with constant needle length.
  void string_indexof(Register result, Register haystack, Register haycnt,
                      Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
                      Register tmp1, Register tmp2, Register tmp3, Register tmp4);
  void string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
                      Register result_reg, Register tmp_reg);
  void char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
                          Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
                          Register tmp5_reg);
  void char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
                             Register tmp1_reg, Register tmp2_reg);
#endif

  // Emitters for BigInteger.multiplyToLen intrinsic.
  inline void multiply64(Register dest_hi, Register dest_lo,
                         Register x, Register y);
  void add2_with_carry(Register dest_hi, Register dest_lo,
                       Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product_high, Register product,
                             Register idx, Register kdx, Register tmp);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx, Register carry,
                              Register product_high, Register product, Register tmp,
                              int offset);
  void multiply_128_x_128_loop(Register x_xstart,
                               Register y, Register z,
                               Register yz_idx, Register idx, Register carry,
                               Register product_high, Register product,
                               Register carry2, Register tmp);
  void multiply_to_len(Register x, Register xlen,
                       Register y, Register ylen,
                       Register z, Register zlen,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5,
                       Register tmp6, Register tmp7, Register tmp8, Register tmp9, Register tmp10,
                       Register tmp11, Register tmp12, Register tmp13);

  // CRC32 Intrinsics.
  void load_reverse_32(Register dst, Register src);
  int  crc32_table_columns(Register table, Register tc0, Register tc1, Register tc2, Register tc3);
  void fold_byte_crc32(Register crc, Register val, Register table, Register tmp);
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void update_byte_crc32(Register crc, Register val, Register table);
  void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
                             Register data, bool loopAlignment, bool invertCRC);
  void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
                          Register t0,  Register t1,  Register t2,  Register t3,
                          Register tc0, Register tc1, Register tc2, Register tc3);
  void kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
                          Register t0,  Register t1,  Register t2,  Register t3,
                          Register tc0, Register tc1, Register tc2, Register tc3);
  void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
                          Register t0,  Register t1,  Register t2,  Register t3,
                          Register tc0, Register tc1, Register tc2, Register tc3);
  void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
                          Register t0,  Register t1,  Register t2,  Register t3);
  void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp);
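  // The byte-serial step corresponds to the classic table-driven CRC32 update
  // (sketch): crc = (crc >> 8) ^ table[(crc ^ byte) & 0xff]. The 1word/2word
  // kernels process 4 resp. 8 input bytes per iteration, using four table
  // columns (tc0..tc3) per word.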

  //
  // Debugging
  //

  // assert on cr0
  void asm_assert(bool check_equal, const char* msg, int id);
  void asm_assert_eq(const char* msg, int id) { asm_assert(true, msg, id); }
  void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); }

 private:
  void asm_assert_mems_zero(bool check_equal, int size, int mem_offset, Register mem_base,
                            const char* msg, int id);

 public:

  void asm_assert_mem8_is_zero(int mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  8, mem_offset, mem_base, msg, id);
  }
  void asm_assert_mem8_isnot_zero(int mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, 8, mem_offset, mem_base, msg, id);
  }

  // Verify R16_thread contents.
  void verify_thread();

  // Emit code to verify that reg contains a valid oop if +VerifyOops is set.
  void verify_oop(Register reg, const char* s = "broken oop");
  void verify_oop_addr(RegisterOrConstant offs, Register base, const char* s = "contains broken oop");

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

  // Convenience method returning function entry. For the ELFv1 case, creates
  // a function descriptor at the current address and returns the pointer to
  // it. For the ELFv2 case, returns the current address.
  inline address function_entry();

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

 private:

  enum {
    stop_stop                = 0,
    stop_untested            = 1,
    stop_unimplemented       = 2,
    stop_shouldnotreachhere  = 3,
    stop_end                 = 4
  };
  void stop(int type, const char* msg, int id);

 public:
  // Prints msg, dumps registers and stops execution.
  void stop         (const char* msg = "", int id = 0) { stop(stop_stop,               msg, id); }
  void untested     (const char* msg = "", int id = 0) { stop(stop_untested,           msg, id); }
  void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented,      msg, id); }
  void should_not_reach_here()                         { stop(stop_shouldnotreachhere,  "", -1); }

  void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
};

// class SkipIfEqualZero:
//
// Instantiating this class will result in assembly code being output that will
// jump around any code emitted between the creation of the instance and its
// automatic destruction at the end of a scope block, depending on the value of
// the flag passed to the constructor, which will be checked at run-time.
class SkipIfEqualZero : public StackObj {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
   // 'Temp' is a temp register that this object can use (and trash).
   explicit SkipIfEqualZero(MacroAssembler*, Register temp, const bool* flag_addr);
   ~SkipIfEqualZero();
};

#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_HPP