macroAssembler_ppc.hpp revision 7575:a7fd2288ce2f
/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
#define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP

#include "asm/assembler.hpp"
#include "utilities/macros.hpp"

// MacroAssembler extends Assembler by a few frequently used macros.

class ciTypeArray;

class MacroAssembler: public Assembler {
 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  //
  // Optimized instruction emitters
  //

  inline static int largeoffset_si16_si16_hi(int si31) { return (si31 + (1<<15)) >> 16; }
  inline static int largeoffset_si16_si16_lo(int si31) { return si31 - (((si31 + (1<<15)) >> 16) << 16); }
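
  // Worked example of the hi/lo split: for si31 = 0x12348765,
  //   hi = (0x12348765 + 0x8000) >> 16 = 0x1235
  //   lo = 0x12348765 - (0x1235 << 16) = -0x789B
  // so that (hi << 16) + lo == si31 and lo fits into a signed 16-bit
  // immediate, as required by addis/addi-style instruction pairs.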

  // load d = *[a+si31]
  // Emits several instructions if the offset is not encodable in one instruction.
  void ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop);
  void ld_largeoffset          (Register d, int si31, Register a, int emit_filler_nop);
  inline static bool is_ld_largeoffset(address a);
  inline static int get_ld_largeoffset_offset(address a);

  inline void round_to(Register r, int modulus);

  // Load/store with type given by parameter.
  void load_sized_value( Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes, bool is_signed);
  void store_sized_value(Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes);

  // Move register if destination register and target register are different.
  inline void mr_if_needed(Register rd, Register rs);
  inline void fmr_if_needed(FloatRegister rd, FloatRegister rs);
  // This is dedicated for emitting scheduled mach nodes. For better
  // readability of the ad file I put it here.
  // Endgroups are not needed if
  //  - the scheduler is off
  //  - the scheduler found that there is a natural group end, in which
  //    case it reduced the size of the instruction used in the test,
  //    yielding 'needed'.
  inline void endgroup_if_needed(bool needed);

  // Memory barriers.
  inline void membar(int bits);
  inline void release();
  inline void acquire();
  inline void fence();

  // nop padding
  void align(int modulus, int max = 252, int rem = 0);

  //
  // Constants, loading constants, TOC support
  //

  // Address of the global TOC.
  inline static address global_toc();
  // Offset of given address to the global TOC.
  inline static int offset_to_global_toc(const address addr);

  // Address of TOC of the current method.
  inline address method_toc();
  // Offset of given address to TOC of the current method.
  inline int offset_to_method_toc(const address addr);

  // Global TOC.
  void calculate_address_from_global_toc(Register dst, address addr,
                                         bool hi16 = true, bool lo16 = true,
                                         bool add_relocation = true, bool emit_dummy_addr = false);
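  // Illustratively, the emitted sequence is an addis/addi pair whose
  // immediates come from the hi/lo split shown above (a sketch; register
  // names are generic):
  //   addis dst, <global toc reg>, offset_hi16
  //   addi  dst, dst,              offset_lo16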
  inline void calculate_address_from_global_toc_hi16only(Register dst, address addr) {
    calculate_address_from_global_toc(dst, addr, true, false);
  }
  inline void calculate_address_from_global_toc_lo16only(Register dst, address addr) {
    calculate_address_from_global_toc(dst, addr, false, true);
  }

  inline static bool is_calculate_address_from_global_toc_at(address a, address bound);
  static int patch_calculate_address_from_global_toc_at(address a, address addr, address bound);
  static address get_address_of_calculate_address_from_global_toc_at(address a, address addr);

#ifdef _LP64
  // Patch narrow oop constant.
  inline static bool is_set_narrow_oop(address a, address bound);
  static int patch_set_narrow_oop(address a, address bound, narrowOop data);
  static narrowOop get_narrow_oop(address a, address bound);
#endif

  inline static bool is_load_const_at(address a);

  // Emits an oop constant to the constant pool, loads the constant, and
  // sets relocation info with address current_pc.
  void load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc);
  void load_toc_from_toc(Register dst, AddressLiteral& a, Register toc) {
    assert(dst == R2_TOC, "base register must be TOC");
    load_const_from_method_toc(dst, a, toc);
  }

  static bool is_load_const_from_method_toc_at(address a);
  static int get_offset_of_load_const_from_method_toc_at(address a);

  // Get the 64 bit constant from a `load_const' sequence.
  static long get_const(address load_const);

  // Patch the 64 bit constant of a `load_const' sequence. This is a
  // low level procedure. It neither flushes the instruction cache nor
  // is it atomic.
  static void patch_const(address load_const, long x);
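
  // For orientation, a `load_const' materializes a 64-bit immediate with a
  // five-instruction sequence along the lines of (a sketch; see load_const
  // in the implementation for the authoritative encoding):
  //   lis  d, hi16(upper32);    ori d, d, lo16(upper32);
  //   sldi d, d, 32;
  //   oris d, d, hi16(lower32); ori d, d, lo16(lower32);
  // get_const() and patch_const() read and rewrite the four 16-bit
  // immediates of such a sequence.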

  // Metadata in code that we have to keep track of.
  AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
  AddressLiteral constant_metadata_address(Metadata* obj); // find_index
  // Oops used directly in compiled code are stored in the constant pool,
  // and loaded from there.
  // Allocate new entry for oop in constant pool. Generate relocation.
  AddressLiteral allocate_oop_address(jobject obj);
  // Find oop obj in constant pool. Return relocation with its index.
  AddressLiteral constant_oop_address(jobject obj);

  // Find oop in constant pool and emit instructions to load it.
  // Uses constant_oop_address.
  inline void set_oop_constant(jobject obj, Register d);
  // Same as load_address.
  inline void set_oop         (AddressLiteral obj_addr, Register d);

  // Read runtime constant:  Issue load if constant not yet established,
  // else use real constant.
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  //
  // branch, jump
  //

  inline void pd_patch_instruction(address branch, address target);
  NOT_PRODUCT(static void pd_print_patched_instruction(address branch);)

  // Conditional far branch for destinations encodable in 24+2 bits.
  // Same interface as bc, e.g. no inverse boint-field.
  enum {
    bc_far_optimize_not         = 0,
    bc_far_optimize_on_relocate = 1
  };
  // optimize: flag for telling the conditional far branch to optimize
  //           itself when relocated.
  void bc_far(int boint, int biint, Label& dest, int optimize);
  // Relocation of conditional far branches.
  static bool    is_bc_far_at(address instruction_addr);
  static address get_dest_of_bc_far_at(address instruction_addr);
  static void    set_dest_of_bc_far_at(address instruction_addr, address dest);
 private:
  static bool inline is_bc_far_variant1_at(address instruction_addr);
  static bool inline is_bc_far_variant2_at(address instruction_addr);
  static bool inline is_bc_far_variant3_at(address instruction_addr);
 public:

  // Convenience bc_far versions.
  inline void blt_far(ConditionRegister crx, Label& L, int optimize);
  inline void bgt_far(ConditionRegister crx, Label& L, int optimize);
  inline void beq_far(ConditionRegister crx, Label& L, int optimize);
  inline void bso_far(ConditionRegister crx, Label& L, int optimize);
  inline void bge_far(ConditionRegister crx, Label& L, int optimize);
  inline void ble_far(ConditionRegister crx, Label& L, int optimize);
  inline void bne_far(ConditionRegister crx, Label& L, int optimize);
  inline void bns_far(ConditionRegister crx, Label& L, int optimize);

  // Emit, identify and patch a NOT mt-safe patchable 64 bit absolute call/jump.
 private:
  enum {
    bxx64_patchable_instruction_count = (2/*load_codecache_const*/ + 3/*load_const*/ + 1/*mtctr*/ + 1/*bctrl*/),
    bxx64_patchable_size              = bxx64_patchable_instruction_count * BytesPerInstWord,
    bxx64_patchable_ret_addr_offset   = bxx64_patchable_size
  };
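  // For orientation: that is 7 instructions, i.e. 28 bytes with
  // BytesPerInstWord == 4 (the enum above is authoritative).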
  void bxx64_patchable(address target, relocInfo::relocType rt, bool link);
  static bool is_bxx64_patchable_at(            address instruction_addr, bool link);
  // Does the instruction use a pc-relative encoding of the destination?
  static bool is_bxx64_patchable_pcrelative_at( address instruction_addr, bool link);
  static bool is_bxx64_patchable_variant1_at(   address instruction_addr, bool link);
  // Load destination relative to global toc.
  static bool is_bxx64_patchable_variant1b_at(  address instruction_addr, bool link);
  static bool is_bxx64_patchable_variant2_at(   address instruction_addr, bool link);
  static void set_dest_of_bxx64_patchable_at(   address instruction_addr, address target, bool link);
  static address get_dest_of_bxx64_patchable_at(address instruction_addr, bool link);

 public:
  // call
  enum {
    bl64_patchable_instruction_count = bxx64_patchable_instruction_count,
    bl64_patchable_size              = bxx64_patchable_size,
    bl64_patchable_ret_addr_offset   = bxx64_patchable_ret_addr_offset
  };
  inline void bl64_patchable(address target, relocInfo::relocType rt) {
    bxx64_patchable(target, rt, /*link=*/true);
  }
  inline static bool is_bl64_patchable_at(address instruction_addr) {
    return is_bxx64_patchable_at(instruction_addr, /*link=*/true);
  }
  inline static bool is_bl64_patchable_pcrelative_at(address instruction_addr) {
    return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/true);
  }
  inline static void set_dest_of_bl64_patchable_at(address instruction_addr, address target) {
    set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/true);
  }
  inline static address get_dest_of_bl64_patchable_at(address instruction_addr) {
    return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/true);
  }
  // jump
  enum {
    b64_patchable_instruction_count = bxx64_patchable_instruction_count,
    b64_patchable_size              = bxx64_patchable_size,
  };
  inline void b64_patchable(address target, relocInfo::relocType rt) {
    bxx64_patchable(target, rt, /*link=*/false);
  }
  inline static bool is_b64_patchable_at(address instruction_addr) {
    return is_bxx64_patchable_at(instruction_addr, /*link=*/false);
  }
  inline static bool is_b64_patchable_pcrelative_at(address instruction_addr) {
    return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/false);
  }
  inline static void set_dest_of_b64_patchable_at(address instruction_addr, address target) {
    set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/false);
  }
  inline static address get_dest_of_b64_patchable_at(address instruction_addr) {
    return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/false);
  }

  //
  // Support for frame handling
  //

  // some ABI-related functions
  void save_nonvolatile_gprs(   Register dst_base, int offset);
  void restore_nonvolatile_gprs(Register src_base, int offset);
  void save_volatile_gprs(   Register dst_base, int offset);
  void restore_volatile_gprs(Register src_base, int offset);
  void save_LR_CR(   Register tmp);     // tmp contains LR on return.
  void restore_LR_CR(Register tmp);

  // Get current PC using bl-next-instruction trick.
  address get_PC_trash_LR(Register result);

  // Resize current frame either relative to the current SP or to an absolute address.
  void resize_frame(Register offset, Register tmp);
  void resize_frame(int      offset, Register tmp);
  void resize_frame_absolute(Register addr, Register tmp1, Register tmp2);

  // Push a frame of size bytes.
  void push_frame(Register bytes, Register tmp);

  // Push a frame of size `bytes'. No abi space provided.
  void push_frame(unsigned int bytes, Register tmp);

  // Push a frame of size `bytes' plus abi_reg_args on top.
  void push_frame_reg_args(unsigned int bytes, Register tmp);

  // Set up a new C frame with a spill area for non-volatile GPRs and additional
  // space for local variables.
  void push_frame_reg_args_nonvolatiles(unsigned int bytes, Register tmp);

  // pop current C frame
  void pop_frame();
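
  // Illustrative use of the frame helpers in a stub (a sketch, not code from
  // this file; 'tmp' stands for any free scratch register):
  //   __ save_LR_CR(tmp);               // keep LR/CR across the call
  //   __ push_frame_reg_args(0, tmp);   // frame with ABI argument area
  //   // ... call out, e.g. via call_c (see below) ...
  //   __ pop_frame();
  //   __ restore_LR_CR(tmp);
  //   __ blr();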

  //
  // Calls
  //

 private:
  address _last_calls_return_pc;

#if defined(ABI_ELFv2)
  // Generic version of a call to a C function.
  // Updates and returns _last_calls_return_pc.
  address branch_to(Register function_entry, bool and_link);
#else
  // Generic version of a call to a C function via a function descriptor
  // with variable support for C calling conventions (TOC, ENV, etc.).
  // Updates and returns _last_calls_return_pc.
  address branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
                    bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee);
#endif

 public:

  // Get the pc where the last call will return to. Returns _last_calls_return_pc.
  inline address last_calls_return_pc();

#if defined(ABI_ELFv2)
  // Call a C function via its entry point and use full C
  // calling conventions. Updates and returns _last_calls_return_pc.
  address call_c(Register function_entry);
  // For tail calls: only branch, don't link, so callee returns to caller of this function.
  address call_c_and_return_to_caller(Register function_entry);
  address call_c(address function_entry, relocInfo::relocType rt);
#else
  // Call a C function via a function descriptor and use full C
  // calling conventions. Updates and returns _last_calls_return_pc.
  address call_c(Register function_descriptor);
  // For tail calls: only branch, don't link, so callee returns to caller of this function.
  address call_c_and_return_to_caller(Register function_descriptor);
  address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt);
  address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt,
                           Register toc);
#endif

 protected:

  // It is imperative that all calls into the VM are handled via the
  // call_VM macros. They make sure that the stack linkage is set up
  // correctly. call_VM's correspond to ENTRY/ENTRY_X entry points
  // while call_VM_leaf's correspond to LEAF entry points.
  //
  // This is the base routine called by the different versions of
  // call_VM. The interpreter may customize this version by overriding
  // it for its purposes (e.g., to save/restore additional registers
  // when doing a VM call).
  //
  // If no last_java_sp is specified (noreg) then SP will be used instead.
  virtual void call_VM_base(
    // where an oop-result ends up if any; use noreg otherwise
    Register        oop_result,
    // to set up last_Java_frame in stubs; use noreg otherwise
    Register        last_java_sp,
    // the entry point
    address         entry_point,
    // flag which indicates if exception should be checked
    bool            check_exception = true
  );

  // Support for VM calls. This is the base routine called by the
  // different versions of call_VM_leaf. The interpreter may customize
  // this version by overriding it for its purposes (e.g., to
  // save/restore additional registers when doing a VM call).
  void call_VM_leaf_base(address entry_point);

 public:
  // Call into the VM.
  // Passes the thread pointer (in R3_ARG1) as a prepended argument.
  // Makes sure oop return values are visible to the GC.
  void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void call_VM_leaf(address entry_point);
  void call_VM_leaf(address entry_point, Register arg_1);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
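
  // Illustrative call_VM use (a sketch; the entry point name and argument
  // register are placeholders, CAST_FROM_FN_PTR is the usual HotSpot helper):
  //   __ call_VM(R3_RET,
  //              CAST_FROM_FN_PTR(address, InterpreterRuntime::some_entry),
  //              Rarg1);
  // The thread pointer is prepended automatically, so the C++ entry point
  // sees (JavaThread*, arg_1).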

  // Call a stub function via a function descriptor, but don't save
  // TOC before the call, don't set up TOC and ENV for the call, and don't
  // restore TOC after the call. Updates and returns _last_calls_return_pc.
  inline address call_stub(Register function_entry);
  inline void call_stub_and_return_to(Register function_entry, Register return_pc);

  //
  // Java utilities
  //

  // Read from the polling page; its address is already in a register.
  inline void load_from_polling_page(Register polling_page_address, int offset = 0);
  // Check whether instruction is a read access to the polling page
  // which was emitted by load_from_polling_page(..).
  static bool is_load_from_polling_page(int instruction, void* ucontext/*may be NULL*/,
                                        address* polling_address_ptr = NULL);

  // Check whether instruction is a write access to the memory
  // serialization page realized by one of the instructions stw, stwu,
  // stwx, or stwux.
  static bool is_memory_serialization(int instruction, JavaThread* thread, void* ucontext);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  // Stack overflow checking
  void bang_stack_with_offset(int offset);

  // If instruction is a stack bang of the form std, stdu, or
  // stdux, return the banged address. Otherwise, return 0.
  static address get_stack_bang_address(int instruction, void* ucontext);

  // Atomics
  // CmpxchgX sets condition register to cmpX(current, compare).
  // (flag == ne) => (dest_current_value != compare_value), (!swapped)
  // (flag == eq) => (dest_current_value == compare_value), ( swapped)
  static inline bool cmpxchgx_hint_acquire_lock()  { return true; }
  // The stxcx. will probably not be followed by a releasing store.
  static inline bool cmpxchgx_hint_release_lock()  { return false; }
  static inline bool cmpxchgx_hint_atomic_update() { return false; }

  // Cmpxchg semantics
  enum {
    MemBarNone = 0,
    MemBarRel  = 1,
    MemBarAcq  = 2,
    MemBarFenceAfter = 4 // use powers of 2
  };
  void cmpxchgw(ConditionRegister flag,
                Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
                int semantics, bool cmpxchgx_hint = false,
                Register int_flag_success = noreg, bool contention_hint = false);
  void cmpxchgd(ConditionRegister flag,
                Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
                int semantics, bool cmpxchgx_hint = false,
                Register int_flag_success = noreg, Label* failed = NULL, bool contention_hint = false);
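
  // Illustrative cmpxchgd use (a sketch; register names are placeholders):
  //   // Install Rnew in [Raddr] iff [Raddr] still contains Rold; behaves as
  //   // a compare-and-swap with release/acquire semantics.
  //   __ cmpxchgd(CCR0, Rcur, Rold, Rnew, Raddr,
  //               MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
  //               MacroAssembler::cmpxchgx_hint_atomic_update());
  //   __ bne(CCR0, L_cas_failed);  // Rcur holds the observed value on failure.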

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register temp_reg, Register temp2_reg,
                               Label& no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp1_reg and temp2_reg.
  // If super_check_offset is not -1, temp2_reg is not used and can be noreg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp1_reg,
                                     Register temp2_reg,
                                     Label& L_success,
                                     Label& L_failure);

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp1_reg can be noreg, if no temps are available.
  // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
  // Updates the sub's secondary super cache as necessary.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp1_reg,
                                     Register temp2_reg,
                                     Label* L_success = NULL,
                                     Register result_reg = noreg);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp1_reg,
                           Register temp2_reg,
                           Label& L_success);
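
  // Illustrative use of the combined version (a sketch; registers and labels
  // are placeholders):
  //   Label L_ok;
  //   __ check_klass_subtype(Rsub_klass, Rsuper_klass, Rtmp1, Rtmp2, L_ok);
  //   // ... falls through here on failure ...
  //   __ b(L_failure_stub);
  //   __ bind(L_ok);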

  // Method handle support (JSR 292).
  void check_method_handle_type(Register mtype_reg, Register mh_reg, Register temp_reg, Label& wrong_method_type);

  RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, Register temp_reg, int extra_slot_offset = 0);

  // Biased locking support
  // Upon entry, obj_reg must contain the target object, and mark_reg
  // must contain the target object's header.
  // Destroys mark_reg if an attempt is made to bias an anonymously
  // biased lock. In this case a failure will go either to the slow
  // case or fall through with the notEqual condition code set with
  // the expectation that the slow case in the runtime will be called.
  // In the fall-through case where the CAS-based lock is done,
  // mark_reg is not destroyed.
  void biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, Register mark_reg, Register temp_reg,
                            Register temp2_reg, Label& done, Label* slow_case = NULL);
  // Upon entry, the base register of mark_addr must contain the oop.
  // Destroys temp_reg.
  void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done);

  void compiler_fast_lock_object(  ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);
  void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);

  // Support for serializing memory accesses between threads
  void serialize_memory(Register thread, Register tmp1, Register tmp2);

  // GC barrier support.
  void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp);
  void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj);

#if INCLUDE_ALL_GCS
  // General G1 pre-barrier generator.
  void g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
                            Register Rtmp1, Register Rtmp2, bool needs_frame = false);
  // General G1 post-barrier generator.
  void g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1,
                             Register Rtmp2, Register Rtmp3, Label *filtered_ext = NULL);
#endif

  // Support for managing the JavaThread pointer (i.e., the reference to
  // thread-local information).

  // Support for last Java frame (but use call_VM instead where possible):
  // access R16_thread->last_Java_sp.
  void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
  void reset_last_Java_frame(void);
  void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);

  // Read vm result from thread: oop_result = R16_thread->result;
  void get_vm_result  (Register oop_result);
  void get_vm_result_2(Register metadata_result);

  static bool needs_explicit_null_check(intptr_t offset);

  // Trap-instruction-based checks.
  // Range checks compare only 32 bits (tw, twi), while zero checks compare
  // all 64 bits (td, tdi); this is how the two kinds are distinguished.
  inline void trap_null_check(Register a, trap_to_bits cmp = traptoEqual);
  static bool is_trap_null_check(int x) {
    return is_tdi(x, traptoEqual,               -1/*any reg*/, 0) ||
           is_tdi(x, traptoGreaterThanUnsigned, -1/*any reg*/, 0);
  }

  inline void trap_zombie_not_entrant();
  static bool is_trap_zombie_not_entrant(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 1); }

  inline void trap_should_not_reach_here();
  static bool is_trap_should_not_reach_here(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 2); }

  inline void trap_ic_miss_check(Register a, Register b);
  static bool is_trap_ic_miss_check(int x) {
    return is_td(x, traptoGreaterThanUnsigned | traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/);
  }

  // Implicit or explicit null check; jumps to the static address exception_entry.
  inline void null_check_throw(Register a, int offset, Register temp_reg, address exception_entry);

  // Check accessed object for null. Use SIGTRAP-based null checks on AIX.
  inline void load_with_trap_null_check(Register d, int si16, Register s1);

  // Load heap oop and decompress. Loaded oop may not be null.
  inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg);
  inline void store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1,
                                      /*specify if d must stay uncompressed*/ Register tmp = noreg);

  // Null allowed.
  inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg);

  // Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
  inline Register encode_heap_oop_not_null(Register d, Register src = noreg);
  inline void decode_heap_oop_not_null(Register d);

  // Null allowed.
  inline void decode_heap_oop(Register d);

  // Load/Store klass oop from klass field. Compress.
  void load_klass(Register dst, Register src);
  void load_klass_with_trap_null_check(Register dst, Register src);
  void store_klass(Register dst_oop, Register klass, Register tmp = R0);
  void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.
  static int instr_size_for_decode_klass_not_null();
  void decode_klass_not_null(Register dst, Register src = noreg);
  void encode_klass_not_null(Register dst, Register src = noreg);

  // Load common heap base into register.
  void reinit_heapbase(Register d, Register tmp = noreg);

  // SIGTRAP-based range checks for arrays.
  inline void trap_range_check_l(Register a, Register b);
  inline void trap_range_check_l(Register a, int si16);
  static bool is_trap_range_check_l(int x) {
    return (is_tw (x, traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
            is_twi(x, traptoLessThanUnsigned, -1/*any reg*/)                  );
  }
  inline void trap_range_check_le(Register a, int si16);
  static bool is_trap_range_check_le(int x) {
    return is_twi(x, traptoEqual | traptoLessThanUnsigned, -1/*any reg*/);
  }
  inline void trap_range_check_g(Register a, int si16);
  static bool is_trap_range_check_g(int x) {
    return is_twi(x, traptoGreaterThanUnsigned, -1/*any reg*/);
  }
  inline void trap_range_check_ge(Register a, Register b);
  inline void trap_range_check_ge(Register a, int si16);
  static bool is_trap_range_check_ge(int x) {
    return (is_tw (x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
            is_twi(x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/)                  );
  }
  static bool is_trap_range_check(int x) {
    return is_trap_range_check_l(x) || is_trap_range_check_le(x) ||
           is_trap_range_check_g(x) || is_trap_range_check_ge(x);
  }
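
  // These trap instructions raise SIGTRAP at run time; the platform signal
  // handler is expected to use the is_trap_* predicates above to map a
  // trapping instruction back to the Java-level check it implements (a
  // sketch of the intended protocol; see the signal handler for details).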

  void clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp = R0);

  // Needle of length 1.
  void string_indexof_1(Register result, Register haystack, Register haycnt,
                        Register needle, jchar needleChar,
                        Register tmp1, Register tmp2);
  // General indexof, optionally with a constant needle length.
  void string_indexof(Register result, Register haystack, Register haycnt,
                      Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
                      Register tmp1, Register tmp2, Register tmp3, Register tmp4);
  void string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
                      Register result_reg, Register tmp_reg);
  void char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
                          Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
                          Register tmp5_reg);
  void char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
                             Register tmp1_reg, Register tmp2_reg);

  //
  // Debugging
  //

  // assert on cr0
  void asm_assert(bool check_equal, const char* msg, int id);
  void asm_assert_eq(const char* msg, int id) { asm_assert(true, msg, id); }
  void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); }

 private:
  void asm_assert_mems_zero(bool check_equal, int size, int mem_offset, Register mem_base,
                            const char* msg, int id);

 public:

  void asm_assert_mem8_is_zero(int mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  8, mem_offset, mem_base, msg, id);
  }
  void asm_assert_mem8_isnot_zero(int mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, 8, mem_offset, mem_base, msg, id);
  }

  // Verify R16_thread contents.
  void verify_thread();

  // Emit code to verify that reg contains a valid oop if +VerifyOops is set.
  void verify_oop(Register reg, const char* s = "broken oop");

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

  // Convenience method returning the function entry. For the ELFv1 case it
  // creates a function descriptor at the current address and returns
  // a pointer to it. For the ELFv2 case it returns the current address.
  inline address function_entry();

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

 private:

  enum {
    stop_stop                = 0,
    stop_untested            = 1,
    stop_unimplemented       = 2,
    stop_shouldnotreachhere  = 3,
    stop_end                 = 4
  };
  void stop(int type, const char* msg, int id);

 public:
  // Prints msg, dumps registers and stops execution.
  void stop         (const char* msg = "", int id = 0) { stop(stop_stop,               msg, id); }
  void untested     (const char* msg = "", int id = 0) { stop(stop_untested,           msg, id); }
  void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented,      msg, id); }
  void should_not_reach_here()                         { stop(stop_shouldnotreachhere,  "", -1); }

  void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
};

// class SkipIfEqualZero:
//
// Instantiating this class emits assembly code that jumps around any code
// emitted between the creation of the instance and its automatic
// destruction at the end of a scope block, depending on the run-time value
// of the flag passed to the constructor.
class SkipIfEqualZero : public StackObj {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
   // 'Temp' is a temp register that this object can use (and trash).
   explicit SkipIfEqualZero(MacroAssembler*, Register temp, const bool* flag_addr);
   ~SkipIfEqualZero();
};
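
// Illustrative use (a sketch; 'SomeBoolFlag' stands for any global bool and
// R11_scratch1 for any free temp register):
//   {
//     SkipIfEqualZero skip(masm, R11_scratch1, &SomeBoolFlag);
//     // code here is jumped over when SomeBoolFlag == 0
//   } // the destructor binds the skip target here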

#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_HPP