nativeInst_ppc.hpp revision 9751:4a24de859a87
1/*
2 * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
3 * Copyright 2012, 2015 SAP AG. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26#ifndef CPU_PPC_VM_NATIVEINST_PPC_HPP
27#define CPU_PPC_VM_NATIVEINST_PPC_HPP
28
29#include "asm/assembler.hpp"
30#include "asm/macroAssembler.hpp"
31#include "memory/allocation.hpp"
32#include "runtime/icache.hpp"
33#include "runtime/os.hpp"
34#include "utilities/top.hpp"
35
// We have interfaces for the following instructions:
//
// - NativeInstruction
//   - NativeCall
//   - NativeFarCall
//   - NativeMovConstReg
//   - NativeJump
//   - NativeGeneralJump
//   - NativeMovRegMem
//   - NativeIllegalInstruction
//   - NativeConditionalFarBranch
//   - NativeCallTrampolineStub
46
47// The base class for different kinds of native instruction abstractions.
48// It provides the primitive operations to manipulate code relative to this.
49class NativeInstruction VALUE_OBJ_CLASS_SPEC {
50  friend class Relocation;
51
52 public:
53  bool is_jump() { return Assembler::is_b(long_at(0)); } // See NativeGeneralJump.
54
55  bool is_sigtrap_ic_miss_check() {
56    assert(UseSIGTRAP, "precondition");
57    return MacroAssembler::is_trap_ic_miss_check(long_at(0));
58  }
59
60  bool is_sigtrap_null_check() {
61    assert(UseSIGTRAP && TrapBasedNullChecks, "precondition");
62    return MacroAssembler::is_trap_null_check(long_at(0));
63  }
64
65  // We use a special trap for marking a method as not_entrant or zombie
66  // iff UseSIGTRAP.
67  bool is_sigtrap_zombie_not_entrant() {
68    assert(UseSIGTRAP, "precondition");
69    return MacroAssembler::is_trap_zombie_not_entrant(long_at(0));
70  }
71
72  // We use an illtrap for marking a method as not_entrant or zombie
73  // iff !UseSIGTRAP.
74  bool is_sigill_zombie_not_entrant() {
75    assert(!UseSIGTRAP, "precondition");
76    // Work around a C++ compiler bug which changes 'this'.
77    return NativeInstruction::is_sigill_zombie_not_entrant_at(addr_at(0));
78  }
79  static bool is_sigill_zombie_not_entrant_at(address addr);
80
81#ifdef COMPILER2
82  // SIGTRAP-based implicit range checks
83  bool is_sigtrap_range_check() {
84    assert(UseSIGTRAP && TrapBasedRangeChecks, "precondition");
85    return MacroAssembler::is_trap_range_check(long_at(0));
86  }
87#endif
88
89  // 'should not reach here'.
90  bool is_sigtrap_should_not_reach_here() {
91    return MacroAssembler::is_trap_should_not_reach_here(long_at(0));
92  }
93
94  bool is_safepoint_poll() {
95    // Is the current instruction a POTENTIAL read access to the polling page?
96    // The current arguments of the instruction are not checked!
97    return MacroAssembler::is_load_from_polling_page(long_at(0), NULL);
98  }
99
100  bool is_memory_serialization(JavaThread *thread, void *ucontext) {
101    // Is the current instruction a write access of thread to the
102    // memory serialization page?
103    return MacroAssembler::is_memory_serialization(long_at(0), thread, ucontext);
104  }
105
106  address get_stack_bang_address(void *ucontext) {
107    // If long_at(0) is not a stack bang, return 0. Otherwise, return
108    // banged address.
109    return MacroAssembler::get_stack_bang_address(long_at(0), ucontext);
110  }
111
112 protected:
113  address  addr_at(int offset) const    { return address(this) + offset; }
114  int      long_at(int offset) const    { return *(int*)addr_at(offset); }
115
116 public:
117  void verify() NOT_DEBUG_RETURN;
118};
119
120inline NativeInstruction* nativeInstruction_at(address address) {
121  NativeInstruction* inst = (NativeInstruction*)address;
122  inst->verify();
123  return inst;
124}
125
126// The NativeCall is an abstraction for accessing/manipulating call
127// instructions. It is used to manipulate inline caches, primitive &
128// dll calls, etc.
129//
130// Sparc distinguishes `NativeCall' and `NativeFarCall'. On PPC64,
131// at present, we provide a single class `NativeCall' representing the
132// sequence `load_const, mtctr, bctrl' or the sequence 'ld_from_toc,
133// mtctr, bctrl'.
134class NativeCall: public NativeInstruction {
135 public:
136
137  enum ppc_specific_constants {
138    load_const_instruction_size                 = 28,
139    load_const_from_method_toc_instruction_size = 16,
140    instruction_size                            = 16 // Used in shared code for calls with reloc_info.
141  };
142
143  static bool is_call_at(address a) {
144    return Assembler::is_bl(*(int*)(a));
145  }
146
147  static bool is_call_before(address return_address) {
148    return NativeCall::is_call_at(return_address - 4);
149  }
150
151  address instruction_address() const {
152    return addr_at(0);
153  }
154
155  address next_instruction_address() const {
156    // We have only bl.
157    assert(MacroAssembler::is_bl(*(int*)instruction_address()), "Should be bl instruction!");
158    return addr_at(4);
159  }
160
161  address return_address() const {
162    return next_instruction_address();
163  }
164
165  address destination() const;
166
167  // The parameter assert_lock disables the assertion during code generation.
168  void set_destination_mt_safe(address dest, bool assert_lock = true);
169
170  address get_trampoline();
171
172  void verify_alignment() {} // do nothing on ppc
173  void verify() NOT_DEBUG_RETURN;
174};
175
176inline NativeCall* nativeCall_at(address instr) {
177  NativeCall* call = (NativeCall*)instr;
178  call->verify();
179  return call;
180}
181
182inline NativeCall* nativeCall_before(address return_address) {
183  NativeCall* call = NULL;
184  if (MacroAssembler::is_bl(*(int*)(return_address - 4)))
185    call = (NativeCall*)(return_address - 4);
186  call->verify();
187  return call;
188}
189
190// The NativeFarCall is an abstraction for accessing/manipulating native
191// call-anywhere instructions.
192// Used to call native methods which may be loaded anywhere in the address
193// space, possibly out of reach of a call instruction.
194class NativeFarCall: public NativeInstruction {
195 public:
196  // We use MacroAssembler::bl64_patchable() for implementing a
197  // call-anywhere instruction.
198
199  // Checks whether instr points at a NativeFarCall instruction.
200  static bool is_far_call_at(address instr) {
201    return MacroAssembler::is_bl64_patchable_at(instr);
202  }
203
204  // Does the NativeFarCall implementation use a pc-relative encoding
205  // of the call destination?
206  // Used when relocating code.
207  bool is_pcrelative() {
208    assert(MacroAssembler::is_bl64_patchable_at((address)this),
209           "unexpected call type");
210    return MacroAssembler::is_bl64_patchable_pcrelative_at((address)this);
211  }
212
213  // Returns the NativeFarCall's destination.
214  address destination() const {
215    assert(MacroAssembler::is_bl64_patchable_at((address)this),
216           "unexpected call type");
217    return MacroAssembler::get_dest_of_bl64_patchable_at((address)this);
218  }
219
220  // Sets the NativeCall's destination, not necessarily mt-safe.
221  // Used when relocating code.
222  void set_destination(address dest) {
223    // Set new destination (implementation of call may change here).
224    assert(MacroAssembler::is_bl64_patchable_at((address)this),
225           "unexpected call type");
226    MacroAssembler::set_dest_of_bl64_patchable_at((address)this, dest);
227  }
228
229  void verify() NOT_DEBUG_RETURN;
230};
231
232// Instantiates a NativeFarCall object starting at the given instruction
233// address and returns the NativeFarCall object.
234inline NativeFarCall* nativeFarCall_at(address instr) {
235  NativeFarCall* call = (NativeFarCall*)instr;
236  call->verify();
237  return call;
238}
239
// An interface for accessing/manipulating native set_oop imm, reg instructions
// (used to manipulate inlined data references, etc.).
class NativeMovConstReg: public NativeInstruction {
 public:

  enum ppc_specific_constants {
    // Size in bytes of the long 'load_const' sequence.
    load_const_instruction_size                 = 20,
    // Size in bytes of the short TOC-based form ('ld_from_toc').
    load_const_from_method_toc_instruction_size =  8,
    instruction_size                            =  8 // Used in shared code for calls with reloc_info.
  };

  address instruction_address() const {
    return addr_at(0);
  }

  // Address of the instruction following the constant-load sequence;
  // depends on which sequence form is present (defined in the .cpp file).
  address next_instruction_address() const;

  // (The [set_]data accessor respects oop_type relocs also.)
  intptr_t data() const;

  // Patch the code stream.
  address set_data_plain(intptr_t x, CodeBlob *code);
  // Patch the code stream and oop pool.
  void set_data(intptr_t x);

  // Patch narrow oop constants. Use this also for narrow klass.
  void set_narrow_oop(narrowOop data, CodeBlob *code = NULL);

  void verify() NOT_DEBUG_RETURN;
};
270
271inline NativeMovConstReg* nativeMovConstReg_at(address address) {
272  NativeMovConstReg* test = (NativeMovConstReg*)address;
273  test->verify();
274  return test;
275}
276
277// The NativeJump is an abstraction for accessing/manipulating native
278// jump-anywhere instructions.
279class NativeJump: public NativeInstruction {
280 public:
281  // We use MacroAssembler::b64_patchable() for implementing a
282  // jump-anywhere instruction.
283
284  enum ppc_specific_constants {
285    instruction_size = MacroAssembler::b64_patchable_size
286  };
287
288  // Checks whether instr points at a NativeJump instruction.
289  static bool is_jump_at(address instr) {
290    return MacroAssembler::is_b64_patchable_at(instr)
291      || (   MacroAssembler::is_load_const_from_method_toc_at(instr)
292          && Assembler::is_mtctr(*(int*)(instr + 2 * 4))
293          && Assembler::is_bctr(*(int*)(instr + 3 * 4)));
294  }
295
296  // Does the NativeJump implementation use a pc-relative encoding
297  // of the call destination?
298  // Used when relocating code or patching jumps.
299  bool is_pcrelative() {
300    return MacroAssembler::is_b64_patchable_pcrelative_at((address)this);
301  }
302
303  // Returns the NativeJump's destination.
304  address jump_destination() const {
305    if (MacroAssembler::is_b64_patchable_at((address)this)) {
306      return MacroAssembler::get_dest_of_b64_patchable_at((address)this);
307    } else if (MacroAssembler::is_load_const_from_method_toc_at((address)this)
308               && Assembler::is_mtctr(*(int*)((address)this + 2 * 4))
309               && Assembler::is_bctr(*(int*)((address)this + 3 * 4))) {
310      return (address)((NativeMovConstReg *)this)->data();
311    } else {
312      ShouldNotReachHere();
313      return NULL;
314    }
315  }
316
317  // Sets the NativeJump's destination, not necessarily mt-safe.
318  // Used when relocating code or patching jumps.
319  void set_jump_destination(address dest) {
320    // Set new destination (implementation of call may change here).
321    if (MacroAssembler::is_b64_patchable_at((address)this)) {
322      MacroAssembler::set_dest_of_b64_patchable_at((address)this, dest);
323    } else if (MacroAssembler::is_load_const_from_method_toc_at((address)this)
324               && Assembler::is_mtctr(*(int*)((address)this + 2 * 4))
325               && Assembler::is_bctr(*(int*)((address)this + 3 * 4))) {
326      ((NativeMovConstReg *)this)->set_data((intptr_t)dest);
327    } else {
328      ShouldNotReachHere();
329    }
330  }
331
332  // MT-safe insertion of native jump at verified method entry
333  static void patch_verified_entry(address entry, address verified_entry, address dest);
334
335  void verify() NOT_DEBUG_RETURN;
336
337  static void check_verified_entry_alignment(address entry, address verified_entry) {
338    // We just patch one instruction on ppc64, so the jump doesn't have to
339    // be aligned. Nothing to do here.
340  }
341};
342
343// Instantiates a NativeJump object starting at the given instruction
344// address and returns the NativeJump object.
345inline NativeJump* nativeJump_at(address instr) {
346  NativeJump* call = (NativeJump*)instr;
347  call->verify();
348  return call;
349}
350
351// NativeConditionalFarBranch is abstraction for accessing/manipulating
352// conditional far branches.
353class NativeConditionalFarBranch : public NativeInstruction {
354 public:
355
356  static bool is_conditional_far_branch_at(address instr) {
357    return MacroAssembler::is_bc_far_at(instr);
358  }
359
360  address branch_destination() const {
361    return MacroAssembler::get_dest_of_bc_far_at((address)this);
362  }
363
364  void set_branch_destination(address dest) {
365    MacroAssembler::set_dest_of_bc_far_at((address)this, dest);
366  }
367};
368
369inline NativeConditionalFarBranch* NativeConditionalFarBranch_at(address address) {
370  assert(NativeConditionalFarBranch::is_conditional_far_branch_at(address),
371         "must be a conditional far branch");
372  return (NativeConditionalFarBranch*)address;
373}
374
// Call trampoline stubs.
class NativeCallTrampolineStub : public NativeInstruction {
 private:

  // Address of the location inside the stub that encodes the call
  // destination (defined in the .cpp file).
  address encoded_destination_addr() const;

 public:

  // Returns the destination currently encoded in the stub.
  address destination(nmethod *nm = NULL) const;
  // TOC offset of the encoded destination — presumably where the
  // destination is stored in the method's TOC; see the .cpp file.
  int destination_toc_offset() const;

  // Patches the stub so that it transfers control to new_destination.
  void set_destination(address new_destination);
};
388
389// Note: Other stubs must not begin with this pattern.
390inline bool is_NativeCallTrampolineStub_at(address address) {
391  int first_instr = *(int*)address;
392  // calculate_address_from_global_toc and long form of ld_largeoffset_unchecked begin with addis with target R12
393  if (Assembler::is_addis(first_instr) &&
394      (Register)(intptr_t)Assembler::inv_rt_field(first_instr) == R12_scratch2) return true;
395
396  // short form of ld_largeoffset_unchecked is ld which is followed by mtctr
397  int second_instr = *((int*)address + 1);
398  if (Assembler::is_ld(first_instr) &&
399      (Register)(intptr_t)Assembler::inv_rt_field(first_instr) == R12_scratch2 &&
400      Assembler::is_mtctr(second_instr) &&
401      (Register)(intptr_t)Assembler::inv_rs_field(second_instr) == R12_scratch2) return true;
402
403  return false;
404}
405
406inline NativeCallTrampolineStub* NativeCallTrampolineStub_at(address address) {
407  assert(is_NativeCallTrampolineStub_at(address), "no call trampoline found");
408  return (NativeCallTrampolineStub*)address;
409}
410
411///////////////////////////////////////////////////////////////////////////////////////////////////
412
413//-------------------------------------
414//  N a t i v e G e n e r a l J u m p
415//-------------------------------------
416
417// Despite the name, handles only simple branches.
418class NativeGeneralJump;
419inline NativeGeneralJump* nativeGeneralJump_at(address address);
420
421// Currently only implemented as single unconditional branch.
422class NativeGeneralJump: public NativeInstruction {
423 public:
424
425  enum PPC64_specific_constants {
426    instruction_size = 4
427  };
428
429  address instruction_address() const { return addr_at(0); }
430
431  // Creation.
432  friend inline NativeGeneralJump* nativeGeneralJump_at(address addr) {
433    NativeGeneralJump* jump = (NativeGeneralJump*)(addr);
434    DEBUG_ONLY( jump->verify(); )
435    return jump;
436  }
437
438  // Insertion of native general jump instruction.
439  static void insert_unconditional(address code_pos, address entry);
440
441  address jump_destination() const {
442    DEBUG_ONLY( verify(); )
443    return addr_at(0) + Assembler::inv_li_field(long_at(0));
444  }
445
446  void set_jump_destination(address dest) {
447    DEBUG_ONLY( verify(); )
448    insert_unconditional(addr_at(0), dest);
449  }
450
451  static void replace_mt_safe(address instr_addr, address code_buffer);
452
453  void verify() const { guarantee(Assembler::is_b(long_at(0)), "invalid NativeGeneralJump"); }
454};
455
456// An interface for accessing/manipulating native load int (load_const32).
457class NativeMovRegMem;
458inline NativeMovRegMem* nativeMovRegMem_at(address address);
459class NativeMovRegMem: public NativeInstruction {
460 public:
461
462  enum PPC64_specific_constants {
463    instruction_size = 8
464  };
465
466  address instruction_address() const { return addr_at(0); }
467
468  intptr_t offset() const {
469#ifdef VM_LITTLE_ENDIAN
470    short *hi_ptr = (short*)(addr_at(0));
471    short *lo_ptr = (short*)(addr_at(4));
472#else
473    short *hi_ptr = (short*)(addr_at(0) + 2);
474    short *lo_ptr = (short*)(addr_at(4) + 2);
475#endif
476    return ((*hi_ptr) << 16) | ((*lo_ptr) & 0xFFFF);
477  }
478
479  void set_offset(intptr_t x) {
480#ifdef VM_LITTLE_ENDIAN
481    short *hi_ptr = (short*)(addr_at(0));
482    short *lo_ptr = (short*)(addr_at(4));
483#else
484    short *hi_ptr = (short*)(addr_at(0) + 2);
485    short *lo_ptr = (short*)(addr_at(4) + 2);
486#endif
487    *hi_ptr = x >> 16;
488    *lo_ptr = x & 0xFFFF;
489    ICache::ppc64_flush_icache_bytes(addr_at(0), NativeMovRegMem::instruction_size);
490  }
491
492  void add_offset_in_bytes(intptr_t radd_offset) {
493    set_offset(offset() + radd_offset);
494  }
495
496  void verify() const {
497    guarantee(Assembler::is_lis(long_at(0)), "load_const32 1st instr");
498    guarantee(Assembler::is_ori(long_at(4)), "load_const32 2nd instr");
499  }
500
501 private:
502  friend inline NativeMovRegMem* nativeMovRegMem_at(address address) {
503    NativeMovRegMem* test = (NativeMovRegMem*)address;
504    DEBUG_ONLY( test->verify(); )
505    return test;
506  }
507};
508
509#endif // CPU_PPC_VM_NATIVEINST_PPC_HPP
510