1/*
2 * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26#ifndef CPU_PPC_VM_NATIVEINST_PPC_HPP
27#define CPU_PPC_VM_NATIVEINST_PPC_HPP
28
29#include "asm/assembler.hpp"
30#include "asm/macroAssembler.hpp"
31#include "memory/allocation.hpp"
32#include "runtime/icache.hpp"
33#include "runtime/os.hpp"
34
35// We have interfaces for the following instructions:
36//
37// - NativeInstruction
38//   - NativeCall
39//   - NativeFarCall
40//   - NativeMovConstReg
41//   - NativeJump
42//   - NativeIllegalInstruction
43//   - NativeConditionalFarBranch
44//   - NativeCallTrampolineStub
45
46// The base class for different kinds of native instruction abstractions.
47// It provides the primitive operations to manipulate code relative to this.
// The base class for different kinds of native instruction abstractions.
// It provides the primitive operations to manipulate code relative to this.
// Note: a NativeInstruction is never allocated; 'this' is simply the address
// of the instruction word in the code stream (see addr_at()).
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  bool is_jump() { return Assembler::is_b(long_at(0)); } // See NativeGeneralJump.

  // Trap instruction used for the SIGTRAP-based inline-cache miss check.
  bool is_sigtrap_ic_miss_check() {
    assert(UseSIGTRAP, "precondition");
    return MacroAssembler::is_trap_ic_miss_check(long_at(0));
  }

  // Trap instruction used for SIGTRAP-based implicit null checks.
  bool is_sigtrap_null_check() {
    assert(UseSIGTRAP && TrapBasedNullChecks, "precondition");
    return MacroAssembler::is_trap_null_check(long_at(0));
  }

  // We use a special trap for marking a method as not_entrant or zombie
  // iff UseSIGTRAP.
  bool is_sigtrap_zombie_not_entrant() {
    assert(UseSIGTRAP, "precondition");
    return MacroAssembler::is_trap_zombie_not_entrant(long_at(0));
  }

  // We use an illtrap for marking a method as not_entrant or zombie
  // iff !UseSIGTRAP.
  bool is_sigill_zombie_not_entrant() {
    assert(!UseSIGTRAP, "precondition");
    // Work around a C++ compiler bug which changes 'this'.
    return NativeInstruction::is_sigill_zombie_not_entrant_at(addr_at(0));
  }
  // Static variant of the check above; defined out of line.
  static bool is_sigill_zombie_not_entrant_at(address addr);

#ifdef COMPILER2
  // SIGTRAP-based implicit range checks
  bool is_sigtrap_range_check() {
    assert(UseSIGTRAP && TrapBasedRangeChecks, "precondition");
    return MacroAssembler::is_trap_range_check(long_at(0));
  }
#endif

  // 'should not reach here'.
  bool is_sigtrap_should_not_reach_here() {
    return MacroAssembler::is_trap_should_not_reach_here(long_at(0));
  }

  bool is_safepoint_poll() {
    // Is the current instruction a POTENTIAL read access to the polling page?
    // The current arguments of the instruction are not checked!
    return MacroAssembler::is_load_from_polling_page(long_at(0), NULL);
  }

  bool is_memory_serialization(JavaThread *thread, void *ucontext) {
    // Is the current instruction a write access of thread to the
    // memory serialization page?
    return MacroAssembler::is_memory_serialization(long_at(0), thread, ucontext);
  }

  address get_stack_bang_address(void *ucontext) {
    // If long_at(0) is not a stack bang, return 0. Otherwise, return
    // banged address.
    return MacroAssembler::get_stack_bang_address(long_at(0), ucontext);
  }

 protected:
  // Address of this instruction plus 'offset' bytes.
  address  addr_at(int offset) const    { return address(this) + offset; }
  // The 32-bit instruction word located 'offset' bytes from this instruction.
  int      long_at(int offset) const    { return *(int*)addr_at(offset); }

 public:
  void verify() NOT_DEBUG_RETURN;
};
118
119inline NativeInstruction* nativeInstruction_at(address address) {
120  NativeInstruction* inst = (NativeInstruction*)address;
121  inst->verify();
122  return inst;
123}
124
125// The NativeCall is an abstraction for accessing/manipulating call
126// instructions. It is used to manipulate inline caches, primitive &
127// dll calls, etc.
128//
129// Sparc distinguishes `NativeCall' and `NativeFarCall'. On PPC64,
130// at present, we provide a single class `NativeCall' representing the
131// sequence `load_const, mtctr, bctrl' or the sequence 'ld_from_toc,
132// mtctr, bctrl'.
class NativeCall: public NativeInstruction {
 public:

  enum ppc_specific_constants {
    // Size of the 'load_const, mtctr, bctrl' sequence.
    load_const_instruction_size                 = 28,
    // Size of the 'ld_from_toc, mtctr, bctrl' sequence.
    load_const_from_method_toc_instruction_size = 16,
    instruction_size                            = 16 // Used in shared code for calls with reloc_info.
  };

  // True if the instruction word at 'a' is a 'bl'.
  static bool is_call_at(address a) {
    return Assembler::is_bl(*(int*)(a));
  }

  // True if 'return_address' immediately follows a 'bl', i.e. if it is
  // the return address of a NativeCall.
  static bool is_call_before(address return_address) {
    return NativeCall::is_call_at(return_address - 4);
  }

  address instruction_address() const {
    return addr_at(0);
  }

  address next_instruction_address() const {
    // We have only bl.
    assert(MacroAssembler::is_bl(*(int*)instruction_address()), "Should be bl instruction!");
    return addr_at(4);
  }

  // The return address is the address of the instruction following the 'bl'.
  address return_address() const {
    return next_instruction_address();
  }

  // The call's destination; defined out of line.
  address destination() const;

  // The parameter assert_lock disables the assertion during code generation.
  void set_destination_mt_safe(address dest, bool assert_lock = true);

  // Locate the trampoline stub associated with this call; defined out of line.
  address get_trampoline();

  void verify_alignment() {} // do nothing on ppc
  void verify() NOT_DEBUG_RETURN;
};
174
175inline NativeCall* nativeCall_at(address instr) {
176  NativeCall* call = (NativeCall*)instr;
177  call->verify();
178  return call;
179}
180
181inline NativeCall* nativeCall_before(address return_address) {
182  NativeCall* call = NULL;
183  if (MacroAssembler::is_bl(*(int*)(return_address - 4)))
184    call = (NativeCall*)(return_address - 4);
185  call->verify();
186  return call;
187}
188
189// The NativeFarCall is an abstraction for accessing/manipulating native
190// call-anywhere instructions.
191// Used to call native methods which may be loaded anywhere in the address
192// space, possibly out of reach of a call instruction.
class NativeFarCall: public NativeInstruction {
 public:
  // We use MacroAssembler::bl64_patchable() for implementing a
  // call-anywhere instruction.

  // Checks whether instr points at a NativeFarCall instruction.
  static bool is_far_call_at(address instr) {
    return MacroAssembler::is_bl64_patchable_at(instr);
  }

  // Does the NativeFarCall implementation use a pc-relative encoding
  // of the call destination?
  // Used when relocating code.
  bool is_pcrelative() {
    assert(MacroAssembler::is_bl64_patchable_at((address)this),
           "unexpected call type");
    return MacroAssembler::is_bl64_patchable_pcrelative_at((address)this);
  }

  // Returns the NativeFarCall's destination.
  address destination() const {
    assert(MacroAssembler::is_bl64_patchable_at((address)this),
           "unexpected call type");
    return MacroAssembler::get_dest_of_bl64_patchable_at((address)this);
  }

  // Sets the NativeCall's destination, not necessarily mt-safe.
  // Used when relocating code.
  void set_destination(address dest) {
    // Set new destination (implementation of call may change here).
    assert(MacroAssembler::is_bl64_patchable_at((address)this),
           "unexpected call type");
    MacroAssembler::set_dest_of_bl64_patchable_at((address)this, dest);
  }

  void verify() NOT_DEBUG_RETURN;
};
230
231// Instantiates a NativeFarCall object starting at the given instruction
232// address and returns the NativeFarCall object.
233inline NativeFarCall* nativeFarCall_at(address instr) {
234  NativeFarCall* call = (NativeFarCall*)instr;
235  call->verify();
236  return call;
237}
238
239// An interface for accessing/manipulating native set_oop imm, reg instructions
240// (used to manipulate inlined data references, etc.).
class NativeMovConstReg: public NativeInstruction {
 public:

  enum ppc_specific_constants {
    // Size of a full 'load_const' sequence materializing the constant.
    load_const_instruction_size                 = 20,
    // Size of a TOC-based load of the constant.
    load_const_from_method_toc_instruction_size =  8,
    instruction_size                            =  8 // Used in shared code for calls with reloc_info.
  };

  address instruction_address() const {
    return addr_at(0);
  }

  // Address of the instruction following this sequence; defined out of line.
  address next_instruction_address() const;

  // (The [set_]data accessor respects oop_type relocs also.)
  intptr_t data() const;

  // Patch the code stream.
  address set_data_plain(intptr_t x, CodeBlob *code);
  // Patch the code stream and oop pool.
  void set_data(intptr_t x);

  // Patch narrow oop constants. Use this also for narrow klass.
  void set_narrow_oop(narrowOop data, CodeBlob *code = NULL);

  void verify() NOT_DEBUG_RETURN;
};
269
270inline NativeMovConstReg* nativeMovConstReg_at(address address) {
271  NativeMovConstReg* test = (NativeMovConstReg*)address;
272  test->verify();
273  return test;
274}
275
276// The NativeJump is an abstraction for accessing/manipulating native
277// jump-anywhere instructions.
278class NativeJump: public NativeInstruction {
279 public:
280  // We use MacroAssembler::b64_patchable() for implementing a
281  // jump-anywhere instruction.
282
283  enum ppc_specific_constants {
284    instruction_size = MacroAssembler::b64_patchable_size
285  };
286
287  // Checks whether instr points at a NativeJump instruction.
288  static bool is_jump_at(address instr) {
289    return MacroAssembler::is_b64_patchable_at(instr)
290      || (   MacroAssembler::is_load_const_from_method_toc_at(instr)
291          && Assembler::is_mtctr(*(int*)(instr + 2 * 4))
292          && Assembler::is_bctr(*(int*)(instr + 3 * 4)));
293  }
294
295  // Does the NativeJump implementation use a pc-relative encoding
296  // of the call destination?
297  // Used when relocating code or patching jumps.
298  bool is_pcrelative() {
299    return MacroAssembler::is_b64_patchable_pcrelative_at((address)this);
300  }
301
302  // Returns the NativeJump's destination.
303  address jump_destination() const {
304    if (MacroAssembler::is_b64_patchable_at((address)this)) {
305      return MacroAssembler::get_dest_of_b64_patchable_at((address)this);
306    } else if (MacroAssembler::is_load_const_from_method_toc_at((address)this)
307               && Assembler::is_mtctr(*(int*)((address)this + 2 * 4))
308               && Assembler::is_bctr(*(int*)((address)this + 3 * 4))) {
309      return (address)((NativeMovConstReg *)this)->data();
310    } else {
311      ShouldNotReachHere();
312      return NULL;
313    }
314  }
315
316  // Sets the NativeJump's destination, not necessarily mt-safe.
317  // Used when relocating code or patching jumps.
318  void set_jump_destination(address dest) {
319    // Set new destination (implementation of call may change here).
320    if (MacroAssembler::is_b64_patchable_at((address)this)) {
321      MacroAssembler::set_dest_of_b64_patchable_at((address)this, dest);
322    } else if (MacroAssembler::is_load_const_from_method_toc_at((address)this)
323               && Assembler::is_mtctr(*(int*)((address)this + 2 * 4))
324               && Assembler::is_bctr(*(int*)((address)this + 3 * 4))) {
325      ((NativeMovConstReg *)this)->set_data((intptr_t)dest);
326    } else {
327      ShouldNotReachHere();
328    }
329  }
330
331  // MT-safe insertion of native jump at verified method entry
332  static void patch_verified_entry(address entry, address verified_entry, address dest);
333
334  void verify() NOT_DEBUG_RETURN;
335
336  static void check_verified_entry_alignment(address entry, address verified_entry) {
337    // We just patch one instruction on ppc64, so the jump doesn't have to
338    // be aligned. Nothing to do here.
339  }
340};
341
342// Instantiates a NativeJump object starting at the given instruction
343// address and returns the NativeJump object.
344inline NativeJump* nativeJump_at(address instr) {
345  NativeJump* call = (NativeJump*)instr;
346  call->verify();
347  return call;
348}
349
350// NativeConditionalFarBranch is abstraction for accessing/manipulating
351// conditional far branches.
class NativeConditionalFarBranch : public NativeInstruction {
 public:

  // Checks whether 'instr' points at a far-reaching conditional branch
  // sequence (see MacroAssembler::is_bc_far_at).
  static bool is_conditional_far_branch_at(address instr) {
    return MacroAssembler::is_bc_far_at(instr);
  }

  // Returns the branch's target address.
  address branch_destination() const {
    return MacroAssembler::get_dest_of_bc_far_at((address)this);
  }

  // Patches the branch's target address
  // (see MacroAssembler::set_dest_of_bc_far_at).
  void set_branch_destination(address dest) {
    MacroAssembler::set_dest_of_bc_far_at((address)this, dest);
  }
};
367
368inline NativeConditionalFarBranch* NativeConditionalFarBranch_at(address address) {
369  assert(NativeConditionalFarBranch::is_conditional_far_branch_at(address),
370         "must be a conditional far branch");
371  return (NativeConditionalFarBranch*)address;
372}
373
374// Call trampoline stubs.
class NativeCallTrampolineStub : public NativeInstruction {
 private:

  // Address at which the destination is encoded; defined out of line.
  address encoded_destination_addr() const;

 public:

  // The trampoline's destination; defined out of line.
  address destination(nmethod *nm = NULL) const;
  // TOC offset of the encoded destination; defined out of line.
  int destination_toc_offset() const;

  // Patches the trampoline's destination; defined out of line.
  void set_destination(address new_destination);
};
387
388// Note: Other stubs must not begin with this pattern.
389inline bool is_NativeCallTrampolineStub_at(address address) {
390  int first_instr = *(int*)address;
391  // calculate_address_from_global_toc and long form of ld_largeoffset_unchecked begin with addis with target R12
392  if (Assembler::is_addis(first_instr) &&
393      (Register)(intptr_t)Assembler::inv_rt_field(first_instr) == R12_scratch2) return true;
394
395  // short form of ld_largeoffset_unchecked is ld which is followed by mtctr
396  int second_instr = *((int*)address + 1);
397  if (Assembler::is_ld(first_instr) &&
398      (Register)(intptr_t)Assembler::inv_rt_field(first_instr) == R12_scratch2 &&
399      Assembler::is_mtctr(second_instr) &&
400      (Register)(intptr_t)Assembler::inv_rs_field(second_instr) == R12_scratch2) return true;
401
402  return false;
403}
404
405inline NativeCallTrampolineStub* NativeCallTrampolineStub_at(address address) {
406  assert(is_NativeCallTrampolineStub_at(address), "no call trampoline found");
407  return (NativeCallTrampolineStub*)address;
408}
409
410///////////////////////////////////////////////////////////////////////////////////////////////////
411
412//-------------------------------------
413//  N a t i v e G e n e r a l J u m p
414//-------------------------------------
415
// Despite the name, handles only simple branches.
class NativeGeneralJump;
inline NativeGeneralJump* nativeGeneralJump_at(address address);

// Currently only implemented as single unconditional branch.
class NativeGeneralJump: public NativeInstruction {
 public:

  enum PPC64_specific_constants {
    instruction_size = 4 // A single 'b' instruction.
  };

  address instruction_address() const { return addr_at(0); }

  // Creation.
  friend inline NativeGeneralJump* nativeGeneralJump_at(address addr) {
    NativeGeneralJump* jump = (NativeGeneralJump*)(addr);
    DEBUG_ONLY( jump->verify(); )
    return jump;
  }

  // Insertion of native general jump instruction.
  static void insert_unconditional(address code_pos, address entry);

  // Target of the branch: this address plus the decoded LI displacement field.
  address jump_destination() const {
    DEBUG_ONLY( verify(); )
    return addr_at(0) + Assembler::inv_li_field(long_at(0));
  }

  // Retarget the branch by re-emitting it in place.
  void set_jump_destination(address dest) {
    DEBUG_ONLY( verify(); )
    insert_unconditional(addr_at(0), dest);
  }

  // MT-safe replacement of the jump instruction; defined out of line.
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify() const { guarantee(Assembler::is_b(long_at(0)), "invalid NativeGeneralJump"); }
};
454
// An interface for accessing/manipulating native load int (load_const32).
class NativeMovRegMem;
inline NativeMovRegMem* nativeMovRegMem_at(address address);
class NativeMovRegMem: public NativeInstruction {
 public:

  enum PPC64_specific_constants {
    instruction_size = 8 // 'lis' followed by 'ori' (see verify()).
  };

  address instruction_address() const { return addr_at(0); }

  // Extract the 32-bit immediate encoded in the 'lis'/'ori' pair.
  // The 16-bit immediate halves sit in different halfwords of each
  // instruction word depending on endianness.
  intptr_t offset() const {
#ifdef VM_LITTLE_ENDIAN
    short *hi_ptr = (short*)(addr_at(0));
    short *lo_ptr = (short*)(addr_at(4));
#else
    short *hi_ptr = (short*)(addr_at(0) + 2);
    short *lo_ptr = (short*)(addr_at(4) + 2);
#endif
    // hi is sign-extended via the short->int promotion; lo is masked to 16 bits.
    return ((*hi_ptr) << 16) | ((*lo_ptr) & 0xFFFF);
  }

  // Patch the 32-bit immediate in place and flush the icache.
  // Note: only the low 32 bits of 'x' are encoded.
  void set_offset(intptr_t x) {
#ifdef VM_LITTLE_ENDIAN
    short *hi_ptr = (short*)(addr_at(0));
    short *lo_ptr = (short*)(addr_at(4));
#else
    short *hi_ptr = (short*)(addr_at(0) + 2);
    short *lo_ptr = (short*)(addr_at(4) + 2);
#endif
    *hi_ptr = x >> 16;
    *lo_ptr = x & 0xFFFF;
    ICache::ppc64_flush_icache_bytes(addr_at(0), NativeMovRegMem::instruction_size);
  }

  // Add 'radd_offset' to the currently encoded immediate.
  void add_offset_in_bytes(intptr_t radd_offset) {
    set_offset(offset() + radd_offset);
  }

  void verify() const {
    guarantee(Assembler::is_lis(long_at(0)), "load_const32 1st instr");
    guarantee(Assembler::is_ori(long_at(4)), "load_const32 2nd instr");
  }

 private:
  // Wraps the 'lis'/'ori' pair at 'address', verifying it in debug builds.
  friend inline NativeMovRegMem* nativeMovRegMem_at(address address) {
    NativeMovRegMem* test = (NativeMovRegMem*)address;
    DEBUG_ONLY( test->verify(); )
    return test;
  }
};
507
508#endif // CPU_PPC_VM_NATIVEINST_PPC_HPP
509