/*
 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_x86.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// machine-dependent part of VtableStubs: create VtableStub of correct size and
// initialize its code

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread,
                                          oop receiver,
                                          int index);
#endif

VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  const int amd64_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
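  // The CodeBuffer below wraps the stub's own code area, so the assembler
  // emits instructions directly into the space just allocated for the stub.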
  CodeBuffer cb(s->entry_point(), amd64_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // Free registers (non-args) are rax, rbx

  // get receiver klass
  address npe_addr = __ pc();
  __ load_klass(rax, j_rarg0);
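  // The klass load above is the first memory access through the receiver;
  // npe_addr marks it so that a fault on a null receiver is reported as an
  // implicit NullPointerException.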

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ cmpl(Address(rax, Klass::vtable_length_offset()),
            vtable_index * vtableEntry::size());
    __ jcc(Assembler::greater, L);
    __ movl(rbx, vtable_index);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, rbx);
    __ bind(L);
  }
#endif // PRODUCT

  // load Method* and target address
  const Register method = rbx;

  __ lookup_virtual_method(rax, vtable_index, method);
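  // rbx now holds the Method* taken from the receiver klass's vtable slot
  // for vtable_index.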

  if (DebugVtables) {
    Label L;
    __ cmpptr(method, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notZero, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }
  // rax: receiver klass
  // rbx: Method*
  // j_rarg0: receiver
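  // ame_addr marks the dereference of the Method* in the jump below; a fault
  // there (a null Method* in the vtable slot) is reported as an
  // AbstractMethodError.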
  address ame_addr = __ pc();
  __ jmp( Address(rbx, Method::from_compiled_offset()));

  __ flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
                  vtable_index, p2i(s->entry_point()),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
  assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");
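  // Small indices encode the vtable offset as an 8-bit displacement; the 3
  // bytes of headroom ensure the same size limit still fits the 32-bit
  // displacement needed for larger indices.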

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}


VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Note well: pd_code_size_limit is the absolute minimum we can get
  // away with.  If you add code here, bump the code stub size
  // returned by pd_code_size_limit!
  const int amd64_code_length = VtableStub::pd_code_size_limit(false);
  VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), amd64_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif

  // Entry arguments:
  //  rax: Interface
  //  j_rarg0: Receiver

  // Free registers (non-args) are rax (interface), rbx

  // get receiver (need to skip return address on top of stack)

  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();

  // Most registers are in use; we'll use rax, rbx, r10, r11
  // (various calling sequences use r[cd]x, r[sd]i, r[89]; stay away from them)
  __ load_klass(r10, j_rarg0);
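  // As in the vtable stub, this klass load is the implicit null check
  // recorded by npe_addr.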

  // If we take a trap while this arg is on the stack we will not
  // be able to walk the stack properly. This is not an issue except
  // when there are mistakes in this assembly code that could generate
  // a spurious fault. Ask me how I know...

  const Register method = rbx;
  Label throw_icce;

  // Get Method* and entrypoint for compiler
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             r10, rax, itable_index,
                             // outputs: method, scan temp. reg
                             method, r11,
                             throw_icce);
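  // The itable scan above looks for the interface in rax within the receiver
  // klass's itable; if the receiver's class does not implement it, control
  // branches to throw_icce.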

  // method (rbx): Method*
  // j_rarg0: receiver

#ifdef ASSERT
  if (DebugVtables) {
    Label L2;
    __ cmpptr(method, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L2);
    __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notZero, L2);
    __ stop("compiler entrypoint is null");
    __ bind(L2);
  }
#endif // ASSERT

  // rbx: Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ jmp(Address(method, Method::from_compiled_offset()));

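  // Reached only when the itable scan fails to find the interface: tail-call
  // the shared stub that throws IncompatibleClassChangeError.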
  __ bind(throw_icce);
  __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));

  __ flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("itable #%d at " PTR_FORMAT "[%d] left over: %d",
                  itable_index, p2i(s->entry_point()),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
  assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}

int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
  if (is_vtable_stub) {
    // Vtable stub size
    return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
           (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
  } else {
    // Itable stub size
    return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
           (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
  }
  // In order to tune these parameters, run the JVM with VM options
  // +PrintMiscellaneous and +WizardMode to see information about
  // actual itable stubs.  Look for lines like this:
  //   itable #1 at 0x5551212[71] left over: 3
  // Reduce the constants so that the "left over" number is >=3
  // for the common cases.
  // Do not aim at a left-over number of zero, because a
  // large vtable or itable index (>= 32) will require a 32-bit
  // immediate displacement instead of an 8-bit one.
  //
  // The JVM98 app. _202_jess has a megamorphic interface call.
  // The itable code looks like this:
  // Decoding VtableStub itbl[1]@12
  //   mov    0x8(%rsi),%r10
  //   mov    0x198(%r10),%r11d
  //   lea    0x218(%r10,%r11,8),%r11
  //   lea    0x8(%r10),%r10
  //   mov    (%r11),%rbx
  //   cmp    %rbx,%rax
  //   je     success
  // loop:
  //   test   %rbx,%rbx
  //   je     throw_icce
  //   add    $0x10,%r11
  //   mov    (%r11),%rbx
  //   cmp    %rbx,%rax
  //   jne    loop
  // success:
  //   mov    0x8(%r11),%r11d
  //   mov    (%r10,%r11,1),%rbx
  //   jmpq   *0x60(%rbx)
  // throw_icce:
  //   jmpq   throw_ICCE_entry
}

int VtableStub::pd_code_alignment() {
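  // Stub entry points only need machine-word alignment on x86; no stricter
  // (e.g. cache-line) alignment is required here.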
  return wordSize;
}