/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_s390.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_s390.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Machine-dependent part of VtableStubs: create vtableStub of correct
// size and initialize its code.

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
#endif

// Used by compiler only; may use only caller-saved, non-argument registers.
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {

  const int   code_length = VtableStub::pd_code_size_limit(true);
  VtableStub *s = new(code_length) VtableStub(true, vtable_index);
  if (s == NULL) { // Indicates OOM in the code cache.
    return NULL;
  }

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), code_length);
  MacroAssembler *masm = new MacroAssembler(&cb);
  address start_pc;
  int     padding_bytes = 0;
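  // padding_bytes tracks how far the emitted code stays below the worst-case
  // instruction sizes assumed when the stub was sized (see pd_code_size_limit()).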

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    // Count unused bytes
    //                  worst case             actual size
    padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(Z_R1_scratch, (long)SharedRuntime::nof_megamorphic_calls_addr(), true);

    // Use generic emitter for direct memory increment.
    // Abuse Z_method as scratch register for generic emitter.
    // It is loaded further down anyway before it is first used.
    __ add2mem_32(Address(Z_R1_scratch), 1, Z_method);
  }
#endif

  assert(VtableStub::receiver_location() == Z_R2->as_VMReg(), "receiver expected in Z_ARG1");

  // Get receiver klass.
  // Must do an explicit check if implicit checks are disabled.
  address npe_addr = __ pc(); // npe == NULL ptr exception
  __ null_check(Z_ARG1, Z_R1_scratch, oopDesc::klass_offset_in_bytes());
  const Register rcvr_klass = Z_R1_scratch;
  __ load_klass(rcvr_klass, Z_ARG1);

  // Set method (in case of interpreted method), and destination address.
  int entry_offset = in_bytes(Klass::vtable_start_offset()) +
                     vtable_index * vtableEntry::size_in_bytes();
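  // entry_offset is the byte offset of the selected vtableEntry from the start
  // of the receiver's Klass: the vtable begins at vtable_start_offset() and
  // each entry occupies vtableEntry::size_in_bytes().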

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // Check offset vs vtable length.
    const Register vtable_idx = Z_R0_scratch;

    // Count unused bytes.
    //                  worst case             actual size
    padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(vtable_idx, vtable_index*vtableEntry::size_in_bytes(), true);

    assert(Immediate::is_uimm12(in_bytes(Klass::vtable_length_offset())), "disp too large");
    __ z_cl(vtable_idx, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
    __ z_brl(L);
    __ z_lghi(Z_ARG3, vtable_index);  // Debug code, don't optimize.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), Z_ARG1, Z_ARG3, false);
    // Count unused bytes (assume worst case here).
    padding_bytes += 12;
    __ bind(L);
  }
#endif

  int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
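  // v_off addresses the Method* slot within the selected vtableEntry.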

  // Duplicate safety code from enc_class Java_Dynamic_Call_dynTOC.
  if (Displacement::is_validDisp(v_off)) {
    __ z_lg(Z_method/*method oop*/, v_off, rcvr_klass/*class oop*/);
    // Account for the load_const in the else path.
    padding_bytes += __ load_const_size();
  } else {
    // Worst case: offset does not fit in the displacement field.
    __ load_const(Z_method, v_off); // Z_method temporarily holds the offset value.
    __ z_lg(Z_method/*method oop*/, 0, Z_method/*method offset*/, rcvr_klass/*class oop*/);
  }

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ z_ltgr(Z_method, Z_method);
    __ z_brne(L);
    __ stop("Vtable entry is ZERO", 102);
    __ bind(L);
  }
#endif

  address ame_addr = __ pc(); // ame = abstract method error

  // Must do an explicit check if implicit checks are disabled.
  __ null_check(Z_method, Z_R1_scratch, in_bytes(Method::from_compiled_offset()));
  __ z_lg(Z_R1_scratch, in_bytes(Method::from_compiled_offset()), Z_method);
  __ z_br(Z_R1_scratch);

  masm->flush();

  s->set_exception_points(npe_addr, ame_addr);

  return s;
}

VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
  const int   code_length = VtableStub::pd_code_size_limit(false);
  VtableStub *s = new(code_length) VtableStub(false, vtable_index);
  if (s == NULL) { // Indicates OOM in the code cache.
    return NULL;
  }

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), code_length);
  MacroAssembler *masm = new MacroAssembler(&cb);
  address start_pc;
  int     padding_bytes = 0;

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    // Count unused bytes
    //                  worst case             actual size
    padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(Z_R1_scratch, (long)SharedRuntime::nof_megamorphic_calls_addr(), true);

    // Use generic emitter for direct memory increment.
    // Use Z_tmp_1 as scratch register for generic emitter.
    __ add2mem_32(Address(Z_R1_scratch), 1, Z_tmp_1);
  }
#endif

  assert(VtableStub::receiver_location() == Z_R2->as_VMReg(), "receiver expected in Z_ARG1");

  // Entry arguments:
  //  Z_method: Interface
  //  Z_ARG1:   Receiver
  const Register rcvr_klass = Z_tmp_1;    // Used to compute itable_entry_addr.
                                          // Use extra reg to avoid re-load.
  const Register vtable_len = Z_tmp_2;    // Used to compute itable_entry_addr.
  const Register itable_entry_addr = Z_R1_scratch;
  const Register itable_interface  = Z_R0_scratch;

  // Get receiver klass.
  // Must do an explicit check if implicit checks are disabled.
  address npe_addr = __ pc(); // npe == NULL ptr exception
  __ null_check(Z_ARG1, Z_R1_scratch, oopDesc::klass_offset_in_bytes());
  __ load_klass(rcvr_klass, Z_ARG1);

  // Load start of itable entries into itable_entry.
  __ z_llgf(vtable_len, Address(rcvr_klass, Klass::vtable_length_offset()));
  __ z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
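  // The itable follows the vtable in the Klass layout, so scaling the entry
  // count to bytes yields the distance from vtable_start_offset() to the
  // first itableOffsetEntry.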

  // Loop over all itable entries until the desired interfaceOop (Rinterface) is found.
  const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
  // Count unused bytes.
  start_pc = __ pc();
  __ add2reg_with_index(itable_entry_addr, vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(), rcvr_klass, vtable_len);
  padding_bytes += 20 - (__ pc() - start_pc);

  const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
  Label search;
  __ bind(search);

  // Handle IncompatibleClassChangeError in itable stubs.
  // If the entry is NULL then we've reached the end of the table
  // without finding the expected interface, so throw an exception.
  NearLabel   throw_icce;
  __ load_and_test_long(itable_interface, Address(itable_entry_addr));
  __ z_bre(throw_icce); // Throw the exception out-of-line.
  // Count unused bytes.
  start_pc = __ pc();
  __ add2reg(itable_entry_addr, itable_offset_search_inc);
  padding_bytes += 20 - (__ pc() - start_pc);
  __ z_cgr(itable_interface, Z_method);
  __ z_brne(search);

  // Entry found. itable_entry_addr now points to the subsequent entry (itable_offset_search_inc too far).
  // Get offset of vtable for interface.

  const Register vtable_offset = Z_R1_scratch;
  const Register itable_method = rcvr_klass;   // Calculated before.

  const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
                                    itableOffsetEntry::interface_offset_in_bytes()) -
                                   itable_offset_search_inc;
  __ z_llgf(vtable_offset, vtable_offset_offset, itable_entry_addr);

  // Compute itableMethodEntry and get method and entry point for compiler.
  const int method_offset = (itableMethodEntry::size() * wordSize * vtable_index) +
                            itableMethodEntry::method_offset_in_bytes();

  __ z_lg(Z_method, method_offset, vtable_offset, itable_method);

#ifndef PRODUCT
  if (DebugVtables) {
    Label ok1;
    __ z_ltgr(Z_method, Z_method);
    __ z_brne(ok1);
    __ stop("method is null", 103);
    __ bind(ok1);
  }
#endif

  address ame_addr = __ pc();
  // Must do an explicit check if implicit checks are disabled.
  if (!ImplicitNullChecks) {
    __ compare64_and_branch(Z_method, (intptr_t) 0, Assembler::bcondEqual, throw_icce);
  }
  __ z_lg(Z_R1_scratch, in_bytes(Method::from_compiled_offset()), Z_method);
  __ z_br(Z_R1_scratch);

  // Handle IncompatibleClassChangeError in itable stubs.
  __ bind(throw_icce);
  // Count unused bytes
  //                  worst case          actual size
  // We force resolving of the call site by jumping to
  // the "handle wrong method" stub, and so let the
  // interpreter runtime do all the dirty work.
  padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(Z_R1_scratch, (long)SharedRuntime::get_handle_wrong_method_stub(), true);
  __ z_br(Z_R1_scratch);

  masm->flush();

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}

// In order to tune these parameters, run the JVM with VM options
// +PrintMiscellaneous and +WizardMode to see information about
// actual itable stubs. Run it with -Xmx31G -XX:+UseCompressedOops.
int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
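  // Conservative worst-case estimates (in bytes) for the stub variants
  // generated above.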
  int size = DebugVtables ? 216 : 0;
  if (CountCompiledCalls) {
    size += 6 * 4;
  }
  if (is_vtable_stub) {
    size += 52;
  } else {
    size += 104;
  }
  if (Universe::narrow_klass_base() != NULL) {
    size += 16; // A guess.
  }
  return size;
}

int VtableStub::pd_code_alignment() {
  const unsigned int icache_line_size = 32;
  return icache_line_size;
}