1/*
2 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#ifndef SHARE_VM_INTERPRETER_REWRITER_HPP
26#define SHARE_VM_INTERPRETER_REWRITER_HPP
27
28#include "memory/allocation.hpp"
29#include "runtime/handles.inline.hpp"
30#include "utilities/growableArray.hpp"
31
32// The Rewriter adds caches to the constant pool and rewrites bytecode indices
33// pointing into the constant pool for better interpreter performance.
34
// Rewriter is a short-lived helper (StackObj): it is constructed once per class,
// builds the index maps below, creates the constant pool cache, and rewrites
// bytecode indices in the class's methods to refer to that cache.
class Rewriter: public StackObj {
 private:
  instanceKlassHandle _klass;         // class whose methods are being rewritten
  constantPoolHandle  _pool;          // constant pool of _klass
  Array<Method*>*     _methods;       // methods whose bytecodes are scanned/rewritten
  GrowableArray<int>  _cp_map;        // maps cp index -> cp cache index, or -1 if none
  GrowableArray<int>  _cp_cache_map;  // for Methodref, Fieldref,
                                      // InterfaceMethodref and InvokeDynamic
  GrowableArray<int>  _reference_map; // maps from cp index to resolved_refs index (or -1)
  GrowableArray<int>  _resolved_references_map; // for strings, methodHandle, methodType
  GrowableArray<int>  _invokedynamic_references_map; // for invokedynamic resolved refs
  GrowableArray<int>  _method_handle_invokers; // per-cp-index markers for MethodHandle
                                               // invoker rewriting (see scan_method)
  int                 _resolved_reference_limit; // length of _resolved_references_map
                                                 // after the first iteration (-1 before)

  // For mapping invokedynamic bytecodes, which are discovered during method
  // scanning.  The invokedynamic entries are added at the end of the cpCache.
  // If there are any invokespecial/InterfaceMethodref special case bytecodes,
  // these entries are added before invokedynamic entries so that the
  // invokespecial bytecode 16 bit index doesn't overflow.
  GrowableArray<int>      _invokedynamic_cp_cache_map;

  // For patching.  Bytecode pointers and resolved-reference indices recorded
  // during scanning, to be fixed up later by patch_invokedynamic_bytecodes().
  GrowableArray<address>* _patch_invokedynamic_bcps;
  GrowableArray<int>*     _patch_invokedynamic_refs;

  // Reset all maps to their initial state for a constant pool of the given
  // length.  The per-cp-index maps (_cp_map, _reference_map) are grown to
  // 'length' and filled with the -1 "no entry" sentinel; append-only maps are
  // merely truncated.  The -1 limits mark that the first iteration has not
  // yet completed (checked by asserts in the add_* methods below).
  void init_maps(int length) {
    _cp_map.trunc_to(0);
    _cp_map.at_grow(length, -1);

    _cp_cache_map.trunc_to(0);
    // Also cache resolved objects, in another different cache.
    _reference_map.trunc_to(0);
    _reference_map.at_grow(length, -1);

    _method_handle_invokers.trunc_to(0);
    _resolved_references_map.trunc_to(0);
    _invokedynamic_references_map.trunc_to(0);
    _resolved_reference_limit = -1;
    _first_iteration_cp_cache_limit = -1;

    // invokedynamic specific fields
    // length/4 is just an initial capacity guess; the arrays grow as needed.
    _invokedynamic_cp_cache_map.trunc_to(0);
    _patch_invokedynamic_bcps = new GrowableArray<address>(length / 4);
    _patch_invokedynamic_refs = new GrowableArray<int>(length / 4);
  }

  // Length of _cp_cache_map after the first iteration (-1 before); indy and
  // invokespecial/InterfaceMethodref entries are appended past this limit.
  int _first_iteration_cp_cache_limit;
  void record_map_limits() {
    // Record initial size of the two arrays generated for the CP cache
    // relative to walking the constant pool.
    _first_iteration_cp_cache_limit = _cp_cache_map.length();
    _resolved_reference_limit = _resolved_references_map.length();
  }

  int cp_cache_delta() {
    // How many cp cache entries were added since recording map limits after
    // cp cache initialization?
    assert(_first_iteration_cp_cache_limit != -1, "only valid after first iteration");
    return _cp_cache_map.length() - _first_iteration_cp_cache_limit;
  }

  // Forward mapping cp index -> cp cache index; only valid when has_cp_cache.
  int  cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map.at(i); }
  // The unsigned compare also rejects negative i in a single bounds check.
  bool has_cp_cache(int i) { return (uint) i < (uint) _cp_map.length() && _cp_map.at(i) >= 0; }

  // Append cp_index to cp_cache_map and record the reverse (cp index ->
  // cache index) mapping in cp_map.  Each cp_index may be added only once.
  // Returns the new cache index.
  int add_map_entry(int cp_index, GrowableArray<int>* cp_map, GrowableArray<int>* cp_cache_map) {
    assert(cp_map->at(cp_index) == -1, "not twice on same cp_index");
    int cache_index = cp_cache_map->append(cp_index);
    cp_map->at_put(cp_index, cache_index);
    return cache_index;
  }

  // Add a regular (non-invokedynamic) cp cache entry.  Only legal during the
  // first iteration, i.e. before record_map_limits() has run.
  int add_cp_cache_entry(int cp_index) {
    assert(_pool->tag_at(cp_index).value() != JVM_CONSTANT_InvokeDynamic, "use indy version");
    assert(_first_iteration_cp_cache_limit == -1, "do not add cache entries after first iteration");
    int cache_index = add_map_entry(cp_index, &_cp_map, &_cp_cache_map);
    assert(cp_entry_to_cp_cache(cp_index) == cache_index, "");
    assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
    return cache_index;
  }

  // Add an invokedynamic cp cache entry (discovered during method scanning,
  // so only legal after the first iteration).  Returns the index as seen by
  // the bytecode: the indy entries logically follow the first-iteration
  // entries, so the raw map index is offset by _first_iteration_cp_cache_limit.
  int add_invokedynamic_cp_cache_entry(int cp_index) {
    assert(_pool->tag_at(cp_index).value() == JVM_CONSTANT_InvokeDynamic, "use non-indy version");
    assert(_first_iteration_cp_cache_limit >= 0, "add indy cache entries after first iteration");
    // add to the invokedynamic index map.
    int cache_index = _invokedynamic_cp_cache_map.append(cp_index);
    // do not update _cp_map, since the mapping is one-to-many
    assert(invokedynamic_cp_cache_entry_pool_index(cache_index) == cp_index, "");
    // this index starts at one but in the bytecode it's appended to the end.
    return cache_index + _first_iteration_cp_cache_limit;
  }

  // Reverse mapping for indy entries: raw indy-map index -> cp index.
  // Note: takes the raw (un-offset) index, not the bytecode-visible one.
  int invokedynamic_cp_cache_entry_pool_index(int cache_index) {
    int cp_index = _invokedynamic_cp_cache_map.at(cache_index);
    return cp_index;
  }

  // add a new CP cache entry beyond the normal cache for the special case of
  // invokespecial with InterfaceMethodref as cpool operand.
  int add_invokespecial_cp_cache_entry(int cp_index) {
    assert(_first_iteration_cp_cache_limit >= 0, "add these special cache entries after first iteration");
    // Don't add InterfaceMethodref if it already exists at the end.
    for (int i = _first_iteration_cp_cache_limit; i < _cp_cache_map.length(); i++) {
      if (cp_cache_entry_pool_index(i) == cp_index) {
        return i;
      }
    }
    int cache_index = _cp_cache_map.append(cp_index);
    assert(cache_index >= _first_iteration_cp_cache_limit, "");
    // do not update _cp_map, since the mapping is one-to-many
    assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
    return cache_index;
  }

  // Forward mapping cp index -> resolved_references index; only valid when
  // has_entry_in_resolved_references.
  int  cp_entry_to_resolved_references(int cp_index) const {
    assert(has_entry_in_resolved_references(cp_index), "oob");
    return _reference_map.at(cp_index);
  }
  bool has_entry_in_resolved_references(int cp_index) const {
    return (uint) cp_index < (uint) _reference_map.length() && _reference_map.at(cp_index) >= 0;
  }

  // add a new entry to the resolved_references map
  int add_resolved_references_entry(int cp_index) {
    int ref_index = add_map_entry(cp_index, &_reference_map, &_resolved_references_map);
    assert(cp_entry_to_resolved_references(cp_index) == ref_index, "");
    return ref_index;
  }

  // add new entries to the resolved_references map (for invokedynamic and
  // invokehandle only).  Appends _indy_resolved_references_entries consecutive
  // slots, all pointing back at cp_index (many-to-one), and records the
  // owning cp cache index for each slot; returns the index of the first slot.
  int add_invokedynamic_resolved_references_entries(int cp_index, int cache_index) {
    assert(_resolved_reference_limit >= 0, "must add indy refs after first iteration");
    int ref_index = -1;
    for (int entry = 0; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
      const int index = _resolved_references_map.append(cp_index);  // many-to-one
      assert(index >= _resolved_reference_limit, "");
      if (entry == 0) {
        ref_index = index;
      }
      assert((index - entry) == ref_index, "entries must be consecutive");
      _invokedynamic_references_map.at_put_grow(index, cache_index, -1);
    }
    return ref_index;
  }

  // Reverse mapping: resolved_references index -> cp index.
  int resolved_references_entry_to_pool_index(int ref_index) {
    int cp_index = _resolved_references_map.at(ref_index);
    return cp_index;
  }

  // Access the contents of _cp_cache_map to determine CP cache layout.
  // (Reverse mapping: cp cache index -> cp index.)
  int cp_cache_entry_pool_index(int cache_index) {
    int cp_index = _cp_cache_map.at(cache_index);
    return cp_index;
  }

  // All the work goes in here:
  Rewriter(instanceKlassHandle klass, const constantPoolHandle& cpool, Array<Method*>* methods, TRAPS);

  void compute_index_maps();
  void make_constant_pool_cache(TRAPS);
  // Scan one method; 'reverse' undoes a prior rewrite.  Sets
  // *invokespecial_error on the unrewritable invokespecial overflow case.
  void scan_method(Method* m, bool reverse, bool* invokespecial_error);
  void rewrite_Object_init(methodHandle m, TRAPS);
  void rewrite_member_reference(address bcp, int offset, bool reverse);
  void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse);
  void rewrite_invokedynamic(address bcp, int offset, bool reverse);
  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse);
  void rewrite_invokespecial(address bcp, int offset, bool reverse, bool* invokespecial_error);

  // Apply the fixups recorded in _patch_invokedynamic_bcps/_refs.
  void patch_invokedynamic_bytecodes();

  // Do all the work.
  void rewrite_bytecodes(TRAPS);

  // Revert bytecodes in case of an exception.
  void restore_bytecodes();

  static methodHandle rewrite_jsrs(methodHandle m, TRAPS);
 public:
  // Driver routine:
  static void rewrite(instanceKlassHandle klass, TRAPS);
};
216
217#endif // SHARE_VM_INTERPRETER_REWRITER_HPP
218