instanceKlass.inline.hpp (10606:24c6f885d316 → 11910:24d88ded4cb6)
/*
 * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
#define SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP

#include "memory/iterator.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

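// The accessors below publish lazily created state: each setter uses a
// release-store and each getter a load-acquire, so a thread that observes
// the new pointer also observes the stores that initialized what it points
// to. A minimal usage sketch (writer vs. concurrent reader):
//
//   ik->release_set_array_klasses(ak);        // writer: publish built ak
//   Klass* ak = ik->array_klasses_acquire();  // reader: never half-built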
inline Klass* InstanceKlass::array_klasses_acquire() const {
  return (Klass*) OrderAccess::load_ptr_acquire(&_array_klasses);
}

inline void InstanceKlass::release_set_array_klasses(Klass* k) {
  OrderAccess::release_store_ptr(&_array_klasses, k);
}

inline jmethodID* InstanceKlass::methods_jmethod_ids_acquire() const {
  return (jmethodID*)OrderAccess::load_ptr_acquire(&_methods_jmethod_ids);
}

inline void InstanceKlass::release_set_methods_jmethod_ids(jmethodID* jmeths) {
  OrderAccess::release_store_ptr(&_methods_jmethod_ids, jmeths);
}

// The iteration over the oops in objects is a hot path in the GC code.
// By force-inlining the following functions, we get GC performance similar
// to that of the previous macro-based implementation.

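// Each OopMapBlock gives the offset and count of one contiguous run of oop
// fields in the instance. T is the in-heap field type (narrowOop when
// compressed oops are in use, otherwise oop), and the nv flag lets
// Devirtualizer bind the closure's do_oop call statically instead of
// virtually.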
template <bool nv, typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
  T* p         = (T*)obj->obj_field_addr<T>(map->offset());
  T* const end = p + map->count();

  for (; p < end; ++p) {
    Devirtualizer<nv>::do_oop(closure, p);
  }
}

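// Reverse variant: visits the same fields back to front. Compiled only when
// the additional collectors are built in (INCLUDE_ALL_GCS); it backs
// oop_oop_iterate_reverse below.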
#if INCLUDE_ALL_GCS
template <bool nv, typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
  T* const start = (T*)obj->obj_field_addr<T>(map->offset());
  T* p           = start + map->count();

  while (start < p) {
    --p;
    Devirtualizer<nv>::do_oop(closure, p);
  }
}
#endif

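// Bounded variant: clamps [p, end) to the MemRegion mr so that only fields
// lying inside mr are visited, e.g. when scanning just a dirty portion of
// the heap.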
template <bool nv, typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
  T* p   = (T*)obj->obj_field_addr<T>(map->offset());
  T* end = p + map->count();

  T* const l = (T*)mr.start();
  T* const h = (T*)mr.end();
  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&
         mask_bits((intptr_t)h, sizeof(T)-1) == 0,
         "bounded region must be properly aligned");

  if (p < l) {
    p = l;
  }
  if (end > h) {
    end = h;
  }

  for (; p < end; ++p) {
    Devirtualizer<nv>::do_oop(closure, p);
  }
}

template <bool nv, typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure) {
  OopMapBlock* map           = start_of_nonstatic_oop_maps();
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();

  for (; map < end_map; ++map) {
    oop_oop_iterate_oop_map<nv, T>(map, obj, closure);
  }
}

#if INCLUDE_ALL_GCS
template <bool nv, typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) {
  OopMapBlock* const start_map = start_of_nonstatic_oop_maps();
  OopMapBlock* map             = start_map + nonstatic_oop_map_count();

  while (start_map < map) {
    --map;
    oop_oop_iterate_oop_map_reverse<nv, T>(map, obj, closure);
  }
}
#endif

template <bool nv, typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  OopMapBlock* map           = start_of_nonstatic_oop_maps();
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();

  for (; map < end_map; ++map) {
    oop_oop_iterate_oop_map_bounded<nv, T>(map, obj, closure, mr);
  }
}

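// The walkers above are instantiated for both narrowOop and oop fields; the
// dispatch functions below test UseCompressedOops once per object rather
// than once per field.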
template <bool nv, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) {
  if (UseCompressedOops) {
    oop_oop_iterate_oop_maps_specialized<nv, narrowOop>(obj, closure);
  } else {
    oop_oop_iterate_oop_maps_specialized<nv, oop>(obj, closure);
  }
}

#if INCLUDE_ALL_GCS
template <bool nv, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
  if (UseCompressedOops) {
    oop_oop_iterate_oop_maps_specialized_reverse<nv, narrowOop>(obj, closure);
  } else {
    oop_oop_iterate_oop_maps_specialized_reverse<nv, oop>(obj, closure);
  }
}
#endif

template <bool nv, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  if (UseCompressedOops) {
    oop_oop_iterate_oop_maps_specialized_bounded<nv, narrowOop>(obj, closure, mr);
  } else {
    oop_oop_iterate_oop_maps_specialized_bounded<nv, oop>(obj, closure, mr);
  }
}

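// GC entry points: optionally visit the klass itself as metadata, then all
// oop fields, and return the instance size in words so a caller sweeping a
// contiguous range can step to the next object.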
template <bool nv, class OopClosureType>
ALWAYSINLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
  if (Devirtualizer<nv>::do_metadata(closure)) {
    Devirtualizer<nv>::do_klass(closure, this);
  }

  oop_oop_iterate_oop_maps<nv>(obj, closure);

  return size_helper();
}

#if INCLUDE_ALL_GCS
template <bool nv, class OopClosureType>
ALWAYSINLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
  assert(!Devirtualizer<nv>::do_metadata(closure),
         "Code to handle metadata is not implemented");

  oop_oop_iterate_oop_maps_reverse<nv>(obj, closure);

  return size_helper();
}
#endif

template <bool nv, class OopClosureType>
ALWAYSINLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  if (Devirtualizer<nv>::do_metadata(closure)) {
    if (mr.contains(obj)) {
      Devirtualizer<nv>::do_klass(closure, this);
    }
  }

  oop_oop_iterate_oop_maps_bounded<nv>(obj, closure, mr);

  return size_helper();
}

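// Expands to the forward, bounded, and backwards definitions of
// oop_oop_iterate for one closure type; the per-closure lists instantiate
// it once per closure.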
#define ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
  OOP_OOP_ITERATE_DEFN(          InstanceKlass, OopClosureType, nv_suffix)  \
  OOP_OOP_ITERATE_DEFN_BOUNDED(  InstanceKlass, OopClosureType, nv_suffix)  \
  OOP_OOP_ITERATE_DEFN_BACKWARDS(InstanceKlass, OopClosureType, nv_suffix)

#endif // SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP