oop.inline.hpp revision 3602:da91efe96a93
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
#define SHARE_VM_OOPS_OOP_INLINE_HPP

#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/barrierSet.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generation.hpp"
#include "memory/specialized_oop_closures.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/klass.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "bytes_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "bytes_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "bytes_ppc.hpp"
#endif

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references.

inline void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}

inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}

inline Klass* oopDesc::klass() const {
  if (UseCompressedKlassPointers) {
    return decode_klass_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline Klass* oopDesc::klass_or_null() const volatile {
  // can be NULL in CMS
  if (UseCompressedKlassPointers) {
    return decode_klass(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline int oopDesc::klass_gap_offset_in_bytes() {
  assert(UseCompressedKlassPointers, "only applicable to compressed klass pointers");
  return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
}
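
// Illustrative layout note (assuming a typical 64-bit VM with compressed
// klass pointers):
//
//   offset  0: _mark                       (markOop, 8 bytes)
//   offset  8: _metadata._compressed_klass (narrowOop, 4 bytes)
//   offset 12: klass gap                   (4 bytes, reusable as the first
//                                           field or the array length)
//
// so klass_gap_offset_in_bytes() returns 8 + sizeof(narrowOop) == 12.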

inline Klass** oopDesc::klass_addr() {
  // Only used internally and with CMS; does not work with
  // compressed klass pointers.
  assert(!UseCompressedKlassPointers, "only supported with uncompressed klass pointers");
  return (Klass**) &_metadata._klass;
}

inline narrowOop* oopDesc::compressed_klass_addr() {
  assert(UseCompressedKlassPointers, "only called by compressed klass pointers");
  return (narrowOop*) &_metadata._compressed_klass;
}

inline void oopDesc::set_klass(Klass* k) {
  // since klasses are promoted, no store check is needed
  assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
  assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
  if (UseCompressedKlassPointers) {
    *compressed_klass_addr() = encode_klass_not_null(k);
  } else {
    *klass_addr() = k;
  }
}

inline int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

inline void oopDesc::set_klass_gap(int v) {
  if (UseCompressedKlassPointers) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}

inline void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedKlassPointers) {
    _metadata._compressed_klass = encode_heap_oop(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (Klass*)(address)k;
  }
}

inline oop oopDesc::list_ptr_from_klass() {
  // This is only to be used during GC, for from-space objects.
  if (UseCompressedKlassPointers) {
    return decode_heap_oop(_metadata._compressed_klass);
  } else {
    // Special case for GC
    return (oop)(address)_metadata._klass;
  }
}

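// Illustrative note on the pair above: during a scavenge the collector can
// thread from-space objects into an overflow list by storing the oop of the
// next list element over the klass slot (set_klass_to_list_ptr) and reading
// it back (list_ptr_from_klass).  Such an object is a dead from-space copy
// whose klass is recoverable elsewhere, so clobbering the slot is safe and,
// as the comments above note, requires no barrier.
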
inline void   oopDesc::init_mark()                 { set_mark(markOopDesc::prototype_for_object(this)); }

inline bool oopDesc::is_a(Klass* k)          const { return klass()->is_subtype_of(k); }

inline bool oopDesc::is_instance()           const { return klass()->oop_is_instance(); }
inline bool oopDesc::is_instanceMirror()     const { return klass()->oop_is_instanceMirror(); }
inline bool oopDesc::is_instanceRef()        const { return klass()->oop_is_instanceRef(); }
inline bool oopDesc::is_array()              const { return klass()->oop_is_array(); }
inline bool oopDesc::is_objArray()           const { return klass()->oop_is_objArray(); }
inline bool oopDesc::is_typeArray()          const { return klass()->oop_is_typeArray(); }

inline void*     oopDesc::field_base(int offset)        const { return (void*)&((char*)this)[offset]; }

template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
inline Metadata** oopDesc::metadata_field_addr(int offset) const { return (Metadata**)field_base(offset); }
inline jbyte*    oopDesc::byte_field_addr(int offset)   const { return (jbyte*)   field_base(offset); }
inline jchar*    oopDesc::char_field_addr(int offset)   const { return (jchar*)   field_base(offset); }
inline jboolean* oopDesc::bool_field_addr(int offset)   const { return (jboolean*)field_base(offset); }
inline jint*     oopDesc::int_field_addr(int offset)    const { return (jint*)    field_base(offset); }
inline jshort*   oopDesc::short_field_addr(int offset)  const { return (jshort*)  field_base(offset); }
inline jlong*    oopDesc::long_field_addr(int offset)   const { return (jlong*)   field_base(offset); }
inline jfloat*   oopDesc::float_field_addr(int offset)  const { return (jfloat*)  field_base(offset); }
inline jdouble*  oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); }
inline address*  oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }


// Functions for getting and setting oops within instance objects.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop.  All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).

inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
inline bool oopDesc::is_null(Klass* obj)    { return obj == NULL; }
inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
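
// Illustrative sketch (hypothetical closure, not from this file): the
// overloads above let GC code be written once as a template and instantiated
// for both narrowOop* and oop* fields with no run-time conditional:
//
//   template <class T> void ExampleClosure::do_oop_work(T* p) {
//     T heap_oop = oopDesc::load_heap_oop(p);
//     if (!oopDesc::is_null(heap_oop)) {
//       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
//       // ... process obj ...
//     }
//   }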

// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
// offset from the heap base.  Saving the check for null can save instructions
// in inner GC loops so these are separated.

inline bool check_obj_alignment(oop obj) {
  return (intptr_t)obj % MinObjAlignmentInBytes == 0;
}
inline bool check_obj_alignment(Klass* obj) {
  return (intptr_t)obj % MinObjAlignmentInBytes == 0;
}

inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}

inline narrowOop oopDesc::encode_heap_oop(oop v) {
  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
  assert(check_obj_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
  return result;
}

inline oop oopDesc::decode_heap_oop(narrowOop v) {
  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
inline oop oopDesc::decode_heap_oop(oop v)  { return v; }
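
// Worked example (illustrative values): with narrow_oop_base() ==
// 0x0000000700000000 and narrow_oop_shift() == 3,
//
//   encode: oop 0x0000000700000048 -> pd = 0x48 -> narrowOop 0x09
//   decode: 0x0000000700000000 + (0x09 << 3) == 0x0000000700000048
//
// which is exactly the round trip the "reversibility" asserts check.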

// Encoding and decoding for klass field.  It is copied code, but someday
// might not be the same as oop.

inline narrowOop oopDesc::encode_klass_not_null(Klass* v) {
  assert(!is_null(v), "klass value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow klass pointer overflow");
  assert(decode_klass(result) == v, "reversibility");
  return (narrowOop)result;
}

inline narrowOop oopDesc::encode_klass(Klass* v) {
  return (is_null(v)) ? (narrowOop)0 : encode_klass_not_null(v);
}

inline Klass* oopDesc::decode_klass_not_null(narrowOop v) {
  assert(!is_null(v), "narrow klass value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  Klass* result = (Klass*)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
  assert(check_obj_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
  return result;
}

inline Klass* oopDesc::decode_klass(narrowOop v) {
  return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v);
}

// Load an oop out of the Java heap as is without decoding.
// Called by GC to check for null before decoding.
inline oop       oopDesc::load_heap_oop(oop* p)          { return *p; }
inline narrowOop oopDesc::load_heap_oop(narrowOop* p)    { return *p; }

// Load and decode an oop out of the Java heap into a wide oop.
inline oop oopDesc::load_decode_heap_oop_not_null(oop* p)       { return *p; }
inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(*p);
}

// Load and decode an oop out of the heap accepting null
inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(*p);
}

// Store already encoded heap oop into the heap.
inline void oopDesc::store_heap_oop(oop* p, oop v)                 { *p = v; }
inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v)     { *p = v; }

// Encode and store a heap oop.
inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}
inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }

// Encode and store a heap oop allowing for null.
inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
  *p = encode_heap_oop(v);
}
inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }

// Store heap oop as is for volatile fields.
inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_store_heap_oop(volatile narrowOop* p,
                                            narrowOop v) {
  OrderAccess::release_store(p, v);
}

inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                volatile narrowOop* p, oop v) {
  // heap oop is not pointer sized.
  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
}

inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                      volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}

inline void oopDesc::release_encode_store_heap_oop(volatile oop* p,
                                                           oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_encode_store_heap_oop(
                                                volatile narrowOop* p, oop v) {
  OrderAccess::release_store(p, encode_heap_oop(v));
}


// These functions are only used to exchange oop fields in instances,
// not headers.
inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
  if (UseCompressedOops) {
    // encode exchange value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
  }
}
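
// Illustrative use (hypothetical caller, not from this file): swap an oop
// field atomically and get the previous value back, already decoded:
//
//   HeapWord* field = (HeapWord*)obj->field_base(offset);
//   oop previous = oopDesc::atomic_exchange_oop(new_value, field);
//
// Note that this helper performs only the raw exchange; any required GC
// barriers are the caller's responsibility.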

// To get or put an oop field of an instance, we must first check whether
// the heap uses compressed oops and, if so, decode or encode the value.
inline oop oopDesc::obj_field(int offset) const {
  return UseCompressedOops ?
    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
    load_decode_heap_oop(obj_field_addr<oop>(offset));
}
inline volatile oop oopDesc::obj_field_volatile(int offset) const {
  volatile oop value = obj_field(offset);
  OrderAccess::acquire();
  return value;
}
inline void oopDesc::obj_field_put(int offset, oop value) {
  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                      oop_store(obj_field_addr<oop>(offset),       value);
}

inline Metadata* oopDesc::metadata_field(int offset) const {
  return *metadata_field_addr(offset);
}

inline void oopDesc::metadata_field_put(int offset, Metadata* value) {
  *metadata_field_addr(offset) = value;
}

inline void oopDesc::obj_field_put_raw(int offset, oop value) {
  UseCompressedOops ?
    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
}
inline void oopDesc::obj_field_put_volatile(int offset, oop value) {
  OrderAccess::release();
  obj_field_put(offset, value);
  OrderAccess::fence();
}
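
// Illustrative note on the ordering above: release() keeps all preceding
// loads and stores from reordering past the field store, and the trailing
// fence() keeps the store from reordering past later loads.  Together they
// implement the store half of Java volatile semantics.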

inline jbyte oopDesc::byte_field(int offset) const                  { return (jbyte) *byte_field_addr(offset);    }
inline void oopDesc::byte_field_put(int offset, jbyte contents)     { *byte_field_addr(offset) = (jint) contents; }

inline jboolean oopDesc::bool_field(int offset) const               { return (jboolean) *bool_field_addr(offset); }
inline void oopDesc::bool_field_put(int offset, jboolean contents)  { *bool_field_addr(offset) = (jint) contents; }

inline jchar oopDesc::char_field(int offset) const                  { return (jchar) *char_field_addr(offset);    }
inline void oopDesc::char_field_put(int offset, jchar contents)     { *char_field_addr(offset) = (jint) contents; }

inline jint oopDesc::int_field(int offset) const                    { return *int_field_addr(offset);        }
inline void oopDesc::int_field_put(int offset, jint contents)       { *int_field_addr(offset) = contents;    }

inline jshort oopDesc::short_field(int offset) const                { return (jshort) *short_field_addr(offset);  }
inline void oopDesc::short_field_put(int offset, jshort contents)   { *short_field_addr(offset) = (jint) contents;}

inline jlong oopDesc::long_field(int offset) const                  { return *long_field_addr(offset);       }
inline void oopDesc::long_field_put(int offset, jlong contents)     { *long_field_addr(offset) = contents;   }

inline jfloat oopDesc::float_field(int offset) const                { return *float_field_addr(offset);      }
inline void oopDesc::float_field_put(int offset, jfloat contents)   { *float_field_addr(offset) = contents;  }

inline jdouble oopDesc::double_field(int offset) const              { return *double_field_addr(offset);     }
inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }

inline address oopDesc::address_field(int offset) const              { return *address_field_addr(offset);     }
inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }

inline oop oopDesc::obj_field_acquire(int offset) const {
  return UseCompressedOops ?
             decode_heap_oop((narrowOop)
               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
           : decode_heap_oop((oop)
               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
}
inline void oopDesc::release_obj_field_put(int offset, oop value) {
  UseCompressedOops ?
    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
}

inline jbyte oopDesc::byte_field_acquire(int offset) const                  { return OrderAccess::load_acquire(byte_field_addr(offset));     }
inline void oopDesc::release_byte_field_put(int offset, jbyte contents)     { OrderAccess::release_store(byte_field_addr(offset), contents); }

inline jboolean oopDesc::bool_field_acquire(int offset) const               { return OrderAccess::load_acquire(bool_field_addr(offset));     }
inline void oopDesc::release_bool_field_put(int offset, jboolean contents)  { OrderAccess::release_store(bool_field_addr(offset), contents); }

inline jchar oopDesc::char_field_acquire(int offset) const                  { return OrderAccess::load_acquire(char_field_addr(offset));     }
inline void oopDesc::release_char_field_put(int offset, jchar contents)     { OrderAccess::release_store(char_field_addr(offset), contents); }

inline jint oopDesc::int_field_acquire(int offset) const                    { return OrderAccess::load_acquire(int_field_addr(offset));      }
inline void oopDesc::release_int_field_put(int offset, jint contents)       { OrderAccess::release_store(int_field_addr(offset), contents);  }

inline jshort oopDesc::short_field_acquire(int offset) const                { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
inline void oopDesc::release_short_field_put(int offset, jshort contents)   { OrderAccess::release_store(short_field_addr(offset), contents);     }

inline jlong oopDesc::long_field_acquire(int offset) const                  { return OrderAccess::load_acquire(long_field_addr(offset));       }
inline void oopDesc::release_long_field_put(int offset, jlong contents)     { OrderAccess::release_store(long_field_addr(offset), contents);   }

inline jfloat oopDesc::float_field_acquire(int offset) const                { return OrderAccess::load_acquire(float_field_addr(offset));      }
inline void oopDesc::release_float_field_put(int offset, jfloat contents)   { OrderAccess::release_store(float_field_addr(offset), contents);  }

inline jdouble oopDesc::double_field_acquire(int offset) const              { return OrderAccess::load_acquire(double_field_addr(offset));     }
inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }

inline address oopDesc::address_field_acquire(int offset) const             { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
inline void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }

inline int oopDesc::size_given_klass(Klass* klass)  {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.

  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays.  We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
#ifdef _M_IA64
      // The Windows Itanium Aug 2002 SDK hoists this load above
      // the check for s < 0.  An oop at the end of the heap will
      // cause an access violation if this load is performed on a non
      // array oop.  Making the reference volatile prohibits this.
      volatile int *array_length;
      array_length = (volatile int *)( (intptr_t)this +
                          arrayOopDesc::length_offset_in_bytes() );
      assert(*array_length > 0, "Integer arithmetic problem somewhere");
      // Put into size_t to avoid overflow.
      size_in_bytes = (size_t) *array_length;
      size_in_bytes = size_in_bytes << Klass::layout_helper_log2_element_size(lh);
#else
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
#endif
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.  Cast the result
      // of round_to to size_t to guarantee unsigned division == right shift.
      s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
        HeapWordSize);

      // UseParNewGC, UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so that it
      // indicates the grey portion of an already copied array.  This will
      // cause the first disjunct below to fail if the two comparands are
      // computed across such a concurrent change.
      // UseParNewGC also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table.  Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //     is_objArray() && is_forwarded()   // covers first scenario above
      //  || is_typeArray()                    // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseParNewGC) ||
               (is_objArray()  && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s % MinObjAlignment == 0, "alignment check");
  assert(s > 0, "Bad size calculated");
  return s;
}
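
// Worked example (illustrative, assuming compressed oops on a 64-bit VM):
// for a Java int[] the layout helper encodes a 16-byte header (8-byte mark +
// 4-byte narrow klass + 4-byte length) and log2_element_size == 2, so an
// int[10] sizes as
//
//   size_in_bytes = (10 << 2) + 16 = 56
//   s             = round_to(56, 8) / HeapWordSize = 7 words
//
// without taking the virtual oop_size() call.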

inline int oopDesc::size()  {
  return size_given_klass(klass());
}

inline void update_barrier_set(void* p, oop v) {
  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
  oopDesc::bs()->write_ref_field(p, v);
}

template <class T> inline void update_barrier_set_pre(T* p, oop v) {
  oopDesc::bs()->write_ref_field_pre(p, v);
}

template <class T> inline void oop_store(T* p, oop v) {
  if (always_do_update_barrier) {
    oop_store((volatile T*)p, v);
  } else {
    update_barrier_set_pre(p, v);
    oopDesc::encode_store_heap_oop(p, v);
    update_barrier_set((void*)p, v);  // cast away type
  }
}

template <class T> inline void oop_store(volatile T* p, oop v) {
  update_barrier_set_pre((T*)p, v);   // cast away volatile
  // Used by release_obj_field_put, so use release_store_ptr.
  oopDesc::release_encode_store_heap_oop(p, v);
  update_barrier_set((void*)p, v);    // cast away type
}
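
// Illustrative ordering note: write_ref_field_pre() is the pre-barrier (a
// no-op for card-table collectors; G1 uses it for SATB marking) and must run
// before the new value is stored, while write_ref_field() is the
// post-barrier (e.g. dirtying the card covering *p) and must run after the
// store.  oop_store() therefore brackets the actual encode/store between the
// two calls.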

// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
// (without having to remember the function name this calls).
inline void oop_store_raw(HeapWord* addr, oop value) {
  if (UseCompressedOops) {
    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
  } else {
    oopDesc::encode_store_heap_oop((oop*)addr, value);
  }
}

inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                                volatile HeapWord *dest,
                                                oop compare_value,
                                                bool prebarrier) {
  if (UseCompressedOops) {
    if (prebarrier) {
      update_barrier_set_pre((narrowOop*)dest, exchange_value);
    }
    // encode exchange and compare value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    if (prebarrier) {
      update_barrier_set_pre((oop*)dest, exchange_value);
    }
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}
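
// Illustrative note: the prebarrier flag exists for collectors with a
// pre-write barrier (G1's SATB marking).  Callers performing a mutator-level
// reference-field CAS should pass true so the old value is recorded for
// marking; GC-internal callers that manage barriers themselves, or run when
// no barrier is needed, pass false.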

// Used only for markSweep, scavenging
inline bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}

inline bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

inline bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

inline bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}


// used only for asserts
inline bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in heap
  if (Universe::heap()->is_in_reserved(obj->klass_or_null())) return false;

  // Header verification: the mark is typically non-NULL.  If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  return !SafepointSynchronize::is_at_safepoint();
}


// used only for asserts
inline bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
  return this == NULL ? true : is_oop(ignore_mark_word);
}

#ifndef PRODUCT
// used only for asserts
inline bool oopDesc::is_unlocked_oop() const {
  if (!Universe::heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT

inline void oopDesc::follow_contents(void) {
  assert(is_gc_marked(), "should be marked");
  klass()->oop_follow_contents(this);
}

// Used by scavengers

inline bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark()->is_marked();
}

// Used by scavengers
inline void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  set_mark(m);
}

// Used by parallel scavengers
inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark(m, compare) == compare;
}

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
inline oop oopDesc::forwardee() const {
  return (oop) mark()->decode_pointer();
}

inline bool oopDesc::has_displaced_mark() const {
  return mark()->has_displaced_mark_helper();
}

inline markOop oopDesc::displaced_mark() const {
  return mark()->displaced_mark_helper();
}

inline void oopDesc::set_displaced_mark(markOop m) {
  mark()->set_displaced_mark_helper(m);
}

// The following method needs to be MT safe.
inline int oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark()) {
    return displaced_mark()->age();
  } else {
    return mark()->age();
  }
}

inline void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark()->incr_age());
  } else {
    set_mark(mark()->incr_age());
  }
}


inline intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed.
  // Note: The mark must be read into a local variable to avoid concurrent updates.
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}

inline int oopDesc::adjust_pointers() {
  debug_only(int check_size = size());
  int s = klass()->oop_adjust_pointers(this);
  assert(s == check_size, "should be the same");
  return s;
}

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                        \
                                                                           \
inline int oopDesc::oop_iterate(OopClosureType* blk) {                     \
  SpecializationStats::record_call();                                      \
  return klass()->oop_oop_iterate##nv_suffix(this, blk);                   \
}                                                                          \
                                                                           \
inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {       \
  SpecializationStats::record_call();                                      \
  return klass()->oop_oop_iterate##nv_suffix##_m(this, blk, mr);           \
}
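
// Illustrative expansion: ALL_OOP_OOP_ITERATE_CLOSURES_1/2 (declared in
// specialized_oop_closures.hpp) apply this macro once per closure type, so
// e.g. OOP_ITERATE_DEFN(ExtendedOopClosure, _v) defines oop_iterate()
// overloads that dispatch through the virtual Klass::oop_oop_iterate_v(),
// while the specialized closure types get non-virtual "_nv" variants and
// avoid a virtual call per object.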

inline int oopDesc::oop_iterate_no_header(OopClosure* blk) {
  // The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all
  // the do_oop calls, but turns off all other features in ExtendedOopClosure.
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate(&cl);
}

inline int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) {
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate(&cl, mr);
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)

#ifndef SERIALGC
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)              \
                                                                           \
inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) {           \
  SpecializationStats::record_call();                                      \
  return klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);         \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN)
#endif // !SERIALGC

#endif // SHARE_VM_OOPS_OOP_INLINE_HPP