// oop.inline.hpp, revision 647:bd441136a5ce
/*
 * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

inline void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}

inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}

inline klassOop oopDesc::klass() const {
  if (UseCompressedOops) {
    return (klassOop)decode_heap_oop_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline klassOop oopDesc::klass_or_null() const volatile {
  // can be NULL in CMS
  if (UseCompressedOops) {
    return (klassOop)decode_heap_oop(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline int oopDesc::klass_gap_offset_in_bytes() {
  assert(UseCompressedOops, "only applicable to compressed headers");
  return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
}

inline oop* oopDesc::klass_addr() {
  // Only used internally, and by CMS; does not work with
  // UseCompressedOops.
  assert(!UseCompressedOops, "only supported with uncompressed oops");
  return (oop*) &_metadata._klass;
}

inline narrowOop* oopDesc::compressed_klass_addr() {
  assert(UseCompressedOops, "only called by compressed oops");
  return (narrowOop*) &_metadata._compressed_klass;
}

inline void oopDesc::set_klass(klassOop k) {
  // Since klasses are promoted, no store check is needed.
  assert(Universe::is_bootstrapping() || k != NULL, "must be a real klassOop");
  assert(Universe::is_bootstrapping() || k->is_klass(), "not a klassOop");
  if (UseCompressedOops) {
    oop_store_without_check(compressed_klass_addr(), (oop)k);
  } else {
    oop_store_without_check(klass_addr(), (oop) k);
  }
}

inline int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

inline void oopDesc::set_klass_gap(int v) {
  if (UseCompressedOops) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}

inline void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedOops) {
    _metadata._compressed_klass = encode_heap_oop(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (klassOop)k;
  }
}

inline void   oopDesc::init_mark()                 { set_mark(markOopDesc::prototype_for_object(this)); }
inline Klass* oopDesc::blueprint()           const { return klass()->klass_part(); }

inline bool oopDesc::is_a(klassOop k)        const { return blueprint()->is_subtype_of(k); }

inline bool oopDesc::is_instance()           const { return blueprint()->oop_is_instance(); }
inline bool oopDesc::is_instanceRef()        const { return blueprint()->oop_is_instanceRef(); }
inline bool oopDesc::is_array()              const { return blueprint()->oop_is_array(); }
inline bool oopDesc::is_objArray()           const { return blueprint()->oop_is_objArray(); }
inline bool oopDesc::is_typeArray()          const { return blueprint()->oop_is_typeArray(); }
inline bool oopDesc::is_javaArray()          const { return blueprint()->oop_is_javaArray(); }
inline bool oopDesc::is_symbol()             const { return blueprint()->oop_is_symbol(); }
inline bool oopDesc::is_klass()              const { return blueprint()->oop_is_klass(); }
inline bool oopDesc::is_thread()             const { return blueprint()->oop_is_thread(); }
inline bool oopDesc::is_method()             const { return blueprint()->oop_is_method(); }
inline bool oopDesc::is_constMethod()        const { return blueprint()->oop_is_constMethod(); }
inline bool oopDesc::is_methodData()         const { return blueprint()->oop_is_methodData(); }
inline bool oopDesc::is_constantPool()       const { return blueprint()->oop_is_constantPool(); }
inline bool oopDesc::is_constantPoolCache()  const { return blueprint()->oop_is_constantPoolCache(); }
inline bool oopDesc::is_compiledICHolder()   const { return blueprint()->oop_is_compiledICHolder(); }

inline void*     oopDesc::field_base(int offset)        const { return (void*)&((char*)this)[offset]; }

template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
inline jbyte*    oopDesc::byte_field_addr(int offset)   const { return (jbyte*)   field_base(offset); }
inline jchar*    oopDesc::char_field_addr(int offset)   const { return (jchar*)   field_base(offset); }
inline jboolean* oopDesc::bool_field_addr(int offset)   const { return (jboolean*)field_base(offset); }
inline jint*     oopDesc::int_field_addr(int offset)    const { return (jint*)    field_base(offset); }
inline jshort*   oopDesc::short_field_addr(int offset)  const { return (jshort*)  field_base(offset); }
inline jlong*    oopDesc::long_field_addr(int offset)   const { return (jlong*)   field_base(offset); }
inline jfloat*   oopDesc::float_field_addr(int offset)  const { return (jfloat*)  field_base(offset); }
inline jdouble*  oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); }
inline address*  oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }


// Functions for getting and setting oops within instance objects.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop.  All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code); a sketch follows below.

inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
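
// Illustrative sketch (an editor's addition, not part of the original
// source): because is_null, load_heap_oop and decode_heap_oop_not_null are
// overloaded on oop/narrowOop, a GC loop can be written once as a template
// over T and the compiler resolves the right flavor with no runtime
// UseCompressedOops test.  Disabled; example_follow_field is hypothetical.
#if 0
template <class T> void example_follow_field(T* field_addr) {
  T heap_oop = oopDesc::load_heap_oop(field_addr);  // raw load, no decode
  if (!oopDesc::is_null(heap_oop)) {                // overload-resolved null check
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    // ... visit obj ...
  }
}
#endif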

// Algorithm for encoding and decoding oops from 64-bit pointers to 32-bit
// offsets from the heap base.  Skipping the null check saves instructions
// in inner GC loops, so the not-null variants are kept separate.

inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  return (narrowOop)result;
}

inline narrowOop oopDesc::encode_heap_oop(oop v) {
  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  return (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
}

inline oop oopDesc::decode_heap_oop(narrowOop v) {
  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
inline oop oopDesc::decode_heap_oop(oop v)  { return v; }
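
// Worked example (editor's note, with assumed values): if narrow_oop_base
// is 0x0000000800000000 and narrow_oop_shift is 3 (8-byte alignment), an
// oop at address 0x0000000800000040 encodes as
//   (0x0000000800000040 - 0x0000000800000000) >> 3 == 0x8 (the narrowOop),
// and decoding reverses it:
//   0x0000000800000000 + ((uintptr_t)0x8 << 3) == 0x0000000800000040.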

// Load an oop out of the Java heap as is without decoding.
// Called by GC to check for null before decoding.
inline oop       oopDesc::load_heap_oop(oop* p)          { return *p; }
inline narrowOop oopDesc::load_heap_oop(narrowOop* p)    { return *p; }

// Load and decode an oop out of the Java heap into a wide oop.
inline oop oopDesc::load_decode_heap_oop_not_null(oop* p)       { return *p; }
inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(*p);
}

// Load and decode an oop out of the heap accepting null
inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(*p);
}

// Store already encoded heap oop into the heap.
inline void oopDesc::store_heap_oop(oop* p, oop v)                 { *p = v; }
inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v)     { *p = v; }

// Encode and store a heap oop.
inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}
inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }

// Encode and store a heap oop allowing for null.
inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
  *p = encode_heap_oop(v);
}
inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }

// Store heap oop as is for volatile fields.
inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_store_heap_oop(volatile narrowOop* p,
                                            narrowOop v) {
  OrderAccess::release_store(p, v);
}

inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                volatile narrowOop* p, oop v) {
  // A narrow heap oop is not pointer-sized, so release_store (not
  // release_store_ptr) must be used.
  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
}

inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                      volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}

inline void oopDesc::release_encode_store_heap_oop(volatile oop* p,
                                                           oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_encode_store_heap_oop(
                                                volatile narrowOop* p, oop v) {
  OrderAccess::release_store(p, encode_heap_oop(v));
}


// These functions are only used to exchange oop fields in instances,
// not headers.
inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
  if (UseCompressedOops) {
    // encode exchange value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
  }
}

inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                                volatile HeapWord *dest,
                                                oop compare_value) {
  if (UseCompressedOops) {
    // encode exchange and compare value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}
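
// Usage sketch (an editor's addition, not part of the original source):
// claiming a field only if it is still NULL, independent of compression.
// Disabled; maybe_install is a hypothetical helper.
#if 0
inline oop maybe_install(oop obj, volatile HeapWord* field) {
  // Returns the previous value; NULL means our store won the race.
  return oopDesc::atomic_compare_exchange_oop(obj, field, NULL);
}
#endif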

// To get or put an oop field of an instance, first check whether the
// field is compressed, and decompress/compress it accordingly.
inline oop oopDesc::obj_field(int offset) const {
  return UseCompressedOops ?
    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
    load_decode_heap_oop(obj_field_addr<oop>(offset));
}
inline void oopDesc::obj_field_put(int offset, oop value) {
  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                      oop_store(obj_field_addr<oop>(offset),       value);
}
inline void oopDesc::obj_field_raw_put(int offset, oop value) {
  UseCompressedOops ?
    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
}
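
// Usage sketch (an editor's addition): reading and overwriting an oop field
// given a byte offset.  Disabled; read_and_clear and field_offset are
// hypothetical.
#if 0
inline oop read_and_clear(oop holder, int field_offset) {
  oop value = holder->obj_field(field_offset);  // decodes if compressed
  holder->obj_field_put(field_offset, NULL);    // store with GC barriers
  return value;
}
#endif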

inline jbyte oopDesc::byte_field(int offset) const                  { return (jbyte) *byte_field_addr(offset);    }
inline void oopDesc::byte_field_put(int offset, jbyte contents)     { *byte_field_addr(offset) = (jint) contents; }

inline jboolean oopDesc::bool_field(int offset) const               { return (jboolean) *bool_field_addr(offset); }
inline void oopDesc::bool_field_put(int offset, jboolean contents)  { *bool_field_addr(offset) = (jint) contents; }

inline jchar oopDesc::char_field(int offset) const                  { return (jchar) *char_field_addr(offset);    }
inline void oopDesc::char_field_put(int offset, jchar contents)     { *char_field_addr(offset) = (jint) contents; }

inline jint oopDesc::int_field(int offset) const                    { return *int_field_addr(offset);        }
inline void oopDesc::int_field_put(int offset, jint contents)       { *int_field_addr(offset) = contents;    }

inline jshort oopDesc::short_field(int offset) const                { return (jshort) *short_field_addr(offset);  }
inline void oopDesc::short_field_put(int offset, jshort contents)   { *short_field_addr(offset) = (jint) contents;}

inline jlong oopDesc::long_field(int offset) const                  { return *long_field_addr(offset);       }
inline void oopDesc::long_field_put(int offset, jlong contents)     { *long_field_addr(offset) = contents;   }

inline jfloat oopDesc::float_field(int offset) const                { return *float_field_addr(offset);      }
inline void oopDesc::float_field_put(int offset, jfloat contents)   { *float_field_addr(offset) = contents;  }

inline jdouble oopDesc::double_field(int offset) const              { return *double_field_addr(offset);     }
inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }

inline address oopDesc::address_field(int offset) const              { return *address_field_addr(offset);     }
inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }

inline oop oopDesc::obj_field_acquire(int offset) const {
  return UseCompressedOops ?
             decode_heap_oop((narrowOop)
               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
           : decode_heap_oop((oop)
               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
}
inline void oopDesc::release_obj_field_put(int offset, oop value) {
  UseCompressedOops ?
    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
}

inline jbyte oopDesc::byte_field_acquire(int offset) const                  { return OrderAccess::load_acquire(byte_field_addr(offset));     }
inline void oopDesc::release_byte_field_put(int offset, jbyte contents)     { OrderAccess::release_store(byte_field_addr(offset), contents); }

inline jboolean oopDesc::bool_field_acquire(int offset) const               { return OrderAccess::load_acquire(bool_field_addr(offset));     }
inline void oopDesc::release_bool_field_put(int offset, jboolean contents)  { OrderAccess::release_store(bool_field_addr(offset), contents); }

inline jchar oopDesc::char_field_acquire(int offset) const                  { return OrderAccess::load_acquire(char_field_addr(offset));     }
inline void oopDesc::release_char_field_put(int offset, jchar contents)     { OrderAccess::release_store(char_field_addr(offset), contents); }

inline jint oopDesc::int_field_acquire(int offset) const                    { return OrderAccess::load_acquire(int_field_addr(offset));      }
inline void oopDesc::release_int_field_put(int offset, jint contents)       { OrderAccess::release_store(int_field_addr(offset), contents);  }

inline jshort oopDesc::short_field_acquire(int offset) const                { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
inline void oopDesc::release_short_field_put(int offset, jshort contents)   { OrderAccess::release_store(short_field_addr(offset), contents);     }

inline jlong oopDesc::long_field_acquire(int offset) const                  { return OrderAccess::load_acquire(long_field_addr(offset));       }
inline void oopDesc::release_long_field_put(int offset, jlong contents)     { OrderAccess::release_store(long_field_addr(offset), contents);   }

inline jfloat oopDesc::float_field_acquire(int offset) const                { return OrderAccess::load_acquire(float_field_addr(offset));      }
inline void oopDesc::release_float_field_put(int offset, jfloat contents)   { OrderAccess::release_store(float_field_addr(offset), contents);  }

inline jdouble oopDesc::double_field_acquire(int offset) const              { return OrderAccess::load_acquire(double_field_addr(offset));     }
inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }

inline int oopDesc::size_given_klass(Klass* klass)  {
  int lh = klass->layout_helper();
  int s  = lh >> LogHeapWordSize;  // deliver size scaled by wordSize

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.

  if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays.  We have to fetch the
      // length of the array, scale it by the element size, add the
      // header, and align to the object alignment.
      size_t size_in_bytes;
#ifdef _M_IA64
      // The Windows Itanium Aug 2002 SDK hoists this load above
      // the check for s < 0.  An oop at the end of the heap will
      // cause an access violation if this load is performed on a non
      // array oop.  Making the reference volatile prohibits this.
      // (%%% please explain by what magic the length is actually fetched!)
      volatile int *array_length;
      array_length = (volatile int *)( (intptr_t)this +
                          arrayOopDesc::length_offset_in_bytes() );
      assert(*array_length > 0, "Integer arithmetic problem somewhere");
      // Put into size_t to avoid overflow.
      size_in_bytes = (size_t) *array_length;
      size_in_bytes = size_in_bytes << Klass::layout_helper_log2_element_size(lh);
#else
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
#endif
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.  Cast the result
      // of round_to to size_t to guarantee unsigned division == right shift.
      s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
        HeapWordSize);

      // UseParNewGC, UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // UseParNewGC also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table. Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //     is_objArray() && is_forwarded()   // covers first scenario above
      //  || is_typeArray()                    // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseParNewGC) ||
               (is_objArray()  && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s % MinObjAlignment == 0, "alignment check");
  assert(s > 0, "Bad size calculated");
  return s;
}
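
// Worked example (editor's note, with assumed values): on a 64-bit VM
// (LogHeapWordSize == 3) an instance class with lh == 24 yields
// s = 24 >> 3 == 3 words directly.  For a hypothetical int[10] with a
// 16-byte array header, log2 element size 2 and 8-byte alignment:
//   size_in_bytes = (10 << 2) + 16 = 56;  round_to(56, 8) / 8 == 7 words.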


inline int oopDesc::size()  {
  return size_given_klass(blueprint());
}

inline bool oopDesc::is_parsable() {
  return blueprint()->oop_is_parsable(this);
}

inline bool oopDesc::is_conc_safe() {
  return blueprint()->oop_is_conc_safe(this);
}

inline void update_barrier_set(void* p, oop v) {
  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
  oopDesc::bs()->write_ref_field(p, v);
}

inline void update_barrier_set_pre(void* p, oop v) {
  oopDesc::bs()->write_ref_field_pre(p, v);
}

template <class T> inline void oop_store(T* p, oop v) {
  if (always_do_update_barrier) {
    oop_store((volatile T*)p, v);
  } else {
    update_barrier_set_pre(p, v);
    oopDesc::encode_store_heap_oop(p, v);
    update_barrier_set(p, v);
  }
}

template <class T> inline void oop_store(volatile T* p, oop v) {
  update_barrier_set_pre((void*)p, v);
  // Used by release_obj_field_put, so use release_store_ptr.
  oopDesc::release_encode_store_heap_oop(p, v);
  update_barrier_set((void*)p, v);
}

template <class T> inline void oop_store_without_check(T* p, oop v) {
  // XXX YSR FIX ME!!!
  if (always_do_update_barrier) {
    oop_store(p, v);
  } else {
    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier(p, v),
           "oop store without store check failed");
    oopDesc::encode_store_heap_oop(p, v);
  }
}

// When it absolutely has to get there.
template <class T> inline void oop_store_without_check(volatile T* p, oop v) {
  // XXX YSR FIX ME!!!
  if (always_do_update_barrier) {
    oop_store(p, v);
  } else {
    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier((T*)p, v),
           "oop store without store check failed");
    oopDesc::release_encode_store_heap_oop(p, v);
  }
}

// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
// (without having to remember the function name this calls).
inline void oop_store_raw(HeapWord* addr, oop value) {
  if (UseCompressedOops) {
    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
  } else {
    oopDesc::encode_store_heap_oop((oop*)addr, value);
  }
}
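
// Summary of the store flavors above (an editor's note, not original):
//   oop_store(p, v)               - normal case: pre-write barrier, encoded
//                                   store, post-write barrier.
//   oop_store_without_check(p, v) - asserts that no store check/barrier is
//                                   needed for this destination.
//   oop_store_raw(addr, v)        - just the encoded store; callers manage
//                                   (or do not need) barriers themselves.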

// Used only for markSweep, scavenging
inline bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}

inline bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

inline bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

inline bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}

inline bool check_obj_alignment(oop obj) {
  return (intptr_t)obj % MinObjAlignmentInBytes == 0;
}


// used only for asserts
inline bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in the heap.
  // Try to find the metaclass cycle safely, without seg faulting on bad
  // input: following the klass link from any oop should reach klassKlassObj
  // (which is its own klass) in at most 3 steps, e.g.
  // instance -> instanceKlass -> instanceKlassKlass -> klassKlass.
  for (int i = 0; i < 3; i++) {
    obj = obj->klass_or_null();
    // klass should be aligned and in permspace
    if (!check_obj_alignment(obj)) return false;
    if (!Universe::heap()->is_in_permanent(obj)) return false;
  }
  if (obj != Universe::klassKlassObj()) {
    // During a dump, the _klassKlassObj moved to a shared space.
    if (DumpSharedSpaces && Universe::klassKlassObj()->is_shared()) {
      return true;
    }
    return false;
  }

  // Header verification: the mark is typically non-NULL; at a safepoint
  // it must not be NULL.  Outside of a safepoint the header could be
  // changing (for example, another thread could be inflating a lock on
  // this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  return !SafepointSynchronize::is_at_safepoint();
}


// used only for asserts
inline bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
  return this == NULL ? true : is_oop(ignore_mark_word);
}

#ifndef PRODUCT
// used only for asserts
inline bool oopDesc::is_unlocked_oop() const {
  if (!Universe::heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT

inline void oopDesc::follow_header() {
  if (UseCompressedOops) {
    MarkSweep::mark_and_push(compressed_klass_addr());
  } else {
    MarkSweep::mark_and_push(klass_addr());
  }
}

inline void oopDesc::follow_contents(void) {
  assert (is_gc_marked(), "should be marked");
  blueprint()->oop_follow_contents(this);
}


// Used by scavengers

inline bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark()->is_marked();
}

// Used by scavengers
inline void oopDesc::forward_to(oop p) {
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  set_mark(m);
}

// Used by parallel scavengers
inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark(m, compare) == compare;
}

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// Decoding it must clear the low two locking- and GC-related bits.
inline oop oopDesc::forwardee() const {
  return (oop) mark()->decode_pointer();
}
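
// Sketch of the claiming protocol built from these primitives (an editor's
// addition, not the actual copying code).  Disabled; copy_or_forwardee is
// hypothetical.
#if 0
inline oop copy_or_forwardee(oop old_obj, oop new_location, markOop old_mark) {
  if (old_obj->cas_forward_to(new_location, old_mark)) {
    return new_location;        // we installed the forwarding pointer
  }
  return old_obj->forwardee();  // another thread won; use its copy
}
#endif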

inline bool oopDesc::has_displaced_mark() const {
  return mark()->has_displaced_mark_helper();
}

inline markOop oopDesc::displaced_mark() const {
  return mark()->displaced_mark_helper();
}

inline void oopDesc::set_displaced_mark(markOop m) {
  mark()->set_displaced_mark_helper(m);
}

// The following method needs to be MT safe.
inline int oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark()) {
    return displaced_mark()->age();
  } else {
    return mark()->age();
  }
}

inline void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark()->incr_age());
  } else {
    set_mark(mark()->incr_age());
  }
}


inline intptr_t oopDesc::identity_hash() {
  // Fast case: if the object is unlocked and the hash value is set, no locking is needed.
  // Note: the mark must be read into a local variable to guard against concurrent updates.
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}

inline void oopDesc::oop_iterate_header(OopClosure* blk) {
  if (UseCompressedOops) {
    blk->do_oop(compressed_klass_addr());
  } else {
    blk->do_oop(klass_addr());
  }
}

inline void oopDesc::oop_iterate_header(OopClosure* blk, MemRegion mr) {
  if (UseCompressedOops) {
    if (mr.contains(compressed_klass_addr())) {
      blk->do_oop(compressed_klass_addr());
    }
  } else {
    if (mr.contains(klass_addr())) blk->do_oop(klass_addr());
  }
}

inline int oopDesc::adjust_pointers() {
  debug_only(int check_size = size());
  int s = blueprint()->oop_adjust_pointers(this);
  assert(s == check_size, "should be the same");
  return s;
}

inline void oopDesc::adjust_header() {
  if (UseCompressedOops) {
    MarkSweep::adjust_pointer(compressed_klass_addr());
  } else {
    MarkSweep::adjust_pointer(klass_addr());
  }
}

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                        \
                                                                           \
inline int oopDesc::oop_iterate(OopClosureType* blk) {                     \
  SpecializationStats::record_call();                                      \
  return blueprint()->oop_oop_iterate##nv_suffix(this, blk);               \
}                                                                          \
                                                                           \
inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {       \
  SpecializationStats::record_call();                                      \
  return blueprint()->oop_oop_iterate##nv_suffix##_m(this, blk, mr);       \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)
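
// For reference (editor's note): an illustrative expansion of
// OOP_ITERATE_DEFN for a hypothetical closure type SomeClosureType with an
// nv_suffix of `_nv` would read as below.  Disabled; names are assumptions.
#if 0
inline int oopDesc::oop_iterate(SomeClosureType* blk) {
  SpecializationStats::record_call();
  return blueprint()->oop_oop_iterate_nv(this, blk);
}
#endif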

#ifndef SERIALGC
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)              \
                                                                           \
inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) {           \
  SpecializationStats::record_call();                                      \
  return blueprint()->oop_oop_iterate_backwards##nv_suffix(this, blk);     \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN)
#endif // !SERIALGC

inline bool oopDesc::is_shared() const {
  return CompactingPermGenGen::is_shared(this);
}

inline bool oopDesc::is_shared_readonly() const {
  return CompactingPermGenGen::is_shared_readonly(this);
}

inline bool oopDesc::is_shared_readwrite() const {
  return CompactingPermGenGen::is_shared_readwrite(this);
}
737