oop.inline.hpp revision 1472:c18cbe5936b8
/*
 * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Implementation of all inlined member functions defined in oop.hpp.
// We need a separate file to avoid circular references.

inline void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}

inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}

inline klassOop oopDesc::klass() const {
  if (UseCompressedOops) {
    return (klassOop)decode_heap_oop_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline klassOop oopDesc::klass_or_null() const volatile {
  // can be NULL in CMS
  if (UseCompressedOops) {
    return (klassOop)decode_heap_oop(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline int oopDesc::klass_gap_offset_in_bytes() {
  assert(UseCompressedOops, "only applicable to compressed headers");
  return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
}

inline oop* oopDesc::klass_addr() {
  // Only used internally and with CMS; this does not work with
  // UseCompressedOops.
  assert(!UseCompressedOops, "only supported with uncompressed oops");
  return (oop*) &_metadata._klass;
}

inline narrowOop* oopDesc::compressed_klass_addr() {
  assert(UseCompressedOops, "only called by compressed oops");
  return (narrowOop*) &_metadata._compressed_klass;
}

inline void oopDesc::set_klass(klassOop k) {
  // since klasses are promoted no store check is needed
  assert(Universe::is_bootstrapping() || k != NULL, "must be a real klassOop");
  assert(Universe::is_bootstrapping() || k->is_klass(), "not a klassOop");
  if (UseCompressedOops) {
    oop_store_without_check(compressed_klass_addr(), (oop)k);
  } else {
    oop_store_without_check(klass_addr(), (oop) k);
  }
}

inline int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

inline void oopDesc::set_klass_gap(int v) {
  if (UseCompressedOops) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}

inline void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedOops) {
    _metadata._compressed_klass = encode_heap_oop(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (klassOop)k;
  }
}

inline void   oopDesc::init_mark()                 { set_mark(markOopDesc::prototype_for_object(this)); }
inline Klass* oopDesc::blueprint()           const { return klass()->klass_part(); }

inline bool oopDesc::is_a(klassOop k)        const { return blueprint()->is_subtype_of(k); }

inline bool oopDesc::is_instance()           const { return blueprint()->oop_is_instance(); }
inline bool oopDesc::is_instanceRef()        const { return blueprint()->oop_is_instanceRef(); }
inline bool oopDesc::is_array()              const { return blueprint()->oop_is_array(); }
inline bool oopDesc::is_objArray()           const { return blueprint()->oop_is_objArray(); }
inline bool oopDesc::is_typeArray()          const { return blueprint()->oop_is_typeArray(); }
inline bool oopDesc::is_javaArray()          const { return blueprint()->oop_is_javaArray(); }
inline bool oopDesc::is_symbol()             const { return blueprint()->oop_is_symbol(); }
inline bool oopDesc::is_klass()              const { return blueprint()->oop_is_klass(); }
inline bool oopDesc::is_thread()             const { return blueprint()->oop_is_thread(); }
inline bool oopDesc::is_method()             const { return blueprint()->oop_is_method(); }
inline bool oopDesc::is_constMethod()        const { return blueprint()->oop_is_constMethod(); }
inline bool oopDesc::is_methodData()         const { return blueprint()->oop_is_methodData(); }
inline bool oopDesc::is_constantPool()       const { return blueprint()->oop_is_constantPool(); }
inline bool oopDesc::is_constantPoolCache()  const { return blueprint()->oop_is_constantPoolCache(); }
inline bool oopDesc::is_compiledICHolder()   const { return blueprint()->oop_is_compiledICHolder(); }

inline void*     oopDesc::field_base(int offset)        const { return (void*)&((char*)this)[offset]; }

template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
inline jbyte*    oopDesc::byte_field_addr(int offset)   const { return (jbyte*)   field_base(offset); }
inline jchar*    oopDesc::char_field_addr(int offset)   const { return (jchar*)   field_base(offset); }
inline jboolean* oopDesc::bool_field_addr(int offset)   const { return (jboolean*)field_base(offset); }
inline jint*     oopDesc::int_field_addr(int offset)    const { return (jint*)    field_base(offset); }
inline jshort*   oopDesc::short_field_addr(int offset)  const { return (jshort*)  field_base(offset); }
inline jlong*    oopDesc::long_field_addr(int offset)   const { return (jlong*)   field_base(offset); }
inline jfloat*   oopDesc::float_field_addr(int offset)  const { return (jfloat*)  field_base(offset); }
inline jdouble*  oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); }
inline address*  oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }

// Functions for getting and setting oops within instance objects.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop.  All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).

inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
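
// Editor's illustration (not part of the original file): the overloads above
// let a single template work on either heap-oop layout.  A hypothetical
// helper like the following instantiates with T = oop or T = narrowOop and
// picks the matching load_heap_oop/is_null overloads at compile time:
//
//   template <class T> bool field_is_null(T* field_addr) {
//     T heap_oop = oopDesc::load_heap_oop(field_addr);
//     return oopDesc::is_null(heap_oop);
//   }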

// Algorithm for encoding and decoding oops from 64-bit pointers to 32-bit
// offsets from the heap base.  Saving the check for null can save instructions
// in inner GC loops, so the not_null variants are separated out.

inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}

inline narrowOop oopDesc::encode_heap_oop(oop v) {
  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  return (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
}

inline oop oopDesc::decode_heap_oop(narrowOop v) {
  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
inline oop oopDesc::decode_heap_oop(oop v)  { return v; }
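
// Editor's worked example (hypothetical values, not from the original file):
// with narrow_oop_base() == 0x0000000800000000 and narrow_oop_shift() == 3,
// an oop at 0x0000000800000040 encodes as
//   (0x0000000800000040 - 0x0000000800000000) >> 3 == 0x8,
// and decoding reverses it: 0x0000000800000000 + (0x8 << 3) yields the
// original address.  With a zero base and zero shift the narrow oop is
// simply the low 32 bits of the address.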

// Load an oop out of the Java heap as is without decoding.
// Called by GC to check for null before decoding.
inline oop       oopDesc::load_heap_oop(oop* p)          { return *p; }
inline narrowOop oopDesc::load_heap_oop(narrowOop* p)    { return *p; }

// Load and decode an oop out of the Java heap into a wide oop.
inline oop oopDesc::load_decode_heap_oop_not_null(oop* p)       { return *p; }
inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(*p);
}

// Load and decode an oop out of the heap accepting null
inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(*p);
}

// Store already encoded heap oop into the heap.
inline void oopDesc::store_heap_oop(oop* p, oop v)                 { *p = v; }
inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v)     { *p = v; }

// Encode and store a heap oop.
inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}
inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }

// Encode and store a heap oop allowing for null.
inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
  *p = encode_heap_oop(v);
}
inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }

// Store heap oop as is for volatile fields.
inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_store_heap_oop(volatile narrowOop* p,
                                            narrowOop v) {
  OrderAccess::release_store(p, v);
}

inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                volatile narrowOop* p, oop v) {
  // heap oop is not pointer sized.
  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
}

inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                      volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}

inline void oopDesc::release_encode_store_heap_oop(volatile oop* p,
                                                           oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_encode_store_heap_oop(
                                                volatile narrowOop* p, oop v) {
  OrderAccess::release_store(p, encode_heap_oop(v));
}


// These functions are only used to exchange oop fields in instances,
// not headers.
inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
  if (UseCompressedOops) {
    // encode exchange value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
  }
}

inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                                volatile HeapWord *dest,
                                                oop compare_value) {
  if (UseCompressedOops) {
    // encode exchange and compare value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}
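
// Editor's usage sketch (hypothetical, not from the original file): given
// the address of an oop field, a caller CAS-publishes a value only if the
// field still holds the expected oop, then checks the returned witness:
//
//   oop witness = oopDesc::atomic_compare_exchange_oop(new_value, addr, NULL);
//   bool installed = (witness == NULL);  // NULL witness means our CAS won
//
// Note that these primitives perform only the raw exchange; callers are
// responsible for any GC write barriers.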

// To get or put a field of an instance, we must first check whether the
// field is compressed and, if so, uncompress it.
inline oop oopDesc::obj_field(int offset) const {
  return UseCompressedOops ?
    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
    load_decode_heap_oop(obj_field_addr<oop>(offset));
}
inline void oopDesc::obj_field_put(int offset, oop value) {
  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                      oop_store(obj_field_addr<oop>(offset),       value);
}
inline void oopDesc::obj_field_raw_put(int offset, oop value) {
  UseCompressedOops ?
    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
}
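
// Editor's usage sketch (hypothetical offset, not from the original file):
// runtime code reads and writes oop fields through these accessors so the
// compressed/uncompressed distinction stays hidden from callers, e.g.:
//
//   int off = some_oop_field_offset;      // any oop-field offset in bytes
//   oop value = obj->obj_field(off);      // load + decode if compressed
//   obj->obj_field_put(off, NULL);        // encode + store + write barrier
//
// obj_field_raw_put skips the barrier and is only safe where the caller
// guarantees no barrier is needed (e.g. during certain GC phases).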

inline jbyte oopDesc::byte_field(int offset) const                  { return (jbyte) *byte_field_addr(offset);    }
inline void oopDesc::byte_field_put(int offset, jbyte contents)     { *byte_field_addr(offset) = (jint) contents; }

inline jboolean oopDesc::bool_field(int offset) const               { return (jboolean) *bool_field_addr(offset); }
inline void oopDesc::bool_field_put(int offset, jboolean contents)  { *bool_field_addr(offset) = (jint) contents; }

inline jchar oopDesc::char_field(int offset) const                  { return (jchar) *char_field_addr(offset);    }
inline void oopDesc::char_field_put(int offset, jchar contents)     { *char_field_addr(offset) = (jint) contents; }

inline jint oopDesc::int_field(int offset) const                    { return *int_field_addr(offset);        }
inline void oopDesc::int_field_put(int offset, jint contents)       { *int_field_addr(offset) = contents;    }

inline jshort oopDesc::short_field(int offset) const                { return (jshort) *short_field_addr(offset);  }
inline void oopDesc::short_field_put(int offset, jshort contents)   { *short_field_addr(offset) = (jint) contents;}

inline jlong oopDesc::long_field(int offset) const                  { return *long_field_addr(offset);       }
inline void oopDesc::long_field_put(int offset, jlong contents)     { *long_field_addr(offset) = contents;   }

inline jfloat oopDesc::float_field(int offset) const                { return *float_field_addr(offset);      }
inline void oopDesc::float_field_put(int offset, jfloat contents)   { *float_field_addr(offset) = contents;  }

inline jdouble oopDesc::double_field(int offset) const              { return *double_field_addr(offset);     }
inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }

inline address oopDesc::address_field(int offset) const              { return *address_field_addr(offset);     }
inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }

inline oop oopDesc::obj_field_acquire(int offset) const {
  return UseCompressedOops ?
             decode_heap_oop((narrowOop)
               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
           : decode_heap_oop((oop)
               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
}
inline void oopDesc::release_obj_field_put(int offset, oop value) {
  UseCompressedOops ?
    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
}

inline jbyte oopDesc::byte_field_acquire(int offset) const                  { return OrderAccess::load_acquire(byte_field_addr(offset));     }
inline void oopDesc::release_byte_field_put(int offset, jbyte contents)     { OrderAccess::release_store(byte_field_addr(offset), contents); }

inline jboolean oopDesc::bool_field_acquire(int offset) const               { return OrderAccess::load_acquire(bool_field_addr(offset));     }
inline void oopDesc::release_bool_field_put(int offset, jboolean contents)  { OrderAccess::release_store(bool_field_addr(offset), contents); }

inline jchar oopDesc::char_field_acquire(int offset) const                  { return OrderAccess::load_acquire(char_field_addr(offset));     }
inline void oopDesc::release_char_field_put(int offset, jchar contents)     { OrderAccess::release_store(char_field_addr(offset), contents); }

inline jint oopDesc::int_field_acquire(int offset) const                    { return OrderAccess::load_acquire(int_field_addr(offset));      }
inline void oopDesc::release_int_field_put(int offset, jint contents)       { OrderAccess::release_store(int_field_addr(offset), contents);  }

inline jshort oopDesc::short_field_acquire(int offset) const                { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
inline void oopDesc::release_short_field_put(int offset, jshort contents)   { OrderAccess::release_store(short_field_addr(offset), contents);     }

inline jlong oopDesc::long_field_acquire(int offset) const                  { return OrderAccess::load_acquire(long_field_addr(offset));       }
inline void oopDesc::release_long_field_put(int offset, jlong contents)     { OrderAccess::release_store(long_field_addr(offset), contents);   }

inline jfloat oopDesc::float_field_acquire(int offset) const                { return OrderAccess::load_acquire(float_field_addr(offset));      }
inline void oopDesc::release_float_field_put(int offset, jfloat contents)   { OrderAccess::release_store(float_field_addr(offset), contents);  }

inline jdouble oopDesc::double_field_acquire(int offset) const              { return OrderAccess::load_acquire(double_field_addr(offset));     }
inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }

inline address oopDesc::address_field_acquire(int offset) const             { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
inline void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }

inline int oopDesc::size_given_klass(Klass* klass)  {
  int lh = klass->layout_helper();
  int s  = lh >> LogHeapWordSize;  // deliver size scaled by wordSize

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.
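  //
  // Editor's worked example (hypothetical values, not from the original
  // file): for an array of 10 elements with log2 element size 2 (e.g. jint)
  // and a 16-byte header, the array branch below computes
  //   size_in_bytes = 16 + (10 << 2) = 56,
  // which is then rounded up to MinObjAlignmentInBytes and divided by
  // HeapWordSize to produce the object size in words.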

  if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays.  We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
#ifdef _M_IA64
      // The Windows Itanium Aug 2002 SDK hoists this load above
      // the check for s < 0.  An oop at the end of the heap will
      // cause an access violation if this load is performed on a
      // non-array oop.  Making the reference volatile prohibits this.
      volatile int *array_length;
      array_length = (volatile int *)( (intptr_t)this +
                          arrayOopDesc::length_offset_in_bytes() );
      assert(*array_length > 0, "Integer arithmetic problem somewhere");
      // Put into size_t to avoid overflow.
      size_in_bytes = (size_t) *array_length;
      size_in_bytes = size_in_bytes << Klass::layout_helper_log2_element_size(lh);
#else
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
#endif
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.  Cast the result
      // of round_to to size_t to guarantee unsigned division == right shift.
      s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
        HeapWordSize);

      // UseParNewGC, UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // UseParNewGC also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table. Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //     is_objArray() && is_forwarded()   // covers first scenario above
      //  || is_typeArray()                    // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseParNewGC) ||
               (is_objArray()  && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s % MinObjAlignment == 0, "alignment check");
  assert(s > 0, "Bad size calculated");
  return s;
}


inline int oopDesc::size()  {
  return size_given_klass(blueprint());
}

inline bool oopDesc::is_parsable() {
  return blueprint()->oop_is_parsable(this);
}

inline bool oopDesc::is_conc_safe() {
  return blueprint()->oop_is_conc_safe(this);
}

inline void update_barrier_set(void* p, oop v) {
  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
  oopDesc::bs()->write_ref_field(p, v);
}

template <class T> inline void update_barrier_set_pre(T* p, oop v) {
  oopDesc::bs()->write_ref_field_pre(p, v);
}

template <class T> inline void oop_store(T* p, oop v) {
  if (always_do_update_barrier) {
    oop_store((volatile T*)p, v);
  } else {
    update_barrier_set_pre(p, v);
    oopDesc::encode_store_heap_oop(p, v);
    update_barrier_set((void*)p, v);  // cast away type
  }
}

template <class T> inline void oop_store(volatile T* p, oop v) {
  update_barrier_set_pre((T*)p, v);   // cast away volatile
  // Used by release_obj_field_put, so use release_store_ptr.
  oopDesc::release_encode_store_heap_oop(p, v);
  update_barrier_set((void*)p, v);    // cast away type
}
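
// Editor's note (not part of the original file): both oop_store variants
// follow the same three-step barrier protocol -- pre-write barrier, the
// (possibly encoding) store itself, then the post-write barrier:
//
//   update_barrier_set_pre(p, v);          // pre barrier (sees old value)
//   oopDesc::encode_store_heap_oop(p, v);  // the store, encoded if narrow
//   update_barrier_set((void*)p, v);       // post barrier (e.g. card mark)
//
// Which barriers actually do work depends on the BarrierSet installed by
// the collector in use.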

template <class T> inline void oop_store_without_check(T* p, oop v) {
  // XXX YSR FIX ME!!!
  if (always_do_update_barrier) {
    oop_store(p, v);
  } else {
    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier(p, v),
           "oop store without store check failed");
    oopDesc::encode_store_heap_oop(p, v);
  }
}

// When it absolutely has to get there.
template <class T> inline void oop_store_without_check(volatile T* p, oop v) {
  // XXX YSR FIX ME!!!
  if (always_do_update_barrier) {
    oop_store(p, v);
  } else {
    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier((T*)p, v),
           "oop store without store check failed");
    oopDesc::release_encode_store_heap_oop(p, v);
  }
}

// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
// (without having to remember the function name this calls).
inline void oop_store_raw(HeapWord* addr, oop value) {
  if (UseCompressedOops) {
    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
  } else {
    oopDesc::encode_store_heap_oop((oop*)addr, value);
  }
}

// Used only for markSweep, scavenging
inline bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}

inline bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

inline bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

inline bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}

inline bool check_obj_alignment(oop obj) {
  return (intptr_t)obj % MinObjAlignmentInBytes == 0;
}


// used only for asserts
inline bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in heap
  // try to find metaclass cycle safely without seg faulting on bad input
  // we should reach klassKlassObj by following klass link at most 3 times
  for (int i = 0; i < 3; i++) {
    obj = obj->klass_or_null();
    // klass should be aligned and in permspace
    if (!check_obj_alignment(obj)) return false;
    if (!Universe::heap()->is_in_permanent(obj)) return false;
  }
  if (obj != Universe::klassKlassObj()) {
    // During a dump, the _klassKlassObj moved to a shared space.
    if (DumpSharedSpaces && Universe::klassKlassObj()->is_shared()) {
      return true;
    }
    return false;
  }

  // Header verification: the mark is typically non-NULL. If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  return !SafepointSynchronize::is_at_safepoint();
}


// used only for asserts
inline bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
  return this == NULL ? true : is_oop(ignore_mark_word);
}

#ifndef PRODUCT
// used only for asserts
inline bool oopDesc::is_unlocked_oop() const {
  if (!Universe::heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT

inline void oopDesc::follow_header() {
  if (UseCompressedOops) {
    MarkSweep::mark_and_push(compressed_klass_addr());
  } else {
    MarkSweep::mark_and_push(klass_addr());
  }
}

inline void oopDesc::follow_contents(void) {
  assert (is_gc_marked(), "should be marked");
  blueprint()->oop_follow_contents(this);
}


// Used by scavengers

inline bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark()->is_marked();
}

// Used by scavengers
inline void oopDesc::forward_to(oop p) {
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  set_mark(m);
}

// Used by parallel scavengers
inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark(m, compare) == compare;
}
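
// Editor's sketch (hypothetical, not from the original file): a parallel
// scavenger typically claims an object by CAS-installing a forwarding
// pointer, and the loser of the race reuses the winner's copy:
//
//   markOop old_mark = obj->mark();
//   if (!old_mark->is_marked()) {               // not yet forwarded
//     oop copy = /* allocate and copy obj */ NULL;
//     if (obj->cas_forward_to(copy, old_mark)) {
//       // we won: copy is the canonical new location
//     } else {
//       // another worker won: discard copy, use obj->forwardee() instead
//     }
//   }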

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
inline oop oopDesc::forwardee() const {
  return (oop) mark()->decode_pointer();
}

inline bool oopDesc::has_displaced_mark() const {
  return mark()->has_displaced_mark_helper();
}

inline markOop oopDesc::displaced_mark() const {
  return mark()->displaced_mark_helper();
}

inline void oopDesc::set_displaced_mark(markOop m) {
  mark()->set_displaced_mark_helper(m);
}

// The following method needs to be MT safe.
inline int oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark()) {
    return displaced_mark()->age();
  } else {
    return mark()->age();
  }
}

inline void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark()->incr_age());
  } else {
    set_mark(mark()->incr_age());
  }
}


inline intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed.
  // Note: The mark must be read into a local variable to avoid concurrent updates.
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}

inline void oopDesc::oop_iterate_header(OopClosure* blk) {
  if (UseCompressedOops) {
    blk->do_oop(compressed_klass_addr());
  } else {
    blk->do_oop(klass_addr());
  }
}

inline void oopDesc::oop_iterate_header(OopClosure* blk, MemRegion mr) {
  if (UseCompressedOops) {
    if (mr.contains(compressed_klass_addr())) {
      blk->do_oop(compressed_klass_addr());
    }
  } else {
    if (mr.contains(klass_addr())) blk->do_oop(klass_addr());
  }
}

inline int oopDesc::adjust_pointers() {
  debug_only(int check_size = size());
  int s = blueprint()->oop_adjust_pointers(this);
  assert(s == check_size, "should be the same");
  return s;
}

inline void oopDesc::adjust_header() {
  if (UseCompressedOops) {
    MarkSweep::adjust_pointer(compressed_klass_addr());
  } else {
    MarkSweep::adjust_pointer(klass_addr());
  }
}

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                        \
                                                                           \
inline int oopDesc::oop_iterate(OopClosureType* blk) {                     \
  SpecializationStats::record_call();                                      \
  return blueprint()->oop_oop_iterate##nv_suffix(this, blk);               \
}                                                                          \
                                                                           \
inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {       \
  SpecializationStats::record_call();                                      \
  return blueprint()->oop_oop_iterate##nv_suffix##_m(this, blk, mr);       \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)
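
// Editor's illustration (not part of the original file): for each closure
// type C in the ALL_OOP_OOP_ITERATE_CLOSURES_* lists, OOP_ITERATE_DEFN(C,
// nv_suffix) token-pastes the suffix onto the dispatch call, so with a
// suffix of _nv the first definition expands along the lines of:
//
//   inline int oopDesc::oop_iterate(C* blk) {
//     SpecializationStats::record_call();
//     return blueprint()->oop_oop_iterate_nv(this, blk);
//   }
//
// giving each closure a specialized, non-virtual iterate entry point in
// addition to the generic virtual one.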

#ifndef SERIALGC
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)              \
                                                                           \
inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) {           \
  SpecializationStats::record_call();                                      \
  return blueprint()->oop_oop_iterate_backwards##nv_suffix(this, blk);     \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN)
#endif // !SERIALGC

inline bool oopDesc::is_shared() const {
  return CompactingPermGenGen::is_shared(this);
}

inline bool oopDesc::is_shared_readonly() const {
  return CompactingPermGenGen::is_shared_readonly(this);
}

inline bool oopDesc::is_shared_readwrite() const {
  return CompactingPermGenGen::is_shared_readwrite(this);
}