1/*
2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#ifndef SHARE_VM_GC_CMS_FREECHUNK_HPP
26#define SHARE_VM_GC_CMS_FREECHUNK_HPP
27
28#include "memory/allocation.hpp"
29#include "memory/memRegion.hpp"
30#include "oops/markOop.hpp"
31#include "runtime/mutex.hpp"
32#include "runtime/orderAccess.hpp"
33#include "utilities/debug.hpp"
34#include "utilities/globalDefinitions.hpp"
35#include "utilities/ostream.hpp"
36
37//
38// Free block maintenance for Concurrent Mark Sweep Generation
39//
40// The main data structure for free blocks are
41// . an indexed array of small free blocks, and
42// . a dictionary of large free blocks
43//
44
45// No virtuals in FreeChunk (don't want any vtables).
46
47// A FreeChunk is merely a chunk that can be in a doubly linked list
48// and has a size field. NOTE: FreeChunks are distinguished from allocated
49// objects in two ways (by the sweeper), depending on whether the VM is 32 or
50// 64 bits.
51// In 32 bits or 64 bits without CompressedOops, the second word (prev) has the
52// LSB set to indicate a free chunk; allocated objects' klass() pointers
53// don't have their LSB set. The corresponding bit in the CMSBitMap is
54// set when the chunk is allocated. There are also blocks that "look free"
55// but are not part of the free list and should not be coalesced into larger
56// free blocks. These free blocks have their two LSB's set.
57
class FreeChunk VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
  // For 64 bit compressed oops, the markOop encodes both the size and the
  // indication that this is a FreeChunk and not an object.
  //
  // Field layout mirrors the start of an allocated object on purpose:
  // _size occupies the mark-word slot and _prev occupies the klass-pointer
  // slot, which is what lets the sweeper distinguish free chunks from
  // objects by inspecting raw heap words (see is_free() below).
  volatile size_t   _size;
  FreeChunk* _prev;
  FreeChunk* _next;

  // Reinterpret the _size slot as a mark word (and vice versa). Only
  // meaningful in the 64-bit UseCompressedOops configuration, where the
  // mark word carries both the chunk size and the free-chunk indication.
  markOop mark()     const volatile { return (markOop)_size; }
  void set_mark(markOop m)          { _size = (size_t)m; }

 public:
  NOT_PRODUCT(static const size_t header_size();)

  // Returns "true" if the address indicates that the block represents
  // a free chunk.
  static bool indicatesFreeChunk(const HeapWord* addr) {
    // Force volatile read from addr because value might change between
    // calls.  We really want the read of _mark and _prev from this pointer
    // to be volatile but making the fields volatile causes all sorts of
    // compilation errors.
    return ((volatile FreeChunk*)addr)->is_free();
  }

  // Free-chunk test. Under 64-bit compressed oops the mark word is asked
  // directly; otherwise bit 0 of _prev (the word an allocated object would
  // use for its klass pointer) is the "free" tag — see the file-header
  // comment above the class.
  bool is_free() const volatile {
    LP64_ONLY(if (UseCompressedOops) return mark()->is_cms_free_chunk(); else)
    return (((intptr_t)_prev) & 0x1) == 0x1;
  }
  // Bit 1 of _prev marks a block that "looks free" but must not be
  // coalesced with neighboring free blocks. Only valid on a free chunk.
  bool cantCoalesce() const {
    assert(is_free(), "can't get coalesce bit on not free");
    return (((intptr_t)_prev) & 0x2) == 0x2;
  }
  // Set the don't-coalesce bit (bit 1 of _prev) on a free chunk.
  void dontCoalesce() {
    // the block should be free
    assert(is_free(), "Should look like a free block");
    _prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
  }
  // Previous chunk in the doubly-linked free list, with the two low tag
  // bits (free + don't-coalesce) masked off.
  FreeChunk* prev() const {
    return (FreeChunk*)(((intptr_t)_prev) & ~(0x3));
  }

  // Debug-only raw field addresses, used by verification code.
  debug_only(void* prev_addr() const { return (void*)&_prev; })
  debug_only(void* next_addr() const { return (void*)&_next; })
  debug_only(void* size_addr() const { return (void*)&_size; })

  // Chunk size: decoded from the mark word under 64-bit compressed oops,
  // read directly from _size otherwise. (Units: presumably HeapWords,
  // consistent with end() below — confirm against the .cpp users.)
  size_t size() const volatile {
    LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
    return _size;
  }
  // Store the size; under 64-bit compressed oops this also encodes the
  // "free" indication into the mark word in the same store.
  void set_size(size_t sz) {
    LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::set_size_and_free(sz)); else )
    _size = sz;
  }

  FreeChunk* next()   const { return _next; }

  // Splice ptr into the list immediately after this chunk (both links).
  void link_after(FreeChunk* ptr) {
    link_next(ptr);
    if (ptr != NULL) ptr->link_prev(this);
  }
  void link_next(FreeChunk* ptr) { _next = ptr; }
  // Set the prev link. Outside the compressed-oops case the "free" tag
  // (bit 0) must be re-applied, since _prev doubles as the free indicator.
  void link_prev(FreeChunk* ptr) {
    LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
    _prev = (FreeChunk*)((intptr_t)ptr | 0x1);
  }
  void clear_next()              { _next = NULL; }
  // Erase the free-chunk indication, e.g. when the chunk is handed out as
  // an allocation.
  void markNotFree() {
    // Set _prev (klass) to null before (if) clearing the mark word below
    _prev = NULL;
#ifdef _LP64
    if (UseCompressedOops) {
      // storestore orders the _prev store above before the mark-word
      // reset, so a concurrent reader never sees a non-free mark word
      // paired with a stale tagged _prev.
      OrderAccess::storestore();
      set_mark(markOopDesc::prototype());
    }
#endif
    assert(!is_free(), "Error");
  }

  // Return the address past the end of this chunk
  uintptr_t* end() const { return ((uintptr_t*) this) + size(); }

  // debugging
  void verify()             const PRODUCT_RETURN;
  void verifyList()         const PRODUCT_RETURN;
  void mangleAllocated(size_t size) PRODUCT_RETURN;
  void mangleFreed(size_t size)     PRODUCT_RETURN;

  void print_on(outputStream* st);
};
147
// Minimum size of a chunk managed by the CMS free-block machinery
// (presumably in HeapWords, matching FreeChunk::size() — confirm at the
// definition site in the corresponding .cpp file).
extern size_t MinChunkSize;
149
150
151#endif // SHARE_VM_GC_CMS_FREECHUNK_HPP
152