stack.inline.hpp revision 8935:2cad024257e9
/*
 * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_UTILITIES_STACK_INLINE_HPP
#define SHARE_VM_UTILITIES_STACK_INLINE_HPP

#include "utilities/stack.hpp"

// Stack is used by the GC code, and in some hot paths a lot of the Stack
// code gets inlined. This is generally good, but when too much code has
// been inlined, GCC refuses to inline any further. Therefore we prevent
// parts of the slow path in Stack from being inlined so that other code
// can be.
#if defined(TARGET_COMPILER_gcc)
#define NOINLINE __attribute__((noinline))
#else
#define NOINLINE
#endif

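// Illustrative use (a sketch, not part of this file): GC code typically keeps
// a Stack of oops on the C heap, e.g.
//
//   Stack<oop, mtGC> marking_stack;   // hypothetical name; ctor defaults from stack.hpp
//   marking_stack.push(obj);
//   while (!marking_stack.is_empty()) {
//     oop top = marking_stack.pop();
//     ...
//   }
//
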
template <MEMFLAGS F> StackBase<F>::StackBase(size_t segment_size, size_t max_cache_size,
                     size_t max_size):
  _seg_size(segment_size),
  _max_cache_size(max_cache_size),
  _max_size(adjust_max_size(max_size, segment_size))
{
  assert(_max_size % _seg_size == 0, "not a multiple");
}

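// Round max_size up to a multiple of seg_size. A max_size of 0 (or one large
// enough to overflow the rounding) is treated as "unbounded" and clamped to
// the largest multiple of seg_size not exceeding max_uintx. Purely as an
// example, seg_size == 16 and max_size == 100 yields 112.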
template <MEMFLAGS F> size_t StackBase<F>::adjust_max_size(size_t max_size, size_t seg_size)
{
  assert(seg_size > 0, "cannot be 0");
  assert(max_size >= seg_size || max_size == 0, "max_size too small");
  const size_t limit = max_uintx - (seg_size - 1);
  if (max_size == 0 || max_size > limit) {
    max_size = limit;
  }
  return (max_size + seg_size - 1) / seg_size * seg_size;
}

template <class E, MEMFLAGS F>
Stack<E, F>::Stack(size_t segment_size, size_t max_cache_size, size_t max_size):
  StackBase<F>(adjust_segment_size(segment_size), max_cache_size, max_size)
{
  reset(true);
}

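// Add item on top of the stack, starting a fresh segment first if the current
// one is full.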
template <class E, MEMFLAGS F>
void Stack<E, F>::push(E item)
{
  assert(!is_full(), "pushing onto a full stack");
  if (this->_cur_seg_size == this->_seg_size) {
    push_segment();
  }
  this->_cur_seg[this->_cur_seg_size] = item;
  ++this->_cur_seg_size;
}

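// Remove and return the top item. Taking the last item of the current segment
// also releases that segment (to the cache or back to the allocator).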
template <class E, MEMFLAGS F>
E Stack<E, F>::pop()
{
  assert(!is_empty(), "popping from an empty stack");
  if (this->_cur_seg_size == 1) {
    E tmp = _cur_seg[--this->_cur_seg_size];
    pop_segment();
    return tmp;
  }
  return this->_cur_seg[--this->_cur_seg_size];
}

template <class E, MEMFLAGS F>
void Stack<E, F>::clear(bool clear_cache)
{
  free_segments(_cur_seg);
  if (clear_cache) free_segments(_cache);
  reset(clear_cache);
}

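// If elements are smaller than a pointer, round the requested segment size up
// so the element array ends exactly at the pointer-aligned link field and no
// padding bytes are wasted. For example, with sizeof(E) == 2 on a 64-bit
// build, a requested size of 5 becomes 8.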
template <class E, MEMFLAGS F>
size_t Stack<E, F>::adjust_segment_size(size_t seg_size)
{
  const size_t elem_sz = sizeof(E);
  const size_t ptr_sz = sizeof(E*);
  assert(elem_sz % ptr_sz == 0 || ptr_sz % elem_sz == 0, "bad element size");
  if (elem_sz < ptr_sz) {
    return align_size_up(seg_size * elem_sz, ptr_sz) / elem_sz;
  }
  return seg_size;
}

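// Segment layout: an array of _seg_size elements followed by a single E* link
// to the previous segment. link_offset() is the pointer-aligned byte offset of
// that link; segment_bytes() is the total allocation size.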
template <class E, MEMFLAGS F>
size_t Stack<E, F>::link_offset() const
{
  return align_size_up(this->_seg_size * sizeof(E), sizeof(E*));
}

template <class E, MEMFLAGS F>
size_t Stack<E, F>::segment_bytes() const
{
  return link_offset() + sizeof(E*);
}

template <class E, MEMFLAGS F>
E** Stack<E, F>::link_addr(E* seg) const
{
  return (E**) ((char*)seg + link_offset());
}

template <class E, MEMFLAGS F>
E* Stack<E, F>::get_link(E* seg) const
{
  return *link_addr(seg);
}

template <class E, MEMFLAGS F>
E* Stack<E, F>::set_link(E* new_seg, E* old_seg)
{
  *link_addr(new_seg) = old_seg;
  return new_seg;
}

template <class E, MEMFLAGS F>
E* Stack<E, F>::alloc(size_t bytes)
{
  return (E*) NEW_C_HEAP_ARRAY(char, bytes, F);
}

template <class E, MEMFLAGS F>
void Stack<E, F>::free(E* addr, size_t bytes)
{
  FREE_C_HEAP_ARRAY(char, (char*) addr);
}

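// Install a new, empty current segment: reuse one from the cache when
// possible, otherwise allocate one, and link it in front of the old current
// segment. Deliberately NOINLINE: this is the slow path of push(), and keeping
// it out of line preserves inlining budget for hotter code (see the comment at
// the top of this file).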
template <class E, MEMFLAGS F>
NOINLINE void Stack<E, F>::push_segment()
{
  assert(this->_cur_seg_size == this->_seg_size, "current segment is not full");
  E* next;
  if (this->_cache_size > 0) {
    // Use a cached segment.
    next = _cache;
    _cache = get_link(_cache);
    --this->_cache_size;
  } else {
    next = alloc(segment_bytes());
    DEBUG_ONLY(zap_segment(next, true);)
  }
  const bool at_empty_transition = is_empty();
  this->_cur_seg = set_link(next, _cur_seg);
  this->_cur_seg_size = 0;
  this->_full_seg_size += at_empty_transition ? 0 : this->_seg_size;
  DEBUG_ONLY(verify(at_empty_transition);)
}

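// Retire the now-empty current segment: return it to the cache if there is
// room, otherwise free it, then make the previous segment current again.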
template <class E, MEMFLAGS F>
void Stack<E, F>::pop_segment()
{
  assert(this->_cur_seg_size == 0, "current segment is not empty");
  E* const prev = get_link(_cur_seg);
  if (this->_cache_size < this->_max_cache_size) {
    // Add the current segment to the cache.
    DEBUG_ONLY(zap_segment(_cur_seg, false);)
    _cache = set_link(_cur_seg, _cache);
    ++this->_cache_size;
  } else {
    DEBUG_ONLY(zap_segment(_cur_seg, true);)
    free(_cur_seg, segment_bytes());
  }
  const bool at_empty_transition = prev == NULL;
  this->_cur_seg = prev;
  this->_cur_seg_size = this->_seg_size;
  this->_full_seg_size -= at_empty_transition ? 0 : this->_seg_size;
  DEBUG_ONLY(verify(at_empty_transition);)
}

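// Free every segment on the list headed by seg, following the embedded links.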
template <class E, MEMFLAGS F>
void Stack<E, F>::free_segments(E* seg)
{
  const size_t bytes = segment_bytes();
  while (seg != NULL) {
    E* const prev = get_link(seg);
    free(seg, bytes);
    seg = prev;
  }
}

template <class E, MEMFLAGS F>
void Stack<E, F>::reset(bool reset_cache)
{
  this->_cur_seg_size = this->_seg_size; // So push() will alloc a new segment.
  this->_full_seg_size = 0;
  _cur_seg = NULL;
  if (reset_cache) {
    this->_cache_size = 0;
    _cache = NULL;
  }
}

#ifdef ASSERT
template <class E, MEMFLAGS F>
void Stack<E, F>::verify(bool at_empty_transition) const
{
  assert(size() <= this->max_size(), "stack exceeded bounds");
  assert(this->cache_size() <= this->max_cache_size(), "cache exceeded bounds");
  assert(this->_cur_seg_size <= this->segment_size(), "segment index exceeded bounds");

  assert(this->_full_seg_size % this->_seg_size == 0, "not a multiple");
  assert(at_empty_transition || is_empty() == (size() == 0), "mismatch");
  assert((_cache == NULL) == (this->cache_size() == 0), "mismatch");

  if (is_empty()) {
    assert(this->_cur_seg_size == this->segment_size(), "sanity");
  }
}

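// Debug-only: fill a segment with the 0xfadfaded pattern so reads of stale
// data stand out; zap_link_field selects whether the trailing link pointer is
// overwritten as well.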
template <class E, MEMFLAGS F>
void Stack<E, F>::zap_segment(E* seg, bool zap_link_field) const
{
  if (!ZapStackSegments) return;
  const size_t zap_bytes = segment_bytes() - (zap_link_field ? 0 : sizeof(E*));
  uint32_t* cur = (uint32_t*)seg;
  const uint32_t* end = cur + zap_bytes / sizeof(uint32_t);
  while (cur < end) {
    *cur++ = 0xfadfaded;
  }
}
#endif

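// ResourceStack segments are carved out of the current thread's resource area
// instead of the C heap, so they are released with the enclosing ResourceMark.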
template <class E, MEMFLAGS F>
E* ResourceStack<E, F>::alloc(size_t bytes)
{
  return (E*) resource_allocate_bytes(bytes);
}

template <class E, MEMFLAGS F>
void ResourceStack<E, F>::free(E* addr, size_t bytes)
{
  resource_free_bytes((char*) addr, bytes);
}

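// Snapshot the stack's current segment and size fields so the iterator walks a
// consistent view of the stack.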
template <class E, MEMFLAGS F>
void StackIterator<E, F>::sync()
{
  _full_seg_size = _stack._full_seg_size;
  _cur_seg_size = _stack._cur_seg_size;
  _cur_seg = _stack._cur_seg;
}

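// Return the address of the next unvisited element, walking from the top of
// the stack downward; when a segment is exhausted, follow its link to the
// previous segment.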
template <class E, MEMFLAGS F>
E* StackIterator<E, F>::next_addr()
{
  assert(!is_empty(), "no items left");
  if (_cur_seg_size == 1) {
    E* addr = _cur_seg;
    _cur_seg = _stack.get_link(_cur_seg);
    _cur_seg_size = _stack.segment_size();
    _full_seg_size -= _stack.segment_size();
    return addr;
  }
  return _cur_seg + --_cur_seg_size;
}

#undef NOINLINE

#endif // SHARE_VM_UTILITIES_STACK_INLINE_HPP