/*
 * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_UTILITIES_STACK_INLINE_HPP
#define SHARE_VM_UTILITIES_STACK_INLINE_HPP

#include "utilities/align.hpp"
#include "utilities/stack.hpp"
#include "utilities/copy.hpp"

template <MEMFLAGS F> StackBase<F>::StackBase(size_t segment_size, size_t max_cache_size,
                     size_t max_size):
  _seg_size(segment_size),
  _max_cache_size(max_cache_size),
  _max_size(adjust_max_size(max_size, segment_size))
{
  assert(_max_size % _seg_size == 0, "not a multiple");
}

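// Round max_size up to a multiple of the segment size; a max_size of zero
// (or one too large to round up safely) selects the largest multiple of
// seg_size that still fits in max_uintx.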
template <MEMFLAGS F> size_t StackBase<F>::adjust_max_size(size_t max_size, size_t seg_size)
{
  assert(seg_size > 0, "cannot be 0");
  assert(max_size >= seg_size || max_size == 0, "max_size too small");
  const size_t limit = max_uintx - (seg_size - 1);
  if (max_size == 0 || max_size > limit) {
    max_size = limit;
  }
  return (max_size + seg_size - 1) / seg_size * seg_size;
}

template <class E, MEMFLAGS F>
Stack<E, F>::Stack(size_t segment_size, size_t max_cache_size, size_t max_size):
  StackBase<F>(adjust_segment_size(segment_size), max_cache_size, max_size)
{
  reset(true);
}

template <class E, MEMFLAGS F>
void Stack<E, F>::push(E item)
{
  assert(!is_full(), "pushing onto a full stack");
  if (this->_cur_seg_size == this->_seg_size) {
    push_segment();
  }
  this->_cur_seg[this->_cur_seg_size] = item;
  ++this->_cur_seg_size;
}

template <class E, MEMFLAGS F>
E Stack<E, F>::pop()
{
  assert(!is_empty(), "popping from an empty stack");
  if (this->_cur_seg_size == 1) {
    E tmp = _cur_seg[--this->_cur_seg_size];
    pop_segment();
    return tmp;
  }
  return this->_cur_seg[--this->_cur_seg_size];
}

template <class E, MEMFLAGS F>
void Stack<E, F>::clear(bool clear_cache)
{
  free_segments(_cur_seg);
  if (clear_cache) free_segments(_cache);
  reset(clear_cache);
}

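// If elements are smaller than pointers, round the element count up so the
// segment's element array ends on a pointer boundary; the link field stored
// after the elements then needs no extra padding.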
template <class E, MEMFLAGS F>
size_t Stack<E, F>::adjust_segment_size(size_t seg_size)
{
  const size_t elem_sz = sizeof(E);
  const size_t ptr_sz = sizeof(E*);
  assert(elem_sz % ptr_sz == 0 || ptr_sz % elem_sz == 0, "bad element size");
  if (elem_sz < ptr_sz) {
    return align_up(seg_size * elem_sz, ptr_sz) / elem_sz;
  }
  return seg_size;
}

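// Each segment is a single allocation laid out as _seg_size elements
// followed by a pointer-aligned link to the previous segment.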
template <class E, MEMFLAGS F>
size_t Stack<E, F>::link_offset() const
{
  return align_up(this->_seg_size * sizeof(E), sizeof(E*));
}

template <class E, MEMFLAGS F>
size_t Stack<E, F>::segment_bytes() const
{
  return link_offset() + sizeof(E*);
}

template <class E, MEMFLAGS F>
E** Stack<E, F>::link_addr(E* seg) const
{
  return (E**) ((char*)seg + link_offset());
}

template <class E, MEMFLAGS F>
E* Stack<E, F>::get_link(E* seg) const
{
  return *link_addr(seg);
}

template <class E, MEMFLAGS F>
E* Stack<E, F>::set_link(E* new_seg, E* old_seg)
{
  *link_addr(new_seg) = old_seg;
  return new_seg;
}

template <class E, MEMFLAGS F>
E* Stack<E, F>::alloc(size_t bytes)
{
  return (E*) NEW_C_HEAP_ARRAY(char, bytes, F);
}

template <class E, MEMFLAGS F>
void Stack<E, F>::free(E* addr, size_t bytes)
{
  FREE_C_HEAP_ARRAY(char, (char*) addr);
}

// Stack is used by the GC code and in some hot paths a lot of the Stack
// code gets inlined. This is generally good, but when too much code has
// been inlined, no further inlining is allowed by GCC. Therefore we need
// to prevent parts of the slow path in Stack from being inlined so that
// other code can be.
template <class E, MEMFLAGS F>
NOINLINE void Stack<E, F>::push_segment()
{
  assert(this->_cur_seg_size == this->_seg_size, "current segment is not full");
  E* next;
  if (this->_cache_size > 0) {
    // Use a cached segment.
    next = _cache;
    _cache = get_link(_cache);
    --this->_cache_size;
  } else {
    next = alloc(segment_bytes());
    DEBUG_ONLY(zap_segment(next, true);)
  }
  const bool at_empty_transition = is_empty();
  this->_cur_seg = set_link(next, _cur_seg);
  this->_cur_seg_size = 0;
  this->_full_seg_size += at_empty_transition ? 0 : this->_seg_size;
  DEBUG_ONLY(verify(at_empty_transition);)
}

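// Retire the (empty) current segment: return it to the cache if there is
// room, otherwise free it, and make the previous segment current again.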
template <class E, MEMFLAGS F>
void Stack<E, F>::pop_segment()
{
  assert(this->_cur_seg_size == 0, "current segment is not empty");
  E* const prev = get_link(_cur_seg);
  if (this->_cache_size < this->_max_cache_size) {
    // Add the current segment to the cache.
    DEBUG_ONLY(zap_segment(_cur_seg, false);)
    _cache = set_link(_cur_seg, _cache);
    ++this->_cache_size;
  } else {
    DEBUG_ONLY(zap_segment(_cur_seg, true);)
    free(_cur_seg, segment_bytes());
  }
  const bool at_empty_transition = prev == NULL;
  this->_cur_seg = prev;
  this->_cur_seg_size = this->_seg_size;
  this->_full_seg_size -= at_empty_transition ? 0 : this->_seg_size;
  DEBUG_ONLY(verify(at_empty_transition);)
}

template <class E, MEMFLAGS F>
void Stack<E, F>::free_segments(E* seg)
{
  const size_t bytes = segment_bytes();
  while (seg != NULL) {
    E* const prev = get_link(seg);
    free(seg, bytes);
    seg = prev;
  }
}

template <class E, MEMFLAGS F>
void Stack<E, F>::reset(bool reset_cache)
{
  this->_cur_seg_size = this->_seg_size; // So push() will alloc a new segment.
  this->_full_seg_size = 0;
  _cur_seg = NULL;
  if (reset_cache) {
    this->_cache_size = 0;
    _cache = NULL;
  }
}

#ifdef ASSERT
template <class E, MEMFLAGS F>
void Stack<E, F>::verify(bool at_empty_transition) const
{
  assert(size() <= this->max_size(), "stack exceeded bounds");
  assert(this->cache_size() <= this->max_cache_size(), "cache exceeded bounds");
  assert(this->_cur_seg_size <= this->segment_size(), "segment index exceeded bounds");

  assert(this->_full_seg_size % this->_seg_size == 0, "not a multiple");
  assert(at_empty_transition || is_empty() == (size() == 0), "mismatch");
  assert((_cache == NULL) == (this->cache_size() == 0), "mismatch");

  if (is_empty()) {
    assert(this->_cur_seg_size == this->segment_size(), "sanity");
  }
}

template <class E, MEMFLAGS F>
void Stack<E, F>::zap_segment(E* seg, bool zap_link_field) const
{
  if (!ZapStackSegments) return;
  const size_t zap_bytes = segment_bytes() - (zap_link_field ? 0 : sizeof(E*));
  Copy::fill_to_bytes(seg, zap_bytes, badStackSegVal);
}
#endif

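// ResourceStack allocates its segments from the resource area instead of
// the C heap.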
template <class E, MEMFLAGS F>
E* ResourceStack<E, F>::alloc(size_t bytes)
{
  return (E*) resource_allocate_bytes(bytes);
}

template <class E, MEMFLAGS F>
void ResourceStack<E, F>::free(E* addr, size_t bytes)
{
  resource_free_bytes((char*) addr, bytes);
}

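// Snapshot the stack's top segment and sizes; next_addr() then walks the
// elements from the top of the stack downward, following the segment links.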
template <class E, MEMFLAGS F>
void StackIterator<E, F>::sync()
{
  _full_seg_size = _stack._full_seg_size;
  _cur_seg_size = _stack._cur_seg_size;
  _cur_seg = _stack._cur_seg;
}

template <class E, MEMFLAGS F>
E* StackIterator<E, F>::next_addr()
{
  assert(!is_empty(), "no items left");
  if (_cur_seg_size == 1) {
    E* addr = _cur_seg;
    _cur_seg = _stack.get_link(_cur_seg);
    _cur_seg_size = _stack.segment_size();
    _full_seg_size -= _stack.segment_size();
    return addr;
  }
  return _cur_seg + --_cur_seg_size;
}

#endif // SHARE_VM_UTILITIES_STACK_INLINE_HPP