ptrQueue.cpp revision 13067:f37660e24e9e
/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/ptrQueue.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"

#include <new>

PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
  _qset(qset),
  _active(active),
  _permanent(permanent),
  _index(0),
  _capacity_in_bytes(0),
  _buf(NULL),
  _lock(NULL)
{}

PtrQueue::~PtrQueue() {
  assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
}

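// Flush the local buffer when a queue is being torn down: an empty
// buffer is returned to the free list, while a non-empty one is handed
// off to the qset as a completed buffer for later processing.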
void PtrQueue::flush_impl() {
  if (_buf != NULL) {
    BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
    if (is_empty()) {
      // No work to do.
      qset()->deallocate_buffer(node);
    } else {
      qset()->enqueue_complete_buffer(node);
    }
    _buf = NULL;
    set_index(0);
  }
}

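// Add ptr to the buffer, refilling first if necessary. Note that
// _index counts down, in bytes, from the buffer's capacity to zero, so
// a zero index means the buffer is full; index() converts it to an
// element index into _buf. For example, on a 64-bit machine with a
// 256-element buffer, a freshly reset queue has _index == 2048, and
// each enqueue decrements it by _element_size == 8.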
void PtrQueue::enqueue_known_active(void* ptr) {
  while (_index == 0) {
    handle_zero_index();
  }

  assert(_buf != NULL, "postcondition");
  assert(index() > 0, "postcondition");
  assert(index() <= capacity(), "invariant");
  _index -= _element_size;
  _buf[index()] = ptr;
}

void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
  assert(_lock->owned_by_self(), "Required.");

  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer, as they
  // have the same rank and we may otherwise get a "possible deadlock"
  // message.
  _lock->unlock();

  qset()->enqueue_complete_buffer(node);
  // We must relock only because the caller will unlock again in the
  // normal case.
  _lock->lock_without_safepoint_check();
}

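// Allocate the BufferNode header and its buffer of 'size' pointers as
// a single C-heap block, constructing the header in place with
// placement new; the buffer storage begins buffer_offset() bytes into
// the block.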
BufferNode* BufferNode::allocate(size_t size) {
  size_t byte_size = size * sizeof(void*);
  void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
  return new (data) BufferNode;
}

void BufferNode::deallocate(BufferNode* node) {
  node->~BufferNode();
  FREE_C_HEAP_ARRAY(char, node);
}

PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _buffer_size(0),
  _max_completed_queue(0),
  _cbl_mon(NULL), _fl_lock(NULL),
  _notify_when_complete(notify_when_complete),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_threshold(0), _process_completed(false),
  _buf_free_list(NULL), _buf_free_list_sz(0)
{
  _fl_owner = this;
}

PtrQueueSet::~PtrQueueSet() {
  // Only a couple of (derived) instances are ever created, and they
  // are permanent, so no harm is currently done by doing nothing here.
}

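// Second phase of the two-step construction: the constructor only
// zero-initializes the fields, so the monitors, thresholds, and
// free-list owner must be supplied here before the set is used.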
void PtrQueueSet::initialize(Monitor* cbl_mon,
                             Mutex* fl_lock,
                             int process_completed_threshold,
                             int max_completed_queue,
                             PtrQueueSet *fl_owner) {
  _max_completed_queue = max_completed_queue;
  _process_completed_threshold = process_completed_threshold;
  _completed_queue_padding = 0;
  assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
  _cbl_mon = cbl_mon;
  _fl_lock = fl_lock;
  _fl_owner = (fl_owner != NULL) ? fl_owner : this;
}

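// Obtain a buffer, preferring a node from the shared free list and
// falling back to a fresh allocation. Note that the return value is a
// pointer to the node's buffer storage, not to the node itself.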
void** PtrQueueSet::allocate_buffer() {
  BufferNode* node = NULL;
  {
    MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
    node = _fl_owner->_buf_free_list;
    if (node != NULL) {
      _fl_owner->_buf_free_list = node->next();
      _fl_owner->_buf_free_list_sz--;
    }
  }
  if (node == NULL) {
    node = BufferNode::allocate(buffer_size());
  } else {
    // Reinitialize buffer obtained from free list.
    node->set_index(0);
    node->set_next(NULL);
  }
  return BufferNode::make_buffer_from_node(node);
}

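// Return a buffer node to the shared free list for later reuse.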
void PtrQueueSet::deallocate_buffer(BufferNode* node) {
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  node->set_next(_fl_owner->_buf_free_list);
  _fl_owner->_buf_free_list = node;
  _fl_owner->_buf_free_list_sz++;
}

void PtrQueueSet::reduce_free_list() {
  assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
  // For now we'll adopt the strategy of deleting half.
  MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
  size_t n = _buf_free_list_sz / 2;
  for (size_t i = 0; i < n; ++i) {
    assert(_buf_free_list != NULL,
           "_buf_free_list_sz is wrong: " SIZE_FORMAT, _buf_free_list_sz);
    BufferNode* node = _buf_free_list;
    _buf_free_list = node->next();
    _buf_free_list_sz--;
    BufferNode::deallocate(node);
  }
}

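// Called when the buffer is exhausted (index() == 0), including the
// initial state where no buffer has been allocated yet. Retires the
// current buffer, unless should_enqueue_buffer() made room to reuse
// it, and installs a fresh buffer so enqueueing can proceed.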
void PtrQueue::handle_zero_index() {
  assert(index() == 0, "precondition");

  // This thread records the full buffer and allocates a new one (while
  // holding the lock if there is one).
  if (_buf != NULL) {
    if (!should_enqueue_buffer()) {
      assert(index() > 0, "the buffer can only be re-used if it's not full");
      return;
    }

    if (_lock != NULL) {
      assert(_lock->owned_by_self(), "Required.");

      // The current PtrQ may be the shared dirty card queue and
      // may be manipulated by more than one worker thread
      // during a pause. Since the enqueueing of the completed
      // buffer unlocks the Shared_DirtyCardQ_lock, more than one
      // worker thread can 'race' on reading the shared queue attributes
      // (_buf and _index), and multiple threads can call into this
      // routine for the same buffer. This will cause the completed
      // buffer to be added to the CBL multiple times.

      // We "claim" the current buffer by caching the value of _buf in
      // a local and clearing the field while holding _lock. When
      // _lock is released (while enqueueing the completed buffer)
      // the thread that acquires _lock will skip this code,
      // preventing a subsequent multiple enqueue, and will
      // install a newly allocated buffer below.

      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
      _buf = NULL;         // clear shared _buf field

      locking_enqueue_completed_buffer(node); // enqueue completed buffer

      // While the current thread was enqueueing the buffer, another thread
      // may have allocated a new buffer and inserted it into this pointer
      // queue. If that happens then we just return, so that the current
      // thread doesn't overwrite the buffer allocated by the other thread
      // and potentially lose some dirtied cards.

      if (_buf != NULL) return;
    } else {
      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
      if (qset()->process_or_enqueue_complete_buffer(node)) {
        // Recycle the buffer. No allocation.
        assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
        assert(capacity() == qset()->buffer_size(), "invariant");
        reset();
        return;
      }
    }
  }
  // Set capacity in case this is the first allocation.
  set_capacity(qset()->buffer_size());
  // Allocate a new buffer.
  _buf = qset()->allocate_buffer();
  reset();
}

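// Either have a Java (mutator) thread process the buffer directly, or
// enqueue it on the completed buffer list. Returns true iff the buffer
// was processed and so may be reused by the caller; returns false if
// it was enqueued and the caller must obtain a new buffer.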
bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
  if (Thread::current()->is_Java_thread()) {
    // We don't lock. It is fine to be epsilon-precise here.
    if (_max_completed_queue == 0 ||
        (_max_completed_queue > 0 &&
         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
      bool b = mut_process_buffer(node);
      if (b) {
        // True here means that the buffer hasn't been deallocated and the
        // caller may reuse it.
        return true;
      }
    }
  }
  // The buffer will be enqueued. The caller will have to get a new one.
  enqueue_complete_buffer(node);
  return false;
}

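// Append the node to the completed buffer list. Once the list length
// reaches _process_completed_threshold, request processing, notifying
// the processing thread if this set was configured to do so.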
void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  cbn->set_next(NULL);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  if (!_process_completed && _process_completed_threshold >= 0 &&
      _n_completed_buffers >= (size_t)_process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete) {
      _cbl_mon->notify();
    }
  }
  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}

size_t PtrQueueSet::completed_buffers_list_length() {
  size_t n = 0;
  BufferNode* cbn = _completed_buffers_head;
  while (cbn != NULL) {
    n++;
    cbn = cbn->next();
  }
  return n;
}

void PtrQueueSet::assert_completed_buffer_list_len_correct() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert_completed_buffer_list_len_correct_locked();
}

void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
  guarantee(completed_buffers_list_length() == _n_completed_buffers,
            "Completed buffer length is wrong.");
}

void PtrQueueSet::set_buffer_size(size_t sz) {
  assert(_buffer_size == 0 && sz > 0, "Should be called only once.");
  _buffer_size = sz;
}

// Merge lists of buffers. The source queue is emptied as a result.
// The queues must share the monitor. Notifying the processing threads,
// if needed, is left to the caller (see notify_if_necessary()).
void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
  assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = src->_completed_buffers_head;
    _completed_buffers_tail = src->_completed_buffers_tail;
  } else {
    assert(_completed_buffers_head != NULL, "Well-formedness");
    if (src->_completed_buffers_head != NULL) {
      _completed_buffers_tail->set_next(src->_completed_buffers_head);
      _completed_buffers_tail = src->_completed_buffers_tail;
    }
  }
  _n_completed_buffers += src->_n_completed_buffers;

  src->_n_completed_buffers = 0;
  src->_completed_buffers_head = NULL;
  src->_completed_buffers_tail = NULL;

  assert((_completed_buffers_head == NULL && _completed_buffers_tail == NULL) ||
         (_completed_buffers_head != NULL && _completed_buffers_tail != NULL),
         "Sanity");
}

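// Re-check the processing threshold outside of an enqueue, e.g. after
// buffers have been merged in from another set, and request processing
// if it has been reached.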
void PtrQueueSet::notify_if_necessary() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert(_process_completed_threshold >= 0, "_process_completed_threshold is negative");
  if (_n_completed_buffers >= (size_t)_process_completed_threshold || _max_completed_queue == 0) {
    _process_completed = true;
    if (_notify_when_complete) {
      _cbl_mon->notify();
    }
  }
}
340