/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/workgroup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"

// Closure used for updating remembered sets and recording references that
// point into the collection set while the mutator is running.
// Assumed to be executed only concurrently with the mutator, never at a
// safepoint. Yields via SuspendibleThreadSet after every card.
class G1RefineCardConcurrentlyClosure: public CardTableEntryClosure {
public:
  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    G1CollectedHeap::heap()->g1_rem_set()->refine_card_concurrently(card_ptr, worker_i);

    if (SuspendibleThreadSet::should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }
};

// Represents a set of free small integer ids.
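// The ids are kept on an intrusive singly-linked free list threaded
// through the _ids array: _ids[i] holds the id that follows i on the
// free list, _hd is the first free id, and end_of_list terminates the
// chain.  Claiming pops an id from the head of the list; releasing
// pushes it back onto the head.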
class FreeIdSet : public CHeapObj<mtGC> {
  enum {
    end_of_list = UINT_MAX,
    claimed = UINT_MAX - 1
  };

  uint _size;
  Monitor* _mon;

  uint* _ids;
  uint _hd;
  uint _waiters;
  uint _claimed;

public:
  FreeIdSet(uint size, Monitor* mon);
  ~FreeIdSet();

  // Returns an unclaimed parallel id (waiting for one to be released if
  // necessary).
  uint claim_par_id();

  void release_par_id(uint id);
};

FreeIdSet::FreeIdSet(uint size, Monitor* mon) :
  _size(size), _mon(mon), _hd(0), _waiters(0), _claimed(0)
{
  guarantee(size != 0, "must be");
  _ids = NEW_C_HEAP_ARRAY(uint, size, mtGC);
  for (uint i = 0; i < size - 1; i++) {
    _ids[i] = i + 1;
  }
  _ids[size - 1] = end_of_list;
}

FreeIdSet::~FreeIdSet() {
  FREE_C_HEAP_ARRAY(uint, _ids);
}

uint FreeIdSet::claim_par_id() {
  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
  while (_hd == end_of_list) {
    _waiters++;
    _mon->wait(Mutex::_no_safepoint_check_flag);
    _waiters--;
  }
  uint res = _hd;
  _hd = _ids[res];
  _ids[res] = claimed;  // For debugging.
  _claimed++;
  return res;
}

void FreeIdSet::release_par_id(uint id) {
  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
  assert(_ids[id] == claimed, "Precondition.");
  _ids[id] = _hd;
  _hd = id;
  _claimed--;
  if (_waiters > 0) {
    _mon->notify_all();
  }
}

DirtyCardQueue::DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent) :
  // Dirty card queues are always active, so we create them with their
  // active field set to true.
  PtrQueue(qset, permanent, true /* active */)
{ }

DirtyCardQueue::~DirtyCardQueue() {
  if (!is_permanent()) {
    flush();
  }
}

DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(notify_when_complete),
  _shared_dirty_card_queue(this, true /* permanent */),
  _free_ids(NULL),
  _processed_buffers_mut(0), _processed_buffers_rs_thread(0)
{
  _all_active = true;
}

// Determines how many mutator threads can process the buffers in parallel.
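// The FreeIdSet of parallel ids is created once, in initialize(), so the
// value returned here must not change over the run; hence the use of the
// initial, rather than the current, active processor count.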
uint DirtyCardQueueSet::num_par_ids() {
  return (uint)os::initial_active_processor_count();
}

void DirtyCardQueueSet::initialize(Monitor* cbl_mon,
                                   Mutex* fl_lock,
                                   int process_completed_threshold,
                                   int max_completed_queue,
                                   Mutex* lock,
                                   DirtyCardQueueSet* fl_owner,
                                   bool init_free_ids) {
  PtrQueueSet::initialize(cbl_mon,
                          fl_lock,
                          process_completed_threshold,
                          max_completed_queue,
                          fl_owner);
  set_buffer_size(G1UpdateBufferSize);
  _shared_dirty_card_queue.set_lock(lock);
  if (init_free_ids) {
    _free_ids = new FreeIdSet(num_par_ids(), _cbl_mon);
  }
}

void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
  t->dirty_card_queue().handle_zero_index();
}

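// Applies cl to the cards in node's buffer, starting at the buffer's
// current index and stopping at the end of the buffer or when cl returns
// false, whichever comes first.  If consume is true, the buffer's index
// is updated to record how far processing got, so a partially processed
// buffer can be resumed later.  Returns true if the entire remainder of
// the buffer was processed.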
bool DirtyCardQueueSet::apply_closure_to_buffer(CardTableEntryClosure* cl,
                                                BufferNode* node,
                                                bool consume,
                                                uint worker_i) {
  if (cl == NULL) return true;
  bool result = true;
  void** buf = BufferNode::make_buffer_from_node(node);
  size_t i = node->index();
  size_t limit = buffer_size();
  for ( ; i < limit; ++i) {
    jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
    assert(card_ptr != NULL, "invariant");
    if (!cl->do_card_ptr(card_ptr, worker_i)) {
      result = false;           // Incomplete processing.
      break;
    }
  }
  if (consume) {
    assert(i <= buffer_size(), "invariant");
    node->set_index(i);
  }
  return result;
}

#ifndef ASSERT
#define assert_fully_consumed(node, buffer_size)
#else
#define assert_fully_consumed(node, buffer_size)                \
  do {                                                          \
    size_t _afc_index = (node)->index();                        \
    size_t _afc_size = (buffer_size);                           \
    assert(_afc_index == _afc_size,                             \
           "Buffer was not fully consumed as claimed: index: "  \
           SIZE_FORMAT ", size: " SIZE_FORMAT,                  \
           _afc_index, _afc_size);                              \
  } while (0)
#endif // ASSERT

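// Processes node's buffer on behalf of a mutator thread that has filled
// its queue.  A parallel id is claimed for the duration of the processing
// (blocking if all ids are in use), giving the refinement code a worker
// id that is unique among the threads processing buffers concurrently.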
bool DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
  guarantee(_free_ids != NULL, "must be");

  uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
  G1RefineCardConcurrentlyClosure cl;
  bool result = apply_closure_to_buffer(&cl, node, true, worker_i);
  _free_ids->release_par_id(worker_i); // release the id

  if (result) {
    assert_fully_consumed(node, buffer_size());
    Atomic::inc(&_processed_buffers_mut);
  }
  return result;
}
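// Removes and returns a buffer from the list of completed buffers, or
// returns NULL if the number of completed buffers is at or below stop_at.
// Callers pass a non-zero stop_at to deliberately leave some buffers
// unprocessed, e.g. so concurrent refinement does not take all pending
// work away from the next GC pause.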
BufferNode* DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
  BufferNode* nd = NULL;
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);

  if (_n_completed_buffers <= stop_at) {
    _process_completed = false;
    return NULL;
  }

  if (_completed_buffers_head != NULL) {
    nd = _completed_buffers_head;
    assert(_n_completed_buffers > 0, "Invariant");
    _completed_buffers_head = nd->next();
    _n_completed_buffers--;
    if (_completed_buffers_head == NULL) {
      assert(_n_completed_buffers == 0, "Invariant");
      _completed_buffers_tail = NULL;
    }
  }
  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
  return nd;
}

bool DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
  G1RefineCardConcurrentlyClosure cl;
  return apply_closure_to_completed_buffer(&cl, worker_i, stop_at, false);
}

bool DirtyCardQueueSet::apply_closure_during_gc(CardTableEntryClosure* cl, uint worker_i) {
  assert_at_safepoint(false);
  return apply_closure_to_completed_buffer(cl, worker_i, 0, true);
}

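// Dequeues one completed buffer (subject to stop_at) and applies cl to
// it.  Returns false if no buffer was available.  A buffer that was only
// partially processed, because the closure asked to stop early, is
// re-enqueued for later; that must never happen during a GC pause, where
// all buffers are expected to be fully drained.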
bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
                                                          uint worker_i,
                                                          size_t stop_at,
                                                          bool during_pause) {
  assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
  BufferNode* nd = get_completed_buffer(stop_at);
  if (nd == NULL) {
    return false;
  } else {
    if (apply_closure_to_buffer(cl, nd, true, worker_i)) {
      assert_fully_consumed(nd, buffer_size());
      // Done with fully processed buffer.
      deallocate_buffer(nd);
      Atomic::inc(&_processed_buffers_rs_thread);
    } else {
      // Return partially processed buffer to the queue.
      guarantee(!during_pause, "Should never stop early");
      enqueue_complete_buffer(nd);
    }
    return true;
  }
}
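// Applies cl to all completed buffers, in parallel with other workers
// executing this same method.  Workers claim nodes by advancing
// _cur_par_buffer_node past the node they want with a CAS: a successful
// CAS transfers ownership of the node to this worker, while a failed CAS
// yields the node some other worker installed, which becomes the next
// claim candidate.  Buffers are not consumed, so the closure must process
// every card.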
void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
  BufferNode* nd = _cur_par_buffer_node;
  while (nd != NULL) {
    BufferNode* next = nd->next();
    void* actual = Atomic::cmpxchg_ptr(next, &_cur_par_buffer_node, nd);
    if (actual == nd) {
      bool b = apply_closure_to_buffer(cl, nd, false);
      guarantee(b, "Should not stop early.");
      nd = next;
    } else {
      nd = static_cast<BufferNode*>(actual);
    }
  }
}

// Deallocates any completed log buffers.
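// The buffers are unlinked from the completed list while holding the
// lock, but deallocated only after releasing it, keeping the critical
// section short.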
void DirtyCardQueueSet::clear() {
  BufferNode* buffers_to_delete = NULL;
  {
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    while (_completed_buffers_head != NULL) {
      BufferNode* nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      nd->set_next(buffers_to_delete);
      buffers_to_delete = nd;
    }
    _n_completed_buffers = 0;
    _completed_buffers_tail = NULL;
    DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
  }
  while (buffers_to_delete != NULL) {
    BufferNode* nd = buffers_to_delete;
    buffers_to_delete = nd->next();
    deallocate_buffer(nd);
  }
}
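// Discards all enqueued cards: empties the completed buffer list and
// resets every thread's local queue as well as the shared queue, without
// processing any of the cards.  Safe only at a safepoint, when no mutator
// is concurrently enqueueing.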
void DirtyCardQueueSet::abandon_logs() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  clear();
  // Since abandon is done only at safepoints, we can safely manipulate
  // these queues.
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    t->dirty_card_queue().reset();
  }
  shared_dirty_card_queue()->reset();
}

void DirtyCardQueueSet::concatenate_log(DirtyCardQueue& dcq) {
  if (!dcq.is_empty()) {
    dcq.flush();
  }
}

void DirtyCardQueueSet::concatenate_logs() {
  // Iterate over all the threads; if we find a partial log, add it to
  // the global list of logs.  Temporarily turn off the limit on the number
  // of outstanding buffers.
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  int save_max_completed_queue = _max_completed_queue;
  _max_completed_queue = max_jint;
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    concatenate_log(t->dirty_card_queue());
  }
  concatenate_log(_shared_dirty_card_queue);
  // Restore the completed buffer queue limit.
  _max_completed_queue = save_max_completed_queue;
}