// g1HotCardCache.cpp revision 11857:d0fbf661cc16
/*
 * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

25249423Sdim#include "precompiled.hpp"
26249423Sdim#include "gc/g1/dirtyCardQueue.hpp"
27249423Sdim#include "gc/g1/g1CollectedHeap.inline.hpp"
28193326Sed#include "gc/g1/g1HotCardCache.hpp"
29193326Sed#include "runtime/atomic.hpp"
30193326Sed
31218893SdimG1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
32226633Sdim  _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}
33193326Sed
34212904Sdimvoid G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
35212904Sdim  if (default_use_cache()) {
36212904Sdim    _use_cache = true;
37193326Sed
38193326Sed    _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
39193326Sed    _hot_cache = ArrayAllocator<jbyte*, mtGC>::allocate(_hot_cache_size);
40193326Sed
41193326Sed    reset_hot_cache_internal();
42193326Sed
43193326Sed    // For refining the cards in the hot cache in parallel
44198092Srdivacky    _hot_cache_par_chunk_size = ClaimChunkSize;
45193326Sed    _hot_cache_par_claimed_idx = 0;
46193326Sed
47193326Sed    _card_counts.initialize(card_counts_storage);
48234353Sdim  }
49226633Sdim}
50198092Srdivacky
51193326SedG1HotCardCache::~G1HotCardCache() {
52193326Sed  if (default_use_cache()) {
53198092Srdivacky    assert(_hot_cache != NULL, "Logic");
54226633Sdim    ArrayAllocator<jbyte*, mtGC>::free(_hot_cache, _hot_cache_size);
55226633Sdim    _hot_cache = NULL;
56193326Sed  }
57193326Sed}
58193326Sed
// Record that card_ptr was dirtied again. If the card's count has not yet
// reached the "hot" threshold, card_ptr is returned unchanged so the caller
// refines it immediately. Otherwise the card is stored in the cache and the
// entry it displaces is returned for refinement instead; that displaced
// entry may be NULL when the claimed slot was still empty, in which case
// there is nothing for the caller to refine.
jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
  uint count = _card_counts.add_card_count(card_ptr);
  if (!_card_counts.is_hot(count)) {
    // The card is not hot so do not store it in the cache;
    // return it for immediate refining.
    return card_ptr;
  }
  // Otherwise, the card is hot.
  // Claim the next slot with a fetch-and-add; _hot_cache_size is a power of
  // two (see initialize()), so masking the ever-growing index wraps it into
  // the circular buffer.
  size_t index = Atomic::add(1, &_hot_cache_idx) - 1;
  size_t masked_index = index & (_hot_cache_size - 1);
  jbyte* current_ptr = _hot_cache[masked_index];

  // Try to store the new card pointer into the cache. Compare-and-swap to guard
  // against the unlikely event of a race resulting in another card pointer to
  // have already been written to the cache. In this case we will return
  // card_ptr in favor of the other option, which would be starting over. This
  // should be OK since card_ptr will likely be the older card already when/if
  // this ever happens.
  jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg_ptr(card_ptr,
                                                    &_hot_cache[masked_index],
                                                    current_ptr);
  return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
}
82193326Sed
// Apply cl to every buffered card pointer, cooperating with other workers:
// each worker atomically claims fixed-size chunks of the cache via
// _hot_cache_par_claimed_idx until the whole array has been covered.
// worker_i is forwarded to the closure for per-worker accounting.
void G1HotCardCache::drain(CardTableEntryClosure* cl, uint worker_i) {
  assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");

  assert(_hot_cache != NULL, "Logic");
  assert(!use_cache(), "cache should be disabled");

  while (_hot_cache_par_claimed_idx < _hot_cache_size) {
    // Claim the next chunk with a fetch-and-add; the claimed range is
    // [end_idx - chunk_size, end_idx).
    size_t end_idx = Atomic::add(_hot_cache_par_chunk_size,
                                 &_hot_cache_par_claimed_idx);
    size_t start_idx = end_idx - _hot_cache_par_chunk_size;
    // The current worker has successfully claimed the chunk [start_idx..end_idx)
    // Clamp in case the final chunk extends past the end of the array.
    end_idx = MIN2(end_idx, _hot_cache_size);
    for (size_t i = start_idx; i < end_idx; i++) {
      jbyte* card_ptr = _hot_cache[i];
      if (card_ptr != NULL) {
        bool result = cl->do_card_ptr(card_ptr, worker_i);
        assert(result, "Closure should always return true");
      } else {
        // NOTE(review): a NULL entry is treated as the end of the populated
        // part of this chunk (presumably entries are filled contiguously
        // after reset_hot_cache_internal() — confirm); skip to the next
        // chunk rather than scanning the rest of this one.
        break;
      }
    }
  }

  // The existing entries in the hot card cache, which were just refined
  // above, are discarded prior to re-enabling the cache near the end of the GC.
}
109207619Srdivacky
// Clear the remembered dirty-card counts for all cards spanned by region hr,
// so cards in hr start cold again.
void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
  _card_counts.clear_region(hr);
}
113193326Sed
// Clear the dirty-card counts for the entire heap; every card starts cold
// again after this call.
void G1HotCardCache::reset_card_counts() {
  _card_counts.clear_all();
}
117193326Sed