1/*
2 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "classfile/altHashing.hpp"
27#include "classfile/javaClasses.inline.hpp"
28#include "classfile/stringTable.hpp"
29#include "memory/allocation.inline.hpp"
30#include "memory/filemap.hpp"
31#include "memory/resourceArea.hpp"
32#include "oops/oop.inline.hpp"
33#include "runtime/safepoint.hpp"
34#include "utilities/dtrace.hpp"
35#include "utilities/hashtable.hpp"
36#include "utilities/hashtable.inline.hpp"
37#include "utilities/numberSeq.hpp"
38
39
40// This hashtable is implemented as an open hash table with a fixed number of buckets.
41
42template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() {
43  BasicHashtableEntry<F>* entry = NULL;
44  if (_free_list != NULL) {
45    entry = _free_list;
46    _free_list = _free_list->next();
47  }
48  return entry;
49}
50
// HashtableEntrys are allocated in blocks to reduce the space overhead.
// Returns a raw entry with its hash set; reuses a freed entry when one is
// available, otherwise carves one out of the current C-heap block.
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
  // First try to reuse an entry from the free list.
  BasicHashtableEntry<F>* entry = new_entry_free_list();

  if (entry == NULL) {
    if (_first_free_entry + _entry_size >= _end_block) {
      // Current block exhausted: allocate a new one. Block size (in entries)
      // scales with table occupancy, capped at 512.
      int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries));
      int len = _entry_size * block_size;
      len = 1 << log2_intptr(len); // round down to power of 2
      assert(len >= _entry_size, "");
      // NOTE(review): any unused tail of the previous block is abandoned here
      // (never individually freed); blocks live for the table's lifetime.
      _first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC);
      _end_block = _first_free_entry + len;
    }
    // Bump-pointer allocation within the current block.
    entry = (BasicHashtableEntry<F>*)_first_free_entry;
    _first_free_entry += _entry_size;
  }

  assert(_entry_size % HeapWordSize == 0, "");
  entry->set_hash(hashValue);
  return entry;
}
72
73
74template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(unsigned int hashValue, T obj) {
75  HashtableEntry<T, F>* entry;
76
77  entry = (HashtableEntry<T, F>*)BasicHashtable<F>::new_entry(hashValue);
78  entry->set_literal(obj);
79  return entry;
80}
81
82// Check to see if the hashtable is unbalanced.  The caller set a flag to
83// rehash at the next safepoint.  If this bucket is 60 times greater than the
84// expected average bucket length, it's an unbalanced hashtable.
85// This is somewhat an arbitrary heuristic but if one bucket gets to
86// rehash_count which is currently 100, there's probably something wrong.
87
88template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
89  assert(this->table_size() != 0, "underflow");
90  if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
91    // Set a flag for the next safepoint, which should be at some guaranteed
92    // safepoint interval.
93    return true;
94  }
95  return false;
96}
97
98template <class T, MEMFLAGS F> juint RehashableHashtable<T, F>::_seed = 0;
99
// Create a new table and using alternate hash code, populate the new table
// with the existing elements.   This can be used to change the hash code
// and could in the future change the size of the table.
//
// NOTE(review): the outer loop runs over new_table->table_size() buckets but
// reads this->bucket(i) — this assumes both tables have the same size;
// confirm if the size is ever allowed to differ.

template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {

  // Initialize the global seed for hashing.
  _seed = AltHashing::compute_seed();
  assert(seed() != 0, "shouldn't be zero");

  // Remember the count so we can verify nothing was lost in the move.
  int saved_entry_count = this->number_of_entries();

  // Iterate through the table and create a new entry for the new table
  for (int i = 0; i < new_table->table_size(); ++i) {
    for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) {
      HashtableEntry<T, F>* next = p->next();
      T string = p->literal();
      // Use alternate hashing algorithm on the symbol in the first table
      unsigned int hashValue = string->new_hash(seed());
      // Get a new index relative to the new table (can also change size)
      int index = new_table->hash_to_index(hashValue);
      p->set_hash(hashValue);
      // Keep the shared bit in the Hashtable entry to indicate that this entry
      // can't be deleted.   The shared bit is the LSB in the _next field so
      // walking the hashtable past these entries requires
      // BasicHashtableEntry::make_ptr() call.
      bool keep_shared = p->is_shared();
      this->unlink_entry(p);
      new_table->add_entry(index, p);
      // unlink_entry/add_entry clear the shared bit; restore it afterwards.
      if (keep_shared) {
        p->set_shared();
      }
      p = next;
    }
  }
  // give the new table the free list as well
  new_table->copy_freelist(this);
  assert(new_table->number_of_entries() == saved_entry_count, "lost entry on dictionary copy?");

  // Destroy memory used by the buckets in the hashtable.  The memory
  // for the elements has been used in a new table and is not
  // destroyed.  The memory reuse will benefit resizing the SystemDictionary
  // to avoid a memory allocation spike at safepoint.
  BasicHashtable<F>::free_buckets();
}
145
146template <MEMFLAGS F> void BasicHashtable<F>::free_buckets() {
147  if (NULL != _buckets) {
148    // Don't delete the buckets in the shared space.  They aren't
149    // allocated by os::malloc
150    if (!UseSharedSpaces ||
151        !FileMapInfo::current_info()->is_in_shared_space(_buckets)) {
152       FREE_C_HEAP_ARRAY(HashtableBucket, _buckets);
153    }
154    _buckets = NULL;
155  }
156}
157
158
159// Reverse the order of elements in the hash buckets.
160
161template <MEMFLAGS F> void BasicHashtable<F>::reverse() {
162
163  for (int i = 0; i < _table_size; ++i) {
164    BasicHashtableEntry<F>* new_list = NULL;
165    BasicHashtableEntry<F>* p = bucket(i);
166    while (p != NULL) {
167      BasicHashtableEntry<F>* next = p->next();
168      p->set_next(new_list);
169      new_list = p;
170      p = next;
171    }
172    *bucket_addr(i) = new_list;
173  }
174}
175
176template <MEMFLAGS F> void BasicHashtable<F>::BucketUnlinkContext::free_entry(BasicHashtableEntry<F>* entry) {
177  entry->set_next(_removed_head);
178  _removed_head = entry;
179  if (_removed_tail == NULL) {
180    _removed_tail = entry;
181  }
182  _num_removed++;
183}
184
// Splice all entries collected in 'context' onto the table's free list and
// adjust the entry count. Safe to call concurrently with other unlinkers.
template <MEMFLAGS F> void BasicHashtable<F>::bulk_free_entries(BucketUnlinkContext* context) {
  if (context->_num_removed == 0) {
    assert(context->_removed_head == NULL && context->_removed_tail == NULL,
           "Zero entries in the unlink context, but elements linked from " PTR_FORMAT " to " PTR_FORMAT,
           p2i(context->_removed_head), p2i(context->_removed_tail));
    return;
  }

  // MT-safe add of the list of BasicHashTableEntrys from the context to the free list.
  // Standard CAS retry loop: link our tail to the current head, then try to
  // install our head; on failure, re-link against the updated head and retry.
  BasicHashtableEntry<F>* current = _free_list;
  while (true) {
    context->_removed_tail->set_next(current);
    BasicHashtableEntry<F>* old = (BasicHashtableEntry<F>*)Atomic::cmpxchg_ptr(context->_removed_head, &_free_list, current);
    if (old == current) {
      break;
    }
    current = old;
  }
  // Atomically account for the removed entries.
  Atomic::add(-context->_num_removed, &_number_of_entries);
}
205
// Copy the table to the shared space.
// Serializes every entry into the region [*top, end), advancing *top, and
// rewrites the in-table links to point at the copies. A length word is
// written first so the reader knows the total payload size.

template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char** top, char* end) {

  // Dump the hash table entries.

  // Reserve a slot for the total length; filled in after the copy loop.
  intptr_t *plen = (intptr_t*)(*top);
  *top += sizeof(*plen);

  int i;
  for (i = 0; i < _table_size; ++i) {
    // Walk via next_addr() so each link can be redirected to the copy.
    for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
                              *p != NULL;
                               p = (*p)->next_addr()) {
      if (*top + entry_size() > end) {
        report_out_of_shared_space(SharedMiscData);
      }
      // Copy the entry into the shared region and re-point the link at it.
      *p = (BasicHashtableEntry<F>*)memcpy(*top, *p, entry_size());
      *top += entry_size();
    }
  }
  // Total bytes of entry data, excluding the length word itself.
  *plen = (char*)(*top) - (char*)plen - sizeof(*plen);

  // Set the shared bit.

  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
      p->set_shared();
    }
  }
}
237
238
239
240// Reverse the order of elements in the hash buckets.
241
242template <class T, MEMFLAGS F> void Hashtable<T, F>::reverse(void* boundary) {
243
244  for (int i = 0; i < this->table_size(); ++i) {
245    HashtableEntry<T, F>* high_list = NULL;
246    HashtableEntry<T, F>* low_list = NULL;
247    HashtableEntry<T, F>* last_low_entry = NULL;
248    HashtableEntry<T, F>* p = bucket(i);
249    while (p != NULL) {
250      HashtableEntry<T, F>* next = p->next();
251      if ((void*)p->literal() >= boundary) {
252        p->set_next(high_list);
253        high_list = p;
254      } else {
255        p->set_next(low_list);
256        low_list = p;
257        if (last_low_entry == NULL) {
258          last_low_entry = p;
259        }
260      }
261      p = next;
262    }
263    if (low_list != NULL) {
264      *bucket_addr(i) = low_list;
265      last_low_entry->set_next(high_list);
266    } else {
267      *bucket_addr(i) = high_list;
268    }
269  }
270}
271
272template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(Symbol *symbol) {
273  return symbol->size() * HeapWordSize;
274}
275
276template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(oop oop) {
277  // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,
278  // and the String.value array is shared by several Strings. However, starting from JDK8,
279  // the String.value array is not shared anymore.
280  assert(oop != NULL && oop->klass() == SystemDictionary::String_klass(), "only strings are supported");
281  return (oop->size() + java_lang_String::value(oop)->size()) * HeapWordSize;
282}
283
// Dump footprint and bucket length statistics
//
// Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
// add a new function Hashtable<T, F>::literal_size(MyNewType lit)

template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
  NumberSeq summary;
  int literal_bytes = 0;
  // One pass over the table: per-bucket chain lengths feed the NumberSeq,
  // literal sizes are accumulated separately.
  for (int i = 0; i < this->table_size(); ++i) {
    int count = 0;
    for (HashtableEntry<T, F>* e = this->bucket(i);
       e != NULL; e = e->next()) {
      count++;
      literal_bytes += literal_size(e->literal());
    }
    summary.add((double)count);
  }
  // num() is the number of samples (= buckets); sum() of chain lengths is
  // the total entry count.
  double num_buckets = summary.num();
  double num_entries = summary.sum();

  int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket<F>);
  int entry_bytes  = (int)num_entries * sizeof(HashtableEntry<T, F>);
  int total_bytes = literal_bytes +  bucket_bytes + entry_bytes;

  // Guard against division by zero for an empty table.
  double bucket_avg  = (num_buckets <= 0) ? 0 : (bucket_bytes  / num_buckets);
  double entry_avg   = (num_entries <= 0) ? 0 : (entry_bytes   / num_entries);
  double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);

  st->print_cr("%s statistics:", table_name);
  st->print_cr("Number of buckets       : %9d = %9d bytes, avg %7.3f", (int)num_buckets, bucket_bytes,  bucket_avg);
  st->print_cr("Number of entries       : %9d = %9d bytes, avg %7.3f", (int)num_entries, entry_bytes,   entry_avg);
  st->print_cr("Number of literals      : %9d = %9d bytes, avg %7.3f", (int)num_entries, literal_bytes, literal_avg);
  st->print_cr("Total footprint         : %9s = %9d bytes", "", total_bytes);
  st->print_cr("Average bucket size     : %9.3f", summary.avg());
  st->print_cr("Variance of bucket size : %9.3f", summary.variance());
  st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
  st->print_cr("Maximum bucket size     : %9d", (int)summary.maximum());
}
322
323
// Dump the hash table buckets.
// Writes a small header (bucket-array length, entry count) followed by the
// bucket array itself into [*top, end), then re-points _buckets at the copy.

template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end) {
  intptr_t len = _table_size * sizeof(HashtableBucket<F>);
  *(intptr_t*)(*top) = len;
  *top += sizeof(intptr_t);

  *(intptr_t*)(*top) = _number_of_entries;
  *top += sizeof(intptr_t);

  // NOTE(review): the bounds check covers only the bucket array; the two
  // header words above are written before any check — presumably callers
  // guarantee headroom for them. Confirm against the callers.
  if (*top + len > end) {
    report_out_of_shared_space(SharedMiscData);
  }
  _buckets = (HashtableBucket<F>*)memcpy(*top, _buckets, len);
  *top += len;
}
340
341
342#ifndef PRODUCT
343
344template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
345  ResourceMark rm;
346
347  for (int i = 0; i < BasicHashtable<F>::table_size(); i++) {
348    HashtableEntry<T, F>* entry = bucket(i);
349    while(entry != NULL) {
350      tty->print("%d : ", i);
351      entry->literal()->print();
352      tty->cr();
353      entry = entry->next();
354    }
355  }
356}
357
358
359template <MEMFLAGS F> void BasicHashtable<F>::verify() {
360  int count = 0;
361  for (int i = 0; i < table_size(); i++) {
362    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
363      ++count;
364    }
365  }
366  assert(count == number_of_entries(), "number of hashtable entries incorrect");
367}
368
369
370#endif // PRODUCT
371
372#ifdef ASSERT
373
374template <MEMFLAGS F> bool BasicHashtable<F>::verify_lookup_length(double load, const char *table_name) {
375  if ((!_lookup_warning) && (_lookup_count != 0)
376      && ((double)_lookup_length / (double)_lookup_count > load * 2.0)) {
377    warning("Performance bug: %s lookup_count=%d "
378            "lookup_length=%d average=%lf load=%f",
379            table_name, _lookup_count, _lookup_length,
380            (double)_lookup_length / _lookup_count, load);
381    _lookup_warning = true;
382
383    return false;
384  }
385  return true;
386}
387
388#endif
389
390
// Explicitly instantiate these types
// (Template definitions live in this .cpp, so every specialization used
// elsewhere in the VM must be instantiated here.)
#if INCLUDE_ALL_GCS
template class Hashtable<nmethod*, mtGC>;
template class HashtableEntry<nmethod*, mtGC>;
template class BasicHashtable<mtGC>;
#endif
template class Hashtable<ConstantPool*, mtClass>;
template class RehashableHashtable<Symbol*, mtSymbol>;
template class RehashableHashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<InstanceKlass*, mtClass>;
template class Hashtable<oop, mtClass>;
#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
template class Hashtable<oop, mtSymbol>;
template class RehashableHashtable<oop, mtSymbol>;
#endif // SOLARIS || CHECK_UNHANDLED_OOPS
template class Hashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
template class HashtableEntry<Symbol*, mtSymbol>;
template class HashtableEntry<Symbol*, mtClass>;
template class HashtableEntry<oop, mtSymbol>;
template class BasicHashtableEntry<mtSymbol>;
template class BasicHashtableEntry<mtCode>;
template class BasicHashtable<mtClass>;
template class BasicHashtable<mtClassShared>;
template class BasicHashtable<mtSymbol>;
template class BasicHashtable<mtCode>;
template class BasicHashtable<mtInternal>;
template class BasicHashtable<mtModule>;
#if INCLUDE_TRACE
template class Hashtable<Symbol*, mtTracing>;
template class HashtableEntry<Symbol*, mtTracing>;
template class BasicHashtable<mtTracing>;
#endif
template class BasicHashtable<mtCompiler>;
427