virtualspace.cpp revision 12252:d9aa9adb7dd2
1/*
2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "code/codeCacheExtensions.hpp"
27#include "logging/log.hpp"
28#include "memory/resourceArea.hpp"
29#include "memory/virtualspace.hpp"
30#include "oops/markOop.hpp"
31#include "oops/oop.inline.hpp"
32#include "services/memTracker.hpp"
33
34// ReservedSpace
35
36// Dummy constructor
37ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
38    _alignment(0), _special(false), _executable(false) {
39}
40
41ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
42  bool has_preferred_page_size = preferred_page_size != 0;
43  // Want to use large pages where possible and pad with small pages.
44  size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
45  bool large_pages = page_size != (size_t)os::vm_page_size();
46  size_t alignment;
47  if (large_pages && has_preferred_page_size) {
48    alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
49    // ReservedSpace initialization requires size to be aligned to the given
50    // alignment. Align the size up.
51    size = align_size_up(size, alignment);
52  } else {
53    // Don't force the alignment to be large page aligned,
54    // since that will waste memory.
55    alignment = os::vm_allocation_granularity();
56  }
57  initialize(size, alignment, large_pages, NULL, false);
58}
59
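// Editor's note -- illustrative example of the page-size / alignment choice above, with
// hypothetical values: a 4K os::vm_page_size(), a 64K allocation granularity and a
// caller-supplied preferred_page_size of 2M.
//
//   page_size   = 2M                       (the caller's preference wins)
//   large_pages = (2M != 4K) == true
//   alignment   = MAX2(2M, 64K) == 2M, and size is rounded up to a 2M multiple
//
// With preferred_page_size == 0 the page size is instead chosen by
// os::page_size_for_region_unaligned() and the alignment stays at the allocation
// granularity, so no memory is wasted on large-page padding.
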
60ReservedSpace::ReservedSpace(size_t size, size_t alignment,
61                             bool large,
62                             char* requested_address) {
63  initialize(size, alignment, large, requested_address, false);
64}
65
66ReservedSpace::ReservedSpace(size_t size, size_t alignment,
67                             bool large,
68                             bool executable) {
69  initialize(size, alignment, large, NULL, executable);
70}
71
72// Helper method.
73static bool failed_to_reserve_as_requested(char* base, char* requested_address,
74                                           const size_t size, bool special)
75{
76  if (base == requested_address || requested_address == NULL)
77    return false; // did not fail
78
79  if (base != NULL) {
80    // A different reserve address may be acceptable in other cases,
81    // but for compressed oops the heap should be at the requested address.
82    assert(UseCompressedOops, "currently requested address used only for compressed oops");
83    log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
84    // OS ignored requested address. Try different address.
85    if (special) {
86      if (!os::release_memory_special(base, size)) {
87        fatal("os::release_memory_special failed");
88      }
89    } else {
90      if (!os::release_memory(base, size)) {
91        fatal("os::release_memory failed");
92      }
93    }
94  }
95  return true;
96}
97
98void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
99                               char* requested_address,
100                               bool executable) {
101  const size_t granularity = os::vm_allocation_granularity();
102  assert((size & (granularity - 1)) == 0,
103         "size not aligned to os::vm_allocation_granularity()");
104  assert((alignment & (granularity - 1)) == 0,
105         "alignment not aligned to os::vm_allocation_granularity()");
106  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
107         "not a power of 2");
108
109  alignment = MAX2(alignment, (size_t)os::vm_page_size());
110
111  _base = NULL;
112  _size = 0;
113  _special = false;
114  _executable = executable;
115  _alignment = 0;
116  _noaccess_prefix = 0;
117  if (size == 0) {
118    return;
119  }
120
121  // If the OS doesn't support demand paging for large page memory, we need
122  // to use reserve_memory_special() to reserve and pin the entire region.
123  bool special = large && !os::can_commit_large_page_memory();
124  char* base = NULL;
125
126  if (special) {
127
128    base = os::reserve_memory_special(size, alignment, requested_address, executable);
129
130    if (base != NULL) {
131      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
132        // OS ignored requested address. Try different address.
133        return;
134      }
135      // Check alignment constraints.
136      assert((uintptr_t) base % alignment == 0,
137             "Large pages returned a non-aligned address, base: "
138             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
139             p2i(base), alignment);
140      _special = true;
141    } else {
142      // Failed; try to reserve regular memory below
143      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
144                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
145        log_debug(gc, heap, coops)("Reserve regular memory without large pages");
146      }
147    }
148  }
149
150  if (base == NULL) {
151    // Optimistically assume that the OS returns an aligned base pointer.
152    // When reserving a large address range, most OSes seem to align to at
153    // least 64K.
154
155    // If the memory was requested at a particular address, use
156    // os::attempt_reserve_memory_at() to avoid overmapping something
157    // important.  If the space is not available there, NULL is returned.
158
159    if (requested_address != 0) {
160      base = os::attempt_reserve_memory_at(size, requested_address);
161      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
162        // OS ignored requested address. Try different address.
163        base = NULL;
164      }
165    } else {
166      base = os::reserve_memory(size, NULL, alignment);
167    }
168
169    if (base == NULL) return;
170
171    // Check alignment constraints
172    if ((((size_t)base) & (alignment - 1)) != 0) {
173      // Base not aligned, retry
174      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
175      // Make sure that size is aligned
176      size = align_size_up(size, alignment);
177      base = os::reserve_memory_aligned(size, alignment);
178
179      if (requested_address != 0 &&
180          failed_to_reserve_as_requested(base, requested_address, size, false)) {
181        // As a result of the alignment constraints, the allocated base differs
182        // from the requested address. Return to the caller, who can
183        // take remedial action (like trying again without a requested address).
184        assert(_base == NULL, "should be");
185        return;
186      }
187    }
188  }
189  // Done
190  _base = base;
191  _size = size;
192  _alignment = alignment;
193}
194
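// Editor's note -- a worked example of the alignment retry above, with hypothetical
// numbers: for alignment == 2M (0x200000), a base of 0x7f3a12300000 returned by
// os::reserve_memory() has (base & 0x1fffff) == 0x100000 != 0, so that mapping is
// released, size is rounded up to a 2M multiple, and the request is retried via
// os::reserve_memory_aligned(), which guarantees a 2M-aligned base.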
195
196ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
197                             bool special, bool executable) {
198  assert((size % os::vm_allocation_granularity()) == 0,
199         "size not allocation aligned");
200  _base = base;
201  _size = size;
202  _alignment = alignment;
203  _noaccess_prefix = 0;
204  _special = special;
205  _executable = executable;
206}
207
208
209ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
210                                        bool split, bool realloc) {
211  assert(partition_size <= size(), "partition failed");
212  if (split) {
213    os::split_reserved_memory(base(), size(), partition_size, realloc);
214  }
215  ReservedSpace result(base(), partition_size, alignment, special(),
216                       executable());
217  return result;
218}
219
220
221ReservedSpace
222ReservedSpace::last_part(size_t partition_size, size_t alignment) {
223  assert(partition_size <= size(), "partition failed");
224  ReservedSpace result(base() + partition_size, size() - partition_size,
225                       alignment, special(), executable());
226  return result;
227}
228
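// Usage sketch (editor's note, illustrative only -- not code from this file): carving one
// reservation into two adjacent spaces, assuming the usual accessors on ReservedSpace:
//
//   ReservedSpace rs(6 * M);
//   ReservedSpace young = rs.first_part(2 * M, rs.alignment(),
//                                       /* split */ false, /* realloc */ true);
//   ReservedSpace old   = rs.last_part(2 * M, rs.alignment());
//
// Both parts alias the original mapping; only the base/size bookkeeping differs.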
229
230size_t ReservedSpace::page_align_size_up(size_t size) {
231  return align_size_up(size, os::vm_page_size());
232}
233
234
235size_t ReservedSpace::page_align_size_down(size_t size) {
236  return align_size_down(size, os::vm_page_size());
237}
238
239
240size_t ReservedSpace::allocation_align_size_up(size_t size) {
241  return align_size_up(size, os::vm_allocation_granularity());
242}
243
244
245size_t ReservedSpace::allocation_align_size_down(size_t size) {
246  return align_size_down(size, os::vm_allocation_granularity());
247}
248
249
250void ReservedSpace::release() {
251  if (is_reserved()) {
252    char *real_base = _base - _noaccess_prefix;
253    const size_t real_size = _size + _noaccess_prefix;
254    if (special()) {
255      os::release_memory_special(real_base, real_size);
256    } else {
257      os::release_memory(real_base, real_size);
258    }
259    _base = NULL;
260    _size = 0;
261    _noaccess_prefix = 0;
262    _alignment = 0;
263    _special = false;
264    _executable = false;
265  }
266}
267
268static size_t noaccess_prefix_size(size_t alignment) {
269  return lcm(os::vm_page_size(), alignment);
270}
271
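// Worked example for noaccess_prefix_size() (editor's note, hypothetical values): with a
// 4K vm page size and a 2M heap alignment, lcm(4K, 2M) == 2M; with a 64K page size and the
// same 2M alignment the result is still 2M, since 2M is a multiple of 64K.  The prefix
// therefore always covers whole pages and, being a multiple of the alignment, keeps the
// shifted heap base aligned.
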
272void ReservedHeapSpace::establish_noaccess_prefix() {
273  assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
274  _noaccess_prefix = noaccess_prefix_size(_alignment);
275
276  if (base() && base() + _size > (char *)OopEncodingHeapMax) {
277    if (true
278        WIN64_ONLY(&& !UseLargePages)
279        AIX_ONLY(&& os::vm_page_size() != 64*K)) {
280      // Protect memory at the base of the allocated region.
281      // If special, the page was committed (only matters on Windows).
282      if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
283        fatal("cannot protect protection page");
284      }
285      log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
286                                 PTR_FORMAT " / " INTX_FORMAT " bytes",
287                                 p2i(_base),
288                                 _noaccess_prefix);
289      assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
290    } else {
291      Universe::set_narrow_oop_use_implicit_null_checks(false);
292    }
293  }
294
295  _base += _noaccess_prefix;
296  _size -= _noaccess_prefix;
297  assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
298}
299
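// Editor's note -- illustrative effect of establish_noaccess_prefix(), with hypothetical
// numbers: if the region was reserved at 0x800000000 with _size == 4G + 2M and
// _alignment == 2M, then _noaccess_prefix == 2M, the range [0x800000000, 0x800200000) is
// protected (where the platform conditions above allow it), and afterwards
// _base == 0x800200000 and _size == 4G.  A null compressed oop then decodes into the
// protected prefix, so the resulting access fault implements the implicit null check.
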
300// Tries to allocate memory of size 'size' at address 'requested_address' with alignment 'alignment'.
301// Does not check whether the reserved memory actually is at requested_address, as the memory returned
302// might still be useful to the caller.
303// Ensures the memory is aligned to 'alignment'.
304// NOTE: If ReservedHeapSpace already points to some reserved memory, that memory is freed first.
305void ReservedHeapSpace::try_reserve_heap(size_t size,
306                                         size_t alignment,
307                                         bool large,
308                                         char* requested_address) {
309  if (_base != NULL) {
310    // We tried before, but we didn't like the address delivered.
311    release();
312  }
313
314  // If the OS doesn't support demand paging for large page memory, we need
315  // to use reserve_memory_special() to reserve and pin the entire region.
316  bool special = large && !os::can_commit_large_page_memory();
317  char* base = NULL;
318
319  log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
320                             " heap of size " SIZE_FORMAT_HEX,
321                             p2i(requested_address),
322                             size);
323
324  if (special) {
325    base = os::reserve_memory_special(size, alignment, requested_address, false);
326
327    if (base != NULL) {
328      // Check alignment constraints.
329      assert((uintptr_t) base % alignment == 0,
330             "Large pages returned a non-aligned address, base: "
331             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
332             p2i(base), alignment);
333      _special = true;
334    }
335  }
336
337  if (base == NULL) {
338    // Failed; try to reserve regular memory below
339    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
340                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
341      log_debug(gc, heap, coops)("Reserve regular memory without large pages");
342    }
343
344    // Optimistically assume that the OS returns an aligned base pointer.
345    // When reserving a large address range, most OSes seem to align to at
346    // least 64K.
347
348    // If the memory was requested at a particular address, use
349    // os::attempt_reserve_memory_at() to avoid overmapping something
350    // important.  If the space is not available there, NULL is returned.
351
352    if (requested_address != 0) {
353      base = os::attempt_reserve_memory_at(size, requested_address);
354    } else {
355      base = os::reserve_memory(size, NULL, alignment);
356    }
357  }
358  if (base == NULL) { return; }
359
360  // Done
361  _base = base;
362  _size = size;
363  _alignment = alignment;
364
365  // Check alignment constraints
366  if ((((size_t)base) & (alignment - 1)) != 0) {
367    // Base not aligned, retry.
368    release();
369  }
370}
371
372void ReservedHeapSpace::try_reserve_range(char *highest_start,
373                                          char *lowest_start,
374                                          size_t attach_point_alignment,
375                                          char *aligned_heap_base_min_address,
376                                          char *upper_bound,
377                                          size_t size,
378                                          size_t alignment,
379                                          bool large) {
380  const size_t attach_range = highest_start - lowest_start;
381  // Cap num_attempts at the number of attach points that are actually possible.
382  // At least one attempt is possible even for a zero-sized attach range.
383  const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
384  const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
385
386  const size_t stepsize = (attach_range == 0) ? // Only one try.
387    (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
388
389  // Try attach points from top to bottom.
390  char* attach_point = highest_start;
391  while (attach_point >= lowest_start  &&
392         attach_point <= highest_start &&  // Avoid wrap around.
393         ((_base == NULL) ||
394          (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
395    try_reserve_heap(size, alignment, large, attach_point);
396    attach_point -= stepsize;
397  }
398}
399
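// Editor's note -- how the stepping above behaves for hypothetical inputs: with
// HeapSearchSteps == 3, an attach range (highest_start - lowest_start) of 1G and a 64K
// attach_point_alignment,
//
//   num_attempts_possible = 1G / 64K + 1
//   num_attempts_to_try   = MIN2(3, num_attempts_possible) == 3
//   stepsize              = align_size_up(1G / 3, 64K)    (roughly 341M, rounded up to 64K)
//
// so attach points are tried at highest_start, highest_start - stepsize, ... until either a
// satisfactory heap has been reserved or the next attach point would fall below lowest_start.
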
400#define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
401#define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
402#define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
403
404// Helper for heap allocation. Returns an array with addresses
405// (OS-specific) which are suited for disjoint base mode. Array is
406// NULL terminated.
407static char** get_attach_addresses_for_disjoint_mode() {
408  static uint64_t addresses[] = {
409     2 * SIZE_32G,
410     3 * SIZE_32G,
411     4 * SIZE_32G,
412     8 * SIZE_32G,
413    10 * SIZE_32G,
414     1 * SIZE_64K * SIZE_32G,
415     2 * SIZE_64K * SIZE_32G,
416     3 * SIZE_64K * SIZE_32G,
417     4 * SIZE_64K * SIZE_32G,
418    16 * SIZE_64K * SIZE_32G,
419    32 * SIZE_64K * SIZE_32G,
420    34 * SIZE_64K * SIZE_32G,
421    0
422  };
423
424  // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This assumes
425  // the array is sorted in ascending order.
426  uint i = 0;
427  while (addresses[i] != 0 &&
428         (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
429    i++;
430  }
431  uint start = i;
432
433  // Avoid more steps than requested.
434  i = 0;
435  while (addresses[start+i] != 0) {
436    if (i == HeapSearchSteps) {
437      addresses[start+i] = 0;
438      break;
439    }
440    i++;
441  }
442
443  return (char**) &addresses[start];
444}
445
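// Editor's note (illustrative): every non-zero entry in the table above is a multiple of
// 32G, i.e. of OopEncodingHeapMax for the default 8-byte object alignment.  A heap based at
// such an address has its low 35 bits zero, while a shifted narrow oop is always below 2^35,
// so the base bits and the oop bits occupy disjoint ranges and can be combined without a
// carry -- the property that Universe::is_disjoint_heap_base_address() checks for.
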
446void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
447  guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
448            "cannot allocate compressed oop heap for this size");
449  guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
450  assert(HeapBaseMinAddress > 0, "sanity");
451
452  const size_t granularity = os::vm_allocation_granularity();
453  assert((size & (granularity - 1)) == 0,
454         "size not aligned to os::vm_allocation_granularity()");
455  assert((alignment & (granularity - 1)) == 0,
456         "alignment not aligned to os::vm_allocation_granularity()");
457  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
458         "not a power of 2");
459
460  // The necessary attach point alignment for generated wish addresses.
461  // This is needed to increase the chance of attaching for mmap and shmat.
462  const size_t os_attach_point_alignment =
463    AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
464    NOT_AIX(os::vm_allocation_granularity());
465  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
466
467  char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
468  size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
469    noaccess_prefix_size(alignment) : 0;
470
471  // Attempt to alloc at user-given address.
472  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
473    try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
474    if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
475      release();
476    }
477  }
478
479  // If the heap could not be reserved at HeapBaseMinAddress, try other placements.
480  if (_base == NULL) {
481
482    // Try to allocate the heap at addresses that allow efficient oop compression.
483    // Different schemes are tried, in order of decreasing optimization potential.
484    //
485    // For this, try_reserve_heap() is called with the desired heap base addresses.
486    // A call into the os layer to allocate at a given address can return memory
487    // at a different address than requested.  Still, this might be memory at a useful
488    // address. try_reserve_heap() always keeps this allocated memory, because only here
489    // are the criteria for a good heap checked.
490
491    // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
492    // Give it several tries from top of range to bottom.
493    if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
494
495      // Calculate the address range within which we try to attach (range of possible start addresses).
496      char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
497      char* const lowest_start  = (char *)align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
498      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
499                        aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
500    }
501
502    // zerobased: Attempt to allocate in the lower 32G.
503    // But leave room for the compressed class space, which is allocated above
504    // the heap.
505    char *zerobased_max = (char *)OopEncodingHeapMax;
506    const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
507    // For small heaps, save some space for compressed class pointer
508    // space so it can be decoded with no base.
509    if (UseCompressedClassPointers && !UseSharedSpaces &&
510        OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
511        (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
512      zerobased_max = (char *)OopEncodingHeapMax - class_space;
513    }
514
515    // Give it several tries from top of range to bottom.
516    if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
517        ((_base == NULL) ||                        // No previous try succeeded.
518         (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
519
520      // Calculate the address range within which we try to attach (range of possible start addresses).
521      char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
522      // Need to be careful about size being guaranteed to be less
523      // than UnscaledOopHeapMax due to type constraints.
524      char *lowest_start = aligned_heap_base_min_address;
525      uint64_t unscaled_end = UnscaledOopHeapMax - size;
526      if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
527        lowest_start = MAX2(lowest_start, (char*)unscaled_end);
528      }
529      lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
530      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
531                        aligned_heap_base_min_address, zerobased_max, size, alignment, large);
532    }
533
534    // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
535    // implement null checks.
536    noaccess_prefix = noaccess_prefix_size(alignment);
537
538    // Try to attach at addresses that are aligned to OopEncodingHeapMax (disjoint-base mode).
539    char** addresses = get_attach_addresses_for_disjoint_mode();
540    int i = 0;
541    while (addresses[i] &&                                 // End of array not yet reached.
542           ((_base == NULL) ||                             // No previous try succeeded.
543            (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
544             !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
545      char* const attach_point = addresses[i];
546      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
547      try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
548      i++;
549    }
550
551    // Last, desperate try without any placement.
552    if (_base == NULL) {
553      log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
554      initialize(size + noaccess_prefix, alignment, large, NULL, false);
555    }
556  }
557}
558
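// Editor's summary of the placement ladder implemented above (descriptive only; the 32G and
// 4G limits assume the default 8-byte object alignment):
//
//   1. unscaled:   heap end <= 4G  (UnscaledOopHeapMax)  -> oops decoded with no base, no shift
//   2. zerobased:  heap end <= 32G (OopEncodingHeapMax)  -> no base, shift only
//   3. disjoint:   base is a multiple of 32G             -> base bits disjoint from oop bits
//   4. anywhere:   arbitrary base plus noaccess prefix   -> full base + shift decoding
//
// Each rung is only attempted while no earlier rung has produced an acceptable heap.
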
559ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
560
561  if (size == 0) {
562    return;
563  }
564
565  // Heap size should be aligned to alignment, too.
566  guarantee(is_size_aligned(size, alignment), "set by caller");
567
568  if (UseCompressedOops) {
569    initialize_compressed_heap(size, alignment, large);
570    if (_size > size) {
571      // We allocated heap with noaccess prefix.
572      // It can happen we get a zerobased/unscaled heap with noaccess prefix,
573      // if we had to try at arbitrary address.
574      establish_noaccess_prefix();
575    }
576  } else {
577    initialize(size, alignment, large, NULL, false);
578  }
579
580  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
581         "area must be distinguishable from marks for mark-sweep");
582  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
583         "area must be distinguishable from marks for mark-sweep");
584
585  if (base() > 0) {
586    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
587  }
588}
589
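// Usage sketch (editor's note, illustrative only -- not code from this file):
//
//   ReservedHeapSpace heap_rs(2 * G, 2 * M, /* large */ UseLargePages);
//   if (heap_rs.is_reserved()) {
//     // Hand heap_rs to the collector, which typically carves it up via VirtualSpace.
//   }
//
// With UseCompressedOops enabled the constructor prefers unscaled, zerobased or disjoint
// placements as described above; without it the heap is reserved like any other ReservedSpace.
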
590// Reserve space for the code segment.  Same as the Java heap, except that we mark
591// this as executable.
592ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
593                                     size_t rs_align,
594                                     bool large) :
595  ReservedSpace(r_size, rs_align, large, /*executable*/ CodeCacheExtensions::support_dynamic_code()) {
596  MemTracker::record_virtual_memory_type((address)base(), mtCode);
597}
598
599// VirtualSpace
600
601VirtualSpace::VirtualSpace() {
602  _low_boundary           = NULL;
603  _high_boundary          = NULL;
604  _low                    = NULL;
605  _high                   = NULL;
606  _lower_high             = NULL;
607  _middle_high            = NULL;
608  _upper_high             = NULL;
609  _lower_high_boundary    = NULL;
610  _middle_high_boundary   = NULL;
611  _upper_high_boundary    = NULL;
612  _lower_alignment        = 0;
613  _middle_alignment       = 0;
614  _upper_alignment        = 0;
615  _special                = false;
616  _executable             = false;
617}
618
619
620bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
621  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
622  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
623}
624
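// Usage sketch (editor's note, illustrative only -- not code from this file):
//
//   ReservedSpace rs(16 * M);
//   VirtualSpace vs;
//   if (vs.initialize(rs, 4 * M)) {
//     // 4M is committed up front; later growth goes through vs.expand_by().
//   }
//
// The two-argument form above derives the commit granularity from the reservation size;
// initialize_with_granularity() lets the caller pin it explicitly, as the unit tests below do.
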
625bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
626  if (!rs.is_reserved()) return false;  // Allocation failed.
627  assert(_low_boundary == NULL, "VirtualSpace already initialized");
628  assert(max_commit_granularity > 0, "Granularity must be non-zero.");
629
630  _low_boundary  = rs.base();
631  _high_boundary = low_boundary() + rs.size();
632
633  _low = low_boundary();
634  _high = low();
635
636  _special = rs.special();
637  _executable = rs.executable();
638
639  // When a VirtualSpace begins life at a large size, make all future expansion
640  // and shrinking occur aligned to a granularity of large pages.  This avoids
641  // fragmentation of physical addresses that inhibits the use of large pages
642  // by the OS virtual memory system.  Empirically, we see that with a 4MB
643  // page size, the only spaces that get handled this way are codecache and
644  // the heap itself, both of which provide a substantial performance
645  // boost in many benchmarks when covered by large pages.
646  //
647  // No attempt is made to force large page alignment at the very top and
648  // bottom of the space if they are not aligned so already.
649  _lower_alignment  = os::vm_page_size();
650  _middle_alignment = max_commit_granularity;
651  _upper_alignment  = os::vm_page_size();
652
653  // End of each region
654  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
655  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
656  _upper_high_boundary = high_boundary();
657
658  // High address of each region
659  _lower_high = low_boundary();
660  _middle_high = lower_high_boundary();
661  _upper_high = middle_high_boundary();
662
663  // commit to initial size
664  if (committed_size > 0) {
665    if (!expand_by(committed_size)) {
666      return false;
667    }
668  }
669  return true;
670}
671
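// Editor's note -- worked example of the three-region split set up above, with hypothetical
// numbers: for a space reserved at 0x10100000 with a size of 10M, a 4K vm page size and a
// 2M max_commit_granularity:
//
//   lower  region: [0x10100000, 0x10200000)  1M, committed with 4K pages
//   middle region: [0x10200000, 0x10A00000)  8M, committed with 2M pages
//   upper  region: [0x10A00000, 0x10B00000)  1M, committed with 4K pages
//
// Only the middle region is large-page aligned; the unaligned head and tail fall back to the
// default page size, as the comment above explains.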
672
673VirtualSpace::~VirtualSpace() {
674  release();
675}
676
677
678void VirtualSpace::release() {
679  // This does not release memory it reserved.
680  // Caller must release via rs.release();
681  _low_boundary           = NULL;
682  _high_boundary          = NULL;
683  _low                    = NULL;
684  _high                   = NULL;
685  _lower_high             = NULL;
686  _middle_high            = NULL;
687  _upper_high             = NULL;
688  _lower_high_boundary    = NULL;
689  _middle_high_boundary   = NULL;
690  _upper_high_boundary    = NULL;
691  _lower_alignment        = 0;
692  _middle_alignment       = 0;
693  _upper_alignment        = 0;
694  _special                = false;
695  _executable             = false;
696}
697
698
699size_t VirtualSpace::committed_size() const {
700  return pointer_delta(high(), low(), sizeof(char));
701}
702
703
704size_t VirtualSpace::reserved_size() const {
705  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
706}
707
708
709size_t VirtualSpace::uncommitted_size()  const {
710  return reserved_size() - committed_size();
711}
712
713size_t VirtualSpace::actual_committed_size() const {
714  // Special VirtualSpaces commit all reserved space up front.
715  if (special()) {
716    return reserved_size();
717  }
718
719  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
720  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
721  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
722
723#ifdef ASSERT
724  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
725  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
726  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
727
728  if (committed_high > 0) {
729    assert(committed_low == lower, "Must be");
730    assert(committed_middle == middle, "Must be");
731  }
732
733  if (committed_middle > 0) {
734    assert(committed_low == lower, "Must be");
735  }
736  if (committed_middle < middle) {
737    assert(committed_high == 0, "Must be");
738  }
739
740  if (committed_low < lower) {
741    assert(committed_high == 0, "Must be");
742    assert(committed_middle == 0, "Must be");
743  }
744#endif
745
746  return committed_low + committed_middle + committed_high;
747}
748
749
750bool VirtualSpace::contains(const void* p) const {
751  return low() <= (const char*) p && (const char*) p < high();
752}
753
754static void pretouch_expanded_memory(void* start, void* end) {
755  assert(is_ptr_aligned(start, os::vm_page_size()), "Unexpected alignment");
756  assert(is_ptr_aligned(end,   os::vm_page_size()), "Unexpected alignment");
757
758  os::pretouch_memory(start, end);
759}
760
761static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
762  if (os::commit_memory(start, size, alignment, executable)) {
763    if (pre_touch || AlwaysPreTouch) {
764      pretouch_expanded_memory(start, start + size);
765    }
766    return true;
767  }
768
769  debug_only(warning(
770      "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
771      " size=" SIZE_FORMAT ", executable=%d) failed",
772      p2i(start), p2i(start + size), size, executable);)
773
774  return false;
775}
776
777/*
778   First we need to determine if a particular virtual space is using large
779   pages.  This is done in the initialize function, and only virtual spaces
780   that are larger than LargePageSizeInBytes use large pages.  Once we
781   have determined this, all expand_by and shrink_by calls must grow and
782   shrink by large page size chunks.  If a particular request
783   is within the current large page, the call to commit and uncommit memory
784   can be ignored.  In the case that the low and high boundaries of this
785   space are not large-page aligned, the pages leading to the first large
786   page address and the pages after the last large page address must be
787   allocated with default pages.
788*/
789bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
790  if (uncommitted_size() < bytes) {
791    return false;
792  }
793
794  if (special()) {
795    // don't commit memory if the entire space is pinned in memory
796    _high += bytes;
797    return true;
798  }
799
800  char* previous_high = high();
801  char* unaligned_new_high = high() + bytes;
802  assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
803
804  // Calculate where the new high for each of the regions should be.  If
805  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
806  // then the unaligned lower and upper new highs would be the
807  // lower_high() and upper_high() respectively.
808  char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
809  char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
810  char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
811
812  // Align the new highs based on each region's alignment.  Lower and upper
813  // alignment will always be default page size.  middle alignment will be
814  // LargePageSizeInBytes if the actual size of the virtual space is in
815  // fact larger than LargePageSizeInBytes.
816  char* aligned_lower_new_high =  (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
817  char* aligned_middle_new_high = (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
818  char* aligned_upper_new_high =  (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
819
820  // Determine which regions need to grow in this expand_by call.
821  // If you are growing in the lower region, high() must be in that
822  // region so calculate the size based on high().  For the middle and
823  // upper regions, determine the starting point of growth based on the
824  // location of high().  By getting the MAX of the region's low address
825  // (or the previous region's high address) and high(), we can tell if it
826  // is an intra or inter region growth.
827  size_t lower_needs = 0;
828  if (aligned_lower_new_high > lower_high()) {
829    lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
830  }
831  size_t middle_needs = 0;
832  if (aligned_middle_new_high > middle_high()) {
833    middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
834  }
835  size_t upper_needs = 0;
836  if (aligned_upper_new_high > upper_high()) {
837    upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
838  }
839
840  // Check contiguity.
841  assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
842         "high address must be contained within the region");
843  assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
844         "high address must be contained within the region");
845  assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
846         "high address must be contained within the region");
847
848  // Commit regions
849  if (lower_needs > 0) {
850    assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
851    if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
852      return false;
853    }
854    _lower_high += lower_needs;
855  }
856
857  if (middle_needs > 0) {
858    assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
859    if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
860      return false;
861    }
862    _middle_high += middle_needs;
863  }
864
865  if (upper_needs > 0) {
866    assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
867    if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
868      return false;
869    }
870    _upper_high += upper_needs;
871  }
872
873  _high += bytes;
874  return true;
875}
876
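// Editor's note -- continuing the hypothetical layout from the initialize_with_granularity()
// example above: starting from high() == low_boundary(), expand_by(2*M + 512*K) commits the
// whole 1M lower region with small pages and rounds the middle region's new high up to the
// next 2M boundary, so 3M end up committed while _high only advances by the requested
// 2M + 512K; the surplus is consumed by later expansions.
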
877// A page is uncommitted if the contents of the entire page are deemed unusable.
878// Continue to decrement the high() pointer until it reaches a page boundary, at
879// which point that particular page can be uncommitted.
880void VirtualSpace::shrink_by(size_t size) {
881  if (committed_size() < size)
882    fatal("Cannot shrink virtual space to negative size");
883
884  if (special()) {
885    // don't uncommit if the entire space is pinned in memory
886    _high -= size;
887    return;
888  }
889
890  char* unaligned_new_high = high() - size;
891  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
892
893  // Calculate new unaligned address
894  char* unaligned_upper_new_high =
895    MAX2(unaligned_new_high, middle_high_boundary());
896  char* unaligned_middle_new_high =
897    MAX2(unaligned_new_high, lower_high_boundary());
898  char* unaligned_lower_new_high =
899    MAX2(unaligned_new_high, low_boundary());
900
901  // Align address to region's alignment
902  char* aligned_upper_new_high =
903    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
904  char* aligned_middle_new_high =
905    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
906  char* aligned_lower_new_high =
907    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
908
909  // Determine which regions need to shrink
910  size_t upper_needs = 0;
911  if (aligned_upper_new_high < upper_high()) {
912    upper_needs =
913      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
914  }
915  size_t middle_needs = 0;
916  if (aligned_middle_new_high < middle_high()) {
917    middle_needs =
918      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
919  }
920  size_t lower_needs = 0;
921  if (aligned_lower_new_high < lower_high()) {
922    lower_needs =
923      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
924  }
925
926  // Check contiguity.
927  assert(middle_high_boundary() <= upper_high() &&
928         upper_high() <= upper_high_boundary(),
929         "high address must be contained within the region");
930  assert(lower_high_boundary() <= middle_high() &&
931         middle_high() <= middle_high_boundary(),
932         "high address must be contained within the region");
933  assert(low_boundary() <= lower_high() &&
934         lower_high() <= lower_high_boundary(),
935         "high address must be contained within the region");
936
937  // Uncommit
938  if (upper_needs > 0) {
939    assert(middle_high_boundary() <= aligned_upper_new_high &&
940           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
941           "must not shrink beyond region");
942    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
943      debug_only(warning("os::uncommit_memory failed"));
944      return;
945    } else {
946      _upper_high -= upper_needs;
947    }
948  }
949  if (middle_needs > 0) {
950    assert(lower_high_boundary() <= aligned_middle_new_high &&
951           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
952           "must not shrink beyond region");
953    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
954      debug_only(warning("os::uncommit_memory failed"));
955      return;
956    } else {
957      _middle_high -= middle_needs;
958    }
959  }
960  if (lower_needs > 0) {
961    assert(low_boundary() <= aligned_lower_new_high &&
962           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
963           "must not shrink beyond region");
964    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
965      debug_only(warning("os::uncommit_memory failed"));
966      return;
967    } else {
968      _lower_high -= lower_needs;
969    }
970  }
971
972  _high -= size;
973}
974
975#ifndef PRODUCT
976void VirtualSpace::check_for_contiguity() {
977  // Check contiguity.
978  assert(low_boundary() <= lower_high() &&
979         lower_high() <= lower_high_boundary(),
980         "high address must be contained within the region");
981  assert(lower_high_boundary() <= middle_high() &&
982         middle_high() <= middle_high_boundary(),
983         "high address must be contained within the region");
984  assert(middle_high_boundary() <= upper_high() &&
985         upper_high() <= upper_high_boundary(),
986         "high address must be contained within the region");
987  assert(low() >= low_boundary(), "low");
988  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
989  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
990  assert(high() <= upper_high(), "upper high");
991}
992
993void VirtualSpace::print_on(outputStream* out) {
994  out->print   ("Virtual space:");
995  if (special()) out->print(" (pinned in memory)");
996  out->cr();
997  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
998  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
999  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1000  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1001}
1002
1003void VirtualSpace::print() {
1004  print_on(tty);
1005}
1006
1007/////////////// Unit tests ///////////////
1008
1009#ifndef PRODUCT
1010
1011#define test_log(...) \
1012  do {\
1013    if (VerboseInternalVMTests) { \
1014      tty->print_cr(__VA_ARGS__); \
1015      tty->flush(); \
1016    }\
1017  } while (false)
1018
1019class TestReservedSpace : AllStatic {
1020 public:
1021  static void small_page_write(void* addr, size_t size) {
1022    size_t page_size = os::vm_page_size();
1023
1024    char* end = (char*)addr + size;
1025    for (char* p = (char*)addr; p < end; p += page_size) {
1026      *p = 1;
1027    }
1028  }
1029
1030  static void release_memory_for_test(ReservedSpace rs) {
1031    if (rs.special()) {
1032      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1033    } else {
1034      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1035    }
1036  }
1037
1038  static void test_reserved_space1(size_t size, size_t alignment) {
1039    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1040
1041    assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1042
1043    ReservedSpace rs(size,          // size
1044                     alignment,     // alignment
1045                     UseLargePages, // large
1046                     (char *)NULL); // requested_address
1047
1048    test_log(" rs.special() == %d", rs.special());
1049
1050    assert(rs.base() != NULL, "Must be");
1051    assert(rs.size() == size, "Must be");
1052
1053    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1054    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1055
1056    if (rs.special()) {
1057      small_page_write(rs.base(), size);
1058    }
1059
1060    release_memory_for_test(rs);
1061  }
1062
1063  static void test_reserved_space2(size_t size) {
1064    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1065
1066    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1067
1068    ReservedSpace rs(size);
1069
1070    test_log(" rs.special() == %d", rs.special());
1071
1072    assert(rs.base() != NULL, "Must be");
1073    assert(rs.size() == size, "Must be");
1074
1075    if (rs.special()) {
1076      small_page_write(rs.base(), size);
1077    }
1078
1079    release_memory_for_test(rs);
1080  }
1081
1082  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1083    test_log("test_reserved_space3(%p, %p, %d)",
1084        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1085
1086    if (size < alignment) {
1087      // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1088      assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1089      return;
1090    }
1091
1092    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1093    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
1094
1095    bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1096
1097    ReservedSpace rs(size, alignment, large, false);
1098
1099    test_log(" rs.special() == %d", rs.special());
1100
1101    assert(rs.base() != NULL, "Must be");
1102    assert(rs.size() == size, "Must be");
1103
1104    if (rs.special()) {
1105      small_page_write(rs.base(), size);
1106    }
1107
1108    release_memory_for_test(rs);
1109  }
1110
1111
1112  static void test_reserved_space1() {
1113    size_t size = 2 * 1024 * 1024;
1114    size_t ag   = os::vm_allocation_granularity();
1115
1116    test_reserved_space1(size,      ag);
1117    test_reserved_space1(size * 2,  ag);
1118    test_reserved_space1(size * 10, ag);
1119  }
1120
1121  static void test_reserved_space2() {
1122    size_t size = 2 * 1024 * 1024;
1123    size_t ag = os::vm_allocation_granularity();
1124
1125    test_reserved_space2(size * 1);
1126    test_reserved_space2(size * 2);
1127    test_reserved_space2(size * 10);
1128    test_reserved_space2(ag);
1129    test_reserved_space2(size - ag);
1130    test_reserved_space2(size);
1131    test_reserved_space2(size + ag);
1132    test_reserved_space2(size * 2);
1133    test_reserved_space2(size * 2 - ag);
1134    test_reserved_space2(size * 2 + ag);
1135    test_reserved_space2(size * 3);
1136    test_reserved_space2(size * 3 - ag);
1137    test_reserved_space2(size * 3 + ag);
1138    test_reserved_space2(size * 10);
1139    test_reserved_space2(size * 10 + size / 2);
1140  }
1141
1142  static void test_reserved_space3() {
1143    size_t ag = os::vm_allocation_granularity();
1144
1145    test_reserved_space3(ag,      ag    , false);
1146    test_reserved_space3(ag * 2,  ag    , false);
1147    test_reserved_space3(ag * 3,  ag    , false);
1148    test_reserved_space3(ag * 2,  ag * 2, false);
1149    test_reserved_space3(ag * 4,  ag * 2, false);
1150    test_reserved_space3(ag * 8,  ag * 2, false);
1151    test_reserved_space3(ag * 4,  ag * 4, false);
1152    test_reserved_space3(ag * 8,  ag * 4, false);
1153    test_reserved_space3(ag * 16, ag * 4, false);
1154
1155    if (UseLargePages) {
1156      size_t lp = os::large_page_size();
1157
1158      // Without large pages
1159      test_reserved_space3(lp,     ag * 4, false);
1160      test_reserved_space3(lp * 2, ag * 4, false);
1161      test_reserved_space3(lp * 4, ag * 4, false);
1162      test_reserved_space3(lp,     lp    , false);
1163      test_reserved_space3(lp * 2, lp    , false);
1164      test_reserved_space3(lp * 3, lp    , false);
1165      test_reserved_space3(lp * 2, lp * 2, false);
1166      test_reserved_space3(lp * 4, lp * 2, false);
1167      test_reserved_space3(lp * 8, lp * 2, false);
1168
1169      // With large pages
1170      test_reserved_space3(lp, ag * 4    , true);
1171      test_reserved_space3(lp * 2, ag * 4, true);
1172      test_reserved_space3(lp * 4, ag * 4, true);
1173      test_reserved_space3(lp, lp        , true);
1174      test_reserved_space3(lp * 2, lp    , true);
1175      test_reserved_space3(lp * 3, lp    , true);
1176      test_reserved_space3(lp * 2, lp * 2, true);
1177      test_reserved_space3(lp * 4, lp * 2, true);
1178      test_reserved_space3(lp * 8, lp * 2, true);
1179    }
1180  }
1181
1182  static void test_reserved_space() {
1183    test_reserved_space1();
1184    test_reserved_space2();
1185    test_reserved_space3();
1186  }
1187};
1188
1189void TestReservedSpace_test() {
1190  TestReservedSpace::test_reserved_space();
1191}
1192
1193#define assert_equals(actual, expected)  \
1194  assert(actual == expected,             \
1195         "Got " SIZE_FORMAT " expected " \
1196         SIZE_FORMAT, actual, expected);
1197
1198#define assert_ge(value1, value2)                  \
1199  assert(value1 >= value2,                         \
1200         "'" #value1 "': " SIZE_FORMAT " '"        \
1201         #value2 "': " SIZE_FORMAT, value1, value2);
1202
1203#define assert_lt(value1, value2)                  \
1204  assert(value1 < value2,                          \
1205         "'" #value1 "': " SIZE_FORMAT " '"        \
1206         #value2 "': " SIZE_FORMAT, value1, value2);
1207
1208
1209class TestVirtualSpace : AllStatic {
1210  enum TestLargePages {
1211    Default,
1212    Disable,
1213    Reserve,
1214    Commit
1215  };
1216
1217  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1218    switch(mode) {
1219    default:
1220    case Default:
1221    case Reserve:
1222      return ReservedSpace(reserve_size_aligned);
1223    case Disable:
1224    case Commit:
1225      return ReservedSpace(reserve_size_aligned,
1226                           os::vm_allocation_granularity(),
1227                           /* large */ false, /* exec */ false);
1228    }
1229  }
1230
1231  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1232    switch(mode) {
1233    default:
1234    case Default:
1235    case Reserve:
1236      return vs.initialize(rs, 0);
1237    case Disable:
1238      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1239    case Commit:
1240      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1241    }
1242  }
1243
1244 public:
1245  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1246                                                        TestLargePages mode = Default) {
1247    size_t granularity = os::vm_allocation_granularity();
1248    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
1249
1250    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1251
1252    assert(reserved.is_reserved(), "Must be");
1253
1254    VirtualSpace vs;
1255    bool initialized = initialize_virtual_space(vs, reserved, mode);
1256    assert(initialized, "Failed to initialize VirtualSpace");
1257
1258    vs.expand_by(commit_size, false);
1259
1260    if (vs.special()) {
1261      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1262    } else {
1263      assert_ge(vs.actual_committed_size(), commit_size);
1264      // Approximate the commit granularity.
1265      // Make sure that we don't commit using large pages
1266      // if large pages has been disabled for this VirtualSpace.
1267      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1268                                   os::vm_page_size() : os::large_page_size();
1269      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1270    }
1271
1272    reserved.release();
1273  }
1274
1275  static void test_virtual_space_actual_committed_space_one_large_page() {
1276    if (!UseLargePages) {
1277      return;
1278    }
1279
1280    size_t large_page_size = os::large_page_size();
1281
1282    ReservedSpace reserved(large_page_size, large_page_size, true, false);
1283
1284    assert(reserved.is_reserved(), "Must be");
1285
1286    VirtualSpace vs;
1287    bool initialized = vs.initialize(reserved, 0);
1288    assert(initialized, "Failed to initialize VirtualSpace");
1289
1290    vs.expand_by(large_page_size, false);
1291
1292    assert_equals(vs.actual_committed_size(), large_page_size);
1293
1294    reserved.release();
1295  }
1296
1297  static void test_virtual_space_actual_committed_space() {
1298    test_virtual_space_actual_committed_space(4 * K, 0);
1299    test_virtual_space_actual_committed_space(4 * K, 4 * K);
1300    test_virtual_space_actual_committed_space(8 * K, 0);
1301    test_virtual_space_actual_committed_space(8 * K, 4 * K);
1302    test_virtual_space_actual_committed_space(8 * K, 8 * K);
1303    test_virtual_space_actual_committed_space(12 * K, 0);
1304    test_virtual_space_actual_committed_space(12 * K, 4 * K);
1305    test_virtual_space_actual_committed_space(12 * K, 8 * K);
1306    test_virtual_space_actual_committed_space(12 * K, 12 * K);
1307    test_virtual_space_actual_committed_space(64 * K, 0);
1308    test_virtual_space_actual_committed_space(64 * K, 32 * K);
1309    test_virtual_space_actual_committed_space(64 * K, 64 * K);
1310    test_virtual_space_actual_committed_space(2 * M, 0);
1311    test_virtual_space_actual_committed_space(2 * M, 4 * K);
1312    test_virtual_space_actual_committed_space(2 * M, 64 * K);
1313    test_virtual_space_actual_committed_space(2 * M, 1 * M);
1314    test_virtual_space_actual_committed_space(2 * M, 2 * M);
1315    test_virtual_space_actual_committed_space(10 * M, 0);
1316    test_virtual_space_actual_committed_space(10 * M, 4 * K);
1317    test_virtual_space_actual_committed_space(10 * M, 8 * K);
1318    test_virtual_space_actual_committed_space(10 * M, 1 * M);
1319    test_virtual_space_actual_committed_space(10 * M, 2 * M);
1320    test_virtual_space_actual_committed_space(10 * M, 5 * M);
1321    test_virtual_space_actual_committed_space(10 * M, 10 * M);
1322  }
1323
1324  static void test_virtual_space_disable_large_pages() {
1325    if (!UseLargePages) {
1326      return;
1327    }
1328    // These test cases verify that commits use small pages if we force VirtualSpace to disable large pages.
1329    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1330    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1331    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1332    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1333    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1334    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1335    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1336
1337    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1338    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1339    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1340    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1341    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1342    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1343    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1344
1345    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1346    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1347    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1348    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1349    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1350    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1351    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1352  }
1353
1354  static void test_virtual_space() {
1355    test_virtual_space_actual_committed_space();
1356    test_virtual_space_actual_committed_space_one_large_page();
1357    test_virtual_space_disable_large_pages();
1358  }
1359};
1360
1361void TestVirtualSpace_test() {
1362  TestVirtualSpace::test_virtual_space();
1363}
1364
1365#endif // PRODUCT
1366
1367#endif
1368