virtualspace.cpp revision 9099:115188e14c15
1/*
2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "code/codeCacheExtensions.hpp"
27#include "memory/virtualspace.hpp"
28#include "oops/markOop.hpp"
29#include "oops/oop.inline.hpp"
30#include "services/memTracker.hpp"
31
32// ReservedSpace
33
34// Dummy constructor
35ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
36    _alignment(0), _special(false), _executable(false) {
37}
38
39ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
40  bool has_preferred_page_size = preferred_page_size != 0;
41  // Want to use large pages where possible and pad with small pages.
42  size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
43  bool large_pages = page_size != (size_t)os::vm_page_size();
44  size_t alignment;
45  if (large_pages && has_preferred_page_size) {
46    alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
47    // ReservedSpace initialization requires size to be aligned to the given
48    // alignment. Align the size up.
49    size = align_size_up(size, alignment);
50  } else {
51    // Don't force the alignment to be large page aligned,
52    // since that will waste memory.
53    alignment = os::vm_allocation_granularity();
54  }
55  initialize(size, alignment, large_pages, NULL, false);
56}
57
58ReservedSpace::ReservedSpace(size_t size, size_t alignment,
59                             bool large,
60                             char* requested_address) {
61  initialize(size, alignment, large, requested_address, false);
62}
63
64ReservedSpace::ReservedSpace(size_t size, size_t alignment,
65                             bool large,
66                             bool executable) {
67  initialize(size, alignment, large, NULL, executable);
68}
69
70// Helper method.
71static bool failed_to_reserve_as_requested(char* base, char* requested_address,
72                                           const size_t size, bool special)
73{
74  if (base == requested_address || requested_address == NULL)
75    return false; // did not fail
76
77  if (base != NULL) {
78    // A different reserve address may be acceptable in other cases,
79    // but for compressed oops the heap should be at the requested address.
80    assert(UseCompressedOops, "currently requested address used only for compressed oops");
81    if (PrintCompressedOopsMode) {
82      tty->cr();
83      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
84    }
85    // OS ignored requested address. Try different address.
86    if (special) {
87      if (!os::release_memory_special(base, size)) {
88        fatal("os::release_memory_special failed");
89      }
90    } else {
91      if (!os::release_memory(base, size)) {
92        fatal("os::release_memory failed");
93      }
94    }
95  }
96  return true;
97}
98
99void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
100                               char* requested_address,
101                               bool executable) {
102  const size_t granularity = os::vm_allocation_granularity();
103  assert((size & (granularity - 1)) == 0,
104         "size not aligned to os::vm_allocation_granularity()");
105  assert((alignment & (granularity - 1)) == 0,
106         "alignment not aligned to os::vm_allocation_granularity()");
107  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
108         "not a power of 2");
109
110  alignment = MAX2(alignment, (size_t)os::vm_page_size());
111
112  _base = NULL;
113  _size = 0;
114  _special = false;
115  _executable = executable;
116  _alignment = 0;
117  _noaccess_prefix = 0;
118  if (size == 0) {
119    return;
120  }
121
122  // If the OS doesn't support demand paging for large page memory, we need
123  // to use reserve_memory_special() to reserve and pin the entire region.
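  // Such "special" reservations are reserved and committed (pinned) up front
  // via os::reserve_memory_special() and must later be freed with
  // os::release_memory_special(); they cannot be committed or uncommitted
  // incrementally afterwards (see release() and VirtualSpace::expand_by()).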
124  bool special = large && !os::can_commit_large_page_memory();
125  char* base = NULL;
126
127  if (special) {
128
129    base = os::reserve_memory_special(size, alignment, requested_address, executable);
130
131    if (base != NULL) {
132      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
133        // OS ignored requested address. Try different address.
134        return;
135      }
136      // Check alignment constraints.
137      assert((uintptr_t) base % alignment == 0,
138             "Large pages returned a non-aligned address, base: "
139             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
140             p2i(base), alignment);
141      _special = true;
142    } else {
143      // failed; try to reserve regular memory below
144      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
145                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
146        if (PrintCompressedOopsMode) {
147          tty->cr();
148          tty->print_cr("Reserve regular memory without large pages.");
149        }
150      }
151    }
152  }
153
154  if (base == NULL) {
155    // Optimistically assume that the OS returns an aligned base pointer.
156    // When reserving a large address range, most OSes seem to align to at
157    // least 64K.
158
159    // If the memory was requested at a particular address, use
160    // os::attempt_reserve_memory_at() to avoid mapping over something
161    // important.  If available space is not detected, return NULL.
162
163    if (requested_address != 0) {
164      base = os::attempt_reserve_memory_at(size, requested_address);
165      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
166        // OS ignored requested address. Try different address.
167        base = NULL;
168      }
169    } else {
170      base = os::reserve_memory(size, NULL, alignment);
171    }
172
173    if (base == NULL) return;
174
175    // Check alignment constraints
176    if ((((size_t)base) & (alignment - 1)) != 0) {
177      // Base not aligned, retry
178      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
179      // Make sure that size is aligned
180      size = align_size_up(size, alignment);
181      base = os::reserve_memory_aligned(size, alignment);
182
183      if (requested_address != 0 &&
184          failed_to_reserve_as_requested(base, requested_address, size, false)) {
185        // As a result of the alignment constraints, the allocated base differs
186        // from the requested address. Return to the caller, who can
187        // take remedial action (like trying again without a requested address).
188        assert(_base == NULL, "should be");
189        return;
190      }
191    }
192  }
193  // Done
194  _base = base;
195  _size = size;
196  _alignment = alignment;
197}
198
199
200ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
201                             bool special, bool executable) {
202  assert((size % os::vm_allocation_granularity()) == 0,
203         "size not allocation aligned");
204  _base = base;
205  _size = size;
206  _alignment = alignment;
207  _noaccess_prefix = 0;
208  _special = special;
209  _executable = executable;
210}
211
212
213ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
214                                        bool split, bool realloc) {
215  assert(partition_size <= size(), "partition failed");
216  if (split) {
217    os::split_reserved_memory(base(), size(), partition_size, realloc);
218  }
219  ReservedSpace result(base(), partition_size, alignment, special(),
220                       executable());
221  return result;
222}
223
224
225ReservedSpace
226ReservedSpace::last_part(size_t partition_size, size_t alignment) {
227  assert(partition_size <= size(), "partition failed");
228  ReservedSpace result(base() + partition_size, size() - partition_size,
229                       alignment, special(), executable());
230  return result;
231}
232
233
234size_t ReservedSpace::page_align_size_up(size_t size) {
235  return align_size_up(size, os::vm_page_size());
236}
237
238
239size_t ReservedSpace::page_align_size_down(size_t size) {
240  return align_size_down(size, os::vm_page_size());
241}
242
243
244size_t ReservedSpace::allocation_align_size_up(size_t size) {
245  return align_size_up(size, os::vm_allocation_granularity());
246}
247
248
249size_t ReservedSpace::allocation_align_size_down(size_t size) {
250  return align_size_down(size, os::vm_allocation_granularity());
251}
252
253
254void ReservedSpace::release() {
255  if (is_reserved()) {
256    char *real_base = _base - _noaccess_prefix;
257    const size_t real_size = _size + _noaccess_prefix;
258    if (special()) {
259      os::release_memory_special(real_base, real_size);
260    } else {
261      os::release_memory(real_base, real_size);
262    }
263    _base = NULL;
264    _size = 0;
265    _noaccess_prefix = 0;
266    _alignment = 0;
267    _special = false;
268    _executable = false;
269  }
270}
271
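// The noaccess prefix is protected page by page (see establish_noaccess_prefix()
// below), so it must be a multiple of the page size; it is also inserted below
// the heap proper, so the resulting heap base stays aligned only if the prefix
// is a multiple of the heap alignment as well. Hence lcm(page size, alignment).
// For example, with a 4K page size and an 8M heap alignment the prefix is 8M.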
272static size_t noaccess_prefix_size(size_t alignment) {
273  return lcm(os::vm_page_size(), alignment);
274}
275
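// With a non-NULL heap base, compressed oops are decoded roughly as
// base + (narrowOop << shift). A narrowOop of 0 therefore decodes to the heap
// base itself, so a field access through a NULL compressed oop lands in the
// protected prefix and faults. This lets the VM rely on implicit null checks
// (see narrow_oop_use_implicit_null_checks below) instead of explicit ones.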
276void ReservedHeapSpace::establish_noaccess_prefix() {
277  assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
278  _noaccess_prefix = noaccess_prefix_size(_alignment);
279
280  if (base() && base() + _size > (char *)OopEncodingHeapMax) {
281    if (true
282        WIN64_ONLY(&& !UseLargePages)
283        AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
284      // Protect memory at the base of the allocated region.
285      // If special, the page was committed (only matters on Windows).
286      if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
287        fatal("cannot protect protection page");
288      }
289      if (PrintCompressedOopsMode) {
290        tty->cr();
291        tty->print_cr("Protected page at the reserved heap base: "
292                      PTR_FORMAT " / " INTX_FORMAT " bytes", p2i(_base), _noaccess_prefix);
293      }
294      assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
295    } else {
296      Universe::set_narrow_oop_use_implicit_null_checks(false);
297    }
298  }
299
300  _base += _noaccess_prefix;
301  _size -= _noaccess_prefix;
302  assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
303}
304
305// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
306// Does not check whether the reserved memory actually is at requested_address, as the memory returned
307// might still fulfill the wishes of the caller.
308// Ensures the memory is aligned to 'alignment'.
309// NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
310void ReservedHeapSpace::try_reserve_heap(size_t size,
311                                         size_t alignment,
312                                         bool large,
313                                         char* requested_address) {
314  if (_base != NULL) {
315    // We tried before, but we didn't like the address delivered.
316    release();
317  }
318
319  // If the OS doesn't support demand paging for large page memory, we need
320  // to use reserve_memory_special() to reserve and pin the entire region.
321  bool special = large && !os::can_commit_large_page_memory();
322  char* base = NULL;
323
324  if (PrintCompressedOopsMode && Verbose) {
325    tty->print("Trying to allocate at address " PTR_FORMAT " heap of size " SIZE_FORMAT_HEX ".\n",
326               p2i(requested_address), size);
327  }
328
329  if (special) {
330    base = os::reserve_memory_special(size, alignment, requested_address, false);
331
332    if (base != NULL) {
333      // Check alignment constraints.
334      assert((uintptr_t) base % alignment == 0,
335             "Large pages returned a non-aligned address, base: "
336             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
337             p2i(base), alignment);
338      _special = true;
339    }
340  }
341
342  if (base == NULL) {
343    // Failed; try to reserve regular memory below
344    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
345                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
346      if (PrintCompressedOopsMode) {
347        tty->cr();
348        tty->print_cr("Reserve regular memory without large pages.");
349      }
350    }
351
352    // Optimistically assume that the OS returns an aligned base pointer.
353    // When reserving a large address range, most OSes seem to align to at
354    // least 64K.
355
356    // If the memory was requested at a particular address, use
357    // os::attempt_reserve_memory_at() to avoid mapping over something
358    // important.  If available space is not detected, return NULL.
359
360    if (requested_address != 0) {
361      base = os::attempt_reserve_memory_at(size, requested_address);
362    } else {
363      base = os::reserve_memory(size, NULL, alignment);
364    }
365  }
366  if (base == NULL) { return; }
367
368  // Done
369  _base = base;
370  _size = size;
371  _alignment = alignment;
372
373  // Check alignment constraints
374  if ((((size_t)base) & (alignment - 1)) != 0) {
375    // Base not aligned, retry.
376    release();
377  }
378}
379
380void ReservedHeapSpace::try_reserve_range(char *highest_start,
381                                          char *lowest_start,
382                                          size_t attach_point_alignment,
383                                          char *aligned_heap_base_min_address,
384                                          char *upper_bound,
385                                          size_t size,
386                                          size_t alignment,
387                                          bool large) {
388  const size_t attach_range = highest_start - lowest_start;
389  // Cap num_attempts at the number of possible attach points.
390  // At least one attempt is possible even for a zero-sized attach range.
391  const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
392  const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
393
394  const size_t stepsize = (attach_range == 0) ? // Only one try.
395    (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
396
397  // Try attach points from top to bottom.
398  char* attach_point = highest_start;
399  while (attach_point >= lowest_start  &&
400         attach_point <= highest_start &&  // Avoid wrap around.
401         ((_base == NULL) ||
402          (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
403    try_reserve_heap(size, alignment, large, attach_point);
404    attach_point -= stepsize;
405  }
406}
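// For example, with an attach range of 1G, an attach point alignment of 256M
// and HeapSearchSteps == 3, num_attempts_to_try is 3 and stepsize is
// align_size_up(1G / 3, 256M) == 512M, so the loop above tries highest_start,
// highest_start - 512M and highest_start - 1G (== lowest_start), stopping
// early once a reserved base satisfies the given bounds.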
407
408#define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
409#define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
410#define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
411
412// Helper for heap allocation. Returns an array with addresses
413// (OS-specific) which are suited for disjoint base mode. Array is
414// NULL terminated.
415static char** get_attach_addresses_for_disjoint_mode() {
416  static uint64_t addresses[] = {
417     2 * SIZE_32G,
418     3 * SIZE_32G,
419     4 * SIZE_32G,
420     8 * SIZE_32G,
421    10 * SIZE_32G,
422     1 * SIZE_64K * SIZE_32G,
423     2 * SIZE_64K * SIZE_32G,
424     3 * SIZE_64K * SIZE_32G,
425     4 * SIZE_64K * SIZE_32G,
426    16 * SIZE_64K * SIZE_32G,
427    32 * SIZE_64K * SIZE_32G,
428    34 * SIZE_64K * SIZE_32G,
429    0
430  };
431
432  // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This assumes
433  // the array is sorted.
434  uint i = 0;
435  while (addresses[i] != 0 &&
436         (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
437    i++;
438  }
439  uint start = i;
440
441  // Avoid more steps than requested.
442  i = 0;
443  while (addresses[start+i] != 0) {
444    if (i == HeapSearchSteps) {
445      addresses[start+i] = 0;
446      break;
447    }
448    i++;
449  }
450
451  return (char**) &addresses[start];
452}
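// In disjoint base mode the heap base is a multiple of OopEncodingHeapMax
// (32G with the default 8 byte object alignment), so the bits of the base and
// of the shifted narrow oop do not overlap. Decoding can then combine them
// with a bitwise or instead of an add, which is cheaper on some platforms;
// the addresses above are chosen to make such a base reachable.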
453
454void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
455  guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
456            "can not allocate compressed oop heap for this size");
457  guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
458  assert(HeapBaseMinAddress > 0, "sanity");
459
460  const size_t granularity = os::vm_allocation_granularity();
461  assert((size & (granularity - 1)) == 0,
462         "size not aligned to os::vm_allocation_granularity()");
463  assert((alignment & (granularity - 1)) == 0,
464         "alignment not aligned to os::vm_allocation_granularity()");
465  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
466         "not a power of 2");
467
468  // The necessary attach point alignment for generated wish addresses.
469  // This is needed to increase the chance of attaching for mmap and shmat.
470  const size_t os_attach_point_alignment =
471    AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
472    NOT_AIX(os::vm_allocation_granularity());
473  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
474
475  char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
476  size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
477    noaccess_prefix_size(alignment) : 0;
478
479  // Attempt to allocate at the user-given address.
480  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
481    try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
482    if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
483      release();
484    }
485  }
486
487  // Keep heap at HeapBaseMinAddress.
488  if (_base == NULL) {
489
490    // Try to allocate the heap at addresses that allow efficient oop compression.
491    // Different schemes are tried, in order of decreasing optimization potential.
492    //
493    // For this, try_reserve_heap() is called with the desired heap base addresses.
494    // A call into the os layer to allocate at a given address can return memory
495    // at a different address than requested.  Still, this might be memory at a useful
496    // address. try_reserve_heap() always returns this allocated memory, as only here
497    // the criteria for a good heap are checked.
498
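    // The decode modes tried below, in order of decreasing preference
    // (shift is the log2 of the object alignment):
    //   unscaled:   oop = narrowOop                     heap end <= UnscaledOopHeapMax
    //   zerobased:  oop = narrowOop << shift            heap end <= OopEncodingHeapMax
    //   disjoint:   oop = base | (narrowOop << shift)   base a multiple of OopEncodingHeapMax
    //   heap-based: oop = base + (narrowOop << shift)   arbitrary base
    // The two base != 0 modes need the noaccess prefix for implicit null checks.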
499    // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
500    // Give it several tries from top of range to bottom.
501    if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
502
503      // Calculate the address range within which we try to attach (range of possible start addresses).
504      char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
505      char* const lowest_start  = (char *)align_ptr_up  (        aligned_heap_base_min_address             , attach_point_alignment);
506      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
507                        aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
508    }
509
510    // zerobased: Attempt to allocate in the lower 32G.
511    // But leave room for the compressed class space, which is allocated above
512    // the heap.
513    char *zerobased_max = (char *)OopEncodingHeapMax;
514    const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
515    // For small heaps, save some space for compressed class pointer
516    // space so it can be decoded with no base.
517    if (UseCompressedClassPointers && !UseSharedSpaces &&
518        OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
519        (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
520      zerobased_max = (char *)OopEncodingHeapMax - class_space;
521    }
522
523    // Give it several tries from top of range to bottom.
524    if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
525        ((_base == NULL) ||                        // No previous try succeeded.
526         (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
527
528      // Calculate the address range within which we try to attach (range of possible start addresses).
529      char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
530      // Size is not guaranteed to be less than UnscaledOopHeapMax, so the
531      // subtraction below may wrap around; the check that follows handles this.
532      char *lowest_start = aligned_heap_base_min_address;
533      uint64_t unscaled_end = UnscaledOopHeapMax - size;
534      if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
535        lowest_start = MAX2(lowest_start, (char*)unscaled_end);
536      }
537      lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
538      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
539                        aligned_heap_base_min_address, zerobased_max, size, alignment, large);
540    }
541
542    // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
543    // implement null checks.
544    noaccess_prefix = noaccess_prefix_size(alignment);
545
546    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
547    char** addresses = get_attach_addresses_for_disjoint_mode();
548    int i = 0;
549    while (addresses[i] &&                                 // End of array not yet reached.
550           ((_base == NULL) ||                             // No previous try succeeded.
551            (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
552             !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
553      char* const attach_point = addresses[i];
554      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
555      try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
556      i++;
557    }
558
559    // Last, desperate try without any placement.
560    if (_base == NULL) {
561      if (PrintCompressedOopsMode && Verbose) {
562        tty->print("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX ".\n", size + noaccess_prefix);
563      }
564      initialize(size + noaccess_prefix, alignment, large, NULL, false);
565    }
566  }
567}
568
569ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
570
571  if (size == 0) {
572    return;
573  }
574
575  // Heap size should be aligned to alignment, too.
576  guarantee(is_size_aligned(size, alignment), "set by caller");
577
578  if (UseCompressedOops) {
579    initialize_compressed_heap(size, alignment, large);
580    if (_size > size) {
581      // We allocated the heap with a noaccess prefix.
582      // It can happen that we get a zerobased/unscaled heap with a noaccess prefix
583      // if we had to try at an arbitrary address.
584      establish_noaccess_prefix();
585    }
586  } else {
587    initialize(size, alignment, large, NULL, false);
588  }
589
590  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
591         "area must be distinguishable from marks for mark-sweep");
592  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
593         "area must be distinguishable from marks for mark-sweep");
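  // (encode_pointer_as_mark() stores a pointer in a mark word by setting the
  //  low "marked" bits; the asserts above check that the heap boundary
  //  addresses survive that round trip, i.e. are sufficiently aligned.)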
594
595  if (base() > 0) {
596    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
597  }
598}
599
600// Reserve space for the code segment.  Same as the Java heap, except we mark this as
601// executable.
602ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
603                                     size_t rs_align,
604                                     bool large) :
605  ReservedSpace(r_size, rs_align, large, /*executable*/ CodeCacheExtensions::support_dynamic_code()) {
606  MemTracker::record_virtual_memory_type((address)base(), mtCode);
607}
608
609// VirtualSpace
610
611VirtualSpace::VirtualSpace() {
612  _low_boundary           = NULL;
613  _high_boundary          = NULL;
614  _low                    = NULL;
615  _high                   = NULL;
616  _lower_high             = NULL;
617  _middle_high            = NULL;
618  _upper_high             = NULL;
619  _lower_high_boundary    = NULL;
620  _middle_high_boundary   = NULL;
621  _upper_high_boundary    = NULL;
622  _lower_alignment        = 0;
623  _middle_alignment       = 0;
624  _upper_alignment        = 0;
625  _special                = false;
626  _executable             = false;
627}
628
629
630bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
631  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
632  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
633}
634
635bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
636  if(!rs.is_reserved()) return false;  // allocation failed.
637  assert(_low_boundary == NULL, "VirtualSpace already initialized");
638  assert(max_commit_granularity > 0, "Granularity must be non-zero.");
639
640  _low_boundary  = rs.base();
641  _high_boundary = low_boundary() + rs.size();
642
643  _low = low_boundary();
644  _high = low();
645
646  _special = rs.special();
647  _executable = rs.executable();
648
649  // When a VirtualSpace begins life at a large size, make all future expansion
650  // and shrinking occur aligned to a granularity of large pages.  This avoids
651  // fragmentation of physical addresses that inhibits the use of large pages
652  // by the OS virtual memory system.  Empirically,  we see that with a 4MB
653  // page size, the only spaces that get handled this way are codecache and
654  // the heap itself, both of which provide a substantial performance
655  // boost in many benchmarks when covered by large pages.
656  //
657  // No attempt is made to force large page alignment at the very top and
658  // bottom of the space if they are not already aligned.
659  _lower_alignment  = os::vm_page_size();
660  _middle_alignment = max_commit_granularity;
661  _upper_alignment  = os::vm_page_size();
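  // The space is thus committed in up to three regions:
  //   [low_boundary,         lower_high_boundary)  - small pages, up to the first
  //                                                  middle-aligned address
  //   [lower_high_boundary,  middle_high_boundary) - committed with
  //                                                  max_commit_granularity
  //   [middle_high_boundary, high_boundary)        - small pages, the tail after
  //                                                  the last middle-aligned address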
662
663  // End of each region
664  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
665  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
666  _upper_high_boundary = high_boundary();
667
668  // High address of each region
669  _lower_high = low_boundary();
670  _middle_high = lower_high_boundary();
671  _upper_high = middle_high_boundary();
672
673  // commit to initial size
674  if (committed_size > 0) {
675    if (!expand_by(committed_size)) {
676      return false;
677    }
678  }
679  return true;
680}
681
682
683VirtualSpace::~VirtualSpace() {
684  release();
685}
686
687
688void VirtualSpace::release() {
689  // This does not release memory it never reserved.
690  // Caller must release via rs.release();
691  _low_boundary           = NULL;
692  _high_boundary          = NULL;
693  _low                    = NULL;
694  _high                   = NULL;
695  _lower_high             = NULL;
696  _middle_high            = NULL;
697  _upper_high             = NULL;
698  _lower_high_boundary    = NULL;
699  _middle_high_boundary   = NULL;
700  _upper_high_boundary    = NULL;
701  _lower_alignment        = 0;
702  _middle_alignment       = 0;
703  _upper_alignment        = 0;
704  _special                = false;
705  _executable             = false;
706}
707
708
709size_t VirtualSpace::committed_size() const {
710  return pointer_delta(high(), low(), sizeof(char));
711}
712
713
714size_t VirtualSpace::reserved_size() const {
715  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
716}
717
718
719size_t VirtualSpace::uncommitted_size()  const {
720  return reserved_size() - committed_size();
721}
722
723size_t VirtualSpace::actual_committed_size() const {
724  // Special VirtualSpaces commit all reserved space up front.
725  if (special()) {
726    return reserved_size();
727  }
728
729  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
730  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
731  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
732
733#ifdef ASSERT
734  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
735  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
736  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
737
738  if (committed_high > 0) {
739    assert(committed_low == lower, "Must be");
740    assert(committed_middle == middle, "Must be");
741  }
742
743  if (committed_middle > 0) {
744    assert(committed_low == lower, "Must be");
745  }
746  if (committed_middle < middle) {
747    assert(committed_high == 0, "Must be");
748  }
749
750  if (committed_low < lower) {
751    assert(committed_high == 0, "Must be");
752    assert(committed_middle == 0, "Must be");
753  }
754#endif
755
756  return committed_low + committed_middle + committed_high;
757}
758
759
760bool VirtualSpace::contains(const void* p) const {
761  return low() <= (const char*) p && (const char*) p < high();
762}
763
764/*
765   First we need to determine if a particular virtual space is using large
766   pages.  This is done in the initialize function, and only virtual spaces
767   that are larger than LargePageSizeInBytes use large pages.  Once we
768   have determined this, all expand_by and shrink_by calls must grow and
769   shrink by large page size chunks.  If a particular request
770   is within the current large page, the call to commit and uncommit memory
771   can be ignored.  In the case that the low and high boundaries of this
772   space are not large page aligned, the pages leading to the first large
773   page address and the pages after the last large page address must be
774   allocated with default pages.
775*/
776bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
777  if (uncommitted_size() < bytes) return false;
778
779  if (special()) {
780    // don't commit memory if the entire space is pinned in memory
781    _high += bytes;
782    return true;
783  }
784
785  char* previous_high = high();
786  char* unaligned_new_high = high() + bytes;
787  assert(unaligned_new_high <= high_boundary(),
788         "cannot expand by more than upper boundary");
789
790  // Calculate where the new high for each of the regions should be.  If
791  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
792  // then the unaligned lower and upper new highs would be the
793  // lower_high() and upper_high() respectively.
794  char* unaligned_lower_new_high =
795    MIN2(unaligned_new_high, lower_high_boundary());
796  char* unaligned_middle_new_high =
797    MIN2(unaligned_new_high, middle_high_boundary());
798  char* unaligned_upper_new_high =
799    MIN2(unaligned_new_high, upper_high_boundary());
800
801  // Align the new highs based on the regions' alignment.  Lower and upper
802  // alignment will always be default page size.  middle alignment will be
803  // LargePageSizeInBytes if the actual size of the virtual space is in
804  // fact larger than LargePageSizeInBytes.
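  // For example, with a 4K page size for the lower and upper regions and a 2M
  // middle alignment, growth that ends inside the middle region is committed
  // in 2M steps, while growth confined to the lower or upper region is
  // committed in 4K steps.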
805  char* aligned_lower_new_high =
806    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
807  char* aligned_middle_new_high =
808    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
809  char* aligned_upper_new_high =
810    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
811
812  // Determine which regions need to grow in this expand_by call.
813  // If you are growing in the lower region, high() must be in that
814  // region so calculate the size based on high().  For the middle and
815  // upper regions, determine the starting point of growth based on the
816  // location of high().  By getting the MAX of the region's low address
817  // (or the previous region's high address) and high(), we can tell if it
818  // is an intra or inter region growth.
819  size_t lower_needs = 0;
820  if (aligned_lower_new_high > lower_high()) {
821    lower_needs =
822      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
823  }
824  size_t middle_needs = 0;
825  if (aligned_middle_new_high > middle_high()) {
826    middle_needs =
827      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
828  }
829  size_t upper_needs = 0;
830  if (aligned_upper_new_high > upper_high()) {
831    upper_needs =
832      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
833  }
834
835  // Check contiguity.
836  assert(low_boundary() <= lower_high() &&
837         lower_high() <= lower_high_boundary(),
838         "high address must be contained within the region");
839  assert(lower_high_boundary() <= middle_high() &&
840         middle_high() <= middle_high_boundary(),
841         "high address must be contained within the region");
842  assert(middle_high_boundary() <= upper_high() &&
843         upper_high() <= upper_high_boundary(),
844         "high address must be contained within the region");
845
846  // Commit regions
847  if (lower_needs > 0) {
848    assert(low_boundary() <= lower_high() &&
849           lower_high() + lower_needs <= lower_high_boundary(),
850           "must not expand beyond region");
851    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
852      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
853                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
854                         p2i(lower_high()), lower_needs, _executable);)
855      return false;
856    } else {
857      _lower_high += lower_needs;
858    }
859  }
860  if (middle_needs > 0) {
861    assert(lower_high_boundary() <= middle_high() &&
862           middle_high() + middle_needs <= middle_high_boundary(),
863           "must not expand beyond region");
864    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
865                           _executable)) {
866      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
867                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
868                         ", %d) failed", p2i(middle_high()), middle_needs,
869                         middle_alignment(), _executable);)
870      return false;
871    }
872    _middle_high += middle_needs;
873  }
874  if (upper_needs > 0) {
875    assert(middle_high_boundary() <= upper_high() &&
876           upper_high() + upper_needs <= upper_high_boundary(),
877           "must not expand beyond region");
878    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
879      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
880                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
881                         p2i(upper_high()), upper_needs, _executable);)
882      return false;
883    } else {
884      _upper_high += upper_needs;
885    }
886  }
887
888  if (pre_touch || AlwaysPreTouch) {
889    os::pretouch_memory(previous_high, unaligned_new_high);
890  }
891
892  _high += bytes;
893  return true;
894}
895
896// A page is uncommitted if the contents of the entire page are deemed unusable.
897// Continue to decrement the high() pointer until it reaches a page boundary,
898// at which point that particular page can be uncommitted.
899void VirtualSpace::shrink_by(size_t size) {
900  if (committed_size() < size)
901    fatal("Cannot shrink virtual space to negative size");
902
903  if (special()) {
904    // don't uncommit if the entire space is pinned in memory
905    _high -= size;
906    return;
907  }
908
909  char* unaligned_new_high = high() - size;
910  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
911
912  // Calculate new unaligned address
913  char* unaligned_upper_new_high =
914    MAX2(unaligned_new_high, middle_high_boundary());
915  char* unaligned_middle_new_high =
916    MAX2(unaligned_new_high, lower_high_boundary());
917  char* unaligned_lower_new_high =
918    MAX2(unaligned_new_high, low_boundary());
919
920  // Align address to region's alignment
921  char* aligned_upper_new_high =
922    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
923  char* aligned_middle_new_high =
924    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
925  char* aligned_lower_new_high =
926    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
927
928  // Determine which regions need to shrink
929  size_t upper_needs = 0;
930  if (aligned_upper_new_high < upper_high()) {
931    upper_needs =
932      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
933  }
934  size_t middle_needs = 0;
935  if (aligned_middle_new_high < middle_high()) {
936    middle_needs =
937      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
938  }
939  size_t lower_needs = 0;
940  if (aligned_lower_new_high < lower_high()) {
941    lower_needs =
942      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
943  }
944
945  // Check contiguity.
946  assert(middle_high_boundary() <= upper_high() &&
947         upper_high() <= upper_high_boundary(),
948         "high address must be contained within the region");
949  assert(lower_high_boundary() <= middle_high() &&
950         middle_high() <= middle_high_boundary(),
951         "high address must be contained within the region");
952  assert(low_boundary() <= lower_high() &&
953         lower_high() <= lower_high_boundary(),
954         "high address must be contained within the region");
955
956  // Uncommit
957  if (upper_needs > 0) {
958    assert(middle_high_boundary() <= aligned_upper_new_high &&
959           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
960           "must not shrink beyond region");
961    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
962      debug_only(warning("os::uncommit_memory failed"));
963      return;
964    } else {
965      _upper_high -= upper_needs;
966    }
967  }
968  if (middle_needs > 0) {
969    assert(lower_high_boundary() <= aligned_middle_new_high &&
970           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
971           "must not shrink beyond region");
972    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
973      debug_only(warning("os::uncommit_memory failed"));
974      return;
975    } else {
976      _middle_high -= middle_needs;
977    }
978  }
979  if (lower_needs > 0) {
980    assert(low_boundary() <= aligned_lower_new_high &&
981           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
982           "must not shrink beyond region");
983    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
984      debug_only(warning("os::uncommit_memory failed"));
985      return;
986    } else {
987      _lower_high -= lower_needs;
988    }
989  }
990
991  _high -= size;
992}
993
994#ifndef PRODUCT
995void VirtualSpace::check_for_contiguity() {
996  // Check contiguity.
997  assert(low_boundary() <= lower_high() &&
998         lower_high() <= lower_high_boundary(),
999         "high address must be contained within the region");
1000  assert(lower_high_boundary() <= middle_high() &&
1001         middle_high() <= middle_high_boundary(),
1002         "high address must be contained within the region");
1003  assert(middle_high_boundary() <= upper_high() &&
1004         upper_high() <= upper_high_boundary(),
1005         "high address must be contained within the region");
1006  assert(low() >= low_boundary(), "low");
1007  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1008  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1009  assert(high() <= upper_high(), "upper high");
1010}
1011
1012void VirtualSpace::print_on(outputStream* out) {
1013  out->print   ("Virtual space:");
1014  if (special()) out->print(" (pinned in memory)");
1015  out->cr();
1016  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1017  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1018  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1019  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1020}
1021
1022void VirtualSpace::print() {
1023  print_on(tty);
1024}
1025
1026/////////////// Unit tests ///////////////
1027
1028#ifndef PRODUCT
1029
1030#define test_log(...) \
1031  do {\
1032    if (VerboseInternalVMTests) { \
1033      tty->print_cr(__VA_ARGS__); \
1034      tty->flush(); \
1035    }\
1036  } while (false)
1037
1038class TestReservedSpace : AllStatic {
1039 public:
1040  static void small_page_write(void* addr, size_t size) {
1041    size_t page_size = os::vm_page_size();
1042
1043    char* end = (char*)addr + size;
1044    for (char* p = (char*)addr; p < end; p += page_size) {
1045      *p = 1;
1046    }
1047  }
1048
1049  static void release_memory_for_test(ReservedSpace rs) {
1050    if (rs.special()) {
1051      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1052    } else {
1053      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1054    }
1055  }
1056
1057  static void test_reserved_space1(size_t size, size_t alignment) {
1058    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1059
1060    assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1061
1062    ReservedSpace rs(size,          // size
1063                     alignment,     // alignment
1064                     UseLargePages, // large
1065                     (char *)NULL); // requested_address
1066
1067    test_log(" rs.special() == %d", rs.special());
1068
1069    assert(rs.base() != NULL, "Must be");
1070    assert(rs.size() == size, "Must be");
1071
1072    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1073    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1074
1075    if (rs.special()) {
1076      small_page_write(rs.base(), size);
1077    }
1078
1079    release_memory_for_test(rs);
1080  }
1081
1082  static void test_reserved_space2(size_t size) {
1083    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1084
1085    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1086
1087    ReservedSpace rs(size);
1088
1089    test_log(" rs.special() == %d", rs.special());
1090
1091    assert(rs.base() != NULL, "Must be");
1092    assert(rs.size() == size, "Must be");
1093
1094    if (rs.special()) {
1095      small_page_write(rs.base(), size);
1096    }
1097
1098    release_memory_for_test(rs);
1099  }
1100
1101  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1102    test_log("test_reserved_space3(%p, %p, %d)",
1103        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1104
1105    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1106    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
1107
1108    bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1109
1110    ReservedSpace rs(size, alignment, large, false);
1111
1112    test_log(" rs.special() == %d", rs.special());
1113
1114    assert(rs.base() != NULL, "Must be");
1115    assert(rs.size() == size, "Must be");
1116
1117    if (rs.special()) {
1118      small_page_write(rs.base(), size);
1119    }
1120
1121    release_memory_for_test(rs);
1122  }
1123
1124
1125  static void test_reserved_space1() {
1126    size_t size = 2 * 1024 * 1024;
1127    size_t ag   = os::vm_allocation_granularity();
1128
1129    test_reserved_space1(size,      ag);
1130    test_reserved_space1(size * 2,  ag);
1131    test_reserved_space1(size * 10, ag);
1132  }
1133
1134  static void test_reserved_space2() {
1135    size_t size = 2 * 1024 * 1024;
1136    size_t ag = os::vm_allocation_granularity();
1137
1138    test_reserved_space2(size * 1);
1139    test_reserved_space2(size * 2);
1140    test_reserved_space2(size * 10);
1141    test_reserved_space2(ag);
1142    test_reserved_space2(size - ag);
1143    test_reserved_space2(size);
1144    test_reserved_space2(size + ag);
1145    test_reserved_space2(size * 2);
1146    test_reserved_space2(size * 2 - ag);
1147    test_reserved_space2(size * 2 + ag);
1148    test_reserved_space2(size * 3);
1149    test_reserved_space2(size * 3 - ag);
1150    test_reserved_space2(size * 3 + ag);
1151    test_reserved_space2(size * 10);
1152    test_reserved_space2(size * 10 + size / 2);
1153  }
1154
1155  static void test_reserved_space3() {
1156    size_t ag = os::vm_allocation_granularity();
1157
1158    test_reserved_space3(ag,      ag    , false);
1159    test_reserved_space3(ag * 2,  ag    , false);
1160    test_reserved_space3(ag * 3,  ag    , false);
1161    test_reserved_space3(ag * 2,  ag * 2, false);
1162    test_reserved_space3(ag * 4,  ag * 2, false);
1163    test_reserved_space3(ag * 8,  ag * 2, false);
1164    test_reserved_space3(ag * 4,  ag * 4, false);
1165    test_reserved_space3(ag * 8,  ag * 4, false);
1166    test_reserved_space3(ag * 16, ag * 4, false);
1167
1168    if (UseLargePages) {
1169      size_t lp = os::large_page_size();
1170
1171      // Without large pages
1172      test_reserved_space3(lp,     ag * 4, false);
1173      test_reserved_space3(lp * 2, ag * 4, false);
1174      test_reserved_space3(lp * 4, ag * 4, false);
1175      test_reserved_space3(lp,     lp    , false);
1176      test_reserved_space3(lp * 2, lp    , false);
1177      test_reserved_space3(lp * 3, lp    , false);
1178      test_reserved_space3(lp * 2, lp * 2, false);
1179      test_reserved_space3(lp * 4, lp * 2, false);
1180      test_reserved_space3(lp * 8, lp * 2, false);
1181
1182      // With large pages
1183      test_reserved_space3(lp, ag * 4    , true);
1184      test_reserved_space3(lp * 2, ag * 4, true);
1185      test_reserved_space3(lp * 4, ag * 4, true);
1186      test_reserved_space3(lp, lp        , true);
1187      test_reserved_space3(lp * 2, lp    , true);
1188      test_reserved_space3(lp * 3, lp    , true);
1189      test_reserved_space3(lp * 2, lp * 2, true);
1190      test_reserved_space3(lp * 4, lp * 2, true);
1191      test_reserved_space3(lp * 8, lp * 2, true);
1192    }
1193  }
1194
1195  static void test_reserved_space() {
1196    test_reserved_space1();
1197    test_reserved_space2();
1198    test_reserved_space3();
1199  }
1200};
1201
1202void TestReservedSpace_test() {
1203  TestReservedSpace::test_reserved_space();
1204}
1205
1206#define assert_equals(actual, expected)  \
1207  assert(actual == expected,             \
1208         "Got " SIZE_FORMAT " expected " \
1209         SIZE_FORMAT, actual, expected);
1210
1211#define assert_ge(value1, value2)                  \
1212  assert(value1 >= value2,                         \
1213         "'" #value1 "': " SIZE_FORMAT " '"        \
1214         #value2 "': " SIZE_FORMAT, value1, value2);
1215
1216#define assert_lt(value1, value2)                  \
1217  assert(value1 < value2,                          \
1218         "'" #value1 "': " SIZE_FORMAT " '"        \
1219         #value2 "': " SIZE_FORMAT, value1, value2);
1220
1221
1222class TestVirtualSpace : AllStatic {
1223  enum TestLargePages {
1224    Default,
1225    Disable,
1226    Reserve,
1227    Commit
1228  };
1229
1230  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1231    switch(mode) {
1232    default:
1233    case Default:
1234    case Reserve:
1235      return ReservedSpace(reserve_size_aligned);
1236    case Disable:
1237    case Commit:
1238      return ReservedSpace(reserve_size_aligned,
1239                           os::vm_allocation_granularity(),
1240                           /* large */ false, /* exec */ false);
1241    }
1242  }
1243
1244  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1245    switch(mode) {
1246    default:
1247    case Default:
1248    case Reserve:
1249      return vs.initialize(rs, 0);
1250    case Disable:
1251      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1252    case Commit:
1253      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1254    }
1255  }
1256
1257 public:
1258  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1259                                                        TestLargePages mode = Default) {
1260    size_t granularity = os::vm_allocation_granularity();
1261    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
1262
1263    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1264
1265    assert(reserved.is_reserved(), "Must be");
1266
1267    VirtualSpace vs;
1268    bool initialized = initialize_virtual_space(vs, reserved, mode);
1269    assert(initialized, "Failed to initialize VirtualSpace");
1270
1271    vs.expand_by(commit_size, false);
1272
1273    if (vs.special()) {
1274      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1275    } else {
1276      assert_ge(vs.actual_committed_size(), commit_size);
1277      // Approximate the commit granularity.
1278      // Make sure that we don't commit using large pages
1279      // if large pages have been disabled for this VirtualSpace.
1280      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1281                                   os::vm_page_size() : os::large_page_size();
1282      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1283    }
1284
1285    reserved.release();
1286  }
1287
1288  static void test_virtual_space_actual_committed_space_one_large_page() {
1289    if (!UseLargePages) {
1290      return;
1291    }
1292
1293    size_t large_page_size = os::large_page_size();
1294
1295    ReservedSpace reserved(large_page_size, large_page_size, true, false);
1296
1297    assert(reserved.is_reserved(), "Must be");
1298
1299    VirtualSpace vs;
1300    bool initialized = vs.initialize(reserved, 0);
1301    assert(initialized, "Failed to initialize VirtualSpace");
1302
1303    vs.expand_by(large_page_size, false);
1304
1305    assert_equals(vs.actual_committed_size(), large_page_size);
1306
1307    reserved.release();
1308  }
1309
1310  static void test_virtual_space_actual_committed_space() {
1311    test_virtual_space_actual_committed_space(4 * K, 0);
1312    test_virtual_space_actual_committed_space(4 * K, 4 * K);
1313    test_virtual_space_actual_committed_space(8 * K, 0);
1314    test_virtual_space_actual_committed_space(8 * K, 4 * K);
1315    test_virtual_space_actual_committed_space(8 * K, 8 * K);
1316    test_virtual_space_actual_committed_space(12 * K, 0);
1317    test_virtual_space_actual_committed_space(12 * K, 4 * K);
1318    test_virtual_space_actual_committed_space(12 * K, 8 * K);
1319    test_virtual_space_actual_committed_space(12 * K, 12 * K);
1320    test_virtual_space_actual_committed_space(64 * K, 0);
1321    test_virtual_space_actual_committed_space(64 * K, 32 * K);
1322    test_virtual_space_actual_committed_space(64 * K, 64 * K);
1323    test_virtual_space_actual_committed_space(2 * M, 0);
1324    test_virtual_space_actual_committed_space(2 * M, 4 * K);
1325    test_virtual_space_actual_committed_space(2 * M, 64 * K);
1326    test_virtual_space_actual_committed_space(2 * M, 1 * M);
1327    test_virtual_space_actual_committed_space(2 * M, 2 * M);
1328    test_virtual_space_actual_committed_space(10 * M, 0);
1329    test_virtual_space_actual_committed_space(10 * M, 4 * K);
1330    test_virtual_space_actual_committed_space(10 * M, 8 * K);
1331    test_virtual_space_actual_committed_space(10 * M, 1 * M);
1332    test_virtual_space_actual_committed_space(10 * M, 2 * M);
1333    test_virtual_space_actual_committed_space(10 * M, 5 * M);
1334    test_virtual_space_actual_committed_space(10 * M, 10 * M);
1335  }
1336
1337  static void test_virtual_space_disable_large_pages() {
1338    if (!UseLargePages) {
1339      return;
1340    }
1341    // These test cases verify commit behavior when we force VirtualSpace to disable large pages.
1342    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1343    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1344    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1345    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1346    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1347    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1348    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1349
1350    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1351    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1352    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1353    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1354    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1355    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1356    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1357
1358    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1359    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1360    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1361    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1362    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1363    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1364    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1365  }
1366
1367  static void test_virtual_space() {
1368    test_virtual_space_actual_committed_space();
1369    test_virtual_space_actual_committed_space_one_large_page();
1370    test_virtual_space_disable_large_pages();
1371  }
1372};
1373
1374void TestVirtualSpace_test() {
1375  TestVirtualSpace::test_virtual_space();
1376}
1377
1378#endif // PRODUCT
1379
1380#endif
1381