virtualspace.cpp revision 13249:a2753984d2c1
/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/virtualspace.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"

// ReservedSpace

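// Typical usage (illustrative sketch only; the actual call sites elsewhere in
// the VM vary): a client reserves address space up front and then commits
// parts of it on demand through a VirtualSpace, e.g.
//
//   ReservedSpace rs(reserved_bytes);
//   VirtualSpace vs;
//   if (vs.initialize(rs, committed_bytes)) {
//     // use the committed range [vs.low(), vs.high()) ...
//   }
//
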
// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
  bool has_preferred_page_size = preferred_page_size != 0;
  // Want to use large pages where possible and pad with small pages.
  size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  size_t alignment;
  if (large_pages && has_preferred_page_size) {
    alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
    // ReservedSpace initialization requires size to be aligned to the given
    // alignment. Align the size up.
    size = align_up(size, alignment);
  } else {
    // Don't force the alignment to be large page aligned,
    // since that will waste memory.
    alignment = os::vm_allocation_granularity();
  }
  initialize(size, alignment, large_pages, NULL, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address) {
  initialize(size, alignment, large, requested_address, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, executable);
}

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // Different reserve address may be acceptable in other cases
    // but for compressed oops heap should be at requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

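// Reserves (but does not commit) a 'size' byte range with the given alignment,
// optionally at 'requested_address' and optionally backed by pinned large
// pages. On success _base/_size/_alignment are filled in; on failure the
// ReservedSpace is left unreserved (_base == NULL).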
void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address, base: "
             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
             p2i(base), alignment);
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        log_debug(gc, heap, coops)("Reserve regular memory without large pages");
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over-mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _alignment = 0;
    _special = false;
    _executable = false;
  }
}

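// The noaccess prefix must cover whole pages and keep the heap start aligned,
// hence the least common multiple of page size and heap alignment.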
static size_t noaccess_prefix_size(size_t alignment) {
  return lcm(os::vm_page_size(), alignment);
}

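// Protects the first noaccess_prefix_size() bytes of the reservation so that
// decoding a narrow oop of 0 against a non-NULL heap base traps, keeping
// compressed-oops null checks implicit. _base and _size are then adjusted to
// exclude the prefix.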
void ReservedHeapSpace::establish_noaccess_prefix() {
  assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
  _noaccess_prefix = noaccess_prefix_size(_alignment);

  if (base() && base() + _size > (char *)OopEncodingHeapMax) {
    if (true
        WIN64_ONLY(&& !UseLargePages)
        AIX_ONLY(&& os::vm_page_size() != 64*K)) {
      // Protect memory at the base of the allocated region.
      // If special, the page was committed (only matters on windows)
      if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
        fatal("cannot protect protection page");
      }
      log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
                                 PTR_FORMAT " / " INTX_FORMAT " bytes",
                                 p2i(_base),
                                 _noaccess_prefix);
      assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
    } else {
      Universe::set_narrow_oop_use_implicit_null_checks(false);
    }
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
}

// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Ensures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory, this is released first.
void ReservedHeapSpace::try_reserve_heap(size_t size,
                                         size_t alignment,
                                         bool large,
                                         char* requested_address) {
  if (_base != NULL) {
    // We tried before, but we didn't like the address delivered.
    release();
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                             " heap of size " SIZE_FORMAT_HEX,
                             p2i(requested_address),
                             size);

  if (special) {
    base = os::reserve_memory_special(size, alignment, requested_address, false);

    if (base != NULL) {
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address, base: "
             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
             p2i(base), alignment);
      _special = true;
    }
  }

  if (base == NULL) {
    // Failed; try to reserve regular memory below
    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
      log_debug(gc, heap, coops)("Reserve regular memory without large pages");
    }

    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over-mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }
  }
  if (base == NULL) { return; }

  // Done
  _base = base;
  _size = size;
  _alignment = alignment;

  // Check alignment constraints
  if ((((size_t)base) & (alignment - 1)) != 0) {
    // Base not aligned, retry.
    release();
  }
}

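// Walks candidate attach points from highest_start down to lowest_start in
// steps of at least attach_point_alignment, calling try_reserve_heap() until a
// reservation lands inside [aligned_heap_base_min_address, upper_bound).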
void ReservedHeapSpace::try_reserve_range(char *highest_start,
                                          char *lowest_start,
                                          size_t attach_point_alignment,
                                          char *aligned_heap_base_min_address,
                                          char *upper_bound,
                                          size_t size,
                                          size_t alignment,
                                          bool large) {
  const size_t attach_range = highest_start - lowest_start;
  // Cap the number of attempts at the number of possible attach points.
  // At least one attempt is possible even for a zero-sized attach range.
  const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
  const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);

  const size_t stepsize = (attach_range == 0) ? // Only one try.
    (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);

  // Try attach points from top to bottom.
  char* attach_point = highest_start;
  while (attach_point >= lowest_start  &&
         attach_point <= highest_start &&  // Avoid wrap around.
         ((_base == NULL) ||
          (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
    try_reserve_heap(size, alignment, large, attach_point);
    attach_point -= stepsize;
  }
}

#define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
#define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
#define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))

// Helper for heap allocation. Returns an array with addresses
// (OS-specific) which are suited for disjoint base mode. Array is
// NULL terminated.
static char** get_attach_addresses_for_disjoint_mode() {
  static uint64_t addresses[] = {
     2 * SIZE_32G,
     3 * SIZE_32G,
     4 * SIZE_32G,
     8 * SIZE_32G,
    10 * SIZE_32G,
     1 * SIZE_64K * SIZE_32G,
     2 * SIZE_64K * SIZE_32G,
     3 * SIZE_64K * SIZE_32G,
     4 * SIZE_64K * SIZE_32G,
    16 * SIZE_64K * SIZE_32G,
    32 * SIZE_64K * SIZE_32G,
    34 * SIZE_64K * SIZE_32G,
    0
  };

  // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This
  // assumes the array is sorted.
  uint i = 0;
  while (addresses[i] != 0 &&
         (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
    i++;
  }
  uint start = i;

  // Avoid more steps than requested.
  i = 0;
  while (addresses[start+i] != 0) {
    if (i == HeapSearchSteps) {
      addresses[start+i] = 0;
      break;
    }
    i++;
  }

  return (char**) &addresses[start];
}

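// Chooses a heap placement that allows the most efficient compressed-oops
// encoding: unscaled and zerobased placements are tried first, then disjoint
// base addresses, and finally an unconstrained reservation.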
void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
  guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
            "can not allocate compressed oop heap for this size");
  guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
  assert(HeapBaseMinAddress > 0, "sanity");

  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  // The necessary attach point alignment for generated wish addresses.
  // This is needed to increase the chance of attaching for mmap and shmat.
  const size_t os_attach_point_alignment =
    AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
    NOT_AIX(os::vm_allocation_granularity());
  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

  char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
  size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
    noaccess_prefix_size(alignment) : 0;

  // Attempt to alloc at user-given address.
  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
    try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
    if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
      release();
    }
  }

  // Keep heap at HeapBaseMinAddress.
  if (_base == NULL) {

    // Try to allocate the heap at addresses that allow efficient oop compression.
    // Different schemes are tried, in order of decreasing optimization potential.
    //
    // For this, try_reserve_heap() is called with the desired heap base addresses.
    // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested.  Still, this might be memory at a useful
    // address. try_reserve_heap() always returns this allocated memory, as only here
    // the criteria for a good heap are checked.

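    // In rough terms (assuming the default object alignment): "unscaled" means
    // the heap end fits below 4G so narrow oops need neither base nor shift,
    // "zerobased" means it fits below 32G so only a shift is needed, and the
    // disjoint-base and heap-based placements need a base register as well.
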
    // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {

      // Calculate the address range within which we try to attach (range of possible start addresses).
      char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
      char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
    }

    // zerobased: Attempt to allocate in the lower 32G.
    // But leave room for the compressed class space, which is allocated above
    // the heap.
    char *zerobased_max = (char *)OopEncodingHeapMax;
    const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
    // For small heaps, save some space for compressed class pointer
    // space so it can be decoded with no base.
    if (UseCompressedClassPointers && !UseSharedSpaces &&
        OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
        (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
      zerobased_max = (char *)OopEncodingHeapMax - class_space;
    }

    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
        ((_base == NULL) ||                        // No previous try succeeded.
         (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.

      // Calculate the address range within which we try to attach (range of possible start addresses).
      char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
      // Need to be careful about size being guaranteed to be less
      // than UnscaledOopHeapMax due to type constraints.
      char *lowest_start = aligned_heap_base_min_address;
      uint64_t unscaled_end = UnscaledOopHeapMax - size;
      if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
        lowest_start = MAX2(lowest_start, (char*)unscaled_end);
      }
      lowest_start = align_up(lowest_start, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, zerobased_max, size, alignment, large);
    }

    // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
    // implement null checks.
    noaccess_prefix = noaccess_prefix_size(alignment);

    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode.
    char** addresses = get_attach_addresses_for_disjoint_mode();
    int i = 0;
    while (addresses[i] &&                                 // End of array not yet reached.
           ((_base == NULL) ||                             // No previous try succeeded.
            (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
             !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
      char* const attach_point = addresses[i];
      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
      try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
      i++;
    }

    // Last, desperate try without any placement.
    if (_base == NULL) {
      log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
      initialize(size + noaccess_prefix, alignment, large, NULL, false);
    }
  }
}

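// Reserves the Java heap. With compressed oops the placement is chosen so that
// narrow oops can be encoded as cheaply as possible, and a noaccess prefix is
// established when the reservation includes one.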
ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {

  if (size == 0) {
    return;
  }

  // Heap size should be aligned to alignment, too.
  guarantee(is_aligned(size, alignment), "set by caller");

  if (UseCompressedOops) {
    initialize_compressed_heap(size, alignment, large);
    if (_size > size) {
      // We allocated heap with noaccess prefix.
      // It can happen we get a zerobased/unscaled heap with noaccess prefix,
      // if we had to try at arbitrary address.
      establish_noaccess_prefix();
    }
  } else {
    initialize(size, alignment, large, NULL, false);
  }

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");

  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }
}

// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

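// Divides the reserved range into three regions: small-page-aligned prefix and
// suffix around a middle region that is expanded and shrunk with
// max_commit_granularity (typically the large page size), then commits
// 'committed_size' bytes.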
bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = align_up(low_boundary(), middle_alignment());
  _middle_high_boundary = align_down(high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it reserved.
  // Caller must release via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size()  const {
  return reserved_size() - committed_size();
}

size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

static void pretouch_expanded_memory(void* start, void* end) {
  assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
  assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");

  os::pretouch_memory(start, end);
}

static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
  if (os::commit_memory(start, size, alignment, executable)) {
    if (pre_touch || AlwaysPreTouch) {
      pretouch_expanded_memory(start, start + size);
    }
    return true;
  }

  debug_only(warning(
      "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
      " size=" SIZE_FORMAT ", executable=%d) failed",
      p2i(start), p2i(start + size), size, executable);)

  return false;
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) {
    return false;
  }

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the region's alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
      return false;
    }
    _lower_high += lower_needs;
  }

  if (middle_needs > 0) {
    assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
      return false;
    }
    _middle_high += middle_needs;
  }

  if (upper_needs > 0) {
    assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
      return false;
    }
    _upper_high += upper_needs;
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
}

void VirtualSpace::print() {
  print_on(tty);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)

class TestReservedSpace : AllStatic {
 public:
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     (char *)NULL); // requested_address

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    if (size < alignment) {
      // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
      assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
      return;
    }

    assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }


  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }

  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp, ag * 4    , true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp, lp        , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};

void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}

#define assert_equals(actual, expected)  \
  assert(actual == expected,             \
         "Got " SIZE_FORMAT " expected " \
         SIZE_FORMAT, actual, expected);

#define assert_ge(value1, value2)                  \
  assert(value1 >= value2,                         \
         "'" #value1 "': " SIZE_FORMAT " '"        \
         #value2 "': " SIZE_FORMAT, value1, value2);

#define assert_lt(value1, value2)                  \
  assert(value1 < value2,                          \
         "'" #value1 "': " SIZE_FORMAT " '"        \
         #value2 "': " SIZE_FORMAT, value1, value2);


class TestVirtualSpace : AllStatic {
  enum TestLargePages {
    Default,
    Disable,
    Reserve,
    Commit
  };

  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
    }
  }

 public:
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                   os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify the behavior if we force VirtualSpace to disable
    // large pages, compared with the Reserve and Commit granularity modes.
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};

void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}

#endif // PRODUCT

#endif