1/*
2 * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24#include "precompiled.hpp"
25
26#include "runtime/atomic.hpp"
27#include "runtime/os.hpp"
28#include "runtime/threadCritical.hpp"
29#include "services/memTracker.hpp"
30#include "services/virtualMemoryTracker.hpp"
31
// Raw static storage backing the VirtualMemorySnapshot singleton.  The object
// is constructed into this buffer with placement new in
// VirtualMemorySummary::initialize(), avoiding a dynamic static initializer.
size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
33
34void VirtualMemorySummary::initialize() {
35  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
36  // Use placement operator new to initialize static data area.
37  ::new ((void*)_snapshot) VirtualMemorySnapshot();
38}
39
// Sorted list (by base address) of all tracked reserved regions.  Allocated
// lazily in late_initialize() and deleted in transition() when tracking is
// dropped to NMT_minimal.
SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;
41
42int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
43  return r1.compare(r2);
44}
45
46int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
47  return r1.compare(r2);
48}
49
50bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
51  assert(addr != NULL, "Invalid address");
52  assert(size > 0, "Invalid size");
53  assert(contain_region(addr, size), "Not contain this region");
54
55  if (all_committed()) return true;
56
57  CommittedMemoryRegion committed_rgn(addr, size, stack);
58  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.head();
59
60  while (node != NULL) {
61    CommittedMemoryRegion* rgn = node->data();
62    if (rgn->same_region(addr, size)) {
63      return true;
64    }
65
66    if (rgn->adjacent_to(addr, size)) {
67      // special case to expand prior region if there is no next region
68      LinkedListNode<CommittedMemoryRegion>* next = node->next();
69      if (next == NULL && rgn->call_stack()->equals(stack)) {
70        VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
71        // the two adjacent regions have the same call stack, merge them
72        rgn->expand_region(addr, size);
73        VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
74        return true;
75      }
76      }
77
78    if (rgn->overlap_region(addr, size)) {
79      // Clear a space for this region in the case it overlaps with any regions.
80      remove_uncommitted_region(addr, size);
81      break;  // commit below
82    }
83    if (rgn->end() >= addr + size){
84      break;
85    }
86    node = node->next();
87  }
88
89    // New committed region
90    VirtualMemorySummary::record_committed_memory(size, flag());
91    return add_committed_region(committed_rgn);
92  }
93
94void ReservedMemoryRegion::set_all_committed(bool b) {
95  if (all_committed() != b) {
96    _all_committed = b;
97    if (b) {
98      VirtualMemorySummary::record_committed_memory(size(), flag());
99    }
100  }
101}
102
103bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
104  address addr, size_t size) {
105  assert(addr != NULL, "Invalid address");
106  assert(size > 0, "Invalid size");
107
108  CommittedMemoryRegion* rgn = node->data();
109  assert(rgn->contain_region(addr, size), "Has to be contained");
110  assert(!rgn->same_region(addr, size), "Can not be the same region");
111
112  if (rgn->base() == addr ||
113      rgn->end() == addr + size) {
114    rgn->exclude_region(addr, size);
115    return true;
116  } else {
117    // split this region
118    address top =rgn->end();
119    // use this region for lower part
120    size_t exclude_size = rgn->end() - addr;
121    rgn->exclude_region(addr, exclude_size);
122
123    // higher part
124    address high_base = addr + size;
125    size_t  high_size = top - high_base;
126
127    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
128    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
129    assert(high_node == NULL || node->next() == high_node, "Should be right after");
130    return (high_node != NULL);
131  }
132
133  return false;
134}
135
// Uncommit [addr, addr + sz) within this reserved region, keeping the
// committed-region list and the VirtualMemorySummary counters in sync.
// Returns true on success, including cases where nothing needs doing.
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  // uncommit stack guard pages: thread-stack regions are tracked as a whole,
  // so a partial uncommit of a stack region is deliberately ignored
  if (flag() == mtThreadStack && !same_region(addr, sz)) {
    return true;
  }

  assert(addr != NULL, "Invalid address");
  assert(sz > 0, "Invalid size");

  if (all_committed()) {
    // The region was tracked as fully committed with no explicit list.
    // Drop that flag, account the uncommit, and re-add any still-committed
    // remainder as explicit committed regions.
    assert(_committed_regions.is_empty(), "Sanity check");
    assert(contain_region(addr, sz), "Reserved region does not contain this region");
    set_all_committed(false);
    VirtualMemorySummary::record_uncommitted_memory(sz, flag());
    if (same_region(addr, sz)) {
      // The entire reservation was uncommitted; nothing remains committed.
      return true;
    } else {
      // Model the previously committed whole region, then carve out the range.
      CommittedMemoryRegion rgn(base(), size(), *call_stack());
      if (rgn.base() == addr || rgn.end() == (addr + sz)) {
        // Range abuts one end: a single remainder region survives.
        rgn.exclude_region(addr, sz);
        return add_committed_region(rgn);
      } else {
        // split this region
        // top of the whole region
        address top =rgn.end();
        // use this region for lower part
        size_t exclude_size = rgn.end() - addr;
        rgn.exclude_region(addr, exclude_size);
        if (add_committed_region(rgn)) {
          // higher part
          address high_base = addr + sz;
          size_t  high_size = top - high_base;
          // NOTE(review): the upper remainder is attributed to an empty call
          // stack, losing the original committer for detail reports --
          // confirm this is intended.
          CommittedMemoryRegion high_rgn(high_base, high_size, NativeCallStack::EMPTY_STACK);
          return add_committed_region(high_rgn);
        } else {
          return false;
        }
      }
    }
  } else {
    // Explicit committed-region list: walk it (sorted by base address) and
    // carve [addr, end) out of every region it touches.
    CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
    address end = addr + sz;

    LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
    LinkedListNode<CommittedMemoryRegion>* prev = NULL;
    CommittedMemoryRegion* crgn;

    while (head != NULL) {
      crgn = head->data();

      // Exact match: drop the whole node and we are done.
      if (crgn->same_region(addr, sz)) {
        VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
          _committed_regions.remove_after(prev);
          return true;
      }

      // del_rgn contains crgn: remove crgn entirely and keep scanning, since
      // del_rgn may span further regions.
      if (del_rgn.contain_region(crgn->base(), crgn->size())) {
          VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
          head = head->next();
          _committed_regions.remove_after(prev);
        continue;  // don't update head or prev
        }

      // Found addr in the current crgn. There are 2 subcases:
      if (crgn->contain_address(addr)) {

        // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
        if (crgn->contain_address(end - 1)) {
          VirtualMemorySummary::record_uncommitted_memory(sz, flag());
          return remove_uncommitted_region(head, addr, sz); // done!
        } else {
          // (2) Did not find del_rgn's end in crgn: trim crgn's tail and
          // keep walking to uncommit the rest of del_rgn.
          size_t size = crgn->end() - del_rgn.base();
          crgn->exclude_region(addr, size);
          VirtualMemorySummary::record_uncommitted_memory(size, flag());
      }

      } else if (crgn->contain_address(end - 1)) {
      // Found del_rgn's end, but not its base addr: trim crgn's head.
        size_t size = del_rgn.end() - crgn->base();
        crgn->exclude_region(crgn->base(), size);
        VirtualMemorySummary::record_uncommitted_memory(size, flag());
        return true;  // should be done if the list is sorted properly!
      }

      prev = head;
      head = head->next();
    }
  }

  return true;
}
229
230void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
231  assert(addr != NULL, "Invalid address");
232
233  // split committed regions
234  LinkedListNode<CommittedMemoryRegion>* head =
235    _committed_regions.head();
236  LinkedListNode<CommittedMemoryRegion>* prev = NULL;
237
238  while (head != NULL) {
239    if (head->data()->base() >= addr) {
240      break;
241    }
242    prev = head;
243    head = head->next();
244  }
245
246  if (head != NULL) {
247    if (prev != NULL) {
248      prev->set_next(head->next());
249    } else {
250      _committed_regions.set_head(NULL);
251    }
252  }
253
254  rgn._committed_regions.set_head(head);
255}
256
257size_t ReservedMemoryRegion::committed_size() const {
258  if (all_committed()) {
259    return size();
260  } else {
261    size_t committed = 0;
262    LinkedListNode<CommittedMemoryRegion>* head =
263      _committed_regions.head();
264    while (head != NULL) {
265      committed += head->data()->size();
266      head = head->next();
267    }
268    return committed;
269  }
270}
271
272void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
273  assert((flag() == mtNone || flag() == f), "Overwrite memory type");
274  if (flag() != f) {
275    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
276    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
277    _flag = f;
278  }
279}
280
281bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
282  if (level >= NMT_summary) {
283    VirtualMemorySummary::initialize();
284  }
285  return true;
286}
287
288bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
289  if (level >= NMT_summary) {
290    _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
291      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
292    return (_reserved_regions != NULL);
293  }
294  return true;
295}
296
// Register a reserved region [base_addr, base_addr + size).  Handles
// re-reservation of the same range, expansion of an adjacent region, and a
// set of known-benign overlap cases (thread stacks, CDS, java heap).
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
   const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");
  ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  LinkedListNode<ReservedMemoryRegion>* node;
  if (reserved_rgn == NULL) {
    // Brand-new region: account it and insert into the sorted list.
    VirtualMemorySummary::record_reserved_memory(size, flag);
    node = _reserved_regions->add(rgn);
    if (node != NULL) {
      node->data()->set_all_committed(all_committed);
      return true;
    } else {
      return false;
    }
  } else {
    if (reserved_rgn->same_region(base_addr, size)) {
      // Same range reserved again: refresh attribution only.
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
      // Adjacent reservation: grow the existing region to cover it.
      VirtualMemorySummary::record_reserved_memory(size, flag);
      reserved_rgn->expand_region(base_addr, size);
      reserved_rgn->set_call_stack(stack);
      return true;
    } else {
      // Overlapped reservation.
      // It can happen when the regions are thread stacks, as JNI
      // thread does not detach from VM before exits, and leads to
      // leak JavaThread object
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region

        // Release old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping CDS archive, then maps each section into the region.
      // NMT reports CDS as a whole.
      if (reserved_rgn->flag() == mtClassShared) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      // Mapped CDS string region.
      // The string region(s) is part of the java heap.
      if (reserved_rgn->flag() == mtJavaHeap) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }

      // Any other overlap indicates a tracking inconsistency.
      ShouldNotReachHere();
      return false;
    }
  }
}
364
365void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
366  assert(addr != NULL, "Invalid address");
367  assert(_reserved_regions != NULL, "Sanity check");
368
369  ReservedMemoryRegion   rgn(addr, 1);
370  ReservedMemoryRegion*  reserved_rgn = _reserved_regions->find(rgn);
371  if (reserved_rgn != NULL) {
372    assert(reserved_rgn->contain_address(addr), "Containment");
373    if (reserved_rgn->flag() != flag) {
374      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type");
375      reserved_rgn->set_flag(flag);
376    }
377  }
378}
379
380bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
381  const NativeCallStack& stack) {
382  assert(addr != NULL, "Invalid address");
383  assert(size > 0, "Invalid size");
384  assert(_reserved_regions != NULL, "Sanity check");
385
386  ReservedMemoryRegion  rgn(addr, size);
387  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
388
389  assert(reserved_rgn != NULL, "No reserved region");
390  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
391  bool result = reserved_rgn->add_committed_region(addr, size, stack);
392  return result;
393}
394
395bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
396  assert(addr != NULL, "Invalid address");
397  assert(size > 0, "Invalid size");
398  assert(_reserved_regions != NULL, "Sanity check");
399
400  ReservedMemoryRegion  rgn(addr, size);
401  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
402  assert(reserved_rgn != NULL, "No reserved region");
403  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
404  bool result = reserved_rgn->remove_uncommitted_region(addr, size);
405  return result;
406}
407
// Handle a release (unreserve) of [addr, addr + size): uncommit any
// committed parts first, then shrink, split, or remove the reserved region
// depending on where the released range lies within it.
bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");

  // uncommit regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  if (reserved_rgn->flag() == mtClassShared &&
      reserved_rgn->contain_region(addr, size) &&
      !reserved_rgn->same_region(addr, size)) {
    // This is an unmapped CDS region, which is part of the reserved shared
    // memory region.
    // See special handling in VirtualMemoryTracker::add_reserved_region also.
    return true;
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  if (reserved_rgn->same_region(addr, size)) {
    // The entire reservation was released: drop it from the list.
    return _reserved_regions->remove(rgn);
  } else {
    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
    if (reserved_rgn->base() == addr ||
        reserved_rgn->end() == addr + size) {
        // Released range abuts one end: shrink the region in place.
        reserved_rgn->exclude_region(addr, size);
      return true;
    } else {
      // Released range is in the middle: keep the lower part in this region
      // and add a new region for the upper part.
      address top = reserved_rgn->end();
      address high_base = addr + size;
      ReservedMemoryRegion high_rgn(high_base, top - high_base,
        *reserved_rgn->call_stack(), reserved_rgn->flag());

      // use original region for lower region
      reserved_rgn->exclude_region(addr, top - addr);
      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
      if (new_rgn == NULL) {
        return false;
      } else {
        // Committed regions above the split now belong to the upper region.
        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
        return true;
      }
    }
  }
}
460
461
462bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
463  assert(_reserved_regions != NULL, "Sanity check");
464  ThreadCritical tc;
465  // Check that the _reserved_regions haven't been deleted.
466  if (_reserved_regions != NULL) {
467    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
468    while (head != NULL) {
469      const ReservedMemoryRegion* rgn = head->peek();
470      if (!walker->do_allocation_site(rgn)) {
471        return false;
472      }
473      head = head->next();
474    }
475   }
476  return true;
477}
478
479// Transition virtual memory tracking level.
480bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
481  assert (from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
482  if (to == NMT_minimal) {
483    assert(from == NMT_summary || from == NMT_detail, "Just check");
484    // Clean up virtual memory tracking data structures.
485    ThreadCritical tc;
486    // Check for potential race with other thread calling transition
487    if (_reserved_regions != NULL) {
488      delete _reserved_regions;
489      _reserved_regions = NULL;
490    }
491  }
492
493  return true;
494}
495