// virtualMemoryTracker.cpp revision 11857:d0fbf661cc16
119304Speter/* 219304Speter * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved. 319304Speter * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 419304Speter * 519304Speter * This code is free software; you can redistribute it and/or modify it 619304Speter * under the terms of the GNU General Public License version 2 only, as 719304Speter * published by the Free Software Foundation. 819304Speter * 919304Speter * This code is distributed in the hope that it will be useful, but WITHOUT 1019304Speter * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1119304Speter * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1219304Speter * version 2 for more details (a copy is included in the LICENSE file that 13254225Speter * accompanied this code). 1419304Speter * 1519304Speter * You should have received a copy of the GNU General Public License version 16254225Speter * 2 along with this work; if not, write to the Free Software Foundation, 1719304Speter * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1819304Speter * 1919304Speter * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 2019304Speter * or visit www.oracle.com if you need additional information or have any 2119304Speter * questions. 2219304Speter * 2319304Speter */ 2419304Speter#include "precompiled.hpp" 2519304Speter 2619304Speter#include "runtime/atomic.hpp" 2719304Speter#include "runtime/os.hpp" 2819304Speter#include "runtime/threadCritical.hpp" 2919304Speter#include "services/memTracker.hpp" 3019304Speter#include "services/virtualMemoryTracker.hpp" 3119304Speter 3219304Spetersize_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)]; 3319304Speter 3419304Spetervoid VirtualMemorySummary::initialize() { 3519304Speter assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check"); 3619304Speter // Use placement operator new to initialize static data area. 
3719304Speter ::new ((void*)_snapshot) VirtualMemorySnapshot(); 3819304Speter} 3919304Speter 4019304SpeterSortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions; 4119304Speter 4219304Speterint compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) { 4319304Speter return r1.compare(r2); 44254225Speter} 4519304Speter 4619304Speterint compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) { 4719304Speter return r1.compare(r2); 4819304Speter} 4919304Speter 5019304Speterbool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) { 5119304Speter assert(addr != NULL, "Invalid address"); 5219304Speter assert(size > 0, "Invalid size"); 53254225Speter assert(contain_region(addr, size), "Not contain this region"); 54254225Speter 55254225Speter if (all_committed()) return true; 5619304Speter 5719304Speter CommittedMemoryRegion committed_rgn(addr, size, stack); 5819304Speter LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.head(); 5919304Speter 6019304Speter while (node != NULL) { 6119304Speter CommittedMemoryRegion* rgn = node->data(); 62254225Speter if (rgn->same_region(addr, size)) { 63254225Speter return true; 6419304Speter } 6519304Speter 6619304Speter if (rgn->adjacent_to(addr, size)) { 6719304Speter // special case to expand prior region if there is no next region 6819304Speter LinkedListNode<CommittedMemoryRegion>* next = node->next(); 6919304Speter if (next == NULL && rgn->call_stack()->equals(stack)) { 7019304Speter VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag()); 7119304Speter // the two adjacent regions have the same call stack, merge them 7219304Speter rgn->expand_region(addr, size); 7319304Speter VirtualMemorySummary::record_committed_memory(rgn->size(), flag()); 7419304Speter return true; 7519304Speter } 7619304Speter } 7719304Speter 7819304Speter if 
(rgn->overlap_region(addr, size)) { 7919304Speter // Clear a space for this region in the case it overlaps with any regions. 80254225Speter remove_uncommitted_region(addr, size); 8119304Speter break; // commit below 8219304Speter } 8319304Speter if (rgn->end() >= addr + size){ 8419304Speter break; 8519304Speter } 8619304Speter node = node->next(); 8719304Speter } 8819304Speter 8919304Speter // New committed region 9019304Speter VirtualMemorySummary::record_committed_memory(size, flag()); 9119304Speter return add_committed_region(committed_rgn); 9219304Speter } 9319304Speter 9419304Spetervoid ReservedMemoryRegion::set_all_committed(bool b) { 9519304Speter if (all_committed() != b) { 9619304Speter _all_committed = b; 9719304Speter if (b) { 9819304Speter VirtualMemorySummary::record_committed_memory(size(), flag()); 9919304Speter } 10019304Speter } 10119304Speter} 10219304Speter 103254225Speterbool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node, 104254225Speter address addr, size_t size) { 105254225Speter assert(addr != NULL, "Invalid address"); 10619304Speter assert(size > 0, "Invalid size"); 10719304Speter 10819304Speter CommittedMemoryRegion* rgn = node->data(); 109254225Speter assert(rgn->contain_region(addr, size), "Has to be contained"); 110254225Speter assert(!rgn->same_region(addr, size), "Can not be the same region"); 111254225Speter 11219304Speter if (rgn->base() == addr || 11319304Speter rgn->end() == addr + size) { 11419304Speter rgn->exclude_region(addr, size); 11519304Speter return true; 11619304Speter } else { 11719304Speter // split this region 11819304Speter address top =rgn->end(); 11919304Speter // use this region for lower part 12019304Speter size_t exclude_size = rgn->end() - addr; 12119304Speter rgn->exclude_region(addr, exclude_size); 12219304Speter 12319304Speter // higher part 12419304Speter address high_base = addr + size; 12519304Speter size_t high_size = top - high_base; 126254225Speter 
12719304Speter CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack()); 12819304Speter LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn); 12919304Speter assert(high_node == NULL || node->next() == high_node, "Should be right after"); 13019304Speter return (high_node != NULL); 13119304Speter } 13219304Speter 13319304Speter return false; 13419304Speter} 13519304Speter 13619304Speterbool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) { 13719304Speter // uncommit stack guard pages 13819304Speter if (flag() == mtThreadStack && !same_region(addr, sz)) { 13919304Speter return true; 14019304Speter } 14119304Speter 14219304Speter assert(addr != NULL, "Invalid address"); 143254225Speter assert(sz > 0, "Invalid size"); 14419304Speter 14519304Speter if (all_committed()) { 146254225Speter assert(_committed_regions.is_empty(), "Sanity check"); 147254225Speter assert(contain_region(addr, sz), "Reserved region does not contain this region"); 148254225Speter set_all_committed(false); 14919304Speter VirtualMemorySummary::record_uncommitted_memory(sz, flag()); 15019304Speter if (same_region(addr, sz)) { 15119304Speter return true; 15219304Speter } else { 15319304Speter CommittedMemoryRegion rgn(base(), size(), *call_stack()); 15419304Speter if (rgn.base() == addr || rgn.end() == (addr + sz)) { 15519304Speter rgn.exclude_region(addr, sz); 15619304Speter return add_committed_region(rgn); 15719304Speter } else { 15819304Speter // split this region 15919304Speter // top of the whole region 16019304Speter address top =rgn.end(); 16119304Speter // use this region for lower part 16219304Speter size_t exclude_size = rgn.end() - addr; 16319304Speter rgn.exclude_region(addr, exclude_size); 16419304Speter if (add_committed_region(rgn)) { 16519304Speter // higher part 16619304Speter address high_base = addr + sz; 16719304Speter size_t high_size = top - high_base; 16819304Speter CommittedMemoryRegion high_rgn(high_base, 
high_size, NativeCallStack::EMPTY_STACK); 16919304Speter return add_committed_region(high_rgn); 17019304Speter } else { 17119304Speter return false; 17219304Speter } 17319304Speter } 17419304Speter } 17519304Speter } else { 17619304Speter CommittedMemoryRegion del_rgn(addr, sz, *call_stack()); 17719304Speter address end = addr + sz; 17819304Speter 17919304Speter LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head(); 18019304Speter LinkedListNode<CommittedMemoryRegion>* prev = NULL; 18119304Speter CommittedMemoryRegion* crgn; 18219304Speter 18319304Speter while (head != NULL) { 18419304Speter crgn = head->data(); 18519304Speter 18619304Speter if (crgn->same_region(addr, sz)) { 18719304Speter VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag()); 18819304Speter _committed_regions.remove_after(prev); 18919304Speter return true; 19019304Speter } 191254225Speter 192254225Speter // del_rgn contains crgn 19319304Speter if (del_rgn.contain_region(crgn->base(), crgn->size())) { 19419304Speter VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag()); 195254225Speter head = head->next(); 196254225Speter _committed_regions.remove_after(prev); 19719304Speter continue; // don't update head or prev 19819304Speter } 199254225Speter 200254225Speter // Found addr in the current crgn. There are 2 subcases: 201254225Speter if (crgn->contain_address(addr)) { 202254225Speter 203254225Speter // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn) 204254225Speter if (crgn->contain_address(end - 1)) { 20519304Speter VirtualMemorySummary::record_uncommitted_memory(sz, flag()); 20619304Speter return remove_uncommitted_region(head, addr, sz); // done! 207254225Speter } else { 208254225Speter // (2) Did not find del_rgn's end in crgn. 
209254225Speter size_t size = crgn->end() - del_rgn.base(); 210254225Speter crgn->exclude_region(addr, size); 211254225Speter VirtualMemorySummary::record_uncommitted_memory(size, flag()); 21219304Speter } 21319304Speter 214254225Speter } else if (crgn->contain_address(end - 1)) { 21519304Speter // Found del_rgn's end, but not its base addr. 21619304Speter size_t size = del_rgn.end() - crgn->base(); 21719304Speter crgn->exclude_region(crgn->base(), size); 21819304Speter VirtualMemorySummary::record_uncommitted_memory(size, flag()); 21919304Speter return true; // should be done if the list is sorted properly! 220254225Speter } 22119304Speter 22219304Speter prev = head; 223254225Speter head = head->next(); 224254225Speter } 225254225Speter } 22619304Speter 22719304Speter return true; 22819304Speter} 22919304Speter 23019304Spetervoid ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) { 23119304Speter assert(addr != NULL, "Invalid address"); 23219304Speter 23319304Speter // split committed regions 23419304Speter LinkedListNode<CommittedMemoryRegion>* head = 23519304Speter _committed_regions.head(); 23619304Speter LinkedListNode<CommittedMemoryRegion>* prev = NULL; 23719304Speter 23819304Speter while (head != NULL) { 23919304Speter if (head->data()->base() >= addr) { 24019304Speter break; 24119304Speter } 24219304Speter prev = head; 24319304Speter head = head->next(); 24419304Speter } 24519304Speter 24619304Speter if (head != NULL) { 24719304Speter if (prev != NULL) { 24819304Speter prev->set_next(head->next()); 24919304Speter } else { 25019304Speter _committed_regions.set_head(NULL); 25119304Speter } 25219304Speter } 25319304Speter 25419304Speter rgn._committed_regions.set_head(head); 25519304Speter} 25619304Speter 25719304Spetersize_t ReservedMemoryRegion::committed_size() const { 25819304Speter if (all_committed()) { 25919304Speter return size(); 26019304Speter } else { 26119304Speter size_t committed = 0; 26219304Speter 
LinkedListNode<CommittedMemoryRegion>* head = 26319304Speter _committed_regions.head(); 26419304Speter while (head != NULL) { 26519304Speter committed += head->data()->size(); 26619304Speter head = head->next(); 26719304Speter } 268254225Speter return committed; 26919304Speter } 27019304Speter} 271254225Speter 272254225Spetervoid ReservedMemoryRegion::set_flag(MEMFLAGS f) { 27319304Speter assert((flag() == mtNone || flag() == f), "Overwrite memory type"); 274254225Speter if (flag() != f) { 275254225Speter VirtualMemorySummary::move_reserved_memory(flag(), f, size()); 276254225Speter VirtualMemorySummary::move_committed_memory(flag(), f, committed_size()); 27719304Speter _flag = f; 27819304Speter } 27919304Speter} 28019304Speter 28119304Speterbool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) { 28219304Speter if (level >= NMT_summary) { 28319304Speter VirtualMemorySummary::initialize(); 284254225Speter } 28519304Speter return true; 28619304Speter} 287254225Speter 28819304Speterbool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) { 28919304Speter if (level >= NMT_summary) { 29019304Speter _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT) 29119304Speter SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>(); 29219304Speter return (_reserved_regions != NULL); 29319304Speter } 29419304Speter return true; 295254225Speter} 29619304Speter 29719304Speterbool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size, 29819304Speter const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) { 29919304Speter assert(base_addr != NULL, "Invalid address"); 30019304Speter assert(size > 0, "Invalid size"); 30119304Speter assert(_reserved_regions != NULL, "Sanity check"); 30219304Speter ReservedMemoryRegion rgn(base_addr, size, stack, flag); 30319304Speter ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); 30419304Speter LinkedListNode<ReservedMemoryRegion>* node; 305254225Speter if 
(reserved_rgn == NULL) { 30619304Speter VirtualMemorySummary::record_reserved_memory(size, flag); 30719304Speter node = _reserved_regions->add(rgn); 30819304Speter if (node != NULL) { 30919304Speter node->data()->set_all_committed(all_committed); 31019304Speter return true; 31119304Speter } else { 31219304Speter return false; 31319304Speter } 31419304Speter } else { 31519304Speter if (reserved_rgn->same_region(base_addr, size)) { 31619304Speter reserved_rgn->set_call_stack(stack); 31719304Speter reserved_rgn->set_flag(flag); 31819304Speter return true; 31919304Speter } else if (reserved_rgn->adjacent_to(base_addr, size)) { 32019304Speter VirtualMemorySummary::record_reserved_memory(size, flag); 32119304Speter reserved_rgn->expand_region(base_addr, size); 32219304Speter reserved_rgn->set_call_stack(stack); 32319304Speter return true; 32419304Speter } else { 32519304Speter // Overlapped reservation. 32619304Speter // It can happen when the regions are thread stacks, as JNI 32719304Speter // thread does not detach from VM before exits, and leads to 32819304Speter // leak JavaThread object 32919304Speter if (reserved_rgn->flag() == mtThreadStack) { 33019304Speter guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached"); 33119304Speter // Overwrite with new region 33219304Speter 33319304Speter // Release old region 33419304Speter VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag()); 33519304Speter VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag()); 33619304Speter 33719304Speter // Add new region 33819304Speter VirtualMemorySummary::record_reserved_memory(rgn.size(), flag); 33919304Speter 34019304Speter *reserved_rgn = rgn; 34119304Speter return true; 34219304Speter } 34319304Speter 34419304Speter // CDS mapping region. 34519304Speter // CDS reserves the whole region for mapping CDS archive, then maps each section into the region. 
34619304Speter // NMT reports CDS as a whole. 34719304Speter if (reserved_rgn->flag() == mtClassShared) { 34819304Speter assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region"); 34919304Speter return true; 35019304Speter } 35119304Speter 35219304Speter // Mapped CDS string region. 35319304Speter // The string region(s) is part of the java heap. 35419304Speter if (reserved_rgn->flag() == mtJavaHeap) { 35519304Speter assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region"); 35619304Speter return true; 35719304Speter } 358254225Speter 35919304Speter ShouldNotReachHere(); 36019304Speter return false; 36119304Speter } 36219304Speter } 363254225Speter} 36419304Speter 36519304Spetervoid VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) { 36619304Speter assert(addr != NULL, "Invalid address"); 36719304Speter assert(_reserved_regions != NULL, "Sanity check"); 36819304Speter 36919304Speter ReservedMemoryRegion rgn(addr, 1); 37019304Speter ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); 37119304Speter if (reserved_rgn != NULL) { 37219304Speter assert(reserved_rgn->contain_address(addr), "Containment"); 37319304Speter if (reserved_rgn->flag() != flag) { 37419304Speter assert(reserved_rgn->flag() == mtNone, "Overwrite memory type"); 37519304Speter reserved_rgn->set_flag(flag); 37619304Speter } 37719304Speter } 37819304Speter} 37919304Speter 38019304Speterbool VirtualMemoryTracker::add_committed_region(address addr, size_t size, 38119304Speter const NativeCallStack& stack) { 38219304Speter assert(addr != NULL, "Invalid address"); 38319304Speter assert(size > 0, "Invalid size"); 38419304Speter assert(_reserved_regions != NULL, "Sanity check"); 385254225Speter 386254225Speter ReservedMemoryRegion rgn(addr, size); 387254225Speter ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); 38819304Speter 389254225Speter 
assert(reserved_rgn != NULL, "No reserved region"); 390254225Speter assert(reserved_rgn->contain_region(addr, size), "Not completely contained"); 391254225Speter bool result = reserved_rgn->add_committed_region(addr, size, stack); 392254225Speter return result; 39319304Speter} 39419304Speter 39519304Speterbool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) { 396254225Speter assert(addr != NULL, "Invalid address"); 39719304Speter assert(size > 0, "Invalid size"); 39819304Speter assert(_reserved_regions != NULL, "Sanity check"); 39919304Speter 400254225Speter ReservedMemoryRegion rgn(addr, size); 40119304Speter ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); 40219304Speter assert(reserved_rgn != NULL, "No reserved region"); 40319304Speter assert(reserved_rgn->contain_region(addr, size), "Not completely contained"); 404254225Speter bool result = reserved_rgn->remove_uncommitted_region(addr, size); 40519304Speter return result; 40619304Speter} 407254225Speter 408254225Speterbool VirtualMemoryTracker::remove_released_region(address addr, size_t size) { 40919304Speter assert(addr != NULL, "Invalid address"); 41019304Speter assert(size > 0, "Invalid size"); 41119304Speter assert(_reserved_regions != NULL, "Sanity check"); 41219304Speter 41319304Speter ReservedMemoryRegion rgn(addr, size); 41419304Speter ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); 41519304Speter 41619304Speter assert(reserved_rgn != NULL, "No reserved region"); 41719304Speter 41819304Speter // uncommit regions within the released region 41919304Speter if (!reserved_rgn->remove_uncommitted_region(addr, size)) { 42019304Speter return false; 42119304Speter } 42219304Speter 42319304Speter if (reserved_rgn->flag() == mtClassShared && 42419304Speter reserved_rgn->contain_region(addr, size) && 42519304Speter !reserved_rgn->same_region(addr, size)) { 42619304Speter // This is an unmapped CDS region, which is part of the reserved shared 
42719304Speter // memory region. 42819304Speter // See special handling in VirtualMemoryTracker::add_reserved_region also. 42919304Speter return true; 43019304Speter } 431 432 VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag()); 433 434 if (reserved_rgn->same_region(addr, size)) { 435 return _reserved_regions->remove(rgn); 436 } else { 437 assert(reserved_rgn->contain_region(addr, size), "Not completely contained"); 438 if (reserved_rgn->base() == addr || 439 reserved_rgn->end() == addr + size) { 440 reserved_rgn->exclude_region(addr, size); 441 return true; 442 } else { 443 address top = reserved_rgn->end(); 444 address high_base = addr + size; 445 ReservedMemoryRegion high_rgn(high_base, top - high_base, 446 *reserved_rgn->call_stack(), reserved_rgn->flag()); 447 448 // use original region for lower region 449 reserved_rgn->exclude_region(addr, top - addr); 450 LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn); 451 if (new_rgn == NULL) { 452 return false; 453 } else { 454 reserved_rgn->move_committed_regions(addr, *new_rgn->data()); 455 return true; 456 } 457 } 458 } 459} 460 461 462bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) { 463 assert(_reserved_regions != NULL, "Sanity check"); 464 ThreadCritical tc; 465 // Check that the _reserved_regions haven't been deleted. 466 if (_reserved_regions != NULL) { 467 LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head(); 468 while (head != NULL) { 469 const ReservedMemoryRegion* rgn = head->peek(); 470 if (!walker->do_allocation_site(rgn)) { 471 return false; 472 } 473 head = head->next(); 474 } 475 } 476 return true; 477} 478 479// Transition virtual memory tracking level. 
480bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) { 481 assert (from != NMT_minimal, "cannot convert from the lowest tracking level to anything"); 482 if (to == NMT_minimal) { 483 assert(from == NMT_summary || from == NMT_detail, "Just check"); 484 // Clean up virtual memory tracking data structures. 485 ThreadCritical tc; 486 // Check for potential race with other thread calling transition 487 if (_reserved_regions != NULL) { 488 delete _reserved_regions; 489 _reserved_regions = NULL; 490 } 491 } 492 493 return true; 494} 495