/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "classfile/systemDictionary.hpp"
#include "memory/allocation.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"

// Static table mapping each memory type flag to the human-readable name
// used in NMT baseline reports. The table order defines the index returned
// by flag2index() and searched by type2name().
MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
  {mtJavaHeap,   "Java Heap"},
  {mtClass,      "Class"},
  {mtThreadStack,"Thread Stack"},
  {mtThread,     "Thread"},
  {mtCode,       "Code"},
  {mtGC,         "GC"},
  {mtCompiler,   "Compiler"},
  {mtInternal,   "Internal"},
  {mtOther,      "Other"},
  {mtSymbol,     "Symbol"},
  {mtNMT,        "Memory Tracking"},
  {mtChunk,      "Pooled Free Chunks"},
  {mtClassShared,"Shared spaces for classes"},
  {mtNone,       "Unknown"}  // It can happen when type tagging records are lagging
                             // behind
};
4742104Sphk
4829024SbdeMemBaseline::MemBaseline() {
494910Swollman  _baselined = false;
5040008Sjoerg
5140008Sjoerg  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
5240008Sjoerg    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
5340008Sjoerg    _vm_data[index].set_type(MemType2NameMap[index]._flag);
5430300Sjoerg    _arena_data[index].set_type(MemType2NameMap[index]._flag);
5540008Sjoerg  }
564910Swollman
574910Swollman  _malloc_cs = NULL;
584910Swollman  _vm_cs = NULL;
594910Swollman  _vm_map = NULL;
6042104Sphk
6188534Sjoerg  _number_of_classes = 0;
6288534Sjoerg  _number_of_threads = 0;
6388534Sjoerg}
6488534Sjoerg
654910Swollman
6640008Sjoergvoid MemBaseline::clear() {
6740008Sjoerg  if (_malloc_cs != NULL) {
6840008Sjoerg    delete _malloc_cs;
6942104Sphk    _malloc_cs = NULL;
7030300Sjoerg  }
7130300Sjoerg
724910Swollman  if (_vm_cs != NULL) {
734910Swollman    delete _vm_cs;
744910Swollman    _vm_cs = NULL;
754910Swollman  }
764910Swollman
774910Swollman  if (_vm_map != NULL) {
7840008Sjoerg    delete _vm_map;
7940008Sjoerg    _vm_map = NULL;
8040008Sjoerg  }
8140008Sjoerg
8240008Sjoerg  reset();
8332350Seivind}
8440008Sjoerg
854910Swollman
864910Swollmanvoid MemBaseline::reset() {
8711819Sjulian  _baselined = false;
8811819Sjulian  _total_vm_reserved = 0;
8911819Sjulian  _total_vm_committed = 0;
9011819Sjulian  _total_malloced = 0;
9111819Sjulian  _number_of_classes = 0;
924910Swollman
934910Swollman  if (_malloc_cs != NULL) _malloc_cs->clear();
944910Swollman  if (_vm_cs != NULL) _vm_cs->clear();
954910Swollman  if (_vm_map != NULL) _vm_map->clear();
964910Swollman
974910Swollman  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
984910Swollman    _malloc_data[index].clear();
9942065Sphk    _vm_data[index].clear();
10042064Sphk    _arena_data[index].clear();
10142064Sphk  }
10242104Sphk}
10340008Sjoerg
10442064SphkMemBaseline::~MemBaseline() {
10542064Sphk  clear();
10642104Sphk}
10740008Sjoerg
10842104Sphk// baseline malloc'd memory records, generate overall summary and summaries by
1094910Swollman// memory types
1104910Swollmanbool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
11125944Sjoerg  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
11225944Sjoerg  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
11325944Sjoerg  size_t used_arena_size = 0;
11425955Sjoerg  int index;
11525944Sjoerg  while (malloc_ptr != NULL) {
11625944Sjoerg    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
11725944Sjoerg    size_t size = malloc_ptr->size();
11825955Sjoerg    _total_malloced += size;
11925955Sjoerg    _malloc_data[index].inc(size);
12025955Sjoerg    if (MemPointerRecord::is_arena_record(malloc_ptr->flags())) {
12130300Sjoerg      // see if arena size record present
12230300Sjoerg      MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
12330300Sjoerg      if (MemPointerRecord::is_arena_size_record(next_malloc_ptr->flags())) {
12430300Sjoerg        assert(next_malloc_ptr->is_size_record_of_arena(malloc_ptr), "arena records do not match");
12530300Sjoerg        size = next_malloc_ptr->size();
12630300Sjoerg        _arena_data[index].inc(size);
12730300Sjoerg        used_arena_size += size;
12830300Sjoerg        malloc_itr.next();
12930300Sjoerg      }
13025944Sjoerg    }
13125944Sjoerg    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
13225955Sjoerg  }
13325955Sjoerg
13445152Sphk  // substract used arena size to get size of arena chunk in free list
13525944Sjoerg  index = flag2index(mtChunk);
13630300Sjoerg  _malloc_data[index].reduce(used_arena_size);
13730300Sjoerg  // we really don't know how many chunks in free list, so just set to
13830300Sjoerg  // 0
13930300Sjoerg  _malloc_data[index].overwrite_counter(0);
14030300Sjoerg
14130300Sjoerg  return true;
14288534Sjoerg}
14388534Sjoerg
14478064Sume// baseline mmap'd memory records, generate overall summary and summaries by
14530300Sjoerg// memory types
14630300Sjoergbool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
14730300Sjoerg  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
14830300Sjoerg  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
14978064Sume  int index;
1504910Swollman  while (vm_ptr != NULL) {
15125944Sjoerg    if (vm_ptr->is_reserved_region()) {
15225944Sjoerg      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
15325944Sjoerg    // we use the number of thread stack to count threads
15425944Sjoerg      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
15525944Sjoerg      _number_of_threads ++;
15625944Sjoerg    }
15725944Sjoerg      _total_vm_reserved += vm_ptr->size();
15825944Sjoerg      _vm_data[index].inc(vm_ptr->size(), 0);
15925944Sjoerg    } else {
16025944Sjoerg      _total_vm_committed += vm_ptr->size();
16125944Sjoerg      _vm_data[index].inc(0, vm_ptr->size());
1624910Swollman    }
16330300Sjoerg    vm_ptr = (VMMemRegion*)vm_itr.next();
16430300Sjoerg  }
16530300Sjoerg  return true;
16630300Sjoerg}
16730300Sjoerg
// baseline malloc'd memory by callsite: aggregate malloc records per
// allocation pc, but only the callsites with memory allocation over 1KB
// are stored. Returns false on native out-of-memory.
bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  // NOTE(review): 'current()' is captured before the sort below; it appears to
  // point at slot 0 of the array whose contents are re-sorted in place, so the
  // walk visits records in callsite pc order — confirm against the iterator
  // implementation.
  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
  MallocCallsitePointer malloc_callsite;

  // initialize malloc callsite array (reuse it if already allocated)
  if (_malloc_cs == NULL) {
    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
    // out of native memory
    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
      return false;
    }
  } else {
    _malloc_cs->clear();
  }

  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);

  // sort into callsite pc order. Details are aggregated by callsites
  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
  bool ret = true;

  // baseline memory that is totaled over 1 KB
  while (malloc_ptr != NULL) {
    // arena size records are bookkeeping entries, not allocations
    if (!MemPointerRecord::is_arena_size_record(malloc_ptr->flags())) {
      // skip thread stacks
      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
        if (malloc_callsite.addr() != malloc_ptr->pc()) {
          // pc changed: flush the previous callsite aggregate if it reached 1KB
          if ((malloc_callsite.amount()/K) > 0) {
            if (!_malloc_cs->append(&malloc_callsite)) {
              ret = false;
              break;
            }
          }
          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
        }
        malloc_callsite.inc(malloc_ptr->size());
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // restore to address order. Snapshot malloc data is maintained in memory
  // address order.
  malloc_data->sort((FN_SORT)malloc_sort_by_addr);

  if (!ret) {
    return false;
  }
  // deal with last record
  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
    if (!_malloc_cs->append(&malloc_callsite)) {
      return false;
    }
  }
  return true;
}
22911189Sjkh
// baseline mmap'd memory by callsite: build a consolidated virtual memory
// map (_vm_map) and per-callsite reserved/committed totals (_vm_cs).
// Returns false on native out-of-memory.
bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  VMCallsitePointer  vm_callsite;
  VMCallsitePointer* cur_callsite = NULL;
  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();

  // initialize virtual memory map array (reuse it if already allocated)
  if (_vm_map == NULL) {
    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
    // out of native memory
    if (_vm_map == NULL || _vm_map->out_of_memory()) {
      return false;
    }
  } else {
    _vm_map->clear();
  }

  // initialize virtual memory callsite array (reuse it if already allocated)
  if (_vm_cs == NULL) {
    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
    // out of native memory
    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
      return false;
    }
  } else {
    _vm_cs->clear();
  }

  // consolidate virtual memory data
  VMMemRegionEx*     reserved_rec = NULL;   // last reserved region appended to _vm_map
  VMMemRegionEx*     committed_rec = NULL;  // last committed region appended to _vm_map

  // vm_ptr is coming in increasing base address order
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      // consolidate reserved memory regions for virtual memory map.
      // The criteria for consolidation is:
      // 1. two adjacent reserved memory regions
      // 2. belong to the same memory type
      // 3. reserved from the same callsite
      if (reserved_rec == NULL ||
        reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
        FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
        reserved_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        // inserted reserved region, we need the pointer to the element in virtual
        // memory map array.
        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }

      // every reserved region starts a new callsite aggregate; flush the
      // previous one (if any) to the callsite array first
      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
        return false;
      }
      vm_callsite = VMCallsitePointer(vm_ptr->pc());
      cur_callsite = &vm_callsite;
      vm_callsite.inc(vm_ptr->size(), 0);
    } else {
      // consolidate committed memory regions for virtual memory map
      // The criteria is:
      // 1. two adjacent committed memory regions
      // 2. committed from the same callsite
      if (committed_rec == NULL ||
        committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
        committed_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }
      // committed size is charged to the callsite of the enclosing reserved
      // region (records arrive reserved-first — see baseline_vm_summary)
      vm_callsite.inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegionEx*)vm_itr.next();
  }
  // deal with last record
  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
    return false;
  }

  // sort it into callsite pc order. Details are aggregated by callsites
  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);

  // walk the array to consolidate records by pc
  MemPointerArrayIteratorImpl itr(_vm_cs);
  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
  while (next_rec != NULL) {
    assert(callsite_rec != NULL, "Sanity check");
    if (next_rec->addr() == callsite_rec->addr()) {
      // same callsite pc: merge into the current record and drop the duplicate
      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
      itr.remove();
      next_rec = (VMCallsitePointer*)itr.current();
    } else {
      callsite_rec = next_rec;
      next_rec = (VMCallsitePointer*)itr.next();
    }
  }

  return true;
}
33625944Sjoerg
33725944Sjoerg// baseline a snapshot. If summary_only = false, memory usages aggregated by
33830300Sjoerg// callsites are also baselined.
33930300Sjoergbool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
34025944Sjoerg  MutexLockerEx snapshot_locker(snapshot._lock, true);
34125944Sjoerg  reset();
34225944Sjoerg  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs) &&
34325944Sjoerg               baseline_vm_summary(snapshot._vm_ptrs);
34425944Sjoerg  _number_of_classes = SystemDictionary::number_of_classes();
34525944Sjoerg
34625944Sjoerg  if (!summary_only && MemTracker::track_callsite() && _baselined) {
34725944Sjoerg    _baselined =  baseline_malloc_details(snapshot._alloc_ptrs) &&
34825944Sjoerg      baseline_vm_details(snapshot._vm_ptrs);
34925944Sjoerg  }
35025944Sjoerg  return _baselined;
35125944Sjoerg}
35225944Sjoerg
35325944Sjoerg
35425944Sjoergint MemBaseline::flag2index(MEMFLAGS flag) const {
35525944Sjoerg  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
35678064Sume    if (MemType2NameMap[index]._flag == flag) {
35778064Sume      return index;
35878064Sume    }
35978064Sume  }
36078064Sume  assert(false, "no type");
36178064Sume  return -1;
36278064Sume}
36378064Sume
36478064Sumeconst char* MemBaseline::type2name(MEMFLAGS type) {
36578064Sume  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
36678064Sume    if (MemType2NameMap[index]._flag == type) {
36778064Sume      return MemType2NameMap[index]._name;
36878064Sume    }
36978064Sume  }
37078064Sume  assert(false, err_msg("bad type %x", type));
37130300Sjoerg  return NULL;
37230300Sjoerg}
37330300Sjoerg
37430300Sjoerg
37530300SjoergMemBaseline& MemBaseline::operator=(const MemBaseline& other) {
37630300Sjoerg  _total_malloced = other._total_malloced;
37730300Sjoerg  _total_vm_reserved = other._total_vm_reserved;
37830300Sjoerg  _total_vm_committed = other._total_vm_committed;
37930300Sjoerg
38030300Sjoerg  _baselined = other._baselined;
38130300Sjoerg  _number_of_classes = other._number_of_classes;
38230300Sjoerg
38330300Sjoerg  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
38430300Sjoerg    _malloc_data[index] = other._malloc_data[index];
38530300Sjoerg    _vm_data[index] = other._vm_data[index];
38630300Sjoerg    _arena_data[index] = other._arena_data[index];
38730300Sjoerg  }
38830300Sjoerg
38930300Sjoerg  if (MemTracker::track_callsite()) {
39030300Sjoerg    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
39125944Sjoerg    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
39230300Sjoerg           "not properly baselined");
39330300Sjoerg    _malloc_cs->clear();
39478064Sume    _vm_cs->clear();
39578064Sume    int index;
39678064Sume    for (index = 0; index < other._malloc_cs->length(); index ++) {
39725944Sjoerg      _malloc_cs->append(other._malloc_cs->at(index));
39825944Sjoerg    }
39925944Sjoerg
40030300Sjoerg    for (index = 0; index < other._vm_cs->length(); index ++) {
40138343Sbde      _vm_cs->append(other._vm_cs->at(index));
40230300Sjoerg    }
40330300Sjoerg  }
40430300Sjoerg  return *this;
40525944Sjoerg}
40630300Sjoerg
40730300Sjoerg/* compare functions for sorting */
40830300Sjoerg
40925944Sjoerg// sort snapshot malloc'd records in callsite pc order
41025944Sjoergint MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
41178064Sume  assert(MemTracker::track_callsite(),"Just check");
41278064Sume  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
41378064Sume  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
41478064Sume  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
41578064Sume}
41678064Sume
41778064Sume// sort baselined malloc'd records in size order
41878064Sumeint MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
41978064Sume  assert(MemTracker::is_on(), "Just check");
42025944Sjoerg  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
42125944Sjoerg  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
42233181Seivind  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
42325944Sjoerg}
42425944Sjoerg
42525944Sjoerg// sort baselined malloc'd records in callsite pc order
42625944Sjoergint MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
42725944Sjoerg  assert(MemTracker::is_on(), "Just check");
42825944Sjoerg  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
42925944Sjoerg  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
43033181Seivind  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
43125944Sjoerg}
43225944Sjoerg
43325944Sjoerg
43425944Sjoerg// sort baselined mmap'd records in size (reserved size) order
43525944Sjoergint MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
43625944Sjoerg  assert(MemTracker::is_on(), "Just check");
43725944Sjoerg  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
43878064Sume  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
43978064Sume  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
44078064Sume}
44178064Sume
44278064Sume// sort baselined mmap'd records in callsite pc order
44378064Sumeint MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
44478064Sume  assert(MemTracker::is_on(), "Just check");
44578064Sume  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
44678064Sume  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
44778064Sume  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
44878064Sume}
44978064Sume
45078064Sume
45178064Sume// sort snapshot malloc'd records in memory block address order
45233181Seivindint MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
45330300Sjoerg  assert(MemTracker::is_on(), "Just check");
45430300Sjoerg  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
45530300Sjoerg  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
45630300Sjoerg  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
45730300Sjoerg  assert(delta != 0, "dup pointer");
45830300Sjoerg  return delta;
45930300Sjoerg}
46033181Seivind
46130300Sjoerg