// allocation.inline.hpp revision 4532:5a9fa2ba85f0
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
#define SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP

#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"

// Explicit C-heap memory management

// Debug-build tracing hooks for C-heap allocation/free (defined elsewhere).
void trace_heap_malloc(size_t size, const char* name, void *p);
void trace_heap_free(void *p);

36275397Sbapt#ifndef PRODUCT
37275397Sbapt// Increments unsigned long value for statistics (not atomic on MP).
38275397Sbaptinline void inc_stat_counter(volatile julong* dest, julong add_value) {
39275397Sbapt#if defined(SPARC) || defined(X86)
40275397Sbapt  // Sparc and X86 have atomic jlong (8 bytes) instructions
41294113Sbapt  julong value = Atomic::load((volatile jlong*)dest);
42275397Sbapt  value += add_value;
43275397Sbapt  Atomic::store((jlong)value, (volatile jlong*)dest);
44275397Sbapt#else
45275397Sbapt  // possible word-tearing during load/store
46275397Sbapt  *dest += add_value;
47294113Sbapt#endif
48275397Sbapt}
49275397Sbapt#endif
50275397Sbapt
51275397Sbapt// allocate using malloc; will fail if no memory available
52294113Sbaptinline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0,
53275397Sbapt    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
54275397Sbapt  if (pc == 0) {
55275397Sbapt    pc = CURRENT_PC;
56275397Sbapt  }
57275397Sbapt  char* p = (char*) os::malloc(size, flags, pc);
58275397Sbapt  #ifdef ASSERT
59294113Sbapt  if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
60275397Sbapt  #endif
61275397Sbapt  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) vm_exit_out_of_memory(size, "AllocateHeap");
62294113Sbapt  return p;
63275397Sbapt}
64
65inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flags,
66    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
67  char* p = (char*) os::realloc(old, size, flags, CURRENT_PC);
68  #ifdef ASSERT
69  if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
70  #endif
71  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) vm_exit_out_of_memory(size, "ReallocateHeap");
72  return p;
73}
74
75inline void FreeHeap(void* p, MEMFLAGS memflags = mtInternal) {
76  #ifdef ASSERT
77  if (PrintMallocFree) trace_heap_free(p);
78  #endif
79  os::free(p, memflags);
80}
81
82
83template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
84      address caller_pc){
85#ifdef ASSERT
86    void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
87    if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
88    return p;
89#else
90    return (void *) AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
91#endif
92  }
93
94template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
95  const std::nothrow_t&  nothrow_constant, address caller_pc) {
96#ifdef ASSERT
97  void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
98      AllocFailStrategy::RETURN_NULL);
99    if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
100    return p;
101#else
102  return (void *) AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
103      AllocFailStrategy::RETURN_NULL);
104#endif
105}
106
107template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){
108   FreeHeap(p, F);
109}
110
111template <class E, MEMFLAGS F>
112E* ArrayAllocator<E, F>::allocate(size_t length) {
113  assert(_addr == NULL, "Already in use");
114
115  _size = sizeof(E) * length;
116  _use_malloc = _size < ArrayAllocatorMallocLimit;
117
118  if (_use_malloc) {
119    _addr = AllocateHeap(_size, F);
120    if (_addr == NULL && _size >=  (size_t)os::vm_allocation_granularity()) {
121      // malloc failed let's try with mmap instead
122      _use_malloc = false;
123    } else {
124      return (E*)_addr;
125    }
126  }
127
128  int alignment = os::vm_allocation_granularity();
129  _size = align_size_up(_size, alignment);
130
131  _addr = os::reserve_memory(_size, NULL, alignment);
132  if (_addr == NULL) {
133    vm_exit_out_of_memory(_size, "Allocator (reserve)");
134  }
135
136  bool success = os::commit_memory(_addr, _size, false /* executable */);
137  if (!success) {
138    vm_exit_out_of_memory(_size, "Allocator (commit)");
139  }
140
141  return (E*)_addr;
142}
143
144template<class E, MEMFLAGS F>
145void ArrayAllocator<E, F>::free() {
146  if (_addr != NULL) {
147    if (_use_malloc) {
148      FreeHeap(_addr, F);
149    } else {
150      os::release_memory(_addr, _size);
151    }
152    _addr = NULL;
153  }
154}

#endif // SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
157