/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */ 25 26#include "config.h" 27#include "CopiedSpace.h" 28 29#include "CopiedSpaceInlines.h" 30#include "GCActivityCallback.h" 31#include "Operations.h" 32#include "Options.h" 33 34namespace JSC { 35 36CopiedSpace::CopiedSpace(Heap* heap) 37 : m_heap(heap) 38 , m_toSpace(0) 39 , m_fromSpace(0) 40 , m_inCopyingPhase(false) 41 , m_shouldDoCopyPhase(false) 42 , m_numberOfLoanedBlocks(0) 43{ 44 m_toSpaceLock.Init(); 45} 46 47CopiedSpace::~CopiedSpace() 48{ 49 while (!m_toSpace->isEmpty()) 50 m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_toSpace->removeHead())); 51 52 while (!m_fromSpace->isEmpty()) 53 m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_fromSpace->removeHead())); 54 55 while (!m_oversizeBlocks.isEmpty()) 56 m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(m_oversizeBlocks.removeHead())); 57} 58 59void CopiedSpace::init() 60{ 61 m_toSpace = &m_blocks1; 62 m_fromSpace = &m_blocks2; 63 64 allocateBlock(); 65} 66 67CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr) 68{ 69 if (isOversize(bytes)) 70 return tryAllocateOversize(bytes, outPtr); 71 72 ASSERT(m_heap->vm()->apiLock().currentThreadIsHoldingLock()); 73 m_heap->didAllocate(m_allocator.currentCapacity()); 74 75 allocateBlock(); 76 77 *outPtr = m_allocator.forceAllocate(bytes); 78 return true; 79} 80 81CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr) 82{ 83 ASSERT(isOversize(bytes)); 84 85 CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocateCustomSize(sizeof(CopiedBlock) + bytes, CopiedBlock::blockSize)); 86 m_oversizeBlocks.push(block); 87 m_blockFilter.add(reinterpret_cast<Bits>(block)); 88 m_blockSet.add(block); 89 90 CopiedAllocator allocator; 91 allocator.setCurrentBlock(block); 92 *outPtr = allocator.forceAllocate(bytes); 93 allocator.resetCurrentBlock(); 94 95 m_heap->didAllocate(block->region()->blockSize()); 96 97 return true; 98} 99 100CheckedBoolean 
CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize) 101{ 102 if (oldSize >= newSize) 103 return true; 104 105 void* oldPtr = *ptr; 106 ASSERT(!m_heap->vm()->isInitializingObject()); 107 108 if (CopiedSpace::blockFor(oldPtr)->isOversize() || isOversize(newSize)) 109 return tryReallocateOversize(ptr, oldSize, newSize); 110 111 if (m_allocator.tryReallocate(oldPtr, oldSize, newSize)) 112 return true; 113 114 void* result = 0; 115 if (!tryAllocate(newSize, &result)) { 116 *ptr = 0; 117 return false; 118 } 119 memcpy(result, oldPtr, oldSize); 120 *ptr = result; 121 return true; 122} 123 124CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize) 125{ 126 ASSERT(isOversize(oldSize) || isOversize(newSize)); 127 ASSERT(newSize > oldSize); 128 129 void* oldPtr = *ptr; 130 131 void* newPtr = 0; 132 if (!tryAllocateOversize(newSize, &newPtr)) { 133 *ptr = 0; 134 return false; 135 } 136 137 memcpy(newPtr, oldPtr, oldSize); 138 139 CopiedBlock* oldBlock = CopiedSpace::blockFor(oldPtr); 140 if (oldBlock->isOversize()) { 141 m_oversizeBlocks.remove(oldBlock); 142 m_blockSet.remove(oldBlock); 143 m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(oldBlock)); 144 } 145 146 *ptr = newPtr; 147 return true; 148} 149 150void CopiedSpace::doneFillingBlock(CopiedBlock* block, CopiedBlock** exchange) 151{ 152 ASSERT(m_inCopyingPhase); 153 154 if (exchange) 155 *exchange = allocateBlockForCopyingPhase(); 156 157 if (!block) 158 return; 159 160 if (!block->dataSize()) { 161 recycleBorrowedBlock(block); 162 return; 163 } 164 165 block->zeroFillWilderness(); 166 167 { 168 SpinLockHolder locker(&m_toSpaceLock); 169 m_toSpace->push(block); 170 m_blockSet.add(block); 171 m_blockFilter.add(reinterpret_cast<Bits>(block)); 172 } 173 174 { 175 MutexLocker locker(m_loanedBlocksLock); 176 ASSERT(m_numberOfLoanedBlocks > 0); 177 ASSERT(m_inCopyingPhase); 178 m_numberOfLoanedBlocks--; 179 if (!m_numberOfLoanedBlocks) 180 
m_loanedBlocksCondition.signal(); 181 } 182} 183 184void CopiedSpace::startedCopying() 185{ 186 std::swap(m_fromSpace, m_toSpace); 187 188 m_blockFilter.reset(); 189 m_allocator.resetCurrentBlock(); 190 191 CopiedBlock* next = 0; 192 size_t totalLiveBytes = 0; 193 size_t totalUsableBytes = 0; 194 for (CopiedBlock* block = m_fromSpace->head(); block; block = next) { 195 next = block->next(); 196 if (!block->isPinned() && block->canBeRecycled()) { 197 recycleEvacuatedBlock(block); 198 continue; 199 } 200 totalLiveBytes += block->liveBytes(); 201 totalUsableBytes += block->payloadCapacity(); 202 } 203 204 CopiedBlock* block = m_oversizeBlocks.head(); 205 while (block) { 206 CopiedBlock* next = block->next(); 207 if (block->isPinned()) { 208 m_blockFilter.add(reinterpret_cast<Bits>(block)); 209 totalLiveBytes += block->payloadCapacity(); 210 totalUsableBytes += block->payloadCapacity(); 211 block->didSurviveGC(); 212 } else { 213 m_oversizeBlocks.remove(block); 214 m_blockSet.remove(block); 215 m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(block)); 216 } 217 block = next; 218 } 219 220 double markedSpaceBytes = m_heap->objectSpace().capacity(); 221 double totalFragmentation = ((double)totalLiveBytes + markedSpaceBytes) / ((double)totalUsableBytes + markedSpaceBytes); 222 m_shouldDoCopyPhase = totalFragmentation <= Options::minHeapUtilization(); 223 if (!m_shouldDoCopyPhase) 224 return; 225 226 ASSERT(m_shouldDoCopyPhase); 227 ASSERT(!m_inCopyingPhase); 228 ASSERT(!m_numberOfLoanedBlocks); 229 m_inCopyingPhase = true; 230} 231 232void CopiedSpace::doneCopying() 233{ 234 { 235 MutexLocker locker(m_loanedBlocksLock); 236 while (m_numberOfLoanedBlocks > 0) 237 m_loanedBlocksCondition.wait(m_loanedBlocksLock); 238 } 239 240 ASSERT(m_inCopyingPhase == m_shouldDoCopyPhase); 241 m_inCopyingPhase = false; 242 243 while (!m_fromSpace->isEmpty()) { 244 CopiedBlock* block = m_fromSpace->removeHead(); 245 // All non-pinned blocks in from-space should have been 
reclaimed as they were evacuated. 246 ASSERT(block->isPinned() || !m_shouldDoCopyPhase); 247 block->didSurviveGC(); 248 // We don't add the block to the blockSet because it was never removed. 249 ASSERT(m_blockSet.contains(block)); 250 m_blockFilter.add(reinterpret_cast<Bits>(block)); 251 m_toSpace->push(block); 252 } 253 254 if (!m_toSpace->head()) 255 allocateBlock(); 256 else 257 m_allocator.setCurrentBlock(m_toSpace->head()); 258 259 m_shouldDoCopyPhase = false; 260} 261 262size_t CopiedSpace::size() 263{ 264 size_t calculatedSize = 0; 265 266 for (CopiedBlock* block = m_toSpace->head(); block; block = block->next()) 267 calculatedSize += block->size(); 268 269 for (CopiedBlock* block = m_fromSpace->head(); block; block = block->next()) 270 calculatedSize += block->size(); 271 272 for (CopiedBlock* block = m_oversizeBlocks.head(); block; block = block->next()) 273 calculatedSize += block->size(); 274 275 return calculatedSize; 276} 277 278size_t CopiedSpace::capacity() 279{ 280 size_t calculatedCapacity = 0; 281 282 for (CopiedBlock* block = m_toSpace->head(); block; block = block->next()) 283 calculatedCapacity += block->capacity(); 284 285 for (CopiedBlock* block = m_fromSpace->head(); block; block = block->next()) 286 calculatedCapacity += block->capacity(); 287 288 for (CopiedBlock* block = m_oversizeBlocks.head(); block; block = block->next()) 289 calculatedCapacity += block->capacity(); 290 291 return calculatedCapacity; 292} 293 294static bool isBlockListPagedOut(double deadline, DoublyLinkedList<CopiedBlock>* list) 295{ 296 unsigned itersSinceLastTimeCheck = 0; 297 CopiedBlock* current = list->head(); 298 while (current) { 299 current = current->next(); 300 ++itersSinceLastTimeCheck; 301 if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) { 302 double currentTime = WTF::monotonicallyIncreasingTime(); 303 if (currentTime > deadline) 304 return true; 305 itersSinceLastTimeCheck = 0; 306 } 307 } 308 309 return false; 310} 311 312bool 
CopiedSpace::isPagedOut(double deadline) 313{ 314 return isBlockListPagedOut(deadline, m_toSpace) 315 || isBlockListPagedOut(deadline, m_fromSpace) 316 || isBlockListPagedOut(deadline, &m_oversizeBlocks); 317} 318 319} // namespace JSC 320