1/*
2 * Copyright (C) 2011 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef CopiedSpaceInlines_h
27#define CopiedSpaceInlines_h
28
29#include "CopiedBlock.h"
30#include "CopiedSpace.h"
31#include "Heap.h"
32#include "VM.h"
33
34namespace JSC {
35
36inline bool CopiedSpace::contains(CopiedBlock* block)
37{
38 return (!m_newGen.blockFilter.ruleOut(reinterpret_cast<Bits>(block)) || !m_oldGen.blockFilter.ruleOut(reinterpret_cast<Bits>(block)))
39 && m_blockSet.contains(block);
40}
41
42inline bool CopiedSpace::contains(void* ptr, CopiedBlock*& result)
43{
44 CopiedBlock* block = blockFor(ptr);
45 if (contains(block)) {
46 result = block;
47 return true;
48 }
49 result = 0;
50 return false;
51}
52
inline void CopiedSpace::pin(CopiedBlock* block)
{
    // Marks the block as pinned so the copy phase will leave its contents in
    // place; see pinIfNecessary() for how conservative pointers get here.
    block->pin();
}
57
inline void CopiedSpace::pinIfNecessary(void* opaquePointer)
{
    // Conservative-scan entry point: if opaquePointer plausibly refers into a
    // copied-space block, pin that block so the copy phase cannot move it.
    //
    // Pointers into the copied space come in the following varieties:
    // 1) Pointers to the start of a span of memory. This is the most
    //    natural though not necessarily the most common.
    // 2) Pointers to one value-sized (8 byte) word past the end of
    //    a span of memory. This currently occurs with semi-butterflies
    //    and should be fixed soon, once the other half of the
    //    butterfly lands.
    // 3) Pointers to the innards arising from loop induction variable
    //    optimizations (either manual ones or automatic, by the
    //    compiler).
    // 4) Pointers to the end of a span of memory in arising from
    //    induction variable optimizations combined with the
    //    GC-to-compiler contract laid out in the C spec: a pointer to
    //    the end of a span of memory must be considered to be a
    //    pointer to that memory.

    EncodedJSValue* pointer = reinterpret_cast<EncodedJSValue*>(opaquePointer);
    CopiedBlock* block;

    // Handle (1) and (3): an interior or start-of-span pointer lands directly
    // in its containing block.
    if (contains(pointer, block))
        pin(block);

    // Handle (4). We don't have to explicitly check and pin the block under this
    // pointer because it cannot possibly point to something that cases (1) and
    // (3) above or case (2) below wouldn't already catch.
    pointer--;

    // Handle (2): back up one more value-sized word so a one-past-the-end
    // pointer (two words total from the original) lands inside the span it
    // refers to; then check and pin that block.
    pointer--;
    if (contains(pointer, block))
        pin(block);
}
93
94inline void CopiedSpace::recycleEvacuatedBlock(CopiedBlock* block, HeapOperation collectionType)
95{
96 ASSERT(block);
97 ASSERT(block->canBeRecycled());
98 ASSERT(!block->m_isPinned);
99 {
100 LockHolder locker(&m_toSpaceLock);
101 m_blockSet.remove(block);
102 if (collectionType == EdenCollection)
103 m_newGen.fromSpace->remove(block);
104 else
105 m_oldGen.fromSpace->remove(block);
106 }
107 CopiedBlock::destroy(*heap(), block);
108}
109
inline void CopiedSpace::recycleBorrowedBlock(CopiedBlock* block)
{
    // Destroys a block that was loaned out for the copying phase (see
    // allocateBlockForCopyingPhase) and decrements the loaned-block count.
    // Destruction happens outside the lock; only the counter update is guarded.
    CopiedBlock::destroy(*heap(), block);

    {
        LockHolder locker(m_loanedBlocksLock);
        ASSERT(m_numberOfLoanedBlocks > 0);
        ASSERT(m_inCopyingPhase);
        m_numberOfLoanedBlocks--;
    }
}
121
inline CopiedBlock* CopiedSpace::allocateBlockForCopyingPhase()
{
    // Creates a fresh block for the copy phase to copy surviving objects into.
    // The block is "loaned": it is tracked only by m_numberOfLoanedBlocks
    // until recycleBorrowedBlock() decrements the count.
    ASSERT(m_inCopyingPhase);
    // No zero-fill: the copy phase overwrites whatever payload it uses.
    CopiedBlock* block = CopiedBlock::createNoZeroFill(*m_heap);

    {
        LockHolder locker(m_loanedBlocksLock);
        m_numberOfLoanedBlocks++;
    }

    ASSERT(!block->dataSize());
    return block;
}
135
inline void CopiedSpace::allocateBlock()
{
    // Slow-path refill for the bump allocator: installs a brand-new block as
    // the current allocation target in the new generation's toSpace.

    // Give the GC a chance to run (or defer a collection) before growing.
    m_heap->collectIfNecessaryOrDefer();

    m_allocator.resetCurrentBlock();

    CopiedBlock* block = CopiedBlock::create(*m_heap);

    // Register the block everywhere contains() looks before handing it to the
    // allocator: toSpace list, Bloom filter, and the authoritative block set.
    m_newGen.toSpace->push(block);
    m_newGen.blockFilter.add(reinterpret_cast<Bits>(block));
    m_blockSet.add(block);
    m_allocator.setCurrentBlock(block);
}
149
150inline CheckedBoolean CopiedSpace::tryAllocate(size_t bytes, void** outPtr)
151{
152 ASSERT(!m_heap->vm()->isInitializingObject());
153 ASSERT(bytes);
154
155 if (!m_allocator.tryAllocate(bytes, outPtr))
156 return tryAllocateSlowCase(bytes, outPtr);
157
158 ASSERT(*outPtr);
159 return true;
160}
161
inline bool CopiedSpace::isOversize(size_t bytes)
{
    // True when the request is too large for normal block allocation
    // (strictly greater than s_maxAllocationSize).
    return bytes > s_maxAllocationSize;
}
166
inline bool CopiedSpace::isPinned(void* ptr)
{
    // Precondition: ptr must point into a valid copied-space block —
    // blockFor() just masks the address without validating ownership.
    return blockFor(ptr)->m_isPinned;
}
171
inline CopiedBlock* CopiedSpace::blockFor(void* ptr)
{
    // Maps any (presumed) interior pointer to its containing block header by
    // masking with s_blockMask; assumes blocks are block-size aligned. No
    // validation is performed — pair with contains() before trusting result.
    return reinterpret_cast<CopiedBlock*>(reinterpret_cast<size_t>(ptr) & s_blockMask);
}
176
177template <HeapOperation collectionType>
178inline void CopiedSpace::startedCopying()
179{
180 DoublyLinkedList<CopiedBlock>* fromSpace;
181 DoublyLinkedList<CopiedBlock>* oversizeBlocks;
182 TinyBloomFilter* blockFilter;
183 if (collectionType == FullCollection) {
184 ASSERT(m_oldGen.fromSpace->isEmpty());
185 ASSERT(m_newGen.fromSpace->isEmpty());
186
187 m_oldGen.toSpace->append(*m_newGen.toSpace);
188 m_oldGen.oversizeBlocks.append(m_newGen.oversizeBlocks);
189
190 ASSERT(m_newGen.toSpace->isEmpty());
191 ASSERT(m_newGen.fromSpace->isEmpty());
192 ASSERT(m_newGen.oversizeBlocks.isEmpty());
193
194 std::swap(m_oldGen.fromSpace, m_oldGen.toSpace);
195 fromSpace = m_oldGen.fromSpace;
196 oversizeBlocks = &m_oldGen.oversizeBlocks;
197 blockFilter = &m_oldGen.blockFilter;
198 } else {
199 std::swap(m_newGen.fromSpace, m_newGen.toSpace);
200 fromSpace = m_newGen.fromSpace;
201 oversizeBlocks = &m_newGen.oversizeBlocks;
202 blockFilter = &m_newGen.blockFilter;
203 }
204
205 blockFilter->reset();
206 m_allocator.resetCurrentBlock();
207
208 CopiedBlock* next = 0;
209 size_t totalLiveBytes = 0;
210 size_t totalUsableBytes = 0;
211 for (CopiedBlock* block = fromSpace->head(); block; block = next) {
212 next = block->next();
213 if (!block->isPinned() && block->canBeRecycled()) {
214 recycleEvacuatedBlock(block, collectionType);
215 continue;
216 }
217 ASSERT(block->liveBytes() <= CopiedBlock::blockSize);
218 totalLiveBytes += block->liveBytes();
219 totalUsableBytes += block->payloadCapacity();
220 block->didPromote();
221 }
222
223 CopiedBlock* block = oversizeBlocks->head();
224 while (block) {
225 CopiedBlock* next = block->next();
226 if (block->isPinned()) {
227 blockFilter->add(reinterpret_cast<Bits>(block));
228 totalLiveBytes += block->payloadCapacity();
229 totalUsableBytes += block->payloadCapacity();
230 block->didPromote();
231 } else {
232 oversizeBlocks->remove(block);
233 m_blockSet.remove(block);
234 CopiedBlock::destroy(*heap(), block);
235 }
236 block = next;
237 }
238
239 double markedSpaceBytes = m_heap->objectSpace().capacity();
240 double totalUtilization = static_cast<double>(totalLiveBytes + markedSpaceBytes) / static_cast<double>(totalUsableBytes + markedSpaceBytes);
241 m_shouldDoCopyPhase = m_heap->operationInProgress() == EdenCollection || totalUtilization <= Options::minHeapUtilization();
242 if (!m_shouldDoCopyPhase) {
243 if (Options::logGC())
244 dataLog("Skipped copying, ");
245 return;
246 }
247
248 if (Options::logGC())
249 dataLogF("Did copy, ");
250 ASSERT(m_shouldDoCopyPhase);
251 ASSERT(!m_numberOfLoanedBlocks);
252 ASSERT(!m_inCopyingPhase);
253 m_inCopyingPhase = true;
254}
255
256} // namespace JSC
257
258#endif // CopiedSpaceInlines_h
259