/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "MarkedSpace.h"

#include "IncrementalSweeper.h"
#include "JSObject.h"
#include "JSCInlines.h"

namespace JSC {

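// Functors for use with forEachBlock() below.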
struct Free : MarkedBlock::VoidFunctor {
    Free(MarkedSpace& space) : m_markedSpace(space) { }
    void operator()(MarkedBlock* block) { m_markedSpace.freeBlock(block); }
private:
    MarkedSpace& m_markedSpace;
};

struct FreeOrShrink : MarkedBlock::VoidFunctor {
    FreeOrShrink(MarkedSpace& space) : m_markedSpace(space) { }
    void operator()(MarkedBlock* block) { m_markedSpace.freeOrShrinkBlock(block); }
private:
    MarkedSpace& m_markedSpace;
};

struct VisitWeakSet : MarkedBlock::VoidFunctor {
    VisitWeakSet(HeapRootVisitor& heapRootVisitor) : m_heapRootVisitor(heapRootVisitor) { }
    void operator()(MarkedBlock* block) { block->visitWeakSet(m_heapRootVisitor); }
private:
    HeapRootVisitor& m_heapRootVisitor;
};

struct ReapWeakSet : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block) { block->reapWeakSet(); }
};

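// Set up one allocator per size class, each in a normal and a destructor flavor:
// finely spaced "precise" classes up to preciseCutoff, coarser "imprecise" classes
// up to impreciseCutoff, and a catch-all large allocator for anything bigger.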
MarkedSpace::MarkedSpace(Heap* heap)
    : m_heap(heap)
    , m_capacity(0)
    , m_isIterating(false)
{
    for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
        allocatorFor(cellSize).init(heap, this, cellSize, false);
        destructorAllocatorFor(cellSize).init(heap, this, cellSize, true);
    }

    for (size_t cellSize = impreciseStart; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
        allocatorFor(cellSize).init(heap, this, cellSize, false);
        destructorAllocatorFor(cellSize).init(heap, this, cellSize, true);
    }

    m_normalSpace.largeAllocator.init(heap, this, 0, false);
    m_destructorSpace.largeAllocator.init(heap, this, 0, true);
}

MarkedSpace::~MarkedSpace()
{
    Free free(*this);
    forEachBlock(free);
    ASSERT(!m_blocks.set().size());
}

struct LastChanceToFinalize {
    void operator()(MarkedAllocator& allocator) { allocator.lastChanceToFinalize(); }
};

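// Called during VM teardown: allocation is stopped and each allocator gets one
// last chance to finalize (destruct) the cells it still owns.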
void MarkedSpace::lastChanceToFinalize()
{
    stopAllocating();
    forEachAllocator<LastChanceToFinalize>();
}

void MarkedSpace::sweep()
{
    m_heap->sweeper()->willFinishSweeping();
    forEachBlock<Sweep>();
}

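// Variant of sweep() for the heap's zombie debugging mode: dead cells are turned
// into zombies rather than recycled, so stale references to them can be caught.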
void MarkedSpace::zombifySweep()
{
    if (Options::logGC())
        dataLog("Zombifying sweep...");
    m_heap->sweeper()->willFinishSweeping();
    forEachBlock<ZombifySweep>();
}

void MarkedSpace::resetAllocators()
{
    for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
        allocatorFor(cellSize).reset();
        destructorAllocatorFor(cellSize).reset();
    }

    for (size_t cellSize = impreciseStart; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
        allocatorFor(cellSize).reset();
        destructorAllocatorFor(cellSize).reset();
    }

    m_normalSpace.largeAllocator.reset();
    m_destructorSpace.largeAllocator.reset();

    m_blocksWithNewObjects.clear();
}

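// During an Eden (young generation) collection only blocks that received new
// objects since the last collection need their weak sets processed; a full
// collection processes every block. reapWeakSets() and clearMarks() below use
// the same split.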
void MarkedSpace::visitWeakSets(HeapRootVisitor& heapRootVisitor)
{
    VisitWeakSet visitWeakSet(heapRootVisitor);
    if (m_heap->operationInProgress() == EdenCollection) {
        for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
            visitWeakSet(m_blocksWithNewObjects[i]);
    } else
        forEachBlock(visitWeakSet);
}

void MarkedSpace::reapWeakSets()
{
    if (m_heap->operationInProgress() == EdenCollection) {
        for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
            m_blocksWithNewObjects[i]->reapWeakSet();
    } else
        forEachBlock<ReapWeakSet>();
}

template <typename Functor>
void MarkedSpace::forEachAllocator()
{
    Functor functor;
    forEachAllocator(functor);
}

template <typename Functor>
void MarkedSpace::forEachAllocator(Functor& functor)
{
    for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
        functor(allocatorFor(cellSize));
        functor(destructorAllocatorFor(cellSize));
    }

    for (size_t cellSize = impreciseStart; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
        functor(allocatorFor(cellSize));
        functor(destructorAllocatorFor(cellSize));
    }

    functor(m_normalSpace.largeAllocator);
    functor(m_destructorSpace.largeAllocator);
}

struct StopAllocatingFunctor {
    void operator()(MarkedAllocator& allocator) { allocator.stopAllocating(); }
};

void MarkedSpace::stopAllocating()
{
    ASSERT(!isIterating());
    forEachAllocator<StopAllocatingFunctor>();
}

struct ResumeAllocatingFunctor {
    void operator()(MarkedAllocator& allocator) { allocator.resumeAllocating(); }
};

void MarkedSpace::resumeAllocating()
{
    ASSERT(isIterating());
    forEachAllocator<ResumeAllocatingFunctor>();
}

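// Gives each allocator until |deadline| to determine whether its blocks are still
// resident; returns true as soon as one of them appears to be paged out.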
bool MarkedSpace::isPagedOut(double deadline)
{
    for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
        if (allocatorFor(cellSize).isPagedOut(deadline)
            || destructorAllocatorFor(cellSize).isPagedOut(deadline))
            return true;
    }

    for (size_t cellSize = impreciseStart; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
        if (allocatorFor(cellSize).isPagedOut(deadline)
            || destructorAllocatorFor(cellSize).isPagedOut(deadline))
            return true;
    }

    if (m_normalSpace.largeAllocator.isPagedOut(deadline)
        || m_destructorSpace.largeAllocator.isPagedOut(deadline))
        return true;

    return false;
}

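// freeBlock() unlinks a block from its allocator and from the block set, reduces
// the recorded capacity, and destroys it. freeOrShrinkBlock() frees only blocks
// that are completely empty; non-empty blocks are merely shrunk to give back
// whatever memory they can.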
void MarkedSpace::freeBlock(MarkedBlock* block)
{
    block->allocator()->removeBlock(block);
    m_capacity -= block->capacity();
    m_blocks.remove(block);
    MarkedBlock::destroy(*m_heap, block);
}

void MarkedSpace::freeOrShrinkBlock(MarkedBlock* block)
{
    if (!block->isEmpty()) {
        block->shrink();
        return;
    }

    freeBlock(block);
}

void MarkedSpace::shrink()
{
    FreeOrShrink freeOrShrink(*this);
    forEachBlock(freeOrShrink);
}

static void clearNewlyAllocatedInBlock(MarkedBlock* block)
{
    if (!block)
        return;
    block->clearNewlyAllocated();
}

struct ClearNewlyAllocated : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block) { block->clearNewlyAllocated(); }
};

#ifndef NDEBUG
struct VerifyNewlyAllocated : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block) { ASSERT(!block->clearNewlyAllocated()); }
};
#endif

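// Apart from the large allocators, only the block an allocator was allocating out
// of when allocation stopped (its last active block) can still carry a
// newly-allocated bitmap, so clearing just those blocks is enough; see the comment
// below for why the large allocators need a full walk.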
void MarkedSpace::clearNewlyAllocated()
{
    for (size_t i = 0; i < preciseCount; ++i) {
        clearNewlyAllocatedInBlock(m_normalSpace.preciseAllocators[i].takeLastActiveBlock());
        clearNewlyAllocatedInBlock(m_destructorSpace.preciseAllocators[i].takeLastActiveBlock());
    }

    for (size_t i = 0; i < impreciseCount; ++i) {
        clearNewlyAllocatedInBlock(m_normalSpace.impreciseAllocators[i].takeLastActiveBlock());
        clearNewlyAllocatedInBlock(m_destructorSpace.impreciseAllocators[i].takeLastActiveBlock());
    }

    // We have to iterate all of the blocks in the large allocators because they are
    // canonicalized as they are used up (see MarkedAllocator::tryAllocateHelper)
    // which creates the m_newlyAllocated bitmap.
    ClearNewlyAllocated functor;
    m_normalSpace.largeAllocator.forEachBlock(functor);
    m_destructorSpace.largeAllocator.forEachBlock(functor);

#ifndef NDEBUG
    VerifyNewlyAllocated verifyFunctor;
    forEachBlock(verifyFunctor);
#endif
}

#ifndef NDEBUG
struct VerifyMarkedOrRetired : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock* block)
    {
        switch (block->m_state) {
        case MarkedBlock::Marked:
        case MarkedBlock::Retired:
            return;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
};
#endif

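// An Eden collection only touches the marks of blocks that received new objects;
// a full collection clears marks in every block. Either way, every block is
// expected to end up Marked or Retired, which the debug-only functor above checks.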
void MarkedSpace::clearMarks()
{
    if (m_heap->operationInProgress() == EdenCollection) {
        for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
            m_blocksWithNewObjects[i]->clearMarks();
    } else
        forEachBlock<ClearMarks>();

#ifndef NDEBUG
    VerifyMarkedOrRetired verifyFunctor;
    forEachBlock(verifyFunctor);
#endif
}

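// Heap iteration requires allocation to be stopped so that every block is in a
// consistent state; willStartIterating()/didFinishIterating() bracket such an
// iteration, with m_isIterating asserting against unbalanced use.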
void MarkedSpace::willStartIterating()
{
    ASSERT(!isIterating());
    stopAllocating();
    m_isIterating = true;
}

void MarkedSpace::didFinishIterating()
{
    ASSERT(isIterating());
    resumeAllocating();
    m_isIterating = false;
}

} // namespace JSC