/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedBlock.h"

#include "JSCell.h"
#include "JSDestructibleObject.h"
#include "JSCInlines.h"

namespace JSC {

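// Debug-only accounting: flip computeBalance to true and every create()/destroy() will bump
// this global counter and log it on every tenth change, which makes leaked blocks easy to spot.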
static const bool computeBalance = false;
static size_t balance;

MarkedBlock* MarkedBlock::create(Heap& heap, MarkedAllocator* allocator, size_t capacity, size_t cellSize, bool needsDestruction)
{
    if (computeBalance) {
        balance++;
        if (!(balance % 10))
            dataLog("MarkedBlock Balance: ", balance, "\n");
    }
    MarkedBlock* block = new (NotNull, fastAlignedMalloc(blockSize, capacity)) MarkedBlock(allocator, capacity, cellSize, needsDestruction);
    heap.didAllocateBlock(capacity);
    return block;
}

void MarkedBlock::destroy(Heap& heap, MarkedBlock* block)
{
    if (computeBalance) {
        balance--;
        if (!(balance % 10))
            dataLog("MarkedBlock Balance: ", balance, "\n");
    }
    size_t capacity = block->capacity();
    block->~MarkedBlock();
    fastAlignedFree(block);
    heap.didFreeBlock(capacity);
}

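// m_endAtom is one past the last atom at which a cell may start. For an ordinary block that is
// (atomsPerBlock - m_atomsPerCell) + 1; when the allocator reports a cellSize() of 0 the bound
// collapses to firstAtom() + 1, so the sweep loop visits at most the cell at firstAtom().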
MarkedBlock::MarkedBlock(MarkedAllocator* allocator, size_t capacity, size_t cellSize, bool needsDestruction)
    : DoublyLinkedListNode<MarkedBlock>()
    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
    , m_endAtom((allocator->cellSize() ? atomsPerBlock - m_atomsPerCell : firstAtom()) + 1)
    , m_capacity(capacity)
    , m_needsDestruction(needsDestruction)
    , m_allocator(allocator)
    , m_state(New) // All cells start out unmarked.
    , m_weakSet(allocator->heap()->vm(), *this)
{
    ASSERT(allocator);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}

inline void MarkedBlock::callDestructor(JSCell* cell)
{
    // A previous eager sweep may already have run cell's destructor.
    if (cell->isZapped())
        return;

    ASSERT(cell->structureID());
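    // If the cell's Structure is flagged immortal it is guaranteed to outlive the cell, so it is
    // safe to fetch the ClassInfo through it; otherwise the cell is expected to be a
    // JSDestructibleObject, which carries its ClassInfo directly and needs no live Structure.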
    if (cell->inlineTypeFlags() & StructureIsImmortal)
        cell->structure(*vm())->classInfo()->methodTable.destroy(cell);
    else
        jsCast<JSDestructibleObject*>(cell)->classInfo()->methodTable.destroy(cell);
    cell->zap();
}

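// The block state, sweep mode, and destructor requirement are template parameters so the
// compiler can emit a specialized sweep loop for each combination, folding the per-cell
// branches below away at compile time.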
template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, bool callDestructors>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
    ASSERT(blockState != Allocated && blockState != FreeListed);
    ASSERT(!(!callDestructors && sweepMode == SweepOnly));

    SamplingRegion samplingRegion((!callDestructors && blockState != New) ? "Calling destructors" : "sweeping");

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = 0;
    size_t count = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
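        // A cell is live if its mark bit is set or, for cells allocated since the last
        // collection cycle, if its newlyAllocated bit is set; live cells are skipped.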
        if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
            continue;

        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);

        if (callDestructors && blockState != New)
            callDestructor(cell);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = head;
            head = freeCell;
            ++count;
        }
    }

    // We only want to discard the newlyAllocated bits if we're creating a FreeList,
    // otherwise we would lose information on what's currently alive.
    if (sweepMode == SweepToFreeList && m_newlyAllocated)
        m_newlyAllocated = nullptr;

    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
    return FreeList(head, count * cellSize());
}

MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    m_weakSet.sweep();

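    // If no cells need destructors, a plain sweep has nothing to do; only building a free list
    // would accomplish anything.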
    if (sweepMode == SweepOnly && !m_needsDestruction)
        return FreeList();

    if (m_needsDestruction)
        return sweepHelper<true>(sweepMode);
    return sweepHelper<false>(sweepMode);
}

template<bool callDestructors>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList, callDestructors>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return FreeList();
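    // Allocated and Retired blocks are never handed to sweep(), so hitting either case is a bug.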
    case Retired:
    case Allocated:
        RELEASE_ASSERT_NOT_REACHED();
        return FreeList();
    case Marked:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Marked, SweepToFreeList, callDestructors>()
            : specializedSweep<Marked, SweepOnly, callDestructors>();
    }

    RELEASE_ASSERT_NOT_REACHED();
    return FreeList();
}

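// Helper for stopAllocating(): marks every cell in the block as newly allocated; the cells that
// are still sitting on the free list then have their bits cleared back out below.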
class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
public:
    SetNewlyAllocatedFunctor(MarkedBlock* block)
        : m_block(block)
    {
    }

    IterationStatus operator()(JSCell* cell)
    {
        ASSERT(MarkedBlock::blockFor(cell) == m_block);
        m_block->setNewlyAllocated(cell);
        return IterationStatus::Continue;
    }

private:
    MarkedBlock* m_block;
};

void MarkedBlock::stopAllocating(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    if (m_state == Marked) {
        // If the block is in the Marked state then we know that:
        // 1) It was not used for allocation during the previous allocation cycle.
        // 2) It may have dead objects, and we only know them to be dead by the
        //    fact that their mark bits are unset.
        // Hence if the block is Marked we need to leave it Marked.

        ASSERT(!head);
        return;
    }

    ASSERT(m_state == FreeListed);

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead.

    ASSERT(!m_newlyAllocated);
    m_newlyAllocated = std::make_unique<WTF::Bitmap<atomsPerBlock>>();

    SetNewlyAllocatedFunctor functor(this);
    forEachCell(functor);

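    // Anything still on the free list was never handed out and is therefore dead: zap it and
    // clear its newlyAllocated bit.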
    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
        clearNewlyAllocated(current);
    }

    m_state = Marked;
}

void MarkedBlock::clearMarks()
{
    if (heap()->operationInProgress() == JSC::EdenCollection)
        this->clearMarksWithCollectionType<EdenCollection>();
    else
        this->clearMarksWithCollectionType<FullCollection>();
}

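// A FullCollection starts from scratch, so every mark bit is cleared. An EdenCollection keeps
// the existing mark bits, since objects that survived an earlier collection are still live.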
template <HeapOperation collectionType>
void MarkedBlock::clearMarksWithCollectionType()
{
    ASSERT(collectionType == FullCollection || collectionType == EdenCollection);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state != New && m_state != FreeListed);
    if (collectionType == FullCollection) {
        m_marks.clearAll();
        // This will become true at the end of the mark phase. We set it now to
        // avoid an extra pass to do so later.
        m_state = Marked;
        return;
    }

    ASSERT(collectionType == EdenCollection);
    // If a block was retired then there's no way an EdenCollection can un-retire it.
    if (m_state != Retired)
        m_state = Marked;
}

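// Presumably called when the heap is being torn down: clearing the newlyAllocated and mark bits
// makes every cell look dead, so the final sweep() runs any outstanding destructors.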
void MarkedBlock::lastChanceToFinalize()
{
    m_weakSet.lastChanceToFinalize();

    clearNewlyAllocated();
    clearMarksWithCollectionType<FullCollection>();
    sweep();
}

MarkedBlock::FreeList MarkedBlock::resumeAllocating()
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state == Marked);

    if (!m_newlyAllocated) {
        // We didn't have to create a "newly allocated" bitmap. That means we were already Marked
        // when we last stopped allocation, so return an empty free list and stay in the Marked state.
        return FreeList();
    }

    // Re-create our free list from before stopping allocation.
    return sweep(SweepToFreeList);
}

void MarkedBlock::didRetireBlock(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    // Currently we don't notify the Heap that we're giving up on this block.
    // The Heap might be able to make a better decision about how many bytes should
    // be allocated before the next collection if it knew about this retired block.
    // On the other hand we'll waste at most 10% of our Heap space between FullCollections
    // and only under heavy fragmentation.

    // We need to zap the free list when retiring a block so that we don't try to destroy
    // previously destroyed objects when we re-sweep the block in the future.
    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
    }

    ASSERT(m_state == FreeListed);
    m_state = Retired;
}

} // namespace JSC