/*
 * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 * Copyright (C) 2001 Peter Kelly (pmk@post.com)
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef MarkedBlock_h
#define MarkedBlock_h

#include "HeapOperation.h"
#include "IterationStatus.h"
#include "WeakSet.h"
#include <wtf/Bitmap.h>
#include <wtf/DataLog.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
#include <wtf/StdLibExtras.h>

// Set to log state transitions of blocks.
#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0

#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do { \
        dataLogF( \
            "%s:%d %s: block %s = %p, %d\n", \
            __FILE__, __LINE__, __FUNCTION__, \
            #block, (block), (block)->m_state); \
    } while (false)
#else
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
#endif

namespace JSC {

    class Heap;
    class JSCell;
    class MarkedAllocator;

    typedef uintptr_t Bits;

    bool isZapped(const JSCell*);

    // A marked block is a page-aligned container for heap-allocated objects.
    // Objects are allocated within cells of the marked block. For a given
    // marked block, all cells have the same size. Objects smaller than the
    // cell size may be allocated in the marked block, in which case the
    // allocation suffers from internal fragmentation: wasted space whose
    // size is equal to the difference between the cell size and the object
    // size.
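    //
    // For example (illustrative numbers, using the constants declared below):
    // with atomSize = 16, a 40-byte object lives in a block whose cell size is
    // 48 bytes (3 atoms), wasting 48 - 40 = 8 bytes per object to internal
    // fragmentation.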

    class MarkedBlock : public DoublyLinkedListNode<MarkedBlock> {
        friend class WTF::DoublyLinkedListNode<MarkedBlock>;
        friend class LLIntOffsetsExtractor;
        friend struct VerifyMarkedOrRetired;
    public:
        static const size_t atomSize = 16; // bytes
        static const size_t blockSize = 16 * KB;
        static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.

        static const size_t atomsPerBlock = blockSize / atomSize;

        static_assert(!(MarkedBlock::atomSize & (MarkedBlock::atomSize - 1)), "MarkedBlock::atomSize must be a power of two.");
        static_assert(!(MarkedBlock::blockSize & (MarkedBlock::blockSize - 1)), "MarkedBlock::blockSize must be a power of two.");
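
        // With the values above, a block spans 16 KB and holds
        // atomsPerBlock = 16 * 1024 / 16 = 1024 atoms. blockMask keeps only the
        // high bits of a pointer, mapping any interior pointer back to the
        // 16 KB-aligned start of its block (see blockFor() below).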

        struct FreeCell {
            FreeCell* next;
        };

        struct FreeList {
            FreeCell* head;
            size_t bytes;

            FreeList();
            FreeList(FreeCell*, size_t);
        };

        struct VoidFunctor {
            typedef void ReturnType;
            void returnValue() { }
        };

        class CountFunctor {
        public:
            typedef size_t ReturnType;

            CountFunctor() : m_count(0) { }
            void count(size_t count) { m_count += count; }
            ReturnType returnValue() { return m_count; }

        private:
            ReturnType m_count;
        };

        static MarkedBlock* create(Heap&, MarkedAllocator*, size_t capacity, size_t cellSize, bool needsDestruction);
        static void destroy(Heap&, MarkedBlock*);

        static bool isAtomAligned(const void*);
        static MarkedBlock* blockFor(const void*);
        static size_t firstAtom();

        void lastChanceToFinalize();

        MarkedAllocator* allocator() const;
        Heap* heap() const;
        VM* vm() const;
        WeakSet& weakSet();

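        // Sweeping reclaims dead cells. With SweepToFreeList, sweep() also
        // threads the reclaimed cells into the returned FreeList so the
        // allocator can reuse them; with SweepOnly the returned FreeList is
        // left empty. (Summary for orientation; MarkedBlock::sweep() in
        // MarkedBlock.cpp is authoritative.)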
        enum SweepMode { SweepOnly, SweepToFreeList };
        FreeList sweep(SweepMode = SweepOnly);

        void shrink();

        void visitWeakSet(HeapRootVisitor&);
        void reapWeakSet();

        // While allocating from a free list, MarkedBlock temporarily has bogus
        // cell liveness data. To restore accurate cell liveness data, call one
        // of these functions:
        void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
        void stopAllocating(const FreeList&);
        FreeList resumeAllocating(); // Call this if you canonicalized a block for some non-collection related purpose.
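
        // Typical allocation lifecycle (sketch): sweep(SweepToFreeList) puts the
        // block in the FreeListed state and hands a FreeList to the allocator;
        // once the allocator exhausts that list it calls didConsumeFreeList(),
        // moving the block to Allocated. stopAllocating() / resumeAllocating()
        // bracket the case where allocation is paused while the free list is
        // still partially unconsumed.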

        // Returns true if the "newly allocated" bitmap was non-null and was
        // cleared; returns false otherwise.
        bool clearNewlyAllocated();
        void clearMarks();
        template <HeapOperation collectionType>
        void clearMarksWithCollectionType();

        size_t markCount();
        bool isEmpty();

        size_t cellSize();
        bool needsDestruction() const;

        size_t size();
        size_t capacity();

        bool isMarked(const void*);
        bool testAndSetMarked(const void*);
        bool isLive(const JSCell*);
        bool isLiveCell(const void*);
        bool isAtom(const void*);
        bool isMarkedOrNewlyAllocated(const JSCell*);
        void setMarked(const void*);
        void clearMarked(const void*);

        bool isNewlyAllocated(const void*);
        void setNewlyAllocated(const void*);
        void clearNewlyAllocated(const void*);

        bool isAllocated() const;
        bool isMarkedOrRetired() const;
        bool needsSweeping() const;
        void didRetireBlock(const FreeList&);
        void willRemoveBlock();

        template <typename Functor> IterationStatus forEachCell(Functor&);
        template <typename Functor> IterationStatus forEachLiveCell(Functor&);
        template <typename Functor> IterationStatus forEachDeadCell(Functor&);

    private:
        static const size_t atomAlignmentMask = atomSize - 1;

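        // Rough meaning of the states (sketch; the sweep and allocation code in
        // MarkedBlock.cpp / MarkedAllocator.cpp is authoritative): New = freshly
        // created, never swept; FreeListed = currently being allocated from via
        // an outstanding FreeList; Allocated = the free list has been consumed;
        // Marked = swept and marked by the collector; Retired = a mostly-live
        // block taken out of the allocator's rotation because re-sweeping it is
        // not worthwhile.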
        enum BlockState { New, FreeListed, Allocated, Marked, Retired };
        template<bool callDestructors> FreeList sweepHelper(SweepMode = SweepOnly);

        typedef char Atom[atomSize];

        MarkedBlock(MarkedAllocator*, size_t capacity, size_t cellSize, bool needsDestruction);
        Atom* atoms();
        size_t atomNumber(const void*);
        void callDestructor(JSCell*);
        template<BlockState, SweepMode, bool callDestructors> FreeList specializedSweep();

        MarkedBlock* m_prev;
        MarkedBlock* m_next;

        size_t m_atomsPerCell;
        size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
        WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic, uint8_t> m_marks;
        std::unique_ptr<WTF::Bitmap<atomsPerBlock>> m_newlyAllocated;

        size_t m_capacity;
        bool m_needsDestruction;
        MarkedAllocator* m_allocator;
        BlockState m_state;
        WeakSet m_weakSet;
    };

    inline MarkedBlock::FreeList::FreeList()
        : head(0)
        , bytes(0)
    {
    }

    inline MarkedBlock::FreeList::FreeList(FreeCell* head, size_t bytes)
        : head(head)
        , bytes(bytes)
    {
    }

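    // firstAtom() is the index of the first atom that can hold a cell: the
    // MarkedBlock header itself occupies the first atoms of the 16 KB region.
    // Illustratively (the exact figure depends on sizeof(MarkedBlock) for the
    // build), a header of 97-112 bytes rounds up to 112 and makes
    // firstAtom() == 7.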
    inline size_t MarkedBlock::firstAtom()
    {
        return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
    }

    inline MarkedBlock::Atom* MarkedBlock::atoms()
    {
        return reinterpret_cast<Atom*>(this);
    }

    inline bool MarkedBlock::isAtomAligned(const void* p)
    {
        return !(reinterpret_cast<Bits>(p) & atomAlignmentMask);
    }

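    // Because blocks are blockSize-aligned, masking off the low log2(blockSize)
    // bits of any interior pointer recovers the block header. For example
    // (illustrative address), a cell at 0x10804c0 lives in the block starting
    // at 0x10804c0 & ~0x3fff = 0x1080000.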
    inline MarkedBlock* MarkedBlock::blockFor(const void* p)
    {
        return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
    }

    inline MarkedAllocator* MarkedBlock::allocator() const
    {
        return m_allocator;
    }

    inline Heap* MarkedBlock::heap() const
    {
        return m_weakSet.heap();
    }

    inline VM* MarkedBlock::vm() const
    {
        return m_weakSet.vm();
    }

    inline WeakSet& MarkedBlock::weakSet()
    {
        return m_weakSet;
    }

    inline void MarkedBlock::shrink()
    {
        m_weakSet.shrink();
    }

    inline void MarkedBlock::visitWeakSet(HeapRootVisitor& heapRootVisitor)
    {
        m_weakSet.visit(heapRootVisitor);
    }

    inline void MarkedBlock::reapWeakSet()
    {
        m_weakSet.reap();
    }

    inline void MarkedBlock::willRemoveBlock()
    {
        ASSERT(m_state != Retired);
    }

    inline void MarkedBlock::didConsumeFreeList()
    {
        HEAP_LOG_BLOCK_STATE_TRANSITION(this);

        ASSERT(m_state == FreeListed);
        m_state = Allocated;
    }

    inline size_t MarkedBlock::markCount()
    {
        return m_marks.count();
    }

    inline bool MarkedBlock::isEmpty()
    {
        return m_marks.isEmpty() && m_weakSet.isEmpty() && (!m_newlyAllocated || m_newlyAllocated->isEmpty());
    }

    inline size_t MarkedBlock::cellSize()
    {
        return m_atomsPerCell * atomSize;
    }

    inline bool MarkedBlock::needsDestruction() const
    {
        return m_needsDestruction;
    }

    inline size_t MarkedBlock::size()
    {
        return markCount() * cellSize();
    }

    inline size_t MarkedBlock::capacity()
    {
        return m_capacity;
    }

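    // atomNumber() converts an interior pointer into an index into this block's
    // mark and newly-allocated bitmaps. For example (illustrative address), for
    // a block at 0x1080000, the pointer 0x10804c0 has atom number
    // (0x10804c0 - 0x1080000) / 16 = 0x4c.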
    inline size_t MarkedBlock::atomNumber(const void* p)
    {
        return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
    }

    inline bool MarkedBlock::isMarked(const void* p)
    {
        return m_marks.get(atomNumber(p));
    }

    inline bool MarkedBlock::testAndSetMarked(const void* p)
    {
        return m_marks.concurrentTestAndSet(atomNumber(p));
    }

    inline void MarkedBlock::setMarked(const void* p)
    {
        m_marks.set(atomNumber(p));
    }

    inline void MarkedBlock::clearMarked(const void* p)
    {
        ASSERT(m_marks.get(atomNumber(p)));
        m_marks.clear(atomNumber(p));
    }

    inline bool MarkedBlock::isNewlyAllocated(const void* p)
    {
        return m_newlyAllocated->get(atomNumber(p));
    }

    inline void MarkedBlock::setNewlyAllocated(const void* p)
    {
        m_newlyAllocated->set(atomNumber(p));
    }

    inline void MarkedBlock::clearNewlyAllocated(const void* p)
    {
        m_newlyAllocated->clear(atomNumber(p));
    }

    inline bool MarkedBlock::clearNewlyAllocated()
    {
        if (m_newlyAllocated) {
            m_newlyAllocated = nullptr;
            return true;
        }
        return false;
    }

    inline bool MarkedBlock::isMarkedOrNewlyAllocated(const JSCell* cell)
    {
        ASSERT(m_state == Retired || m_state == Marked);
        return m_marks.get(atomNumber(cell)) || (m_newlyAllocated && isNewlyAllocated(cell));
    }

    inline bool MarkedBlock::isLive(const JSCell* cell)
    {
        switch (m_state) {
        case Allocated:
            return true;

        case Retired:
        case Marked:
            return isMarkedOrNewlyAllocated(cell);

        case New:
        case FreeListed:
            RELEASE_ASSERT_NOT_REACHED();
            return false;
        }

        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }

    inline bool MarkedBlock::isAtom(const void* p)
    {
        ASSERT(MarkedBlock::isAtomAligned(p));
        size_t atomNumber = this->atomNumber(p);
        size_t firstAtom = this->firstAtom();
        if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
            return false;
        if ((atomNumber - firstAtom) % m_atomsPerCell) // Filters pointers into cell middles.
            return false;
        if (atomNumber >= m_endAtom) // Filters pointers past the end of the allocatable range.
            return false;
        return true;
    }

    inline bool MarkedBlock::isLiveCell(const void* p)
    {
        if (!isAtom(p))
            return false;
        return isLive(static_cast<const JSCell*>(p));
    }

    template <typename Functor> inline IterationStatus MarkedBlock::forEachCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            if (functor(cell) == IterationStatus::Done)
                return IterationStatus::Done;
        }
        return IterationStatus::Continue;
    }

    template <typename Functor> inline IterationStatus MarkedBlock::forEachLiveCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            if (!isLive(cell))
                continue;

            if (functor(cell) == IterationStatus::Done)
                return IterationStatus::Done;
        }
        return IterationStatus::Continue;
    }

    template <typename Functor> inline IterationStatus MarkedBlock::forEachDeadCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            if (isLive(cell))
                continue;

            if (functor(cell) == IterationStatus::Done)
                return IterationStatus::Done;
        }
        return IterationStatus::Continue;
    }
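
    // Usage sketch (illustrative only; assumes "block" is a MarkedBlock* in a
    // state where isLive() is valid, i.e. not New or FreeListed): counting live
    // cells with a functor modeled on CountFunctor above.
    //
    //     struct LiveCellCounter : MarkedBlock::CountFunctor {
    //         IterationStatus operator()(JSCell*) { count(1); return IterationStatus::Continue; }
    //     };
    //
    //     LiveCellCounter counter;
    //     block->forEachLiveCell(counter);
    //     size_t liveCells = counter.returnValue();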

    inline bool MarkedBlock::needsSweeping() const
    {
        return m_state == Marked;
    }

    inline bool MarkedBlock::isAllocated() const
    {
        return m_state == Allocated;
    }

    inline bool MarkedBlock::isMarkedOrRetired() const
    {
        return m_state == Marked || m_state == Retired;
    }

} // namespace JSC

namespace WTF {

    struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
        static unsigned hash(JSC::MarkedBlock* const& key)
        {
            // Aligned VM regions tend to be monotonically increasing integers,
            // which is a great hash function, but we have to remove the low bits,
            // since they're always zero, which is a terrible hash function!
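            // Dividing by blockSize shifts those zero bits away; e.g. a block at
            // 0x1080000 (illustrative address) hashes to 0x1080000 / 0x4000 = 0x420.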
            return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
        }
    };

    template<> struct DefaultHash<JSC::MarkedBlock*> {
        typedef MarkedBlockHash Hash;
    };

} // namespace WTF

#endif // MarkedBlock_h