1/*
2 * Copyright (C) 2003-2009, 2011, 2013-2015 Apple Inc. All rights reserved.
3 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 *
19 */
20
21#include "config.h"
22#include "Heap.h"
23
24#include "CodeBlock.h"
25#include "ConservativeRoots.h"
26#include "CopiedSpace.h"
27#include "CopiedSpaceInlines.h"
28#include "CopyVisitorInlines.h"
29#include "DFGWorklist.h"
30#include "EdenGCActivityCallback.h"
31#include "FullGCActivityCallback.h"
32#include "GCActivityCallback.h"
33#include "GCIncomingRefCountedSetInlines.h"
34#include "HeapHelperPool.h"
35#include "HeapIterationScope.h"
36#include "HeapRootVisitor.h"
37#include "HeapStatistics.h"
38#include "HeapVerifier.h"
39#include "IncrementalSweeper.h"
40#include "Interpreter.h"
41#include "JSCInlines.h"
42#include "JSGlobalObject.h"
43#include "JSLock.h"
44#include "JSVirtualMachineInternal.h"
45#include "SamplingProfiler.h"
46#include "Tracing.h"
47#include "TypeProfilerLog.h"
48#include "UnlinkedCodeBlock.h"
49#include "VM.h"
50#include "WeakSetInlines.h"
51#include <algorithm>
52#include <wtf/CurrentTime.h>
53#include <wtf/ParallelVectorIterator.h>
54#include <wtf/ProcessID.h>
55#include <wtf/RAMSize.h>
56
57using namespace std;
58
59namespace JSC {
60
61namespace {
62
63static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
64static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.
65
66#define ENABLE_GC_LOGGING 0
67
68#if ENABLE(GC_LOGGING)
69#if COMPILER(CLANG)
70#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
71_Pragma("clang diagnostic push") \
72_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
73_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
74static type name arguments; \
75_Pragma("clang diagnostic pop")
76#else
77#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
78static type name arguments;
79#endif // COMPILER(CLANG)
80
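// GCTimer accumulates timing data for one GC phase, bucketed by collection type (Eden, Full,
// and a combined total). The accumulated statistics are logged from the destructor, i.e. when
// the logging global defined by DEFINE_GC_LOGGING_GLOBAL is torn down at process exit.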
81struct GCTimer {
82 GCTimer(const char* name)
83 : name(name)
84 {
85 }
86 ~GCTimer()
87 {
88 logData(allCollectionData, "(All)");
89 logData(edenCollectionData, "(Eden)");
90 logData(fullCollectionData, "(Full)");
91 }
92
93 struct TimeRecord {
94 TimeRecord()
95 : time(0)
96 , min(std::numeric_limits<double>::infinity())
97 , max(0)
98 , count(0)
99 {
100 }
101
102 double time;
103 double min;
104 double max;
105 size_t count;
106 };
107
108 void logData(const TimeRecord& data, const char* extra)
109 {
110 dataLogF("[%d] %s (Parent: %s) %s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf, count %lu)\n",
111 getCurrentProcessID(),
112 name,
113 parent ? parent->name : "nullptr",
114 extra,
115 data.time * 1000,
116 data.time * 1000 / data.count,
117 data.min * 1000,
118 data.max * 1000,
119 data.count);
120 }
121
122 void updateData(TimeRecord& data, double duration)
123 {
124 if (duration < data.min)
125 data.min = duration;
126 if (duration > data.max)
127 data.max = duration;
128 data.count++;
129 data.time += duration;
130 }
131
132 void didFinishPhase(HeapOperation collectionType, double duration)
133 {
134 TimeRecord& data = collectionType == EdenCollection ? edenCollectionData : fullCollectionData;
135 updateData(data, duration);
136 updateData(allCollectionData, duration);
137 }
138
139 static GCTimer* s_currentGlobalTimer;
140
141 TimeRecord allCollectionData;
142 TimeRecord fullCollectionData;
143 TimeRecord edenCollectionData;
144 const char* name;
145 GCTimer* parent { nullptr };
146};
147
148GCTimer* GCTimer::s_currentGlobalTimer = nullptr;
149
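// RAII helper that times one dynamic extent of a GC phase and records the duration in the
// associated GCTimer. It also maintains a stack of active timers through s_currentGlobalTimer
// so that each phase knows its parent phase when the data is logged.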
150struct GCTimerScope {
151 GCTimerScope(GCTimer& timer, HeapOperation collectionType)
152 : timer(timer)
153 , start(WTF::monotonicallyIncreasingTime())
154 , collectionType(collectionType)
155 {
156 timer.parent = GCTimer::s_currentGlobalTimer;
157 GCTimer::s_currentGlobalTimer = &timer;
158 }
159 ~GCTimerScope()
160 {
161 double delta = WTF::monotonicallyIncreasingTime() - start;
162 timer.didFinishPhase(collectionType, delta);
163 GCTimer::s_currentGlobalTimer = timer.parent;
164 }
165 GCTimer& timer;
166 double start;
167 HeapOperation collectionType;
168};
169
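// GCCounter tracks the running total, minimum, and maximum of a named per-collection statistic
// and logs a summary from its destructor.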
170struct GCCounter {
171 GCCounter(const char* name)
172 : name(name)
173 , count(0)
174 , total(0)
175 , min(10000000)
176 , max(0)
177 {
178 }
179
180 void add(size_t amount)
181 {
182 count++;
183 total += amount;
184 if (amount < min)
185 min = amount;
186 if (amount > max)
187 max = amount;
188 }
189 ~GCCounter()
190 {
191 dataLogF("[%d] %s: %zu values (avg. %zu, min. %zu, max. %zu)\n", getCurrentProcessID(), name, total, total / count, min, max);
192 }
193 const char* name;
194 size_t count;
195 size_t total;
196 size_t min;
197 size_t max;
198};
199
200#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(name##Timer, m_operationInProgress)
201#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.add(value); } while (false)
202
203#else
204
205#define GCPHASE(name) do { } while (false)
206#define GCCOUNTER(name, value) do { } while (false)
207#endif
208
209static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
210{
211 if (heapType == LargeHeap)
212 return min(largeHeapSize, ramSize / 4);
213 return smallHeapSize;
214}
215
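// Growth policy: the smaller the heap is relative to RAM, the more we let it grow before the
// next full collection. For example, with ramSize = 1 GB, a 200 MB heap (under 1/4 of RAM) may
// grow to 400 MB, a 300 MB heap (under 1/2 of RAM) to 450 MB, and a 600 MB heap only to 750 MB.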
216static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
217{
218 // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
219 if (heapSize < ramSize / 4)
220 return 2 * heapSize;
221 if (heapSize < ramSize / 2)
222 return 1.5 * heapSize;
223 return 1.25 * heapSize;
224}
225
226static inline bool isValidSharedInstanceThreadState(VM* vm)
227{
228 return vm->currentThreadIsHoldingAPILock();
229}
230
231static inline bool isValidThreadState(VM* vm)
232{
233 if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
234 return false;
235
236 if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
237 return false;
238
239 return true;
240}
241
242struct MarkObject : public MarkedBlock::VoidFunctor {
243 inline void visit(JSCell* cell)
244 {
245 if (cell->isZapped())
246 return;
247 Heap::heap(cell)->setMarked(cell);
248 }
249 IterationStatus operator()(JSCell* cell)
250 {
251 visit(cell);
252 return IterationStatus::Continue;
253 }
254};
255
256struct Count : public MarkedBlock::CountFunctor {
257 void operator()(JSCell*) { count(1); }
258};
259
260struct CountIfGlobalObject : MarkedBlock::CountFunctor {
261 inline void visit(JSCell* cell)
262 {
263 if (!cell->isObject())
264 return;
265 if (!asObject(cell)->isGlobalObject())
266 return;
267 count(1);
268 }
269 IterationStatus operator()(JSCell* cell)
270 {
271 visit(cell);
272 return IterationStatus::Continue;
273 }
274};
275
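// RecordType builds a histogram of live cell class names; it is the functor behind
// protectedObjectTypeCounts() and objectTypeCounts() below.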
276class RecordType {
277public:
278 typedef std::unique_ptr<TypeCountSet> ReturnType;
279
280 RecordType();
281 IterationStatus operator()(JSCell*);
282 ReturnType returnValue();
283
284private:
285 const char* typeName(JSCell*);
286 std::unique_ptr<TypeCountSet> m_typeCountSet;
287};
288
289inline RecordType::RecordType()
290 : m_typeCountSet(std::make_unique<TypeCountSet>())
291{
292}
293
294inline const char* RecordType::typeName(JSCell* cell)
295{
296 const ClassInfo* info = cell->classInfo();
297 if (!info || !info->className)
298 return "[unknown]";
299 return info->className;
300}
301
302inline IterationStatus RecordType::operator()(JSCell* cell)
303{
304 m_typeCountSet->add(typeName(cell));
305 return IterationStatus::Continue;
306}
307
308inline std::unique_ptr<TypeCountSet> RecordType::returnValue()
309{
310 return WTFMove(m_typeCountSet);
311}
312
313} // anonymous namespace
314
315Heap::Heap(VM* vm, HeapType heapType)
316 : m_heapType(heapType)
317 , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
318 , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
319 , m_sizeAfterLastCollect(0)
320 , m_sizeAfterLastFullCollect(0)
321 , m_sizeBeforeLastFullCollect(0)
322 , m_sizeAfterLastEdenCollect(0)
323 , m_sizeBeforeLastEdenCollect(0)
324 , m_bytesAllocatedThisCycle(0)
325 , m_bytesAbandonedSinceLastFullCollect(0)
326 , m_maxEdenSize(m_minBytesPerCycle)
327 , m_maxHeapSize(m_minBytesPerCycle)
328 , m_shouldDoFullCollection(false)
329 , m_totalBytesVisited(0)
330 , m_totalBytesCopied(0)
331 , m_operationInProgress(NoOperation)
332 , m_objectSpace(this)
333 , m_storageSpace(this)
334 , m_extraMemorySize(0)
335 , m_deprecatedExtraMemorySize(0)
336 , m_machineThreads(this)
337 , m_slotVisitor(*this)
338 , m_handleSet(vm)
339 , m_isSafeToCollect(false)
340 , m_writeBarrierBuffer(256)
341 , m_vm(vm)
342 // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously
343 // schedule the timer if we've never done a collection.
344 , m_lastFullGCLength(0.01)
345 , m_lastEdenGCLength(0.01)
346 , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
347 , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
348#if USE(CF)
349 , m_sweeper(std::make_unique<IncrementalSweeper>(this, CFRunLoopGetCurrent()))
350#else
351 , m_sweeper(std::make_unique<IncrementalSweeper>(this))
352#endif
353 , m_deferralDepth(0)
354#if USE(CF)
355 , m_delayedReleaseRecursionCount(0)
356#endif
357 , m_helperClient(&heapHelperPool())
358{
359 m_storageSpace.init();
360 if (Options::verifyHeap())
361 m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
362}
363
364Heap::~Heap()
365{
366 for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
367 WeakBlock::destroy(*this, block);
368}
369
370bool Heap::isPagedOut(double deadline)
371{
372 return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
373}
374
375// The VM is being destroyed and the collector will never run again.
376// Run all pending finalizers now because we won't get another chance.
377void Heap::lastChanceToFinalize()
378{
379 RELEASE_ASSERT(!m_vm->entryScope);
380 RELEASE_ASSERT(m_operationInProgress == NoOperation);
381
382 m_codeBlocks.lastChanceToFinalize();
383 m_objectSpace.lastChanceToFinalize();
384 releaseDelayedReleasedObjects();
385
386 sweepAllLogicallyEmptyWeakBlocks();
387}
388
389void Heap::releaseDelayedReleasedObjects()
390{
391#if USE(CF)
    // We need to guard against the case that releasing an object can create more objects due to the
    // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
    // back here and could try to recursively release objects. We guard against that with a recursion
    // count. Only the initial call will release objects; recursive calls simply return and let the
    // initial call to the function take care of any objects created during release time.
    // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
    // and use a temp Vector for the actual releasing.
399 if (!m_delayedReleaseRecursionCount++) {
400 while (!m_delayedReleaseObjects.isEmpty()) {
401 ASSERT(m_vm->currentThreadIsHoldingAPILock());
402
403 Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
404
405 {
406 // We need to drop locks before calling out to arbitrary code.
407 JSLock::DropAllLocks dropAllLocks(m_vm);
408
409 objectsToRelease.clear();
410 }
411 }
412 }
413 m_delayedReleaseRecursionCount--;
414#endif
415}
416
417void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
418{
419 didAllocate(size);
420 collectIfNecessaryOrDefer();
421}
422
423void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
424{
425 m_deprecatedExtraMemorySize += size;
426 reportExtraMemoryAllocatedSlowCase(size);
427}
428
429void Heap::reportAbandonedObjectGraph()
430{
431 // Our clients don't know exactly how much memory they
432 // are abandoning so we just guess for them.
433 double abandonedBytes = 0.1 * m_sizeAfterLastCollect;
434
435 // We want to accelerate the next collection. Because memory has just
436 // been abandoned, the next collection has the potential to
437 // be more profitable. Since allocation is the trigger for collection,
438 // we hasten the next collection by pretending that we've allocated more memory.
439 didAbandon(abandonedBytes);
440}
441
442void Heap::didAbandon(size_t bytes)
443{
444 if (m_fullActivityCallback) {
445 m_fullActivityCallback->didAllocate(
446 m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
447 }
448 m_bytesAbandonedSinceLastFullCollect += bytes;
449}
450
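// protect() records the value's cell in m_protectedValues so that visitProtectedObjects() treats
// it as a GC root; unprotect() removes it. Non-cell values are ignored because they are not
// heap-allocated.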
451void Heap::protect(JSValue k)
452{
453 ASSERT(k);
454 ASSERT(m_vm->currentThreadIsHoldingAPILock());
455
456 if (!k.isCell())
457 return;
458
459 m_protectedValues.add(k.asCell());
460}
461
462bool Heap::unprotect(JSValue k)
463{
464 ASSERT(k);
465 ASSERT(m_vm->currentThreadIsHoldingAPILock());
466
467 if (!k.isCell())
468 return false;
469
470 return m_protectedValues.remove(k.asCell());
471}
472
473void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
474{
475 if (m_arrayBuffers.addReference(cell, buffer)) {
476 collectIfNecessaryOrDefer();
477 didAllocate(buffer->gcSizeEstimateInBytes());
478 }
479}
480
481void Heap::harvestWeakReferences()
482{
483 m_slotVisitor.harvestWeakReferences();
484}
485
486void Heap::finalizeUnconditionalFinalizers()
487{
488 GCPHASE(FinalizeUnconditionalFinalizers);
489 m_slotVisitor.finalizeUnconditionalFinalizers();
490}
491
492inline JSStack& Heap::stack()
493{
494 return m_vm->interpreter->stack();
495}
496
497void Heap::willStartIterating()
498{
499 m_objectSpace.willStartIterating();
500}
501
502void Heap::didFinishIterating()
503{
504 m_objectSpace.didFinishIterating();
505}
506
507void Heap::completeAllDFGPlans()
508{
509#if ENABLE(DFG_JIT)
510 DFG::completeAllPlansForVM(*m_vm);
511#endif
512}
513
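// markRoots performs the marking phase: it gathers conservative roots from the machine stacks,
// the JS stack, and scratch buffers, then drains the mark stack in parallel with helper threads.
// Strong roots are visited first; weak handles are visited last because their liveness depends
// on the liveness of the rest of the object graph.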
514void Heap::markRoots(double gcStartTime, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
515{
516 SamplingRegion samplingRegion("Garbage Collection: Marking");
517
518 GCPHASE(MarkRoots);
519 ASSERT(isValidThreadState(m_vm));
520
521 // We gather conservative roots before clearing mark bits because conservative
522 // gathering uses the mark bits to determine whether a reference is valid.
523 ConservativeRoots conservativeRoots(&m_objectSpace.blocks(), &m_storageSpace);
524 gatherStackRoots(conservativeRoots, stackOrigin, stackTop, calleeSavedRegisters);
525 gatherJSStackRoots(conservativeRoots);
526 gatherScratchBufferRoots(conservativeRoots);
527
528#if ENABLE(DFG_JIT)
529 DFG::rememberCodeBlocks(*m_vm);
530#endif
531
532#if ENABLE(SAMPLING_PROFILER)
533 if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
534 // Note that we need to own the lock from now until we're done
535 // marking the SamplingProfiler's data because once we verify the
536 // SamplingProfiler's stack traces, we don't want it to accumulate
537 // more stack traces before we get the chance to mark it.
538 // This lock is released inside visitSamplingProfiler().
539 samplingProfiler->getLock().lock();
540 samplingProfiler->processUnverifiedStackTraces();
541 }
542#endif // ENABLE(SAMPLING_PROFILER)
543
544 if (m_operationInProgress == FullCollection) {
545 m_opaqueRoots.clear();
546 m_slotVisitor.clearMarkStack();
547 }
548
549 clearLivenessData();
550
551 m_parallelMarkersShouldExit = false;
552
553 m_helperClient.setFunction(
554 [this] () {
555 SlotVisitor* slotVisitor;
556 {
557 LockHolder locker(m_parallelSlotVisitorLock);
558 if (m_availableParallelSlotVisitors.isEmpty()) {
559 std::unique_ptr<SlotVisitor> newVisitor =
560 std::make_unique<SlotVisitor>(*this);
561 slotVisitor = newVisitor.get();
562 m_parallelSlotVisitors.append(WTFMove(newVisitor));
563 } else
564 slotVisitor = m_availableParallelSlotVisitors.takeLast();
565 }
566
567 WTF::registerGCThread();
568
569 {
570 ParallelModeEnabler parallelModeEnabler(*slotVisitor);
571 slotVisitor->didStartMarking();
572 slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
573 }
574
575 {
576 LockHolder locker(m_parallelSlotVisitorLock);
577 m_availableParallelSlotVisitors.append(slotVisitor);
578 }
579 });
580
581 m_slotVisitor.didStartMarking();
582
583 HeapRootVisitor heapRootVisitor(m_slotVisitor);
584
585 {
586 ParallelModeEnabler enabler(m_slotVisitor);
587
588 m_slotVisitor.donateAndDrain();
589 visitExternalRememberedSet();
590 visitSmallStrings();
591 visitConservativeRoots(conservativeRoots);
592 visitProtectedObjects(heapRootVisitor);
593 visitArgumentBuffers(heapRootVisitor);
594 visitException(heapRootVisitor);
595 visitStrongHandles(heapRootVisitor);
596 visitHandleStack(heapRootVisitor);
597 visitSamplingProfiler();
598 traceCodeBlocksAndJITStubRoutines();
599 converge();
600 }
601
602 // Weak references must be marked last because their liveness depends on
603 // the liveness of the rest of the object graph.
604 visitWeakHandles(heapRootVisitor);
605
606 {
607 std::lock_guard<Lock> lock(m_markingMutex);
608 m_parallelMarkersShouldExit = true;
609 m_markingConditionVariable.notifyAll();
610 }
611 m_helperClient.finish();
612 updateObjectCounts(gcStartTime);
613 resetVisitors();
614}
615
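// copyBackingStores evacuates the surviving contents of copied space. An eden collection only
// walks the new generation's from-space blocks; a full collection walks every block in the
// block set. The per-block work lists are processed in parallel by the helper threads.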
616void Heap::copyBackingStores()
617{
618 GCPHASE(CopyBackingStores);
619 if (m_operationInProgress == EdenCollection)
620 m_storageSpace.startedCopying<EdenCollection>();
621 else {
622 ASSERT(m_operationInProgress == FullCollection);
623 m_storageSpace.startedCopying<FullCollection>();
624 }
625
626 if (m_storageSpace.shouldDoCopyPhase()) {
627 if (m_operationInProgress == EdenCollection) {
628 // Reset the vector to be empty, but don't throw away the backing store.
629 m_blocksToCopy.shrink(0);
630 for (CopiedBlock* block = m_storageSpace.m_newGen.fromSpace->head(); block; block = block->next())
631 m_blocksToCopy.append(block);
632 } else {
633 ASSERT(m_operationInProgress == FullCollection);
634 WTF::copyToVector(m_storageSpace.m_blockSet, m_blocksToCopy);
635 }
636
637 ParallelVectorIterator<Vector<CopiedBlock*>> iterator(
638 m_blocksToCopy, s_blockFragmentLength);
639
640 // Note that it's safe to use the [&] capture list here, even though we're creating a task
641 // that other threads run. That's because after runFunctionInParallel() returns, the task
642 // we have created is not going to be running anymore. Hence, everything on the stack here
643 // outlives the task.
644 m_helperClient.runFunctionInParallel(
645 [&] () {
646 CopyVisitor copyVisitor(*this);
647
648 iterator.iterate(
649 [&] (CopiedBlock* block) {
650 if (!block->hasWorkList())
651 return;
652
653 CopyWorkList& workList = block->workList();
654 for (CopyWorklistItem item : workList) {
655 if (item.token() == ButterflyCopyToken) {
656 JSObject::copyBackingStore(
657 item.cell(), copyVisitor, ButterflyCopyToken);
658 continue;
659 }
660
661 item.cell()->methodTable()->copyBackingStore(
662 item.cell(), copyVisitor, item.token());
663 }
664
665 ASSERT(!block->liveBytes());
666 m_storageSpace.recycleEvacuatedBlock(block, m_operationInProgress);
667 });
668 });
669 }
670
671 m_storageSpace.doneCopying();
672}
673
674void Heap::gatherStackRoots(ConservativeRoots& roots, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
675{
676 GCPHASE(GatherStackRoots);
677 m_jitStubRoutines.clearMarks();
678 m_machineThreads.gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks, stackOrigin, stackTop, calleeSavedRegisters);
679}
680
681void Heap::gatherJSStackRoots(ConservativeRoots& roots)
682{
683#if !ENABLE(JIT)
684 GCPHASE(GatherJSStackRoots);
685 stack().gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks);
686#else
687 UNUSED_PARAM(roots);
688#endif
689}
690
691void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
692{
693#if ENABLE(DFG_JIT)
694 GCPHASE(GatherScratchBufferRoots);
695 m_vm->gatherConservativeRoots(roots);
696#else
697 UNUSED_PARAM(roots);
698#endif
699}
700
701void Heap::clearLivenessData()
702{
703 GCPHASE(ClearLivenessData);
704 if (m_operationInProgress == FullCollection)
705 m_codeBlocks.clearMarksForFullCollection();
706
707 m_objectSpace.clearNewlyAllocated();
708 m_objectSpace.clearMarks();
709}
710
711void Heap::visitExternalRememberedSet()
712{
713#if JSC_OBJC_API_ENABLED
714 scanExternalRememberedSet(*m_vm, m_slotVisitor);
715#endif
716}
717
718void Heap::visitSmallStrings()
719{
720 GCPHASE(VisitSmallStrings);
721 if (!m_vm->smallStrings.needsToBeVisited(m_operationInProgress))
722 return;
723
724 m_vm->smallStrings.visitStrongReferences(m_slotVisitor);
725 if (Options::logGC() == GCLogging::Verbose)
726 dataLog("Small strings:\n", m_slotVisitor);
727 m_slotVisitor.donateAndDrain();
728}
729
730void Heap::visitConservativeRoots(ConservativeRoots& roots)
731{
732 GCPHASE(VisitConservativeRoots);
733 m_slotVisitor.append(roots);
734
735 if (Options::logGC() == GCLogging::Verbose)
736 dataLog("Conservative Roots:\n", m_slotVisitor);
737
738 m_slotVisitor.donateAndDrain();
739}
740
741void Heap::visitCompilerWorklistWeakReferences()
742{
743#if ENABLE(DFG_JIT)
744 for (auto worklist : m_suspendedCompilerWorklists)
745 worklist->visitWeakReferences(m_slotVisitor);
746
747 if (Options::logGC() == GCLogging::Verbose)
748 dataLog("DFG Worklists:\n", m_slotVisitor);
749#endif
750}
751
752void Heap::removeDeadCompilerWorklistEntries()
753{
754#if ENABLE(DFG_JIT)
755 GCPHASE(FinalizeDFGWorklists);
756 for (auto worklist : m_suspendedCompilerWorklists)
757 worklist->removeDeadPlans(*m_vm);
758#endif
759}
760
761void Heap::visitProtectedObjects(HeapRootVisitor& heapRootVisitor)
762{
763 GCPHASE(VisitProtectedObjects);
764
765 for (auto& pair : m_protectedValues)
766 heapRootVisitor.visit(&pair.key);
767
768 if (Options::logGC() == GCLogging::Verbose)
769 dataLog("Protected Objects:\n", m_slotVisitor);
770
771 m_slotVisitor.donateAndDrain();
772}
773
774void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
775{
776 GCPHASE(MarkingArgumentBuffers);
777 if (!m_markListSet || !m_markListSet->size())
778 return;
779
780 MarkedArgumentBuffer::markLists(visitor, *m_markListSet);
781
782 if (Options::logGC() == GCLogging::Verbose)
783 dataLog("Argument Buffers:\n", m_slotVisitor);
784
785 m_slotVisitor.donateAndDrain();
786}
787
788void Heap::visitException(HeapRootVisitor& visitor)
789{
790 GCPHASE(MarkingException);
791 if (!m_vm->exception() && !m_vm->lastException())
792 return;
793
794 visitor.visit(m_vm->addressOfException());
795 visitor.visit(m_vm->addressOfLastException());
796
797 if (Options::logGC() == GCLogging::Verbose)
798 dataLog("Exceptions:\n", m_slotVisitor);
799
800 m_slotVisitor.donateAndDrain();
801}
802
803void Heap::visitStrongHandles(HeapRootVisitor& visitor)
804{
805 GCPHASE(VisitStrongHandles);
806 m_handleSet.visitStrongHandles(visitor);
807
808 if (Options::logGC() == GCLogging::Verbose)
809 dataLog("Strong Handles:\n", m_slotVisitor);
810
811 m_slotVisitor.donateAndDrain();
812}
813
814void Heap::visitHandleStack(HeapRootVisitor& visitor)
815{
816 GCPHASE(VisitHandleStack);
817 m_handleStack.visit(visitor);
818
819 if (Options::logGC() == GCLogging::Verbose)
820 dataLog("Handle Stack:\n", m_slotVisitor);
821
822 m_slotVisitor.donateAndDrain();
823}
824
825void Heap::visitSamplingProfiler()
826{
827#if ENABLE(SAMPLING_PROFILER)
828 if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
829 ASSERT(samplingProfiler->getLock().isLocked());
830 GCPHASE(VisitSamplingProfiler);
831 samplingProfiler->visit(m_slotVisitor);
832 if (Options::logGC() == GCLogging::Verbose)
833 dataLog("Sampling Profiler data:\n", m_slotVisitor);
834
835 m_slotVisitor.donateAndDrain();
836 samplingProfiler->getLock().unlock();
837 }
838#endif // ENABLE(SAMPLING_PROFILER)
839}
840
841void Heap::traceCodeBlocksAndJITStubRoutines()
842{
843 GCPHASE(TraceCodeBlocksAndJITStubRoutines);
844 m_jitStubRoutines.traceMarkedStubRoutines(m_slotVisitor);
845
846 if (Options::logGC() == GCLogging::Verbose)
847 dataLog("Code Blocks and JIT Stub Routines:\n", m_slotVisitor);
848
849 m_slotVisitor.donateAndDrain();
850}
851
852void Heap::converge()
853{
854 GCPHASE(Convergence);
855 m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
856}
857
858void Heap::visitWeakHandles(HeapRootVisitor& visitor)
859{
860 GCPHASE(VisitingLiveWeakHandles);
861 while (true) {
862 m_objectSpace.visitWeakSets(visitor);
863 harvestWeakReferences();
864 visitCompilerWorklistWeakReferences();
865 if (m_slotVisitor.isEmpty())
866 break;
867
868 if (Options::logGC() == GCLogging::Verbose)
869 dataLog("Live Weak Handles:\n", m_slotVisitor);
870
871 {
872 ParallelModeEnabler enabler(m_slotVisitor);
873 m_slotVisitor.donateAndDrain();
874 m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
875 }
876 }
877}
878
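// updateObjectCounts folds the per-cycle visit and copy byte counts (from the main visitor and
// all parallel visitors) into the running totals that feed the heap-growth policy in
// updateAllocationLimits(). The totals are reset on a full collection.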
879void Heap::updateObjectCounts(double gcStartTime)
880{
881 GCCOUNTER(VisitedValueCount, m_slotVisitor.visitCount() + threadVisitCount());
882
883 if (Options::logGC() == GCLogging::Verbose) {
884 size_t visitCount = m_slotVisitor.visitCount();
885 visitCount += threadVisitCount();
886 dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
887 }
888
889 size_t bytesRemovedFromOldSpaceDueToReallocation =
890 m_storageSpace.takeBytesRemovedFromOldSpaceDueToReallocation();
891
892 if (m_operationInProgress == FullCollection) {
893 m_totalBytesVisited = 0;
894 m_totalBytesCopied = 0;
895 } else
896 m_totalBytesCopied -= bytesRemovedFromOldSpaceDueToReallocation;
897
898 m_totalBytesVisitedThisCycle = m_slotVisitor.bytesVisited() + threadBytesVisited();
899 m_totalBytesCopiedThisCycle = m_slotVisitor.bytesCopied() + threadBytesCopied();
900
901 m_totalBytesVisited += m_totalBytesVisitedThisCycle;
902 m_totalBytesCopied += m_totalBytesCopiedThisCycle;
903}
904
905void Heap::resetVisitors()
906{
907 m_slotVisitor.reset();
908
909 for (auto& parallelVisitor : m_parallelSlotVisitors)
910 parallelVisitor->reset();
911
912 ASSERT(m_sharedMarkStack.isEmpty());
913 m_weakReferenceHarvesters.removeAll();
914}
915
916size_t Heap::objectCount()
917{
918 return m_objectSpace.objectCount();
919}
920
921size_t Heap::extraMemorySize()
922{
923 return m_extraMemorySize + m_deprecatedExtraMemorySize + m_arrayBuffers.size();
924}
925
926size_t Heap::size()
927{
928 return m_objectSpace.size() + m_storageSpace.size() + extraMemorySize();
929}
930
931size_t Heap::capacity()
932{
933 return m_objectSpace.capacity() + m_storageSpace.capacity() + extraMemorySize();
934}
935
936size_t Heap::protectedGlobalObjectCount()
937{
938 return forEachProtectedCell<CountIfGlobalObject>();
939}
940
941size_t Heap::globalObjectCount()
942{
943 HeapIterationScope iterationScope(*this);
944 return m_objectSpace.forEachLiveCell<CountIfGlobalObject>(iterationScope);
945}
946
947size_t Heap::protectedObjectCount()
948{
949 return forEachProtectedCell<Count>();
950}
951
952std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
953{
954 return forEachProtectedCell<RecordType>();
955}
956
957std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
958{
959 HeapIterationScope iterationScope(*this);
960 return m_objectSpace.forEachLiveCell<RecordType>(iterationScope);
961}
962
963void Heap::deleteAllCodeBlocks()
964{
965 // If JavaScript is running, it's not safe to delete all JavaScript code, since
966 // we'll end up returning to deleted code.
967 RELEASE_ASSERT(!m_vm->entryScope);
968 ASSERT(m_operationInProgress == NoOperation);
969
970 completeAllDFGPlans();
971
972 for (ExecutableBase* executable : m_executables)
973 executable->clearCode();
974}
975
976void Heap::deleteAllUnlinkedCodeBlocks()
977{
978 for (ExecutableBase* current : m_executables) {
979 if (!current->isFunctionExecutable())
980 continue;
981 static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
982 }
983}
984
985void Heap::clearUnmarkedExecutables()
986{
987 GCPHASE(ClearUnmarkedExecutables);
988 for (unsigned i = m_executables.size(); i--;) {
989 ExecutableBase* current = m_executables[i];
990 if (isMarked(current))
991 continue;
992
993 // Eagerly dereference the Executable's JITCode in order to run watchpoint
994 // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
995 current->clearCode();
996 std::swap(m_executables[i], m_executables.last());
997 m_executables.removeLast();
998 }
999
1000 m_executables.shrinkToFit();
1001}
1002
1003void Heap::deleteUnmarkedCompiledCode()
1004{
1005 GCPHASE(DeleteCodeBlocks);
1006 clearUnmarkedExecutables();
1007 m_codeBlocks.deleteUnmarkedAndUnreferenced(m_operationInProgress);
1008 m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
1009}
1010
1011void Heap::addToRememberedSet(const JSCell* cell)
1012{
1013 ASSERT(cell);
1014 ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
1015 ASSERT(cell->cellState() == CellState::OldBlack);
1016 // Indicate that this object is grey and that it's one of the following:
1017 // - A re-greyed object during a concurrent collection.
1018 // - An old remembered object.
1019 // "OldGrey" doesn't tell us which of these things is true, but we usually treat the two cases the
1020 // same.
1021 cell->setCellState(CellState::OldGrey);
1022 m_slotVisitor.appendToMarkStack(const_cast<JSCell*>(cell));
1023}
1024
1025void* Heap::copyBarrier(const JSCell*, void*& pointer)
1026{
1027 // Do nothing for now, except making sure that the low bits are masked off. This helps to
1028 // simulate enough of this barrier that at least we can test the low bits assumptions.
1029 pointer = bitwise_cast<void*>(
1030 bitwise_cast<uintptr_t>(pointer) & ~static_cast<uintptr_t>(CopyBarrierBase::spaceBits));
1031
1032 return pointer;
1033}
1034
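// collectAndSweep runs a collection of the requested type and then eagerly sweeps and shrinks
// marked space (with GC deferred) instead of leaving the sweep to the incremental sweeper.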
1035void Heap::collectAndSweep(HeapOperation collectionType)
1036{
1037 if (!m_isSafeToCollect)
1038 return;
1039
1040 collect(collectionType);
1041
1042 SamplingRegion samplingRegion("Garbage Collection: Sweeping");
1043
1044 DeferGCForAWhile deferGC(*this);
1045 m_objectSpace.sweep();
1046 m_objectSpace.shrink();
1047
1048 sweepAllLogicallyEmptyWeakBlocks();
1049}
1050
1051NEVER_INLINE void Heap::collect(HeapOperation collectionType)
1052{
1053 void* stackTop;
1054 ALLOCATE_AND_GET_REGISTER_STATE(registers);
1055
1056 collectImpl(collectionType, wtfThreadData().stack().origin(), &stackTop, registers);
1057
1058 sanitizeStackForVM(m_vm);
1059}
1060
1061NEVER_INLINE void Heap::collectImpl(HeapOperation collectionType, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
1062{
1063#if ENABLE(ALLOCATION_LOGGING)
1064 dataLogF("JSC GC starting collection.\n");
1065#endif
1066
1067 double before = 0;
1068 if (Options::logGC()) {
1069 dataLog("[GC: ", capacity() / 1024, " kb ");
1070 before = currentTimeMS();
1071 }
1072
1073 SamplingRegion samplingRegion("Garbage Collection");
1074
1075 if (vm()->typeProfiler()) {
1076 DeferGCForAWhile awhile(*this);
1077 vm()->typeProfilerLog()->processLogEntries(ASCIILiteral("GC"));
1078 }
1079
1080 RELEASE_ASSERT(!m_deferralDepth);
1081 ASSERT(vm()->currentThreadIsHoldingAPILock());
1082 RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
1083 ASSERT(m_isSafeToCollect);
1084 JAVASCRIPTCORE_GC_BEGIN();
1085 RELEASE_ASSERT(m_operationInProgress == NoOperation);
1086
1087 suspendCompilerThreads();
1088 willStartCollection(collectionType);
1089 GCPHASE(Collect);
1090
1091 double gcStartTime = WTF::monotonicallyIncreasingTime();
1092 if (m_verifier) {
1093 // Verify that live objects from the last GC cycle haven't been corrupted by
1094 // mutators before we begin this new GC cycle.
1095 m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1096
1097 m_verifier->initializeGCCycle();
1098 m_verifier->gatherLiveObjects(HeapVerifier::Phase::BeforeMarking);
1099 }
1100
1101 flushOldStructureIDTables();
1102 stopAllocation();
1103 flushWriteBarrierBuffer();
1104
1105 markRoots(gcStartTime, stackOrigin, stackTop, calleeSavedRegisters);
1106
1107 if (m_verifier) {
1108 m_verifier->gatherLiveObjects(HeapVerifier::Phase::AfterMarking);
1109 m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1110 }
1111 JAVASCRIPTCORE_GC_MARKED();
1112
1113 if (vm()->typeProfiler())
1114 vm()->typeProfiler()->invalidateTypeSetCache();
1115
1116 reapWeakHandles();
1117 pruneStaleEntriesFromWeakGCMaps();
1118 sweepArrayBuffers();
1119 snapshotMarkedSpace();
1120
1121 copyBackingStores();
1122
1123 finalizeUnconditionalFinalizers();
1124 removeDeadCompilerWorklistEntries();
1125 deleteUnmarkedCompiledCode();
1126 deleteSourceProviderCaches();
1127 notifyIncrementalSweeper();
1128 writeBarrierCurrentlyExecutingCodeBlocks();
1129
1130 resetAllocators();
1131 updateAllocationLimits();
1132 didFinishCollection(gcStartTime);
1133 resumeCompilerThreads();
1134
1135 if (m_verifier) {
1136 m_verifier->trimDeadObjects();
1137 m_verifier->verify(HeapVerifier::Phase::AfterGC);
1138 }
1139
1140 if (Options::logGC()) {
1141 double after = currentTimeMS();
1142 dataLog(after - before, " ms]\n");
1143 }
1144}
1145
1146void Heap::suspendCompilerThreads()
1147{
1148#if ENABLE(DFG_JIT)
1149 GCPHASE(SuspendCompilerThreads);
1150 ASSERT(m_suspendedCompilerWorklists.isEmpty());
1151 for (unsigned i = DFG::numberOfWorklists(); i--;) {
1152 if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
1153 m_suspendedCompilerWorklists.append(worklist);
1154 worklist->suspendAllThreads();
1155 }
1156 }
1157#endif
1158}
1159
1160void Heap::willStartCollection(HeapOperation collectionType)
1161{
1162 GCPHASE(StartingCollection);
1163
1164 if (Options::logGC())
1165 dataLog("=> ");
1166
1167 if (shouldDoFullCollection(collectionType)) {
1168 m_operationInProgress = FullCollection;
1169 m_shouldDoFullCollection = false;
1170 if (Options::logGC())
1171 dataLog("FullCollection, ");
1172 } else {
1173 m_operationInProgress = EdenCollection;
1174 if (Options::logGC())
1175 dataLog("EdenCollection, ");
1176 }
1177 if (m_operationInProgress == FullCollection) {
1178 m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
1179 m_extraMemorySize = 0;
1180 m_deprecatedExtraMemorySize = 0;
1181
1182 if (m_fullActivityCallback)
1183 m_fullActivityCallback->willCollect();
1184 } else {
1185 ASSERT(m_operationInProgress == EdenCollection);
1186 m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
1187 }
1188
1189 if (m_edenActivityCallback)
1190 m_edenActivityCallback->willCollect();
1191
1192 for (auto* observer : m_observers)
1193 observer->willGarbageCollect();
1194}
1195
1196void Heap::flushOldStructureIDTables()
1197{
1198 GCPHASE(FlushOldStructureIDTables);
1199 m_structureIDTable.flushOldTables();
1200}
1201
1202void Heap::flushWriteBarrierBuffer()
1203{
1204 GCPHASE(FlushWriteBarrierBuffer);
1205 if (m_operationInProgress == EdenCollection) {
1206 m_writeBarrierBuffer.flush(*this);
1207 return;
1208 }
1209 m_writeBarrierBuffer.reset();
1210}
1211
1212void Heap::stopAllocation()
1213{
1214 GCPHASE(StopAllocation);
1215 m_objectSpace.stopAllocating();
1216 if (m_operationInProgress == FullCollection)
1217 m_storageSpace.didStartFullCollection();
1218}
1219
1220void Heap::reapWeakHandles()
1221{
1222 GCPHASE(ReapingWeakHandles);
1223 m_objectSpace.reapWeakSets();
1224}
1225
1226void Heap::pruneStaleEntriesFromWeakGCMaps()
1227{
1228 GCPHASE(PruningStaleEntriesFromWeakGCMaps);
1229 if (m_operationInProgress != FullCollection)
1230 return;
1231 for (auto& pruneCallback : m_weakGCMaps.values())
1232 pruneCallback();
1233}
1234
1235void Heap::sweepArrayBuffers()
1236{
1237 GCPHASE(SweepingArrayBuffers);
1238 m_arrayBuffers.sweep();
1239}
1240
1241struct MarkedBlockSnapshotFunctor : public MarkedBlock::VoidFunctor {
1242 MarkedBlockSnapshotFunctor(Vector<MarkedBlock*>& blocks)
1243 : m_index(0)
1244 , m_blocks(blocks)
1245 {
1246 }
1247
1248 void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; }
1249
1250 size_t m_index;
1251 Vector<MarkedBlock*>& m_blocks;
1252};
1253
1254void Heap::snapshotMarkedSpace()
1255{
1256 GCPHASE(SnapshotMarkedSpace);
1257
1258 if (m_operationInProgress == EdenCollection) {
1259 m_blockSnapshot.appendVector(m_objectSpace.blocksWithNewObjects());
1260 // Sort and deduplicate the block snapshot since we might be appending to an unfinished work list.
1261 std::sort(m_blockSnapshot.begin(), m_blockSnapshot.end());
1262 m_blockSnapshot.shrink(std::unique(m_blockSnapshot.begin(), m_blockSnapshot.end()) - m_blockSnapshot.begin());
1263 } else {
1264 m_blockSnapshot.resizeToFit(m_objectSpace.blocks().set().size());
1265 MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
1266 m_objectSpace.forEachBlock(functor);
1267 }
1268}
1269
1270void Heap::deleteSourceProviderCaches()
1271{
1272 GCPHASE(DeleteSourceProviderCaches);
1273 m_vm->clearSourceProviderCaches();
1274}
1275
1276void Heap::notifyIncrementalSweeper()
1277{
1278 GCPHASE(NotifyIncrementalSweeper);
1279
1280 if (m_operationInProgress == FullCollection) {
1281 if (!m_logicallyEmptyWeakBlocks.isEmpty())
1282 m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
1283 }
1284
1285 m_sweeper->startSweeping();
1286}
1287
1288void Heap::writeBarrierCurrentlyExecutingCodeBlocks()
1289{
1290 GCPHASE(WriteBarrierCurrentlyExecutingCodeBlocks);
1291 m_codeBlocks.writeBarrierCurrentlyExecutingCodeBlocks(this);
1292}
1293
1294void Heap::resetAllocators()
1295{
1296 GCPHASE(ResetAllocators);
1297 m_objectSpace.resetAllocators();
1298}
1299
1300void Heap::updateAllocationLimits()
1301{
1302 GCPHASE(UpdateAllocationLimits);
1303
    // Calculate our current heap size threshold for the purpose of figuring out when we should
    // run another collection. This isn't the same as either size() or capacity(), though it should
    // be somewhere between the two. The key is to match the size calculations involved in calls to
    // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
    // fragmentation, we may have size() much smaller than capacity(). Our collector sometimes
    // temporarily allows very high fragmentation because it doesn't defragment old blocks in copied
    // space.
1311 size_t currentHeapSize = 0;
1312
1313 // For marked space, we use the total number of bytes visited. This matches the logic for
1314 // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
1315 // objects allocated rather than blocks used. This will underestimate capacity(), and in case
1316 // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
1317 // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
1318 currentHeapSize += m_totalBytesVisited;
1319
    // For copied space, we use the capacity of storage space. This is because copied space may get
    // badly fragmented between full collections. This arises when each eden collection evacuates
    // much less than one CopiedBlock's worth of stuff. It can also happen when CopiedBlocks get
    // pinned due to very short-lived objects. In such a case, we want to get to a full collection
    // sooner rather than later. If we used m_totalBytesCopied, then for each CopiedBlock that an
    // eden allocation promoted, we would only deduct the one object's size from eden size. This
    // would mean that we could "leak" many CopiedBlocks before we did a full collection and
    // defragmented all of them. It would be great to use m_totalBytesCopied, but we'd need to
    // augment it with something that accounts for those fragmented blocks.
    // FIXME: Make it possible to compute heap size using m_totalBytesCopied rather than
    // m_storageSpace.capacity()
    // https://bugs.webkit.org/show_bug.cgi?id=150268
1332 ASSERT(m_totalBytesCopied <= m_storageSpace.size());
1333 currentHeapSize += m_storageSpace.capacity();
1334
1335 // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
1336 // extra memory reporting.
1337 currentHeapSize += extraMemorySize();
1338
1339 if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
1340 HeapStatistics::exitWithFailure();
1341
1342 if (m_operationInProgress == FullCollection) {
1343 // To avoid pathological GC churn in very small and very large heaps, we set
1344 // the new allocation limit based on the current size of the heap, with a
1345 // fixed minimum.
1346 m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
1347 m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1348 m_sizeAfterLastFullCollect = currentHeapSize;
1349 m_bytesAbandonedSinceLastFullCollect = 0;
1350 } else {
1351 static const bool verbose = false;
1352
1353 ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
1354 m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1355 m_sizeAfterLastEdenCollect = currentHeapSize;
1356 if (verbose) {
1357 dataLog("Max heap size: ", m_maxHeapSize, "\n");
1358 dataLog("Current heap size: ", currentHeapSize, "\n");
1359 dataLog("Size after last eden collection: ", m_sizeAfterLastEdenCollect, "\n");
1360 }
1361 double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
1362 if (verbose)
1363 dataLog("Eden to old generation ratio: ", edenToOldGenerationRatio, "\n");
1364 double minEdenToOldGenerationRatio = 1.0 / 3.0;
1365 if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
1366 m_shouldDoFullCollection = true;
1367 // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
1368 m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
1369 m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1370 if (m_fullActivityCallback) {
1371 ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
1372 m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
1373 }
1374 }
1375
1376 m_sizeAfterLastCollect = currentHeapSize;
1377 m_bytesAllocatedThisCycle = 0;
1378
1379 if (Options::logGC())
1380 dataLog(currentHeapSize / 1024, " kb, ");
1381}
1382
1383void Heap::didFinishCollection(double gcStartTime)
1384{
1385 GCPHASE(FinishingCollection);
1386 double gcEndTime = WTF::monotonicallyIncreasingTime();
1387 HeapOperation operation = m_operationInProgress;
1388 if (m_operationInProgress == FullCollection)
1389 m_lastFullGCLength = gcEndTime - gcStartTime;
1390 else
1391 m_lastEdenGCLength = gcEndTime - gcStartTime;
1392
1393 if (Options::recordGCPauseTimes())
1394 HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);
1395
1396 if (Options::useZombieMode())
1397 zombifyDeadObjects();
1398
1399 if (Options::useImmortalObjects())
1400 markDeadObjects();
1401
1402 if (Options::dumpObjectStatistics())
1403 HeapStatistics::dumpObjectStatistics(this);
1404
1405 if (Options::logGC() == GCLogging::Verbose)
1406 GCLogging::dumpObjectGraph(this);
1407
1408 RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);
1409 m_operationInProgress = NoOperation;
1410 JAVASCRIPTCORE_GC_END();
1411
1412 for (auto* observer : m_observers)
1413 observer->didGarbageCollect(operation);
1414}
1415
1416void Heap::resumeCompilerThreads()
1417{
1418#if ENABLE(DFG_JIT)
1419 GCPHASE(ResumeCompilerThreads);
1420 for (auto worklist : m_suspendedCompilerWorklists)
1421 worklist->resumeAllThreads();
1422 m_suspendedCompilerWorklists.clear();
1423#endif
1424}
1425
1426void Heap::markDeadObjects()
1427{
1428 HeapIterationScope iterationScope(*this);
1429 m_objectSpace.forEachDeadCell<MarkObject>(iterationScope);
1430}
1431
1432void Heap::setFullActivityCallback(PassRefPtr<FullGCActivityCallback> activityCallback)
1433{
1434 m_fullActivityCallback = activityCallback;
1435}
1436
1437void Heap::setEdenActivityCallback(PassRefPtr<EdenGCActivityCallback> activityCallback)
1438{
1439 m_edenActivityCallback = activityCallback;
1440}
1441
1442GCActivityCallback* Heap::fullActivityCallback()
1443{
1444 return m_fullActivityCallback.get();
1445}
1446
1447GCActivityCallback* Heap::edenActivityCallback()
1448{
1449 return m_edenActivityCallback.get();
1450}
1451
1452void Heap::setIncrementalSweeper(std::unique_ptr<IncrementalSweeper> sweeper)
1453{
1454 m_sweeper = WTFMove(sweeper);
1455}
1456
1457IncrementalSweeper* Heap::sweeper()
1458{
1459 return m_sweeper.get();
1460}
1461
1462void Heap::setGarbageCollectionTimerEnabled(bool enable)
1463{
1464 if (m_fullActivityCallback)
1465 m_fullActivityCallback->setEnabled(enable);
1466 if (m_edenActivityCallback)
1467 m_edenActivityCallback->setEnabled(enable);
1468}
1469
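// didAllocate notifies the eden timer of the bytes allocated (and abandoned) since the last
// collection, then adds the new allocation to m_bytesAllocatedThisCycle.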
1470void Heap::didAllocate(size_t bytes)
1471{
1472 if (m_edenActivityCallback)
1473 m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
1474 m_bytesAllocatedThisCycle += bytes;
1475}
1476
1477bool Heap::isValidAllocation(size_t)
1478{
1479 if (!isValidThreadState(m_vm))
1480 return false;
1481
1482 if (m_operationInProgress != NoOperation)
1483 return false;
1484
1485 return true;
1486}
1487
1488void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
1489{
1490 WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
1491}
1492
1493void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
1494{
1495 HandleSlot slot = handle.slot();
1496 Finalizer finalizer = reinterpret_cast<Finalizer>(context);
1497 finalizer(slot->asCell());
1498 WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
1499}
1500
1501void Heap::addExecutable(ExecutableBase* executable)
1502{
1503 m_executables.append(executable);
1504}
1505
1506void Heap::collectAllGarbageIfNotDoneRecently()
1507{
1508 if (!m_fullActivityCallback) {
1509 collectAllGarbage();
1510 return;
1511 }
1512
1513 if (m_fullActivityCallback->didSyncGCRecently()) {
        // A synchronous GC was already requested recently, so we merely accelerate the next collection.
1515 reportAbandonedObjectGraph();
1516 return;
1517 }
1518
1519 m_fullActivityCallback->setDidSyncGCRecently();
1520 collectAllGarbage();
1521}
1522
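// Zombify overwrites each pointer-sized word of a dead cell with zombifiedBits so that
// use-after-free bugs crash recognizably. The first word of an already-zapped cell is skipped
// to preserve the zap marker, which records that the destructor has run.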
1523class Zombify : public MarkedBlock::VoidFunctor {
1524public:
1525 inline void visit(JSCell* cell)
1526 {
1527 void** current = reinterpret_cast<void**>(cell);
1528
1529 // We want to maintain zapped-ness because that's how we know if we've called
1530 // the destructor.
1531 if (cell->isZapped())
1532 current++;
1533
1534 void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
1535 for (; current < limit; current++)
1536 *current = zombifiedBits;
1537 }
1538 IterationStatus operator()(JSCell* cell)
1539 {
1540 visit(cell);
1541 return IterationStatus::Continue;
1542 }
1543};
1544
1545void Heap::zombifyDeadObjects()
1546{
1547 // Sweep now because destructors will crash once we're zombified.
1548 {
1549 SamplingRegion samplingRegion("Garbage Collection: Sweeping");
1550 m_objectSpace.zombifySweep();
1551 }
1552 HeapIterationScope iterationScope(*this);
1553 m_objectSpace.forEachDeadCell<Zombify>(iterationScope);
1554}
1555
1556void Heap::flushWriteBarrierBuffer(JSCell* cell)
1557{
1558 m_writeBarrierBuffer.flush(*this);
1559 m_writeBarrierBuffer.add(cell);
1560}
1561
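// With generational GC disabled, every collection is a full collection. Otherwise, AnyCollection
// defers to m_shouldDoFullCollection, which updateAllocationLimits() sets when the eden-to-old
// generation ratio becomes too small.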
1562bool Heap::shouldDoFullCollection(HeapOperation requestedCollectionType) const
1563{
1564 if (!Options::useGenerationalGC())
1565 return true;
1566
1567 switch (requestedCollectionType) {
1568 case EdenCollection:
1569 return false;
1570 case FullCollection:
1571 return true;
1572 case AnyCollection:
1573 return m_shouldDoFullCollection;
1574 default:
1575 RELEASE_ASSERT_NOT_REACHED();
1576 return false;
1577 }
1578 RELEASE_ASSERT_NOT_REACHED();
1579 return false;
1580}
1581
1582void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
1583{
1584 m_logicallyEmptyWeakBlocks.append(block);
1585}
1586
1587void Heap::sweepAllLogicallyEmptyWeakBlocks()
1588{
1589 if (m_logicallyEmptyWeakBlocks.isEmpty())
1590 return;
1591
1592 m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
1593 while (sweepNextLogicallyEmptyWeakBlock()) { }
1594}
1595
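// Sweeps one logically empty weak block per call, destroying blocks that become completely
// empty. Returns false once the sweep index runs past the end, which callers use to terminate
// their loop.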
1596bool Heap::sweepNextLogicallyEmptyWeakBlock()
1597{
1598 if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
1599 return false;
1600
1601 WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
1602
1603 block->sweep();
1604 if (block->isEmpty()) {
1605 std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
1606 m_logicallyEmptyWeakBlocks.removeLast();
1607 WeakBlock::destroy(*this, block);
1608 } else
1609 m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
1610
1611 if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
1612 m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
1613 return false;
1614 }
1615
1616 return true;
1617}
1618
1619size_t Heap::threadVisitCount()
1620{
1621 unsigned long result = 0;
1622 for (auto& parallelVisitor : m_parallelSlotVisitors)
1623 result += parallelVisitor->visitCount();
1624 return result;
1625}
1626
1627size_t Heap::threadBytesVisited()
1628{
1629 size_t result = 0;
1630 for (auto& parallelVisitor : m_parallelSlotVisitors)
1631 result += parallelVisitor->bytesVisited();
1632 return result;
1633}
1634
1635size_t Heap::threadBytesCopied()
1636{
1637 size_t result = 0;
1638 for (auto& parallelVisitor : m_parallelSlotVisitors)
1639 result += parallelVisitor->bytesCopied();
1640 return result;
1641}
1642
1643} // namespace JSC
1644