// -*- mode: c++; c-basic-offset: 4 -*-
/*
 * This file is part of the KDE libraries
 * Copyright (C) 2003, 2004, 2005, 2006, 2007 Apple Computer, Inc.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 * Copyright (C) 2007 Maksim Orlovich <maksim@kde.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "collector.h"
#include <config-kjs.h>

#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include "internal.h"
#include "list.h"
#include "value.h"

#include <setjmp.h>
#include <limits.h>
#include <algorithm>

#if PLATFORM(DARWIN)

#include <pthread.h>
#include <mach/mach_port.h>
#include <mach/mach_init.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#elif PLATFORM(WIN_OS) || COMPILER(CYGWIN)

#include <windows.h>
#include <winnt.h>

#elif PLATFORM(UNIX)

#include <stdlib.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <pthread.h> //krazy:exclude=includes (yes, it's duplicate, but in a different #if branch)
#include <unistd.h>

#if PLATFORM(SOLARIS_OS)
#include <thread.h>
#include <signal.h>
using std::memset;
#endif

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

#endif

#define DEBUG_COLLECTOR 0

#if HAVE(VALGRIND_MEMCHECK_H) && !defined(NDEBUG)
 #include <valgrind/memcheck.h>
 #if defined(VALGRIND_MAKE_MEM_DEFINED)
  #define VG_DEFINED(p) VALGRIND_MAKE_MEM_DEFINED(&p, sizeof(void*))
 #else
  #define VG_DEFINED(p)
 #endif
#else
 #define VG_DEFINED(p)
#endif


using std::max;

namespace KJS {

// tunable parameters
const size_t SPARE_EMPTY_BLOCKS = 2;
const size_t MIN_ARRAY_SIZE = 14;
const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 4000;


// A wee bit like a WTF::Vector, but plain POD, so it can be used in a global
// context without static-initialization worries.
struct BlockList {
    CollectorBlock** m_data;
    size_t m_used;
    size_t m_capacity;

    CollectorBlock* operator[](size_t pos) {
        return m_data[pos];
    }

    size_t used() const {
        return m_used;
    }

    void append(CollectorBlock* block) {
        if (m_used == m_capacity) {
            static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
            if (m_capacity > maxNumBlocks)
                CRASH();
            m_capacity = max(MIN_ARRAY_SIZE, m_capacity * GROWTH_FACTOR);
            m_data = static_cast<CollectorBlock **>(fastRealloc(m_data, m_capacity * sizeof(CollectorBlock *)));
        }
        m_data[m_used] = block;
        ++m_used;
    }

    void remove(CollectorBlock* block) {
        size_t c;
        for (c = 0; c < m_used; ++c)
            if (m_data[c] == block)
                break;

        if (c == m_used)
            return;

        // Move things up, and shrink..
        --m_used;
        for (; c < m_used; ++c)
            m_data[c] = m_data[c+1];
    }
};

struct CollectorHeap {
    // This contains the list of both normal and oversize blocks
    BlockList allBlocks;

    // Just the normal blocks
    BlockList blocks;

    size_t firstBlockWithPossibleSpace;

    // The oversize block handling is a bit tricky, since we do not wish to slow down
    // the usual paths. Hence, we do the following:
    // 1) We set the marked bits for any extension portions of the block.
    //    This way the stack GC doesn't have to do anything special if a pointer
    //    happens to go that way.
    // 2) We keep track of an allBlocks list so the conservative stack GC can
    //    test candidate pointers against the oversize blocks as well.
    //
    // The oversize heap itself represents blocks as follows:
    // 1) free: marked = false, allocd = false, trailer = false, zeroIfFree = 0
    // 2) alloc'd, head: marked = depends, allocd = true, trailer = false, zeroIfFree is uncertain
    // 3) alloc'd, trailer: marked = true (see above), allocd = true, trailer = true, zeroIfFree is uncertain
    //
    // For now, we don't use a freelist, so performance may be quite bad if
    // this is used heavily; that's because the cells do not have back and forward
    // links, which we would need to unlink an arbitrary (non-first) cell.
    // ### actually, it may be possible to shuffle the list. Not now, though
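    //
    // An illustrative example (cell indices made up, derived from allocOversize
    // below): a 4-cell oversize allocation placed at cells 7..10 sets the
    // bitmaps as follows:
    //   cell  7 (head):      allocd = 1, trailer = 0, marked = set only while reachable
    //   cells 8..10 (tail):  allocd = 1, trailer = 1, marked = 1 (kept set, see above)
    // All other bits stay 0, so a conservative scan that lands on cells 8..10
    // finds them already marked and does nothing special.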
    BlockList oversizeBlocks;

    size_t numLiveObjects;
    size_t numLiveObjectsAtLastCollect;
    size_t extraCost;
};

static CollectorHeap heap;

bool Collector::memoryFull = false;

static CollectorBlock* allocateBlock()
{
#if PLATFORM(DARWIN)
    vm_address_t address = 0;
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif PLATFORM(WIN_OS) || COMPILER(CYGWIN)
    // Windows' virtual address granularity is naturally 64K
    LPVOID address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif HAVE(POSIX_MEMALIGN)
    void* address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
    memset(address, 0, BLOCK_SIZE);
#else
    static size_t pagesize = getpagesize();

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;

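    // mmap only guarantees page alignment, so we over-allocate by
    // (BLOCK_SIZE - pagesize) bytes: a page-aligned mapping of that length is
    // guaranteed to contain a BLOCK_SIZE-aligned run of BLOCK_SIZE bytes.
    // The unused head and tail pieces are handed back with munmap below.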
    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    if (adjust > 0)
        munmap(reinterpret_cast<void*>(address), adjust);

    if (adjust < extra)
        munmap(reinterpret_cast<void*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
    memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
#endif
    CollectorBlock* ptr = reinterpret_cast<CollectorBlock*>(address);
    heap.allBlocks.append(ptr); // Register with the heap
    return ptr;
}

static void freeBlock(CollectorBlock* block)
{
    // Unregister the block first
    heap.allBlocks.remove(block);

#if PLATFORM(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif PLATFORM(WIN_OS) || COMPILER(CYGWIN)
    VirtualFree(block, BLOCK_SIZE, MEM_RELEASE);
#elif HAVE(POSIX_MEMALIGN)
    free(block);
#else
    munmap(block, BLOCK_SIZE);
#endif
}

void Collector::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    heap.extraCost += cost;
}
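
// A hypothetical caller (a sketch only -- the names below are made up, not
// actual KJS code): a value type pinning a large out-of-heap buffer would
// report that payload so the next allocation round can collect earlier:
//
//     JSValue* makeBigString(ExecState* exec, size_t payloadBytes)
//     {
//         JSValue* v = allocateStringWithBuffer(exec, payloadBytes); // hypothetical helper
//         Collector::recordExtraCost(payloadBytes); // bias the GC trigger
//         return v;
//     }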
247
static void* allocOversize(size_t s)
{
    size_t cellsNeeded = (s + (CELL_SIZE - 1)) / CELL_SIZE;
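    // e.g. if CELL_SIZE were 64, a 150-byte request would round up to
    // cellsNeeded == 3 (192 bytes).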

    // printf("allocOversize, size:%d, cellsNeeded:%d\n", s, cellsNeeded);

    // Even oversize allocations must fit in a single block, so we cannot
    // handle requests approaching 64K in size
    assert(cellsNeeded <= CELLS_PER_BLOCK);

    // Look through the blocks, see if one has enough, and where.
    CollectorBlock* sufficientBlock = 0;
    size_t startOffset = -1;
    for (size_t b = 0; b < heap.oversizeBlocks.used() && !sufficientBlock; ++b) {
        CollectorBlock* candidate = heap.oversizeBlocks[b];
        if (cellsNeeded <= CELLS_PER_BLOCK - candidate->usedCells) {
            // Well, there is a chance we will fit.. Let's see if we can find a batch long enough..
            for (size_t i = 0; i < CELLS_PER_BLOCK; i++) {
                if (i % 32 == 0 && candidate->allocd.bits[i/32] == 0xFFFFFFFF) {
                    // Can skip this and 31 other cells
                    i += 31;
                    continue;
                }

                if (candidate->allocd.get(i))
                    continue;

                // This cell is free -- are there enough free cells after it?
                startOffset = i;
                size_t last = i + cellsNeeded - 1;

                if (last >= CELLS_PER_BLOCK) // No way it will fit
                    break;

                ++i;
                while (i <= last && !candidate->allocd.get(i))
                    ++i;

                // Did we get through enough?
                if (i == last + 1) {
                    sufficientBlock = candidate;
                    break;
                }

                // Not enough room -- the cell at i is allocated, so the scan
                // can safely resume at i + 1 on the next iteration

            } // for each cell per block.
        } // if enough room in block
    } // for each block

    if (!sufficientBlock) {
        sufficientBlock = allocateBlock();
        startOffset = 0;
        heap.oversizeBlocks.append(sufficientBlock);
    }

    sufficientBlock->usedCells += cellsNeeded;

    // Set proper bits for trailers and the head
    sufficientBlock->allocd.set(startOffset);
    for (size_t t = startOffset + 1; t < startOffset + cellsNeeded; ++t) {
        sufficientBlock->trailer.set(t);
        sufficientBlock->marked.set(t);
        sufficientBlock->allocd.set(t);
    }

    void* result = sufficientBlock->cells + startOffset;
    memset(result, 0, s);
    ++heap.numLiveObjects;
    return result;
}

void* Collector::allocate(size_t s)
{
    assert(JSLock::lockCount() > 0);

    // collect if needed
    size_t numLiveObjects = heap.numLiveObjects;
    size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
    size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
    size_t newCost = numNewObjects + heap.extraCost;

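    // Heuristic: collect only once enough new values exist to be worth a pass,
    // and only once the new cost at least matches the population that survived
    // the last collection -- i.e. roughly when the heap has doubled since then.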
    if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
        collect();
        numLiveObjects = heap.numLiveObjects;
    }

    if (s > CELL_SIZE) {
        return allocOversize(s);
    }

    // slab allocator

    size_t usedBlocks = heap.blocks.used();

    size_t i = heap.firstBlockWithPossibleSpace;
    CollectorBlock *targetBlock;
    size_t targetBlockUsedCells;
    if (i != usedBlocks) {
        targetBlock = heap.blocks[i];
        targetBlockUsedCells = targetBlock->usedCells;
        assert(targetBlockUsedCells <= CELLS_PER_BLOCK);
        while (targetBlockUsedCells == CELLS_PER_BLOCK) {
            if (++i == usedBlocks)
                goto allocateNewBlock;
            targetBlock = heap.blocks[i];
            targetBlockUsedCells = targetBlock->usedCells;
            assert(targetBlockUsedCells <= CELLS_PER_BLOCK);
        }
        heap.firstBlockWithPossibleSpace = i;
    } else {
allocateNewBlock:
        // didn't find one, need to allocate a new block
        targetBlock = allocateBlock();
        targetBlock->freeList = targetBlock->cells;
        targetBlockUsedCells = 0;
        heap.blocks.append(targetBlock);
        heap.firstBlockWithPossibleSpace = usedBlocks; // the pre-append block count is exactly the new block's index
    }

    // find a free spot in the block and detach it from the free list
    CollectorCell *newCell = targetBlock->freeList;

    // "next" field is a byte offset -- 0 means next cell, so a zeroed block is already initialized
    // could avoid the casts by using a cell offset, but this avoids a relatively-slow multiply
    targetBlock->freeList = reinterpret_cast<CollectorCell *>(reinterpret_cast<char *>(newCell + 1) + newCell->u.freeCell.next);
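    // e.g. in a freshly zeroed block every cell's 'next' offset is 0, so the
    // successor of newCell is computed as (newCell + 1) + 0 -- simply the next
    // cell in the array; no explicit free-list initialization is ever needed.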

    targetBlock->usedCells = targetBlockUsedCells + 1;
    heap.numLiveObjects = numLiveObjects + 1;

    return newCell;
}

#if USE(MULTIPLE_THREADS)

struct Collector::Thread {
    Thread(pthread_t pthread, mach_port_t mthread) : posixThread(pthread), machThread(mthread) {}
    Thread *next;
    pthread_t posixThread;
    mach_port_t machThread;
};

pthread_key_t registeredThreadKey;
pthread_once_t registeredThreadKeyOnce = PTHREAD_ONCE_INIT;

Collector::Thread *registeredThreads;

static void destroyRegisteredThread(void *data)
{
    Collector::Thread *thread = (Collector::Thread *)data;

    if (registeredThreads == thread) {
        registeredThreads = registeredThreads->next;
    } else {
        Collector::Thread *last = registeredThreads;
        for (Collector::Thread *t = registeredThreads->next; t != NULL; t = t->next) {
            if (t == thread) {
                last->next = t->next;
                break;
            }
            last = t;
        }
    }

    delete thread;
}

static void initializeRegisteredThreadKey()
{
    pthread_key_create(&registeredThreadKey, destroyRegisteredThread);
}

void Collector::registerThread()
{
    pthread_once(&registeredThreadKeyOnce, initializeRegisteredThreadKey);

    if (!pthread_getspecific(registeredThreadKey)) {
        pthread_t pthread = pthread_self();
        WTF::fastMallocRegisterThread(pthread);
        Collector::Thread *thread = new Collector::Thread(pthread, pthread_mach_thread_np(pthread));
        thread->next = registeredThreads;
        registeredThreads = thread;
        pthread_setspecific(registeredThreadKey, thread);
    }
}

#endif

#define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char *) - 1)) == 0)

// cell size needs to be a power of two for this to be valid
#define IS_CELL_ALIGNED(p) (((intptr_t)(p) & CELL_MASK) == 0)

void Collector::markStackObjectsConservatively(void *start, void *end)
{
    if (start > end) {
        void *tmp = start;
        start = end;
        end = tmp;
    }

    assert(((char *)end - (char *)start) < 0x1000000);
    assert(IS_POINTER_ALIGNED(start));
    assert(IS_POINTER_ALIGNED(end));

    char **p = (char **)start;
    char **e = (char **)end;

    // We use allBlocks here since we want to mark oversize cells as well.
    // Their trailers will have the mark bit set already, to avoid trouble.
    size_t usedBlocks = heap.allBlocks.used();
    CollectorBlock **blocks = heap.allBlocks.m_data;

    const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);

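    // (A candidate pointer only counts as a cell if its offset within the
    // block lands inside the cells array proper, rather than in the
    // bookkeeping fields that follow it -- hence the lastCellOffset check
    // in the loop below.)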
    while (p != e) {
        char *x = *p++;
        VG_DEFINED(x);
        if (IS_CELL_ALIGNED(x) && x) {
            uintptr_t offset = reinterpret_cast<uintptr_t>(x) & BLOCK_OFFSET_MASK;
            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(x - offset);
            for (size_t block = 0; block < usedBlocks; block++) {
                if ((blocks[block] == blockAddr) && (offset <= lastCellOffset)) {
                    if (((CollectorCell *)x)->u.freeCell.zeroIfFree != 0) {
                        JSCell *imp = reinterpret_cast<JSCell *>(x);
                        if (!imp->marked())
                            imp->mark();
                    }
                } // if valid block
            } // for each block
        } // if cell-aligned
    } // for each pointer
}

static inline void* currentThreadStackBase()
{
#if PLATFORM(DARWIN)
    pthread_t thread = pthread_self();
    void *stackBase = pthread_get_stackaddr_np(thread);
#elif (PLATFORM(WIN_OS) || COMPILER(CYGWIN))
    // tested with mingw32, mingw64, msvc2008, cygwin
    NT_TIB *pTib = (NT_TIB*)NtCurrentTeb();
    void *stackBase = (void *)pTib->StackBase;
#elif PLATFORM(SOLARIS_OS)
    stack_t s;
    thr_stksegment(&s);
    return s.ss_sp;
    // NOTREACHED
    void *stackBase = 0;
#elif PLATFORM(UNIX)
    static void *stackBase = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
#if HAVE(PTHREAD_NP_H) || defined(__NetBSD__)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        // also on NetBSD 3 and 4, raphael.langerhorst@kdemail.net
        // HIGHLY RECOMMENDED by the manpage to allocate storage; avoids
        // crashing in JS immediately on FreeBSD.
        pthread_attr_init(&sattr);
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif // picking the _np function to use --- Linux or BSD

        size_t stackSize;
        pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        stackBase = (char *)stackBase + stackSize; // a matter of interpretation, apparently...
        pthread_attr_destroy(&sattr);
        assert(stackBase);
        stackThread = thread;
    }
#else
#error Need a way to get the stack base on this platform
#endif
    return stackBase;
}


void Collector::markCurrentThreadConservatively()
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers;
#if COMPILER(MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if COMPILER(MSVC)
#pragma warning(pop)
#endif

    void* dummy;
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();

    markStackObjectsConservatively(stackPointer, stackBase);
}

#if USE(MULTIPLE_THREADS)

typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

void Collector::markOtherThreadConservatively(Thread *thread)
{
    thread_suspend(thread->machThread);

#if PLATFORM(X86)
    i386_thread_state_t regs;
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif PLATFORM(X86_64)
    x86_thread_state64_t regs;
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif PLATFORM(PPC)
    ppc_thread_state_t regs;
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif PLATFORM(PPC64)
    ppc_thread_state64_t regs;
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#else
#error Unknown Architecture
#endif
    // get the thread register state
    thread_get_state(thread->machThread, flavor, (thread_state_t)&regs, &user_count);

    // scan the registers
    markStackObjectsConservatively((void *)&regs, (void *)((char *)&regs + (user_count * sizeof(usword_t))));

    // scan the stack
#if PLATFORM(X86) && __DARWIN_UNIX03
    markStackObjectsConservatively((void *)regs.__esp, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(X86)
    markStackObjectsConservatively((void *)regs.esp, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(X86_64) && __DARWIN_UNIX03
    markStackObjectsConservatively((void *)regs.__rsp, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(X86_64)
    markStackObjectsConservatively((void *)regs.rsp, pthread_get_stackaddr_np(thread->posixThread));
#elif (PLATFORM(PPC) || PLATFORM(PPC64)) && __DARWIN_UNIX03
    markStackObjectsConservatively((void *)regs.__r1, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    markStackObjectsConservatively((void *)regs.r1, pthread_get_stackaddr_np(thread->posixThread));
#else
#error Unknown Architecture
#endif

    thread_resume(thread->machThread);
}

#endif

void Collector::markStackObjectsConservatively()
{
    markCurrentThreadConservatively();

#if USE(MULTIPLE_THREADS)
    for (Thread *thread = registeredThreads; thread != NULL; thread = thread->next) {
        if (thread->posixThread != pthread_self()) {
            markOtherThreadConservatively(thread);
        }
    }
#endif
}

typedef HashCountedSet<JSCell *> ProtectCounts;

static ProtectCounts& protectedValues()
{
    static ProtectCounts pv;
    return pv;
}

void Collector::protect(JSValue *k)
{
    assert(k);
    assert(JSLock::lockCount() > 0);

    if (JSImmediate::isImmediate(k))
        return;

    protectedValues().add(k->asCell());
}

void Collector::unprotect(JSValue *k)
{
    assert(k);
    assert(JSLock::lockCount() > 0);

    if (JSImmediate::isImmediate(k))
        return;

    protectedValues().remove(k->asCell());
}
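
// Typical use (a sketch, not a prescribed API pattern): an embedder holding a
// JSValue* across allocations keeps it alive with a matched pair --
//
//     Collector::protect(v);
//     // ... code that may allocate, and therefore may run collect() ...
//     Collector::unprotect(v);
//
// The set is counted (a HashCountedSet), so nested protect/unprotect pairs
// balance correctly; RAII wrappers are conventionally built on this pair.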

void Collector::markProtectedObjects()
{
    ProtectCounts& pv = protectedValues();
    ProtectCounts::iterator end = pv.end();
    for (ProtectCounts::iterator it = pv.begin(); it != end; ++it) {
        JSCell *val = it->first;
        if (!val->marked())
            val->mark();
    }
}

bool Collector::collect()
{
    assert(JSLock::lockCount() > 0);

#if USE(MULTIPLE_THREADS)
    bool currentThreadIsMainThread = pthread_main_np();
#else
    bool currentThreadIsMainThread = true;
#endif

    Interpreter::markSourceCachedObjects();

    if (Interpreter::s_hook) {
        Interpreter* scr = Interpreter::s_hook;
        do {
            scr->mark(currentThreadIsMainThread);
            scr = scr->next;
        } while (scr != Interpreter::s_hook);
    }

    // MARK: first mark all referenced objects recursively starting out from the set of root objects

    markStackObjectsConservatively();
    markProtectedObjects();
    List::markProtectedLists();
#if USE(MULTIPLE_THREADS)
    if (!currentThreadIsMainThread)
        markMainThreadOnlyObjects();
#endif

    // SWEEP: delete everything that is unmarked (garbage) and unmark everything else

    // Note: if a cell has zeroIfFree == 0, it is either free,
    // or in the middle of being constructed and has not yet
    // had its vtable filled. Hence, such cells should not be cleaned up.

    size_t emptyBlocks = 0;
    size_t numLiveObjects = heap.numLiveObjects;

    for (size_t block = 0; block < heap.blocks.used(); block++) {
        CollectorBlock *curBlock = heap.blocks[block];

        size_t usedCells = curBlock->usedCells;
        CollectorCell *freeList = curBlock->freeList;

        if (usedCells == CELLS_PER_BLOCK) {
            // special case with a block where all cells are used -- testing indicates this happens often
            for (size_t i = 0; i < CELLS_PER_BLOCK; i++) {
                CollectorCell *cell = curBlock->cells + i;
                JSCell *imp = reinterpret_cast<JSCell *>(cell);
                if (!curBlock->marked.get(i) && currentThreadIsMainThread) {
                    // special case for allocated but uninitialized object
                    // (We don't need this check earlier because nothing prior to this point assumes the object has a valid vptr.)
                    if (cell->u.freeCell.zeroIfFree == 0)
                        continue;
                    imp->~JSCell();
                    --usedCells;
                    --numLiveObjects;

                    // put cell on the free list
                    cell->u.freeCell.zeroIfFree = 0;
                    cell->u.freeCell.next = reinterpret_cast<char *>(freeList) - reinterpret_cast<char *>(cell + 1);
                    freeList = cell;
                }
            }
        } else {
            size_t minimumCellsToProcess = usedCells;
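            // Note: the single '&' in the loop condition below is intentional;
            // both comparisons are cheap, and this avoids a branch.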
            for (size_t i = 0; (i < minimumCellsToProcess) & (i < CELLS_PER_BLOCK); i++) {
                CollectorCell *cell = curBlock->cells + i;
                if (cell->u.freeCell.zeroIfFree == 0) {
                    ++minimumCellsToProcess;
                } else {
                    JSCell *imp = reinterpret_cast<JSCell *>(cell);
                    if (!curBlock->marked.get(i) && currentThreadIsMainThread) {
                        imp->~JSCell();
                        --usedCells;
                        --numLiveObjects;

                        // put cell on the free list
                        cell->u.freeCell.zeroIfFree = 0;
                        cell->u.freeCell.next = reinterpret_cast<char *>(freeList) - reinterpret_cast<char *>(cell + 1);
                        freeList = cell;
                    }
                }
            }
        }

        curBlock->marked.clearAll();
        curBlock->usedCells = usedCells;
        curBlock->freeList = freeList;

        if (usedCells == 0) {
            emptyBlocks++;
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
#if !DEBUG_COLLECTOR
                freeBlock(curBlock);
#endif
                // swap with the last block so we compact as we go
                heap.blocks.m_data[block] = heap.blocks[heap.blocks.used() - 1];
                heap.blocks.m_used--;
                block--; // Don't move forward a step in this case
            }
        }
    }

    if (heap.numLiveObjects != numLiveObjects)
        heap.firstBlockWithPossibleSpace = 0;

    // Now sweep oversize blocks.
    emptyBlocks = 0;
    for (size_t ob = 0; ob < heap.oversizeBlocks.used(); ++ob) {
        CollectorBlock* curBlock = heap.oversizeBlocks[ob];
        CollectorCell *freeList = curBlock->freeList;
        size_t usedCells = curBlock->usedCells;

        // Go through the cells
        for (size_t i = 0; i < CELLS_PER_BLOCK; i++) {
            if (i % 32 == 0 && curBlock->allocd.bits[i/32] == 0) {
                // Nothing used around here, skip this and 31 next cells
                i += 31;
                continue;
            }

            CollectorCell *cell = curBlock->cells + i;
            if (cell->u.freeCell.zeroIfFree == 0)
                continue;

            if (!curBlock->allocd.get(i))
                continue;

            JSCell *imp = reinterpret_cast<JSCell *>(cell);

            // Live and trailer cells will all have the mark set,
            // so we only have to collect with it clear --
            // and we already took care of those that are
            // already free (or being initialized) above
            if (!curBlock->marked.get(i)) {
                // Free this cell...
                imp->~JSCell();
                --numLiveObjects;
                --usedCells;

                // Mark the head cell and its trailers as available
                cell->u.freeCell.zeroIfFree = 0;
                curBlock->allocd.clear(i);

                ++i; // Go to the potential trailer..
                while (i < CELLS_PER_BLOCK && curBlock->trailer.get(i)) {
                    --usedCells;
                    curBlock->allocd.clear(i);
                    curBlock->trailer.clear(i);
                    curBlock->marked.clear(i);
                    curBlock->cells[i].u.freeCell.zeroIfFree = 0;
                    ++i;
                }
                --i; // Last item we processed.
            } else {
                // If this is not a trailer cell, clear the mark
                if (!curBlock->trailer.get(i))
                    curBlock->marked.clear(i);
            }
        } // each cell
        curBlock->usedCells = usedCells;
        curBlock->freeList = freeList;
        if (usedCells == 0) {
            emptyBlocks++;
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
                freeBlock(curBlock);

                // swap with the last block so we compact as we go
                heap.oversizeBlocks.m_data[ob] = heap.oversizeBlocks[heap.oversizeBlocks.used() - 1];
                heap.oversizeBlocks.m_used--;
                ob--; // Don't move forward a step in this case
            }
        }
    } // each oversize block

    bool deleted = heap.numLiveObjects != numLiveObjects;

    heap.numLiveObjects = numLiveObjects;
    heap.numLiveObjectsAtLastCollect = numLiveObjects;
    heap.extraCost = 0;

    bool newMemoryFull = (numLiveObjects >= KJS_MEM_LIMIT);
    if (newMemoryFull && newMemoryFull != memoryFull)
        reportOutOfMemoryToAllInterpreters();
    memoryFull = newMemoryFull;

    return deleted;
}

size_t Collector::size()
{
    return heap.numLiveObjects;
}

#ifdef KJS_DEBUG_MEM
void Collector::finalCheck()
{
}
#endif

size_t Collector::numInterpreters()
{
    size_t count = 0;
    if (Interpreter::s_hook) {
        Interpreter* scr = Interpreter::s_hook;
        do {
            ++count;
            scr = scr->next;
        } while (scr != Interpreter::s_hook);
    }
    return count;
}

size_t Collector::numProtectedObjects()
{
    return protectedValues().size();
}

static const char *typeName(JSCell *val)
{
    const char *name = "???";
    switch (val->type()) {
    case UnspecifiedType:
        break;
    case UndefinedType:
        name = "undefined";
        break;
    case NullType:
        name = "null";
        break;
    case BooleanType:
        name = "boolean";
        break;
    case StringType:
        name = "string";
        break;
    case NumberType:
        name = "number";
        break;
    case ObjectType: {
        const ClassInfo *info = static_cast<JSObject *>(val)->classInfo();
        name = info ? info->className : "Object";
        break;
    }
    case GetterSetterType:
        name = "gettersetter";
        break;
    }
    return name;
}
910
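// The caller assumes ownership of the returned set and must delete it.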
HashCountedSet<const char*>* Collector::rootObjectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    ProtectCounts& pv = protectedValues();
    ProtectCounts::iterator end = pv.end();
    for (ProtectCounts::iterator it = pv.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    return counts;
}

void Collector::reportOutOfMemoryToAllInterpreters()
{
    if (!Interpreter::s_hook)
        return;

    Interpreter* interpreter = Interpreter::s_hook;
    do {
        ExecState* exec = interpreter->execState();

        exec->setException(Error::create(exec, GeneralError, "Out of memory"));

        interpreter = interpreter->next;
    } while (interpreter != Interpreter::s_hook);
}

} // namespace KJS