/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "Collector.h"

#include "ArgList.h"
#include "CallFrame.h"
#include "CodeBlock.h"
#include "CollectorHeapIterator.h"
#include "Interpreter.h"
#include "JSArray.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "JSString.h"
#include "JSValue.h"
#include "JSZombie.h"
#include "MarkStack.h"
#include "Nodes.h"
#include "Tracing.h"
#include <algorithm>
#include <limits.h>
#include <setjmp.h>
#include <stdlib.h>
#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include <wtf/UnusedParam.h>
#include <wtf/VMTags.h>

#if OS(DARWIN)

#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>
// clang's libc++ headers do not pull in pthread.h (but libstdc++'s do)
#include <pthread.h>

#elif OS(WINDOWS)

#include <windows.h>
#include <malloc.h>

#elif OS(HAIKU)

#include <OS.h>

#elif OS(UNIX)

#include <stdlib.h>
#if !OS(HAIKU)
#include <sys/mman.h>
#endif
#include <unistd.h>

#if OS(SOLARIS)
#include <thread.h>
#else
#include <pthread.h>
#endif

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

#if OS(QNX)
#include <sys/storage.h>
#endif

#endif

#define COLLECT_ON_EVERY_ALLOCATION 0

using std::max;

namespace JSC {

// tunable parameters

const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 3600;
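// ALLOCATIONS_PER_COLLECTION is used by resizeBlocks() below as the minimum
// number of free cells to keep available after each collection.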
// This value has to be a macro to be used in max() without introducing
// a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
#define MIN_ARRAY_SIZE (static_cast<size_t>(14))

#if ENABLE(JSC_MULTIPLE_THREADS)

#if OS(DARWIN)
typedef mach_port_t PlatformThread;
#elif OS(WINDOWS)
typedef HANDLE PlatformThread;
#endif

class Heap::Thread {
public:
    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread)
        , platformThread(platThread)
        , stackBase(base)
    {
    }

    Thread* next;
    pthread_t posixThread;
    PlatformThread platformThread;
    void* stackBase;
};

#endif

Heap::Heap(JSGlobalData* globalData)
    : m_markListSet(0)
#if ENABLE(JSC_MULTIPLE_THREADS)
    , m_registeredThreads(0)
    , m_currentThreadRegistrar(0)
#endif
#if OS(SYMBIAN)
    , m_blockallocator(WTF::AlignedBlockAllocator::instance(JSCCOLLECTOR_VIRTUALMEM_RESERVATION, BLOCK_SIZE))
#endif
    , m_globalData(globalData)
{
    ASSERT(globalData);
    memset(&m_heap, 0, sizeof(CollectorHeap));
    allocateBlock();
}

Heap::~Heap()
{
    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}

void Heap::destroy()
{
    JSLock lock(SilenceAssertionsOnly);

    if (!m_globalData)
        return;

    ASSERT(!m_globalData->dynamicGlobalObject);
    ASSERT(!isBusy());

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

    delete m_markListSet;
    m_markListSet = 0;

    freeBlocks();

#if ENABLE(JSC_MULTIPLE_THREADS)
    if (m_currentThreadRegistrar) {
        int error = pthread_key_delete(m_currentThreadRegistrar);
        ASSERT_UNUSED(error, !error);
    }

    MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
    for (Heap::Thread* t = m_registeredThreads; t;) {
        Heap::Thread* next = t->next;
        delete t;
        t = next;
    }
#endif
    m_globalData = 0;
}

NEVER_INLINE CollectorBlock* Heap::allocateBlock()
{
#if OS(DARWIN)
    vm_address_t address = 0;
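    // Passing BLOCK_OFFSET_MASK as the alignment mask makes vm_map hand back
    // BLOCK_SIZE-aligned memory directly, so no manual trimming is needed here.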
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif OS(SYMBIAN)
    void* address = m_blockallocator.alloc();
    if (!address)
        CRASH();
#elif OS(WINCE)
    void* address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif OS(WINDOWS)
#if COMPILER(MINGW) && !COMPILER(MINGW64)
    void* address = __mingw_aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#else
    void* address = _aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#endif
    memset(address, 0, BLOCK_SIZE);
#elif HAVE(POSIX_MEMALIGN)
    void* address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
#else

#if ENABLE(JSC_MULTIPLE_THREADS)
#error Need to initialize pagesize safely.
#endif
    static size_t pagesize = getpagesize();

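    // mmap() only guarantees page alignment, so over-allocate by enough that a
    // BLOCK_SIZE-aligned region is certain to fit inside the mapping, then
    // unmap the unused head and tail below.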
    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;

    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    if (adjust > 0)
        munmap(reinterpret_cast<char*>(address), adjust);

    if (adjust < extra)
        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
#endif

    // Initialize block.

    CollectorBlock* block = reinterpret_cast<CollectorBlock*>(address);
    block->heap = this;
    clearMarkBits(block);

    Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
    for (size_t i = 0; i < HeapConstants::cellsPerBlock; ++i)
        new (block->cells + i) JSCell(dummyMarkableCellStructure);

    // Add block to blocks vector.

    size_t numBlocks = m_heap.numBlocks;
    if (m_heap.usedBlocks == numBlocks) {
        static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
        if (numBlocks > maxNumBlocks)
            CRASH();
        numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
        m_heap.numBlocks = numBlocks;
        m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, numBlocks * sizeof(CollectorBlock*)));
    }
    m_heap.blocks[m_heap.usedBlocks++] = block;

    return block;
}

NEVER_INLINE void Heap::freeBlock(size_t block)
{
    m_heap.didShrink = true;

    ObjectIterator it(m_heap, block);
    ObjectIterator end(m_heap, block + 1);
    for ( ; it != end; ++it)
        (*it)->~JSCell();
    freeBlockPtr(m_heap.blocks[block]);

    // swap with the last block so we compact as we go
    m_heap.blocks[block] = m_heap.blocks[m_heap.usedBlocks - 1];
    m_heap.usedBlocks--;

    if (m_heap.numBlocks > MIN_ARRAY_SIZE && m_heap.usedBlocks < m_heap.numBlocks / LOW_WATER_FACTOR) {
        m_heap.numBlocks = m_heap.numBlocks / GROWTH_FACTOR;
        m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(CollectorBlock*)));
    }
}

NEVER_INLINE void Heap::freeBlockPtr(CollectorBlock* block)
{
#if OS(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif OS(SYMBIAN)
    m_blockallocator.free(reinterpret_cast<void*>(block));
#elif OS(WINCE)
    VirtualFree(block, 0, MEM_RELEASE);
#elif OS(WINDOWS)
#if COMPILER(MINGW) && !COMPILER(MINGW64)
    __mingw_aligned_free(block);
#else
    _aligned_free(block);
#endif
#elif HAVE(POSIX_MEMALIGN)
    free(block);
#else
    munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
#endif
}

void Heap::freeBlocks()
{
    ProtectCountSet protectedValuesCopy = m_protectedValues;

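    // Tear down in two passes: destroy unprotected cells first (their
    // destructors may still use protected values such as the global object),
    // then destroy the protected cells themselves.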
    clearMarkBits();
    ProtectCountSet::iterator protectedValuesEnd = protectedValuesCopy.end();
    for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
        markCell(it->first);

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
    DeadObjectIterator end(m_heap, m_heap.usedBlocks);
    for ( ; it != end; ++it)
        (*it)->~JSCell();

    ASSERT(!protectedObjectCount());

    protectedValuesEnd = protectedValuesCopy.end();
    for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
        it->first->~JSCell();

    for (size_t block = 0; block < m_heap.usedBlocks; ++block)
        freeBlockPtr(m_heap.blocks[block]);

    fastFree(m_heap.blocks);

    memset(&m_heap, 0, sizeof(CollectorHeap));
}

void Heap::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

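    // Collect only when the accumulated extra cost exceeds both a fixed
    // threshold (maxExtraCost) and half of the heap's own footprint.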
    if (m_heap.extraCost > maxExtraCost && m_heap.extraCost > m_heap.usedBlocks * BLOCK_SIZE / 2) {
        // If the last iteration through the heap deallocated blocks, we need
        // to clean up remaining garbage before marking. Otherwise, the conservative
        // marking mechanism might follow a pointer to unmapped memory.
        if (m_heap.didShrink)
            sweep();
        reset();
    }
    m_heap.extraCost += cost;
}

void* Heap::allocate(size_t s)
{
    typedef HeapConstants::Block Block;
    typedef HeapConstants::Cell Cell;

    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT_UNUSED(s, s <= HeapConstants::cellSize);

    ASSERT(m_heap.operationInProgress == NoOperation);

#if COLLECT_ON_EVERY_ALLOCATION
    collectAllGarbage();
    ASSERT(m_heap.operationInProgress == NoOperation);
#endif

allocate:

    // Fast case: find the next garbage cell and recycle it.

    do {
        ASSERT(m_heap.nextBlock < m_heap.usedBlocks);
        Block* block = reinterpret_cast<Block*>(m_heap.blocks[m_heap.nextBlock]);
        do {
            ASSERT(m_heap.nextCell < HeapConstants::cellsPerBlock);
            if (!block->marked.get(m_heap.nextCell)) { // Always false for the last cell in the block
                Cell* cell = block->cells + m_heap.nextCell;

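                // Lazy sweep: run the previous occupant's destructor now, at
                // allocation time, rather than in a separate sweep pass.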
                m_heap.operationInProgress = Allocation;
                JSCell* imp = reinterpret_cast<JSCell*>(cell);
                imp->~JSCell();
                m_heap.operationInProgress = NoOperation;

                ++m_heap.nextCell;
                return cell;
            }
        } while (++m_heap.nextCell != HeapConstants::cellsPerBlock);
        m_heap.nextCell = 0;
    } while (++m_heap.nextBlock != m_heap.usedBlocks);

    // Slow case: reached the end of the heap. Mark live objects and start over.

    reset();
    goto allocate;
}

void Heap::resizeBlocks()
{
    m_heap.didShrink = false;

    size_t usedCellCount = markedCells();
    size_t minCellCount = usedCellCount + max(ALLOCATIONS_PER_COLLECTION, usedCellCount);
    size_t minBlockCount = (minCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;

    size_t maxCellCount = 1.25f * minCellCount;
    size_t maxBlockCount = (maxCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;

    if (m_heap.usedBlocks < minBlockCount)
        growBlocks(minBlockCount);
    else if (m_heap.usedBlocks > maxBlockCount)
        shrinkBlocks(maxBlockCount);
}

void Heap::growBlocks(size_t neededBlocks)
{
    ASSERT(m_heap.usedBlocks < neededBlocks);
    while (m_heap.usedBlocks < neededBlocks)
        allocateBlock();
}

void Heap::shrinkBlocks(size_t neededBlocks)
{
    ASSERT(m_heap.usedBlocks > neededBlocks);

    // Clear the always-on last bit, so isEmpty() isn't fooled by it.
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        m_heap.blocks[i]->marked.clear(HeapConstants::cellsPerBlock - 1);

    for (size_t i = 0; i != m_heap.usedBlocks && m_heap.usedBlocks != neededBlocks; ) {
        if (m_heap.blocks[i]->marked.isEmpty())
            freeBlock(i);
        else
            ++i;
    }

    // Reset the always-on last bit.
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        m_heap.blocks[i]->marked.set(HeapConstants::cellsPerBlock - 1);
}

#if OS(WINCE)
void* g_stackBase = 0;

inline bool isPageWritable(void* page)
{
    MEMORY_BASIC_INFORMATION memoryInformation;
    DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation));

    // return false on error, including ptr outside memory
    if (result != sizeof(memoryInformation))
        return false;

    DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE);
    return protect == PAGE_READWRITE
        || protect == PAGE_WRITECOPY
        || protect == PAGE_EXECUTE_READWRITE
        || protect == PAGE_EXECUTE_WRITECOPY;
}

static void* getStackBase(void* previousFrame)
{
    // find the address of this stack frame by taking the address of a local variable
    bool isGrowingDownward;
    void* thisFrame = (void*)(&isGrowingDownward);

    isGrowingDownward = previousFrame < &thisFrame;
    static DWORD pageSize = 0;
    if (!pageSize) {
        SYSTEM_INFO systemInfo;
        GetSystemInfo(&systemInfo);
        pageSize = systemInfo.dwPageSize;
    }

    // scan all of memory starting from this frame, and return the last writeable page found
    register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1));
    if (isGrowingDownward) {
        while (currentPage > 0) {
            // check for underflow
            if (currentPage >= (char*)pageSize)
                currentPage -= pageSize;
            else
                currentPage = 0;
            if (!isPageWritable(currentPage))
                return currentPage + pageSize;
        }
        return 0;
    } else {
        while (true) {
            // guaranteed to complete because isPageWritable returns false at end of memory
            currentPage += pageSize;
            if (!isPageWritable(currentPage))
                return currentPage;
        }
    }
}
#endif

#if OS(HPUX)
struct hpux_get_stack_base_data
{
    pthread_t thread;
    _pthread_stack_info info;
};

static void *hpux_get_stack_base_internal(void *d)
{
    hpux_get_stack_base_data *data = static_cast<hpux_get_stack_base_data *>(d);

    // _pthread_stack_info_np requires the target thread to be suspended
    // in order to get information about it
    pthread_suspend(data->thread);

    // _pthread_stack_info_np returns an errno code in case of failure
    // or zero on success
    if (_pthread_stack_info_np(data->thread, &data->info)) {
        // Failed; resume the target before bailing out, so that the caller,
        // which is blocked in pthread_join(), is not left suspended forever.
        pthread_continue(data->thread);
        return 0;
    }

    pthread_continue(data->thread);
    return data;
}

static void *hpux_get_stack_base()
{
    hpux_get_stack_base_data data;
    data.thread = pthread_self();

    // We cannot get the stack information for the current thread
    // So we start a new thread to get that information and return it to us
    pthread_t other;
    pthread_create(&other, 0, hpux_get_stack_base_internal, &data);

    void *result;
    pthread_join(other, &result);
    if (result)
        return data.info.stk_stack_base;
    return 0;
}
#endif

static inline void* currentThreadStackBase()
{
#if OS(DARWIN)
    pthread_t thread = pthread_self();
    return pthread_get_stackaddr_np(thread);
#elif OS(WINCE)
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
    if (g_stackBase)
        return g_stackBase;
    else {
        int dummy;
        return getStackBase(&dummy);
    }
#elif OS(WINDOWS) && CPU(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    __asm {
        MOV EAX, FS:[18h]
        MOV pTib, EAX
    }
    return static_cast<void*>(pTib->StackBase);
#elif OS(WINDOWS) && CPU(X86_64) && (COMPILER(MSVC) || COMPILER(GCC))
    // FIXME: why only for MSVC?
    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
    return reinterpret_cast<void*>(pTib->StackBase);
#elif OS(WINDOWS) && CPU(X86) && COMPILER(GCC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    asm ( "movl %%fs:0x18, %0\n"
          : "=r" (pTib)
        );
    return static_cast<void*>(pTib->StackBase);
#elif OS(HPUX)
    return hpux_get_stack_base();
#elif OS(QNX)
    return (void *)(((uintptr_t)__tls() + __PAGESIZE - 1) & ~(__PAGESIZE - 1));
#elif OS(SOLARIS)
    stack_t s;
    thr_stksegment(&s);
    return s.ss_sp;
#elif OS(AIX)
    pthread_t thread = pthread_self();
    struct __pthrdsinfo threadinfo;
    char regbuf[256];
    int regbufsize = sizeof regbuf;

    if (pthread_getthrds_np(&thread, PTHRDSINFO_QUERY_ALL,
                            &threadinfo, sizeof threadinfo,
                            &regbuf, &regbufsize) == 0)
        return threadinfo.__pi_stackaddr;

    return 0;
#elif OS(OPENBSD)
    pthread_t thread = pthread_self();
    stack_t stack;
    pthread_stackseg_np(thread, &stack);
    return stack.ss_sp;
#elif OS(SYMBIAN)
    TThreadStackInfo info;
    RThread thread;
    thread.StackInfo(info);
    return (void*)info.iBase;
#elif OS(HAIKU)
    thread_info threadInfo;
    get_thread_info(find_thread(NULL), &threadInfo);
    return threadInfo.stack_end;
#elif OS(UNIX)
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H) || OS(NETBSD)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
        ASSERT(stackBase);
        pthread_attr_destroy(&sattr);
        stackThread = thread;
    }
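    // pthread_attr_getstack() reports the lowest addressable byte of the stack;
    // since the stack grows downward here, its base is stackBase + stackSize.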
    return static_cast<char*>(stackBase) + stackSize;
#else
#error Need a way to get the stack base on this platform
#endif
}

#if ENABLE(JSC_MULTIPLE_THREADS)

static inline PlatformThread getCurrentPlatformThread()
{
#if OS(DARWIN)
    return pthread_mach_thread_np(pthread_self());
#elif OS(WINDOWS)
    return pthread_getw32threadhandle_np(pthread_self());
#endif
}

void Heap::makeUsableFromMultipleThreads()
{
    if (m_currentThreadRegistrar)
        return;

    int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);
    if (error)
        CRASH();
}

void Heap::registerThread()
{
    ASSERT(!m_globalData->mainThreadOnly || isMainThread());

    if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
        return;

    pthread_setspecific(m_currentThreadRegistrar, this);
    Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

    MutexLocker lock(m_registeredThreadsMutex);

    thread->next = m_registeredThreads;
    m_registeredThreads = thread;
}

void Heap::unregisterThread(void* p)
{
    if (p)
        static_cast<Heap*>(p)->unregisterThread();
}

void Heap::unregisterThread()
{
    pthread_t currentPosixThread = pthread_self();

    MutexLocker lock(m_registeredThreadsMutex);

    if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
        Thread* t = m_registeredThreads;
        m_registeredThreads = m_registeredThreads->next;
        delete t;
    } else {
        Heap::Thread* last = m_registeredThreads;
        Heap::Thread* t;
        for (t = m_registeredThreads->next; t; t = t->next) {
            if (pthread_equal(t->posixThread, currentPosixThread)) {
                last->next = t->next;
                break;
            }
            last = t;
        }
        ASSERT(t); // If t is NULL, we never found ourselves in the list.
        delete t;
    }
}

#else // ENABLE(JSC_MULTIPLE_THREADS)

void Heap::registerThread()
{
}

#endif

inline bool isPointerAligned(void* p)
{
    return (((intptr_t)(p) & (sizeof(char*) - 1)) == 0);
}

// Cell size needs to be a power of two for isPossibleCell to be valid.
COMPILE_ASSERT(!(sizeof(CollectorCell) & (sizeof(CollectorCell) - 1)), Collector_cell_size_is_power_of_two);

#if USE(JSVALUE32)
static bool isHalfCellAligned(void *p)
{
    return (((intptr_t)(p) & (CELL_MASK >> 1)) == 0);
}

static inline bool isPossibleCell(void* p)
{
    return isHalfCellAligned(p) && p;
}

#else

static inline bool isCellAligned(void *p)
{
    return (((intptr_t)(p) & CELL_MASK) == 0);
}

static inline bool isPossibleCell(void* p)
{
    return isCellAligned(p) && p;
}
#endif // USE(JSVALUE32)

void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
{
    if (start > end) {
        void* tmp = start;
        start = end;
        end = tmp;
    }

    ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
    ASSERT(isPointerAligned(start));
    ASSERT(isPointerAligned(end));

    char** p = static_cast<char**>(start);
    char** e = static_cast<char**>(end);

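    // Conservative scan: treat every pointer-sized word in [start, end) that
    // plausibly points into a live collector block as a GC root.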
    CollectorBlock** blocks = m_heap.blocks;
    while (p != e) {
        char* x = *p++;
        if (isPossibleCell(x)) {
            size_t usedBlocks;
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
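            // Round the candidate down to its cell base; with JSVALUE32
            // half-cell alignment, a value may point at the second half of a cell.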
            xAsBits &= CELL_ALIGN_MASK;

            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);
            if (offset > lastCellOffset)
                continue;

            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
            usedBlocks = m_heap.usedBlocks;
            for (size_t block = 0; block < usedBlocks; block++) {
                if (blocks[block] != blockAddr)
                    continue;
                markStack.append(reinterpret_cast<JSCell*>(xAsBits));
                markStack.drain();
            }
        }
    }
}

void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal(MarkStack& markStack)
{
    void* dummy;
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();
    markConservatively(markStack, stackPointer, stackBase);
}

#if COMPILER(GCC)
#define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
#else
#define REGISTER_BUFFER_ALIGNMENT
#endif

void Heap::markCurrentThreadConservatively(MarkStack& markStack)
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers REGISTER_BUFFER_ALIGNMENT;
#if COMPILER(MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if COMPILER(MSVC)
#pragma warning(pop)
#endif

    markCurrentThreadConservativelyInternal(markStack);
}

#if ENABLE(JSC_MULTIPLE_THREADS)

static inline void suspendThread(const PlatformThread& platformThread)
{
#if OS(DARWIN)
    thread_suspend(platformThread);
#elif OS(WINDOWS)
    SuspendThread(platformThread);
#else
#error Need a way to suspend threads on this platform
#endif
}

static inline void resumeThread(const PlatformThread& platformThread)
{
#if OS(DARWIN)
    thread_resume(platformThread);
#elif OS(WINDOWS)
    ResumeThread(platformThread);
#else
#error Need a way to resume threads on this platform
#endif
}

typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

#if OS(DARWIN)

#if CPU(X86)
typedef i386_thread_state_t PlatformThreadRegisters;
#elif CPU(X86_64)
typedef x86_thread_state64_t PlatformThreadRegisters;
#elif CPU(PPC)
typedef ppc_thread_state_t PlatformThreadRegisters;
#elif CPU(PPC64)
typedef ppc_thread_state64_t PlatformThreadRegisters;
#elif CPU(ARM)
typedef arm_thread_state_t PlatformThreadRegisters;
#else
#error Unknown Architecture
#endif

#elif OS(WINDOWS) && CPU(X86)
typedef CONTEXT PlatformThreadRegisters;
#else
#error Need a thread register struct for this platform
#endif

static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
{
#if OS(DARWIN)

#if CPU(X86)
    unsigned user_count = sizeof(regs) / sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif CPU(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif CPU(PPC)
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif CPU(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#elif CPU(ARM)
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#else
#error Unknown Architecture
#endif

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(usword_t);
// end OS(DARWIN)

#elif OS(WINDOWS) && CPU(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread, &regs);
    return sizeof(CONTEXT);
#else
#error Need a way to get thread registers on this platform
#endif
}

static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
{
#if OS(DARWIN)

#if __DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.__esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.__rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.__r1);
#elif CPU(ARM)
    return reinterpret_cast<void*>(regs.__sp);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.r1);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end OS(DARWIN)
#elif CPU(X86) && OS(WINDOWS)
    return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}

void Heap::markOtherThreadConservatively(MarkStack& markStack, Thread* thread)
{
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markConservatively(markStack, static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markConservatively(markStack, stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
}

#endif

void Heap::markStackObjectsConservatively(MarkStack& markStack)
{
    markCurrentThreadConservatively(markStack);

#if ENABLE(JSC_MULTIPLE_THREADS)

    if (m_currentThreadRegistrar) {

        MutexLocker lock(m_registeredThreadsMutex);

#ifndef NDEBUG
        // Forbid malloc during the mark phase. Marking a thread suspends it, so
        // a malloc inside markChildren() would risk a deadlock with a thread that had been
        // suspended while holding the malloc lock.
        fastMallocForbid();
#endif
        // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
        // and since this is a shared heap, they are real locks.
        for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
            if (!pthread_equal(thread->posixThread, pthread_self()))
                markOtherThreadConservatively(markStack, thread);
        }
#ifndef NDEBUG
        fastMallocAllow();
#endif
    }
#endif
}

void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

void Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (!k.isCell())
        return;

    m_protectedValues.remove(k.asCell());
}
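
// Typical usage (sketch): callers that cache a JSValue across allocations pair
// these calls, protect(v) ... unprotect(v), usually via the gcProtect() /
// gcUnprotect() helpers; m_protectedValues is a counted set, so nesting works.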

void Heap::markProtectedObjects(MarkStack& markStack)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        markStack.append(it->first);
        markStack.drain();
    }
}

void Heap::clearMarkBits()
{
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        clearMarkBits(m_heap.blocks[i]);
}

void Heap::clearMarkBits(CollectorBlock* block)
{
    // allocate assumes that the last cell in every block is marked.
    block->marked.clearAll();
    block->marked.set(HeapConstants::cellsPerBlock - 1);
}

size_t Heap::markedCells(size_t startBlock, size_t startCell) const
{
    ASSERT(startBlock <= m_heap.usedBlocks);
    ASSERT(startCell < HeapConstants::cellsPerBlock);

    if (startBlock >= m_heap.usedBlocks)
        return 0;

    size_t result = 0;
    result += m_heap.blocks[startBlock]->marked.count(startCell);
    for (size_t i = startBlock + 1; i < m_heap.usedBlocks; ++i)
        result += m_heap.blocks[i]->marked.count();

    return result;
}

void Heap::sweep()
{
    ASSERT(m_heap.operationInProgress == NoOperation);
    if (m_heap.operationInProgress != NoOperation)
        CRASH();
    m_heap.operationInProgress = Collection;

#if !ENABLE(JSC_ZOMBIES)
    Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
#endif

    DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
    DeadObjectIterator end(m_heap, m_heap.usedBlocks);
    for ( ; it != end; ++it) {
        JSCell* cell = *it;
#if ENABLE(JSC_ZOMBIES)
        if (!cell->isZombie()) {
            const ClassInfo* info = cell->classInfo();
            cell->~JSCell();
            new (cell) JSZombie(info, JSZombie::leakedZombieStructure());
            Heap::markCell(cell);
        }
#else
        cell->~JSCell();
        // Callers of sweep assume it's safe to mark any cell in the heap.
        new (cell) JSCell(dummyMarkableCellStructure);
#endif
    }

    m_heap.operationInProgress = NoOperation;
}

void Heap::markRoots()
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif

    ASSERT(m_heap.operationInProgress == NoOperation);
    if (m_heap.operationInProgress != NoOperation)
        CRASH();

    m_heap.operationInProgress = Collection;

    MarkStack& markStack = m_globalData->markStack;

    // Reset mark bits.
    clearMarkBits();

    // Mark stack roots.
    markStackObjectsConservatively(markStack);
    m_globalData->interpreter->registerFile().markCallFrames(markStack, this);

    // Mark explicitly registered roots.
    markProtectedObjects(markStack);

#if QT_BUILD_SCRIPT_LIB
    if (m_globalData->clientData)
        m_globalData->clientData->mark(markStack);
#endif

    // Mark misc. other roots.
    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(markStack, *m_markListSet);
    if (m_globalData->exception)
        markStack.append(m_globalData->exception);
    m_globalData->smallStrings.markChildren(markStack);
    if (m_globalData->functionCodeBlockBeingReparsed)
        m_globalData->functionCodeBlockBeingReparsed->markAggregate(markStack);
    if (m_globalData->firstStringifierToMark)
        JSONObject::markStringifiers(markStack, m_globalData->firstStringifierToMark);

    markStack.drain();
    markStack.compact();

    m_heap.operationInProgress = NoOperation;
}

size_t Heap::objectCount() const
{
    return m_heap.nextBlock * HeapConstants::cellsPerBlock // allocated full blocks
        + m_heap.nextCell // allocated cells in current block
        + markedCells(m_heap.nextBlock, m_heap.nextCell) // marked cells in remainder of m_heap
        - m_heap.usedBlocks; // 1 cell per block is a dummy sentinel
}

void Heap::addToStatistics(Heap::Statistics& statistics) const
{
    statistics.size += m_heap.usedBlocks * BLOCK_SIZE;
    statistics.free += m_heap.usedBlocks * BLOCK_SIZE - (objectCount() * HeapConstants::cellSize);
}

Heap::Statistics Heap::statistics() const
{
    Statistics statistics = { 0, 0 };
    addToStatistics(statistics);
    return statistics;
}

size_t Heap::globalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            ++count;
            o = o->next();
        } while (o != head);
    }
    return count;
}

size_t Heap::protectedGlobalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            if (m_protectedValues.contains(o))
                ++count;
            o = o->next();
        } while (o != head);
    }

    return count;
}

size_t Heap::protectedObjectCount()
{
    return m_protectedValues.size();
}

static const char* typeName(JSCell* cell)
{
    if (cell->isString())
        return "string";
#if USE(JSVALUE32)
    if (cell->isNumber())
        return "number";
#endif
    if (cell->isGetterSetter())
        return "gettersetter";
    if (cell->isAPIValueWrapper())
        return "value wrapper";
    if (cell->isPropertyNameIterator())
        return "for-in iterator";
    ASSERT(cell->isObject());
    const ClassInfo* info = cell->classInfo();
    return info ? info->className : "Object";
}

HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    return counts;
}

bool Heap::isBusy()
{
    return m_heap.operationInProgress != NoOperation;
}

void Heap::reset()
{
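    // reset() does not sweep eagerly: dead cells are reclaimed lazily by
    // allocate() (except in JSC_ZOMBIES builds, which sweep below), whereas
    // collectAllGarbage() sweeps the whole heap immediately.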
    JAVASCRIPTCORE_GC_BEGIN();

    markRoots();

    JAVASCRIPTCORE_GC_MARKED();

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    m_heap.nextNumber = 0;
    m_heap.extraCost = 0;
#if ENABLE(JSC_ZOMBIES)
    sweep();
#endif
    resizeBlocks();

    JAVASCRIPTCORE_GC_END();
}

void Heap::collectAllGarbage()
{
    JAVASCRIPTCORE_GC_BEGIN();

    // If the last iteration through the heap deallocated blocks, we need
    // to clean up remaining garbage before marking. Otherwise, the conservative
    // marking mechanism might follow a pointer to unmapped memory.
    if (m_heap.didShrink)
        sweep();

    markRoots();

    JAVASCRIPTCORE_GC_MARKED();

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    m_heap.nextNumber = 0;
    m_heap.extraCost = 0;
    sweep();
    resizeBlocks();

    JAVASCRIPTCORE_GC_END();
}

LiveObjectIterator Heap::primaryHeapBegin()
{
    return LiveObjectIterator(m_heap, 0);
}

LiveObjectIterator Heap::primaryHeapEnd()
{
    return LiveObjectIterator(m_heap, m_heap.usedBlocks);
}

} // namespace JSC