|
@@ -41,6 +41,7 @@ namespace internal {
          && (info).top <= (space).high()              \
          && (info).limit == (space).high())
 
+intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;
 
 // ----------------------------------------------------------------------------
 // HeapObjectIterator
@@ -138,13 +139,6 @@ PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
 }
 
 
-// -----------------------------------------------------------------------------
-// Page
-
-#ifdef DEBUG
-Page::RSetState Page::rset_state_ = Page::IN_USE;
-#endif
-
 // -----------------------------------------------------------------------------
 // CodeRange
@@ -524,7 +518,10 @@ Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
   for (int i = 0; i < pages_in_chunk; i++) {
     Page* p = Page::FromAddress(page_addr);
     p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
+    p->InvalidateWatermark(true);
     p->SetIsLargeObjectPage(false);
+    p->SetAllocationWatermark(p->ObjectAreaStart());
+    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
     page_addr += Page::kPageSize;
   }
@@ -681,6 +678,7 @@ Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
     p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
     page_addr += Page::kPageSize;
 
+    p->InvalidateWatermark(true);
     if (p->WasInUseBeforeMC()) {
       *last_page_in_use = p;
     }
@@ -744,10 +742,10 @@ bool PagedSpace::Setup(Address start, size_t size) {
   accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
   ASSERT(Capacity() <= max_capacity_);
 
-  // Sequentially initialize remembered sets in the newly allocated
+  // Sequentially clear region marks in the newly allocated
   // pages and cache the current last page in the space.
   for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
-    p->ClearRSet();
+    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
     last_page_ = p;
   }
@@ -794,10 +792,10 @@ void PagedSpace::Unprotect() {
 #endif
 
 
-void PagedSpace::ClearRSet() {
+void PagedSpace::MarkAllPagesClean() {
   PageIterator it(this, PageIterator::ALL_PAGES);
   while (it.has_next()) {
-    it.next()->ClearRSet();
+    it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
   }
 }
@@ -900,7 +898,8 @@ HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
   // of forwarding addresses is as an offset in terms of live bytes, so we
   // need quick access to the allocation top of each page to decode
   // forwarding addresses.
-  current_page->mc_relocation_top = mc_forwarding_info_.top;
+  current_page->SetAllocationWatermark(mc_forwarding_info_.top);
+  current_page->next_page()->InvalidateWatermark(true);
   SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
   return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
 }
@@ -928,10 +927,10 @@ bool PagedSpace::Expand(Page* last_page) {
   MemoryAllocator::SetNextPage(last_page, p);
 
-  // Sequentially clear remembered set of new pages and and cache the
+  // Sequentially clear region marks of new pages and and cache the
   // new last page in the space.
   while (p->is_valid()) {
-    p->ClearRSet();
+    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
     last_page_ = p;
     p = p->next_page();
   }
@@ -1030,16 +1029,11 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
     if (above_allocation_top) {
       // We don't care what's above the allocation top.
     } else {
-      // Unless this is the last page in the space containing allocated
-      // objects, the allocation top should be at a constant offset from the
-      // object area end.
      Address top = current_page->AllocationTop();
       if (current_page == top_page) {
         ASSERT(top == allocation_info_.top);
         // The next page will be above the allocation top.
         above_allocation_top = true;
-      } else {
-        ASSERT(top == PageAllocationLimit(current_page));
       }
 
       // It should be packed with objects from the bottom to the top.
@@ -1060,8 +1054,8 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
       object->Verify();
 
       // All the interior pointers should be contained in the heap and
-      // have their remembered set bits set if required as determined
-      // by the visitor.
+      // have page regions covering intergenerational references should be
+      // marked dirty.
       int size = object->Size();
       object->IterateBody(map->instance_type(), size, visitor);
@@ -1120,7 +1114,7 @@ bool NewSpace::Setup(Address start, int size) {
   start_ = start;
   address_mask_ = ~(size - 1);
-  object_mask_ = address_mask_ | kHeapObjectTag;
+  object_mask_ = address_mask_ | kHeapObjectTagMask;
   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
 
   allocation_info_.top = to_space_.low();
@@ -1324,7 +1318,7 @@ bool SemiSpace::Setup(Address start,
   start_ = start;
   address_mask_ = ~(maximum_capacity - 1);
-  object_mask_ = address_mask_ | kHeapObjectTag;
+  object_mask_ = address_mask_ | kHeapObjectTagMask;
   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
   age_mark_ = start_;
@@ -1634,7 +1628,7 @@ void FreeListNode::set_size(int size_in_bytes) {
   // If the block is too small (eg, one or two words), to hold both a size
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
-  if (size_in_bytes > ByteArray::kAlignedSize) {
+  if (size_in_bytes > ByteArray::kHeaderSize) {
     set_map(Heap::raw_unchecked_byte_array_map());
     // Can't use ByteArray::cast because it fails during deserialization.
     ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
@@ -1831,7 +1825,7 @@ FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
 
 
 void FixedSizeFreeList::Reset() {
   available_ = 0;
-  head_ = NULL;
+  head_ = tail_ = NULL;
 }
@@ -1843,8 +1837,13 @@ void FixedSizeFreeList::Free(Address start) {
   ASSERT(!MarkCompactCollector::IsCompacting());
   FreeListNode* node = FreeListNode::FromAddress(start);
   node->set_size(object_size_);
-  node->set_next(head_);
-  head_ = node->address();
+  node->set_next(NULL);
+  if (head_ == NULL) {
+    tail_ = head_ = node->address();
+  } else {
+    FreeListNode::FromAddress(tail_)->set_next(node->address());
+    tail_ = node->address();
+  }
   available_ += object_size_;
 }
@@ -1907,15 +1906,14 @@ void OldSpace::MCCommitRelocationInfo() {
     Page* p = it.next();
     // Space below the relocation pointer is allocated.
     computed_size +=
-        static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart());
+        static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
     if (it.has_next()) {
-      // Free the space at the top of the page. We cannot use
-      // p->mc_relocation_top after the call to Free (because Free will clear
-      // remembered set bits).
+      // Free the space at the top of the page.
       int extra_size =
-          static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top);
+          static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
       if (extra_size > 0) {
-        int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
+        int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
+                                           extra_size);
         // The bytes we have just "freed" to add to the free list were
         // already accounted as available.
         accounting_stats_.WasteBytes(wasted_bytes);
@@ -1963,7 +1961,10 @@ void PagedSpace::FreePages(Page* prev, Page* last) {
 
   // Clean them up.
   do {
-    first->ClearRSet();
+    first->InvalidateWatermark(true);
+    first->SetAllocationWatermark(first->ObjectAreaStart());
+    first->SetCachedAllocationWatermark(first->ObjectAreaStart());
+    first->SetRegionMarks(Page::kAllRegionsCleanMarks);
     first = first->next_page();
   } while (first != NULL);
@@ -2003,6 +2004,7 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
       // Current allocation top points to a page which is now in the middle
       // of page list. We should move allocation top forward to the new last
       // used page so various object iterators will continue to work properly.
+      last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
 
       int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
                                            last_in_use->AllocationTop());
@@ -2035,6 +2037,7 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
         int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
                                              p->ObjectAreaStart());
+        p->SetAllocationWatermark(p->ObjectAreaStart());
         Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
       }
     }
@@ -2066,6 +2069,7 @@ bool PagedSpace::ReserveSpace(int bytes) {
     if (!reserved_page->is_valid()) return false;
   }
   ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
+  TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
   SetAllocationInfo(&allocation_info_,
                     TopPageOf(allocation_info_)->next_page());
   return true;
@@ -2100,7 +2104,20 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
     accounting_stats_.WasteBytes(wasted_bytes);
     if (!result->IsFailure()) {
       accounting_stats_.AllocateBytes(size_in_bytes);
-      return HeapObject::cast(result);
+
+      HeapObject* obj = HeapObject::cast(result);
+      Page* p = Page::FromAddress(obj->address());
+
+      if (obj->address() >= p->AllocationWatermark()) {
+        // There should be no hole between the allocation watermark
+        // and allocated object address.
+        // Memory above the allocation watermark was not swept and
+        // might contain garbage pointers to new space.
+        ASSERT(obj->address() == p->AllocationWatermark());
+        p->SetAllocationWatermark(obj->address() + size_in_bytes);
+      }
+
+      return obj;
     }
   }
@@ -2123,6 +2140,7 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
 
 
 void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
+  current_page->SetAllocationWatermark(allocation_info_.top);
   int free_size =
       static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
   if (free_size > 0) {
@@ -2133,6 +2151,7 @@ void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
 
 
 void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
+  current_page->SetAllocationWatermark(allocation_info_.top);
   int free_size =
       static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
   // In the fixed space free list all the free list items have the right size.
@@ -2152,8 +2171,10 @@ void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
 HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
                                          int size_in_bytes) {
   ASSERT(current_page->next_page()->is_valid());
+  Page* next_page = current_page->next_page();
+  next_page->ClearGCFields();
   PutRestOfCurrentPageOnFreeList(current_page);
-  SetAllocationInfo(&allocation_info_, current_page->next_page());
+  SetAllocationInfo(&allocation_info_, next_page);
   return AllocateLinearly(&allocation_info_, size_in_bytes);
 }
@@ -2296,160 +2317,12 @@ void OldSpace::ReportStatistics() {
   PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
          Capacity(), Waste(), Available(), pct);
 
-  // Report remembered set statistics.
-  int rset_marked_pointers = 0;
-  int rset_marked_arrays = 0;
-  int rset_marked_array_elements = 0;
-  int cross_gen_pointers = 0;
-  int cross_gen_array_elements = 0;
-
-  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
-  while (page_it.has_next()) {
-    Page* p = page_it.next();
-
-    for (Address rset_addr = p->RSetStart();
-         rset_addr < p->RSetEnd();
-         rset_addr += kIntSize) {
-      int rset = Memory::int_at(rset_addr);
-      if (rset != 0) {
-        // Bits were set
-        int intoff =
-            static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
-        int bitoff = 0;
-        for (; bitoff < kBitsPerInt; ++bitoff) {
-          if ((rset & (1 << bitoff)) != 0) {
-            int bitpos = intoff*kBitsPerByte + bitoff;
-            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
-            Object** obj = reinterpret_cast<Object**>(slot);
-            if (*obj == Heap::raw_unchecked_fixed_array_map()) {
-              rset_marked_arrays++;
-              FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
-
-              rset_marked_array_elements += fa->length();
-              // Manually inline FixedArray::IterateBody
-              Address elm_start = slot + FixedArray::kHeaderSize;
-              Address elm_stop = elm_start + fa->length() * kPointerSize;
-              for (Address elm_addr = elm_start;
-                   elm_addr < elm_stop; elm_addr += kPointerSize) {
-                // Filter non-heap-object pointers
-                Object** elm_p = reinterpret_cast<Object**>(elm_addr);
-                if (Heap::InNewSpace(*elm_p))
-                  cross_gen_array_elements++;
-              }
-            } else {
-              rset_marked_pointers++;
-              if (Heap::InNewSpace(*obj))
-                cross_gen_pointers++;
-            }
-          }
-        }
-      }
-    }
-  }
-
-  pct = rset_marked_pointers == 0 ?
-          0 : cross_gen_pointers * 100 / rset_marked_pointers;
-  PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
-         rset_marked_pointers, cross_gen_pointers, pct);
-  PrintF(" rset_marked arrays %d, ", rset_marked_arrays);
-  PrintF(" elements %d, ", rset_marked_array_elements);
-  pct = rset_marked_array_elements == 0 ? 0
-        : cross_gen_array_elements * 100 / rset_marked_array_elements;
-  PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
-  PrintF(" total rset-marked bits %d\n",
-         (rset_marked_pointers + rset_marked_arrays));
-  pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
-        : (cross_gen_pointers + cross_gen_array_elements) * 100 /
-          (rset_marked_pointers + rset_marked_array_elements);
-  PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
-         (rset_marked_pointers + rset_marked_array_elements),
-         (cross_gen_pointers + cross_gen_array_elements),
-         pct);
-
   ClearHistograms();
   HeapObjectIterator obj_it(this);
   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
     CollectHistogramInfo(obj);
   ReportHistogram(true);
 }
 
 
-// Dump the range of remembered set words between [start, end) corresponding
-// to the pointers starting at object_p. The allocation_top is an object
-// pointer which should not be read past. This is important for large object
-// pages, where some bits in the remembered set range do not correspond to
-// allocated addresses.
-static void PrintRSetRange(Address start, Address end, Object** object_p,
-                           Address allocation_top) {
-  Address rset_address = start;
-
-  // If the range starts on on odd numbered word (eg, for large object extra
-  // remembered set ranges), print some spaces.
-  if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
-    PrintF(" ");
-  }
-
-  // Loop over all the words in the range.
-  while (rset_address < end) {
-    uint32_t rset_word = Memory::uint32_at(rset_address);
-    int bit_position = 0;
-
-    // Loop over all the bits in the word.
-    while (bit_position < kBitsPerInt) {
-      if (object_p == reinterpret_cast<Object**>(allocation_top)) {
-        // Print a bar at the allocation pointer.
-        PrintF("|");
-      } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
-        // Do not dereference object_p past the allocation pointer.
-        PrintF("#");
-      } else if ((rset_word & (1 << bit_position)) == 0) {
-        // Print a dot for zero bits.
-        PrintF(".");
-      } else if (Heap::InNewSpace(*object_p)) {
-        // Print an X for one bits for pointers to new space.
-        PrintF("X");
-      } else {
-        // Print a circle for one bits for pointers to old space.
-        PrintF("o");
-      }
-
-      // Print a space after every 8th bit except the last.
-      if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
-        PrintF(" ");
-      }
-
-      // Advance to next bit.
-      bit_position++;
-      object_p++;
-    }
-
-    // Print a newline after every odd numbered word, otherwise a space.
-    if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
-      PrintF("\n");
-    } else {
-      PrintF(" ");
-    }
-
-    // Advance to next remembered set word.
-    rset_address += kIntSize;
-  }
-}
-
-
-void PagedSpace::DoPrintRSet(const char* space_name) {
-  PageIterator it(this, PageIterator::PAGES_IN_USE);
-  while (it.has_next()) {
-    Page* p = it.next();
-    PrintF("%s page 0x%x:\n", space_name, p);
-    PrintRSetRange(p->RSetStart(), p->RSetEnd(),
-                   reinterpret_cast<Object**>(p->ObjectAreaStart()),
-                   p->AllocationTop());
-    PrintF("\n");
-  }
-}
-
-
-void OldSpace::PrintRSet() { DoPrintRSet("old"); }
 #endif
 
 // -----------------------------------------------------------------------------
@@ -2499,6 +2372,7 @@ void FixedSpace::MCCommitRelocationInfo() {
     if (it.has_next()) {
       accounting_stats_.WasteBytes(
           static_cast<int>(page->ObjectAreaEnd() - page_top));
+      page->SetAllocationWatermark(page_top);
     }
   }
@@ -2528,7 +2402,19 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
     Object* result = free_list_.Allocate();
     if (!result->IsFailure()) {
       accounting_stats_.AllocateBytes(size_in_bytes);
-      return HeapObject::cast(result);
+      HeapObject* obj = HeapObject::cast(result);
+      Page* p = Page::FromAddress(obj->address());
+
+      if (obj->address() >= p->AllocationWatermark()) {
+        // There should be no hole between the allocation watermark
+        // and allocated object address.
+        // Memory above the allocation watermark was not swept and
+        // might contain garbage pointers to new space.
+        ASSERT(obj->address() == p->AllocationWatermark());
+        p->SetAllocationWatermark(obj->address() + size_in_bytes);
+      }
+
+      return obj;
     }
   }
@@ -2558,8 +2444,11 @@ HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
   ASSERT(current_page->next_page()->is_valid());
   ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
   ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
+  Page* next_page = current_page->next_page();
+  next_page->ClearGCFields();
+  current_page->SetAllocationWatermark(allocation_info_.top);
   accounting_stats_.WasteBytes(page_extra_);
-  SetAllocationInfo(&allocation_info_, current_page->next_page());
+  SetAllocationInfo(&allocation_info_, next_page);
   return AllocateLinearly(&allocation_info_, size_in_bytes);
 }
@@ -2570,51 +2459,12 @@ void FixedSpace::ReportStatistics() {
   PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
          Capacity(), Waste(), Available(), pct);
 
-  // Report remembered set statistics.
-  int rset_marked_pointers = 0;
-  int cross_gen_pointers = 0;
-
-  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
-  while (page_it.has_next()) {
-    Page* p = page_it.next();
-
-    for (Address rset_addr = p->RSetStart();
-         rset_addr < p->RSetEnd();
-         rset_addr += kIntSize) {
-      int rset = Memory::int_at(rset_addr);
-      if (rset != 0) {
-        // Bits were set
-        int intoff =
-            static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
-        int bitoff = 0;
-        for (; bitoff < kBitsPerInt; ++bitoff) {
-          if ((rset & (1 << bitoff)) != 0) {
-            int bitpos = intoff*kBitsPerByte + bitoff;
-            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
-            Object** obj = reinterpret_cast<Object**>(slot);
-            rset_marked_pointers++;
-            if (Heap::InNewSpace(*obj))
-              cross_gen_pointers++;
-          }
-        }
-      }
-    }
-  }
-
-  pct = rset_marked_pointers == 0 ?
-          0 : cross_gen_pointers * 100 / rset_marked_pointers;
-  PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
-         rset_marked_pointers, cross_gen_pointers, pct);
-
   ClearHistograms();
   HeapObjectIterator obj_it(this);
   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
     CollectHistogramInfo(obj);
   ReportHistogram(false);
 }
 
 
-void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
 #endif
@@ -2793,8 +2643,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
   chunk->set_size(chunk_size);
   first_chunk_ = chunk;
 
-  // Set the object address and size in the page header and clear its
-  // remembered set.
+  // Initialize page header.
   Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
   Address object_address = page->ObjectAreaStart();
   // Clear the low order bit of the second word in the page to flag it as a
@@ -2802,13 +2651,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
   // low order bit should already be clear.
   ASSERT((chunk_size & 0x1) == 0);
   page->SetIsLargeObjectPage(true);
-  page->ClearRSet();
-  int extra_bytes = requested_size - object_size;
-  if (extra_bytes > 0) {
-    // The extra memory for the remembered set should be cleared.
-    memset(object_address + object_size, 0, extra_bytes);
-  }
-
+  page->SetRegionMarks(Page::kAllRegionsCleanMarks);
   return HeapObject::FromAddress(object_address);
 }
@@ -2823,8 +2666,7 @@ Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
 
 Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
   ASSERT(0 < size_in_bytes);
-  int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
-  return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
+  return AllocateRawInternal(size_in_bytes,
                              size_in_bytes,
                              NOT_EXECUTABLE);
 }
@@ -2851,59 +2693,61 @@ Object* LargeObjectSpace::FindObject(Address a) {
   return Failure::Exception();
 }
 
-void LargeObjectSpace::ClearRSet() {
-  ASSERT(Page::is_rset_in_use());
-
-  LargeObjectIterator it(this);
-  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
-    // We only have code, sequential strings, or fixed arrays in large
-    // object space, and only fixed arrays need remembered set support.
-    if (object->IsFixedArray()) {
-      // Clear the normal remembered set region of the page;
-      Page* page = Page::FromAddress(object->address());
-      page->ClearRSet();
-
-      // Clear the extra remembered set.
-      int size = object->Size();
-      int extra_rset_bytes = ExtraRSetBytesFor(size);
-      memset(object->address() + size, 0, extra_rset_bytes);
-    }
-  }
-}
-
-
-void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
-  ASSERT(Page::is_rset_in_use());
-
-  static void* lo_rset_histogram = StatsTable::CreateHistogram(
-      "V8.RSetLO",
-      0,
-      // Keeping this histogram's buckets the same as the paged space histogram.
-      Page::kObjectAreaSize / kPointerSize,
-      30);
-
+void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
   LargeObjectIterator it(this);
   for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
     // We only have code, sequential strings, or fixed arrays in large
     // object space, and only fixed arrays can possibly contain pointers to
     // the young generation.
     if (object->IsFixedArray()) {
-      // Iterate the normal page remembered set range.
       Page* page = Page::FromAddress(object->address());
-      Address object_end = object->address() + object->Size();
-      int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
-                                         Min(page->ObjectAreaEnd(), object_end),
-                                         page->RSetStart(),
-                                         copy_object_func);
+      uint32_t marks = page->GetRegionMarks();
+      uint32_t newmarks = Page::kAllRegionsCleanMarks;
 
-      // Iterate the extra array elements.
-      if (object_end > page->ObjectAreaEnd()) {
-        count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
-                                        object_end, copy_object_func);
-      }
-      if (lo_rset_histogram != NULL) {
-        StatsTable::AddHistogramSample(lo_rset_histogram, count);
+      if (marks != Page::kAllRegionsCleanMarks) {
+        // For a large page a single dirty mark corresponds to several
+        // regions (modulo 32). So we treat a large page as a sequence of
+        // normal pages of size Page::kPageSize having same dirty marks
+        // and subsequently iterate dirty regions on each of these pages.
+        Address start = object->address();
+        Address end = page->ObjectAreaEnd();
+        Address object_end = start + object->Size();
+
+        // Iterate regions of the first normal page covering object.
+        uint32_t first_region_number = page->GetRegionNumberForAddress(start);
+        newmarks |=
+            Heap::IterateDirtyRegions(marks >> first_region_number,
+                                      start,
+                                      end,
+                                      &Heap::IteratePointersInDirtyRegion,
+                                      copy_object) << first_region_number;
+
+        start = end;
+        end = start + Page::kPageSize;
+        while (end <= object_end) {
+          // Iterate next 32 regions.
+          newmarks |=
+              Heap::IterateDirtyRegions(marks,
+                                        start,
+                                        end,
+                                        &Heap::IteratePointersInDirtyRegion,
+                                        copy_object);
+          start = end;
+          end = start + Page::kPageSize;
+        }
+
+        if (start != object_end) {
+          // Iterate the last piece of an object which is less than
+          // Page::kPageSize.
+          newmarks |=
+              Heap::IterateDirtyRegions(marks,
+                                        start,
+                                        object_end,
+                                        &Heap::IteratePointersInDirtyRegion,
+                                        copy_object);
+        }
+
+        page->SetRegionMarks(newmarks);
       }
     }
   }
 }
@@ -2995,7 +2839,7 @@ void LargeObjectSpace::Verify() {
     } else if (object->IsFixedArray()) {
       // We loop over fixed arrays ourselves, rather then using the visitor,
       // because the visitor doesn't support the start/offset iteration
-      // needed for IsRSetSet.
+      // needed for IsRegionDirty.
       FixedArray* array = FixedArray::cast(object);
       for (int j = 0; j < array->length(); j++) {
         Object* element = array->get(j);
@@ -3004,8 +2848,11 @@ void LargeObjectSpace::Verify() {
           ASSERT(Heap::Contains(element_object));
           ASSERT(element_object->map()->IsMap());
           if (Heap::InNewSpace(element_object)) {
-            ASSERT(Page::IsRSetSet(object->address(),
-                                   FixedArray::kHeaderSize + j * kPointerSize));
+            Address array_addr = object->address();
+            Address element_addr = array_addr + FixedArray::kHeaderSize +
+                                   j * kPointerSize;
+
+            ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
           }
         }
       }
@@ -3046,33 +2893,6 @@ void LargeObjectSpace::CollectCodeStatistics() {
     }
   }
 }
 
 
-void LargeObjectSpace::PrintRSet() {
-  LargeObjectIterator it(this);
-  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
-    if (object->IsFixedArray()) {
-      Page* page = Page::FromAddress(object->address());
-
-      Address allocation_top = object->address() + object->Size();
-      PrintF("large page 0x%x:\n", page);
-      PrintRSetRange(page->RSetStart(), page->RSetEnd(),
-                     reinterpret_cast<Object**>(object->address()),
-                     allocation_top);
-      int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
-      int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
-                                    kBitsPerInt);
-      PrintF("------------------------------------------------------------"
-             "-----------\n");
-      PrintRSetRange(allocation_top,
-                     allocation_top + extra_rset_bits / kBitsPerByte,
-                     reinterpret_cast<Object**>(object->address()
-                                                + Page::kObjectAreaSize),
-                     allocation_top);
-      PrintF("\n");
-    }
-  }
-}
-
 #endif // DEBUG
 
 } } // namespace v8::internal