v8: Upgrade to 3.11.10.14

v0.8.7-release
isaacs authored 13 years ago; committed by Bert Belder
commit 5b5c8b6005
Files changed:

  1. deps/v8/build/common.gypi         (7 lines changed)
  2. deps/v8/src/heap.cc               (22 lines changed)
  3. deps/v8/src/mark-compact.cc       (42 lines changed)
  4. deps/v8/src/version.cc            (2 lines changed)
  5. deps/v8/test/cctest/test-alloc.cc (3 lines changed)
  6. deps/v8/test/cctest/test-heap.cc  (39 lines changed)
  7. deps/v8/tools/merge-to-branch.sh  (0 lines changed)

deps/v8/build/common.gypi (7 lines changed)

@@ -239,6 +239,7 @@
         'WIN32',
       ],
       'msvs_configuration_attributes': {
+        'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
         'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
         'CharacterSet': '1',
       },
@@ -270,7 +271,7 @@
       'target_conditions': [
         ['_toolset=="host"', {
           'variables': {
-            'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
+            'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
           },
           'cflags': [ '<(m32flag)' ],
           'ldflags': [ '<(m32flag)' ],
@@ -280,7 +281,7 @@
         }],
         ['_toolset=="target"', {
           'variables': {
-            'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
+            'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
           },
           'cflags': [ '<(m32flag)' ],
           'ldflags': [ '<(m32flag)' ],
@@ -323,7 +324,7 @@
     },
     'conditions': [
       ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
-        'cflags': [ '-Wno-unused-parameter',
+        'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
                     '-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
      }],
    ],
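The m32flag hunks swap `echo` for `echo -n` so the probed flag is captured without a trailing newline. As a rough illustration only (not part of the commit), here is what that shell one-liner probes, rendered in C++; it assumes a g++ on PATH:

// Sketch: mirrors the m32flag probe in common.gypi. Feeds empty input to
// the compiler with -m32; on success yields "-m32" with no trailing
// newline -- the behavior the switch to `echo -n` guarantees.
#include <cstdio>
#include <cstdlib>
#include <string>

std::string DetectM32Flag(const std::string& cxx) {
  std::string cmd = "echo | " + cxx + " -m32 -E - > /dev/null 2>&1";
  return std::system(cmd.c_str()) == 0 ? "-m32" : "";
}

int main() {
  std::printf("[%s]\n", DetectM32Flag("g++").c_str());  // e.g. [-m32] or []
}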

deps/v8/src/heap.cc (22 lines changed)

@@ -5013,7 +5013,11 @@ void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
 bool Heap::IdleNotification(int hint) {
+  // Hints greater than this value indicate that
+  // the embedder is requesting a lot of GC work.
   const int kMaxHint = 1000;
+  // Minimal hint that allows to do full GC.
+  const int kMinHintForFullGC = 100;
   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
   // The size factor is in range [5..250]. The numbers here are chosen from
   // experiments. If you changes them, make sure to test with
@@ -5081,16 +5085,30 @@ bool Heap::IdleNotification(int hint) {
   mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
   ms_count_at_last_idle_notification_ = ms_count_;
 
-  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
+  int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
+                              mark_sweeps_since_idle_round_started_;
+
+  if (remaining_mark_sweeps <= 0) {
     FinishIdleRound();
     return true;
   }
 
   if (incremental_marking()->IsStopped()) {
-    incremental_marking()->Start();
+    // If there are no more than two GCs left in this idle round and we are
+    // allowed to do a full GC, then make those GCs full in order to compact
+    // the code space.
+    // TODO(ulan): Once we enable code compaction for incremental marking,
+    // we can get rid of this special case and always start incremental marking.
+    if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
+      CollectAllGarbage(kReduceMemoryFootprintMask,
+                        "idle notification: finalize idle round");
+    } else {
+      incremental_marking()->Start();
+    }
   }
 
-  AdvanceIdleIncrementalMarking(step_size);
+  if (!incremental_marking()->IsStopped()) {
+    AdvanceIdleIncrementalMarking(step_size);
+  }
 
   return false;
 }
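A minimal sketch of the decision flow this hunk introduces (not V8 code): once no more than two mark-sweeps remain in the idle round and the embedder's hint is generous enough, the collector prefers full GCs so the code space gets compacted. The kMaxMarkSweepsInIdleRound value below is an assumed illustrative value; the real constant is defined in heap.h.

#include <cstdio>

enum IdleAction { kFinishRound, kFullGC, kIncrementalStep };

IdleAction DecideIdleWork(int mark_sweeps_done, int hint,
                          bool incremental_marking_stopped) {
  const int kMaxMarkSweepsInIdleRound = 7;  // illustrative assumption
  const int kMinHintForFullGC = 100;        // from the diff above
  int remaining = kMaxMarkSweepsInIdleRound - mark_sweeps_done;
  if (remaining <= 0) return kFinishRound;  // idle round is over
  if (incremental_marking_stopped &&
      remaining <= 2 && hint >= kMinHintForFullGC) {
    return kFullGC;  // finish the round with compacting full GCs
  }
  return kIncrementalStep;  // otherwise keep marking incrementally
}

int main() {
  // One mark-sweep left, large hint: expect a full GC (prints 1).
  std::printf("%d\n", DecideIdleWork(6, 1000, true));
}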

deps/v8/src/mark-compact.cc (42 lines changed)

@@ -500,12 +500,10 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
          space->identity() == OLD_DATA_SPACE ||
          space->identity() == CODE_SPACE);
 
+  static const int kMaxMaxEvacuationCandidates = 1000;
   int number_of_pages = space->CountTotalPages();
-
-  const int kMaxMaxEvacuationCandidates = 1000;
-  int max_evacuation_candidates = Min(
-      kMaxMaxEvacuationCandidates,
-      static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));
+  int max_evacuation_candidates =
+      static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1);
 
   if (FLAG_stress_compaction || FLAG_always_compact) {
     max_evacuation_candidates = kMaxMaxEvacuationCandidates;
@@ -535,17 +533,27 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   intptr_t over_reserved = reserved - space->SizeOfObjects();
   static const intptr_t kFreenessThreshold = 50;
 
-  if (over_reserved >= 2 * space->AreaSize() &&
-      reduce_memory_footprint_) {
-    mode = REDUCE_MEMORY_FOOTPRINT;
+  if (over_reserved >= 2 * space->AreaSize()) {
+    // If reduction of memory footprint was requested, we are aggressive
+    // about choosing pages to free. We expect that half-empty pages
+    // are easier to compact so slightly bump the limit.
+    if (reduce_memory_footprint_) {
+      mode = REDUCE_MEMORY_FOOTPRINT;
+      max_evacuation_candidates += 2;
+    }
 
-    // We expect that empty pages are easier to compact so slightly bump the
-    // limit.
-    max_evacuation_candidates += 2;
+    // If over-usage is very high (more than a third of the space), we
+    // try to free all mostly empty pages. We expect that almost empty
+    // pages are even easier to compact so bump the limit even more.
+    if (over_reserved > reserved / 3) {
+      mode = REDUCE_MEMORY_FOOTPRINT;
+      max_evacuation_candidates *= 2;
+    }
 
-    if (FLAG_trace_fragmentation) {
-      PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
+    if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
+      PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d)\n",
              static_cast<double>(over_reserved) / MB,
+             static_cast<double>(reserved) / MB,
              static_cast<int>(kFreenessThreshold));
     }
   }
@@ -554,6 +562,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   Candidate candidates[kMaxMaxEvacuationCandidates];
 
+  max_evacuation_candidates =
+      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
+
   int count = 0;
   int fragmentation = 0;
   Candidate* least = NULL;
@@ -3817,11 +3828,6 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   bool lazy_sweeping_active = false;
   bool unused_page_present = false;
 
-  intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
-  intptr_t space_left =
-      Min(heap()->OldGenPromotionLimit(old_space_size),
-          heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
-
   while (it.has_next()) {
     Page* p = it.next();
@@ -3881,7 +3887,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
         }
         freed_bytes += SweepConservatively(space, p);
         pages_swept++;
-        if (space_left + freed_bytes > newspace_size) {
+        if (freed_bytes > 2 * newspace_size) {
           space->SetPagesToSweep(p->next_page());
           lazy_sweeping_active = true;
         } else {
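The first three hunks all touch the same computation, so a worked sketch may help (again not V8 code, just the arithmetic; the function name and parameter types are illustrative): the base limit is sqrt(pages / 2) + 1, footprint reduction bumps it by 2, heavy over-reservation doubles it, and only then is it clamped to 1000 so the on-stack candidates array can never be overrun.

#include <algorithm>
#include <cmath>
#include <cstdio>

int MaxEvacuationCandidates(int number_of_pages, long over_reserved,
                            long reserved, long area_size,
                            bool reduce_memory_footprint) {
  static const int kMaxMaxEvacuationCandidates = 1000;
  int max_candidates = static_cast<int>(
      std::sqrt(static_cast<double>(number_of_pages / 2)) + 1);
  if (over_reserved >= 2 * area_size) {
    if (reduce_memory_footprint) max_candidates += 2;       // half-empty pages
    if (over_reserved > reserved / 3) max_candidates *= 2;  // mostly empty
  }
  // Clamp after the bumps, matching the new Min() placed just before the
  // candidates array is filled.
  return std::min(kMaxMaxEvacuationCandidates, max_candidates);
}

int main() {
  // 200 pages, heavy over-reservation: sqrt(100) + 1 = 11, +2 = 13, *2 = 26.
  std::printf("%d\n", MaxEvacuationCandidates(200, 500, 900, 100, true));
}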

deps/v8/src/version.cc (2 lines changed)

@@ -35,7 +35,7 @@
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     11
 #define BUILD_NUMBER      10
-#define PATCH_LEVEL       12
+#define PATCH_LEVEL       14
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0

deps/v8/test/cctest/test-alloc.cc (3 lines changed)

@@ -34,7 +34,8 @@
 using namespace v8::internal;
 
-static inline void SimulateFullSpace(PagedSpace* space) {
+// Also used in test-heap.cc test cases.
+void SimulateFullSpace(PagedSpace* space) {
   int old_linear_size = static_cast<int>(space->limit() - space->top());
   space->Free(space->top(), old_linear_size);
   space->SetTop(space->limit(), space->limit());

deps/v8/test/cctest/test-heap.cc (39 lines changed)

@@ -1898,3 +1898,42 @@ TEST(Regress2143b) {
   CHECK(root->IsJSObject());
   CHECK(root->map()->IsMap());
 }
+
+
+// Implemented in the test-alloc.cc test suite.
+void SimulateFullSpace(PagedSpace* space);
+
+
+TEST(ReleaseOverReservedPages) {
+  i::FLAG_trace_gc = true;
+  InitializeVM();
+  v8::HandleScope scope;
+  static const int number_of_test_pages = 20;
+
+  // Prepare many pages with low live-bytes count.
+  PagedSpace* old_pointer_space = HEAP->old_pointer_space();
+  CHECK_EQ(1, old_pointer_space->CountTotalPages());
+  for (int i = 0; i < number_of_test_pages; i++) {
+    AlwaysAllocateScope always_allocate;
+    SimulateFullSpace(old_pointer_space);
+    FACTORY->NewFixedArray(1, TENURED);
+  }
+  CHECK_EQ(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+
+  // Triggering one GC will cause a lot of garbage to be discovered but
+  // even spread across all allocated pages.
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered for preparation");
+  CHECK_EQ(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+
+  // Triggering subsequent GCs should cause at least half of the pages
+  // to be released to the OS after at most two cycles.
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 1");
+  CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 2");
+  CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages() * 2);
+
+  // Triggering a last-resort GC should cause all pages to be released
+  // to the OS so that other processes can seize the memory.
+  HEAP->CollectAllAvailableGarbage("triggered really hard");
+  CHECK_EQ(1, old_pointer_space->CountTotalPages());
+}
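The arithmetic behind the final CHECK_GE is easy to miss: with number_of_test_pages = 20 the checks start from 21 pages, and 21 >= pages * 2 only holds when at most 10 pages remain, i.e. at least half were released. A standalone restatement of that bound (the measured value below is hypothetical, not from the test):

#include <cassert>

int main() {
  const int number_of_test_pages = 20;
  int pages_after_two_gcs = 9;  // hypothetical measurement for illustration
  // Mirrors CHECK_GE(number_of_test_pages + 1, CountTotalPages() * 2):
  // at most (21 / 2) == 10 pages may survive the second GC.
  assert(number_of_test_pages + 1 >= pages_after_two_gcs * 2);
  return 0;
}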

deps/v8/tools/merge-to-branch.sh (0 lines changed)
