
Upgrade v8 to 3.9.11

v0.9.1-release
isaacs committed 13 years ago
commit 2e24ded6d2
81 changed files:

  1. deps/v8/ChangeLog (14)
  2. deps/v8/build/common.gypi (4)
  3. deps/v8/build/standalone.gypi (3)
  4. deps/v8/include/v8.h (2)
  5. deps/v8/src/SConscript (4)
  6. deps/v8/src/api.cc (1)
  7. deps/v8/src/arm/full-codegen-arm.cc (21)
  8. deps/v8/src/ast.cc (6)
  9. deps/v8/src/bootstrapper.cc (9)
  10. deps/v8/src/compilation-cache.h (4)
  11. deps/v8/src/compiler.cc (1)
  12. deps/v8/src/cpu-profiler.h (3)
  13. deps/v8/src/d8.js (9)
  14. deps/v8/src/debug.cc (9)
  15. deps/v8/src/deoptimizer.cc (15)
  16. deps/v8/src/elements.cc (18)
  17. deps/v8/src/flag-definitions.h (3)
  18. deps/v8/src/full-codegen.cc (2)
  19. deps/v8/src/full-codegen.h (8)
  20. deps/v8/src/hashmap.cc (224)
  21. deps/v8/src/hashmap.h (217)
  22. deps/v8/src/heap-inl.h (11)
  23. deps/v8/src/heap.cc (68)
  24. deps/v8/src/heap.h (9)
  25. deps/v8/src/hydrogen-instructions.cc (23)
  26. deps/v8/src/hydrogen-instructions.h (16)
  27. deps/v8/src/hydrogen.cc (6)
  28. deps/v8/src/ia32/full-codegen-ia32.cc (62)
  29. deps/v8/src/ia32/lithium-ia32.cc (3)
  30. deps/v8/src/ic.cc (15)
  31. deps/v8/src/isolate.h (19)
  32. deps/v8/src/liveedit.cc (4)
  33. deps/v8/src/log.h (3)
  34. deps/v8/src/mark-compact.cc (110)
  35. deps/v8/src/mips/full-codegen-mips.cc (21)
  36. deps/v8/src/objects-debug.cc (5)
  37. deps/v8/src/objects-inl.h (22)
  38. deps/v8/src/objects-printer.cc (6)
  39. deps/v8/src/objects-visiting.h (2)
  40. deps/v8/src/objects.cc (183)
  41. deps/v8/src/objects.h (96)
  42. deps/v8/src/parser.cc (36)
  43. deps/v8/src/platform-cygwin.cc (11)
  44. deps/v8/src/platform-freebsd.cc (6)
  45. deps/v8/src/platform-linux.cc (6)
  46. deps/v8/src/platform-macos.cc (6)
  47. deps/v8/src/platform-nullos.cc (6)
  48. deps/v8/src/platform-openbsd.cc (6)
  49. deps/v8/src/platform-solaris.cc (6)
  50. deps/v8/src/platform-win32.cc (11)
  51. deps/v8/src/platform.h (3)
  52. deps/v8/src/preparser.h (3)
  53. deps/v8/src/profile-generator.cc (48)
  54. deps/v8/src/runtime.cc (108)
  55. deps/v8/src/scanner.cc (3)
  56. deps/v8/src/scopes.cc (26)
  57. deps/v8/src/scopes.h (4)
  58. deps/v8/src/serialize.cc (16)
  59. deps/v8/src/serialize.h (3)
  60. deps/v8/src/spaces-inl.h (6)
  61. deps/v8/src/spaces.cc (178)
  62. deps/v8/src/spaces.h (105)
  63. deps/v8/src/store-buffer.cc (8)
  64. deps/v8/src/token.h (1)
  65. deps/v8/src/version.cc (2)
  66. deps/v8/src/x64/full-codegen-x64.cc (21)
  67. deps/v8/src/zone.h (3)
  68. deps/v8/test/cctest/test-alloc.cc (10)
  69. deps/v8/test/cctest/test-api.cc (9)
  70. deps/v8/test/cctest/test-heap-profiler.cc (2)
  71. deps/v8/test/cctest/test-heap.cc (14)
  72. deps/v8/test/cctest/test-mark-compact.cc (6)
  73. deps/v8/test/cctest/test-serialize.cc (13)
  74. deps/v8/test/cctest/test-spaces.cc (7)
  75. deps/v8/test/mjsunit/get-own-property-descriptor.js (2)
  76. deps/v8/test/mjsunit/harmony/module-parsing.js (22)
  77. deps/v8/test/mjsunit/mjsunit.status (5)
  78. deps/v8/test/mjsunit/object-define-property.js (23)
  79. deps/v8/test/mjsunit/regress/regress-1969.js (5045)
  80. deps/v8/test/test262/test262.status (19)
  81. deps/v8/tools/gyp/v8.gyp (2)

deps/v8/ChangeLog (14)

@ -1,3 +1,17 @@
2012-02-27: Version 3.9.11
        Make 'module' a context-sensitive keyword (V8 issue 1957).

2012-02-24: Version 3.9.10
        Fixed V8 issues 1322, 1772 and 1969.
        Conformance improvements.
        Performance and stability improvements on all platforms.

2012-02-23: Version 3.9.9
        Supported fast case for-in in Crankshaft.

deps/v8/build/common.gypi (4)

@ -83,6 +83,7 @@
'v8_use_snapshot%': 'true',
'host_os%': '<(OS)',
'v8_use_liveobjectlist%': 'false',
'werror%': '-Werror',
# With post mortem support enabled, metadata is embedded into libv8 that
# describes various parameters of the VM for use by debuggers. See
@ -304,7 +305,7 @@
'cflags': [ '-I/usr/pkg/include' ],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '<(werror)', '-W', '-Wno-unused-parameter',
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
}],
],
@ -351,6 +352,7 @@
}], # OS=="mac"
['OS=="win"', {
'msvs_configuration_attributes': {
'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},

deps/v8/build/standalone.gypi (3)

@ -61,6 +61,7 @@
'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(v8_target_arch)',
'werror%': '-Werror',
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="mips" and host_arch!="mips") or \
@ -83,7 +84,7 @@
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd"', {
'target_defaults': {
'cflags': [ '-W', '-Wno-unused-parameter',
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-pthread', '-fno-rtti',
'-fno-exceptions', '-pedantic' ],
'ldflags': [ '-pthread', ],

deps/v8/include/v8.h (2)

@ -3850,7 +3850,7 @@ class Internals {
static const int kFullStringRepresentationMask = 0x07;
static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kJSObjectType = 0xa8;
static const int kJSObjectType = 0xa9;
static const int kFirstNonstringType = 0x80;
static const int kForeignType = 0x85;

deps/v8/src/SConscript (4)

@ -1,4 +1,4 @@
# Copyright 2011 the V8 project authors. All rights reserved.
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@ -78,7 +78,6 @@ SOURCES = {
fast-dtoa.cc
fixed-dtoa.cc
handles.cc
hashmap.cc
heap-profiler.cc
heap.cc
hydrogen.cc
@ -246,7 +245,6 @@ PREPARSER_SOURCES = {
dtoa.cc
fast-dtoa.cc
fixed-dtoa.cc
hashmap.cc
preparse-data.cc
preparser.cc
preparser-api.cc

deps/v8/src/api.cc (1)

@ -2760,6 +2760,7 @@ bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) {
self,
index,
value_obj,
NONE,
i::kNonStrictMode);
has_pending_exception = obj.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, false);

deps/v8/src/arm/full-codegen-arm.cc (21)

@ -109,6 +109,11 @@ class JumpPatchSite BASE_EMBEDDED {
};
int FullCodeGenerator::self_optimization_header_size() {
return 24;
}
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
@ -130,13 +135,6 @@ void FullCodeGenerator::Generate() {
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop-at");
}
#endif
// We can optionally optimize based on counters rather than statistical
// sampling.
if (info->ShouldSelfOptimize()) {
@ -144,6 +142,7 @@ void FullCodeGenerator::Generate() {
PrintF("[adding self-optimization header to %s]\n",
*info->function()->debug_name()->ToCString());
}
has_self_optimization_header_ = true;
MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
JSGlobalPropertyCell* cell;
@ -155,9 +154,17 @@ void FullCodeGenerator::Generate() {
Handle<Code> compile_stub(
isolate()->builtins()->builtin(Builtins::kLazyRecompile));
__ Jump(compile_stub, RelocInfo::CODE_TARGET, eq);
ASSERT(masm_->pc_offset() == self_optimization_header_size());
}
}
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop-at");
}
#endif
// Strict mode functions and builtins need to replace the receiver
// with undefined when called as functions (without an explicit
// receiver object). r5 is zero for method calls and non-zero for

deps/v8/src/ast.cc (6)

@ -237,8 +237,8 @@ bool IsEqualNumber(void* first, void* second) {
void ObjectLiteral::CalculateEmitStore() {
HashMap properties(&IsEqualString);
HashMap elements(&IsEqualNumber);
ZoneHashMap properties(&IsEqualString);
ZoneHashMap elements(&IsEqualNumber);
for (int i = this->properties()->length() - 1; i >= 0; i--) {
ObjectLiteral::Property* property = this->properties()->at(i);
Literal* literal = property->key();
@ -249,7 +249,7 @@ void ObjectLiteral::CalculateEmitStore() {
}
uint32_t hash;
HashMap* table;
ZoneHashMap* table;
void* key;
Factory* factory = Isolate::Current()->factory();
if (handle->IsSymbol()) {

deps/v8/src/bootstrapper.cc (9)

@ -214,13 +214,12 @@ class Genesis BASE_EMBEDDED {
};
class ExtensionStates {
public:
public:
ExtensionStates();
ExtensionTraversalState get_state(RegisteredExtension* extension);
void set_state(RegisteredExtension* extension,
ExtensionTraversalState state);
private:
Allocator allocator_;
private:
HashMap map_;
DISALLOW_COPY_AND_ASSIGN(ExtensionStates);
};
@ -1961,9 +1960,7 @@ static bool MatchRegisteredExtensions(void* key1, void* key2) {
}
Genesis::ExtensionStates::ExtensionStates()
: allocator_(),
map_(MatchRegisteredExtensions, &allocator_, 8)
{}
: map_(MatchRegisteredExtensions, 8) { }
Genesis::ExtensionTraversalState Genesis::ExtensionStates::get_state(
RegisteredExtension* extension) {

deps/v8/src/compilation-cache.h (4)

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -31,8 +31,6 @@
namespace v8 {
namespace internal {
class HashMap;
// The compilation cache consists of several generational sub-caches which uses
// this class as a base class. A sub-cache contains a compilation cache tables
// for each generation of the sub-cache. Since the same source code string has

deps/v8/src/compiler.cc (1)

@ -118,6 +118,7 @@ bool CompilationInfo::ShouldSelfOptimize() {
FLAG_crankshaft &&
!Serializer::enabled() &&
!function()->flags()->Contains(kDontSelfOptimize) &&
!function()->flags()->Contains(kDontOptimize) &&
(shared_info().is_null() || !shared_info()->optimization_disabled());
}

deps/v8/src/cpu-profiler.h (3)

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -41,7 +41,6 @@ class CodeEntry;
class CodeMap;
class CpuProfile;
class CpuProfilesCollection;
class HashMap;
class ProfileGenerator;
class TokenEnumerator;

deps/v8/src/d8.js (9)

@ -122,13 +122,15 @@ Debug.State = {
};
var trace_compile = false; // Tracing all compile events?
var trace_debug_json = false; // Tracing all debug json packets?
var last_cmd_line = '';
var last_cmd = '';
//var lol_is_enabled; // Set to true in d8.cc if LIVE_OBJECT_LIST is defined.
var lol_next_dump_index = 0;
var kDefaultLolLinesToPrintAtATime = 10;
var kMaxLolLinesToPrintAtATime = 1000;
var repeat_cmd_line = '';
var is_running = true;
// Global variable used to store whether a handle was requested.
var lookup_handle = null;
// Copied from debug-delay.js. This is needed below:
function ScriptTypeFlag(type) {
@ -155,7 +157,7 @@ function DebugMessageDetails(message) {
}
function DebugEventDetails(response) {
details = {text:'', running:false};
var details = {text:'', running:false};
// Get the running state.
details.running = response.running();
@ -588,7 +590,6 @@ DebugRequest.prototype.createLOLRequest = function(command,
// Create a JSON request for the evaluation command.
DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) {
// Global varaible used to store whether a handle was requested.
lookup_handle = null;
if (lol_is_enabled) {
@ -1948,7 +1949,7 @@ function roundNumber(num, length) {
// Convert a JSON response to text for display in a text based debugger.
function DebugResponseDetails(response) {
details = { text: '', running: false };
var details = { text: '', running: false };
try {
if (!response.success()) {

deps/v8/src/debug.cc (9)

@ -37,6 +37,7 @@
#include "debug.h"
#include "deoptimizer.h"
#include "execution.h"
#include "full-codegen.h"
#include "global-handles.h"
#include "ic.h"
#include "ic-inl.h"
@ -1752,7 +1753,6 @@ static bool CompileFullCodeForDebugging(Handle<SharedFunctionInfo> shared,
ASSERT(new_code->has_debug_break_slots());
ASSERT(current_code->is_compiled_optimizable() ==
new_code->is_compiled_optimizable());
ASSERT(current_code->instruction_size() <= new_code->instruction_size());
}
#endif
return result;
@ -1830,6 +1830,13 @@ static void RedirectActivationsToRecompiledCodeOnThread(
// break slots.
debug_break_slot_count++;
}
if (frame_code->has_self_optimization_header() &&
!new_code->has_self_optimization_header()) {
delta -= FullCodeGenerator::self_optimization_header_size();
} else {
ASSERT(frame_code->has_self_optimization_header() ==
new_code->has_self_optimization_header());
}
int debug_break_slot_bytes =
debug_break_slot_count * Assembler::kDebugBreakSlotLength;
if (FLAG_trace_deopt) {

deps/v8/src/deoptimizer.cc (15)

@ -451,7 +451,7 @@ Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
base = data->lazy_deoptimization_entry_code_;
}
return
static_cast<Address>(base->body()) + (id * table_entry_size_);
static_cast<Address>(base->area_start()) + (id * table_entry_size_);
}
@ -464,14 +464,14 @@ int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
base = data->lazy_deoptimization_entry_code_;
}
if (base == NULL ||
addr < base->body() ||
addr >= base->body() +
addr < base->area_start() ||
addr >= base->area_start() +
(kNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
}
ASSERT_EQ(0,
static_cast<int>(addr - base->body()) % table_entry_size_);
return static_cast<int>(addr - base->body()) / table_entry_size_;
static_cast<int>(addr - base->area_start()) % table_entry_size_);
return static_cast<int>(addr - base->area_start()) / table_entry_size_;
}
@ -1152,11 +1152,12 @@ MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
EXECUTABLE,
NULL);
ASSERT(chunk->area_size() >= desc.instr_size);
if (chunk == NULL) {
V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
}
memcpy(chunk->body(), desc.buffer, desc.instr_size);
CPU::FlushICache(chunk->body(), desc.instr_size);
memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
CPU::FlushICache(chunk->area_start(), desc.instr_size);
return chunk;
}

deps/v8/src/elements.cc (18)

@ -705,10 +705,20 @@ class NonStrictArgumentsElementsAccessor
} else {
// Object is not mapped, defer to the arguments.
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
return ElementsAccessor::ForArray(arguments)->Get(arguments,
key,
obj,
receiver);
MaybeObject* maybe_result = ElementsAccessor::ForArray(arguments)->Get(
arguments, key, obj, receiver);
Object* result;
if (!maybe_result->ToObject(&result)) return maybe_result;
// Elements of the arguments object in slow mode might be slow aliases.
if (result->IsAliasedArgumentsEntry()) {
AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(result);
Context* context = Context::cast(parameter_map->get(0));
int context_index = entry->aliased_context_slot();
ASSERT(!context->get(context_index)->IsTheHole());
return context->get(context_index);
} else {
return result;
}
}
}

deps/v8/src/flag-definitions.h (3)

@ -169,6 +169,9 @@ DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
DEFINE_int(loop_weight, 1, "loop weight for representation inference")
DEFINE_bool(optimize_for_in, false,
"optimize functions containing for-in loops")
// Experimental profiler changes.
DEFINE_bool(experimental_profiler, false, "enable all profiler experiments")
DEFINE_bool(watch_ic_patching, false, "profiler considers IC stability")

deps/v8/src/full-codegen.cc (2)

@ -302,6 +302,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
code->set_optimizable(info->IsOptimizable());
code->set_self_optimization_header(cgen.has_self_optimization_header_);
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
cgen.PopulateTypeFeedbackCells(code);
@ -365,6 +366,7 @@ void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
Handle<TypeFeedbackInfo> info = isolate()->factory()->NewTypeFeedbackInfo();
info->set_ic_total_count(ic_total_count_);
ASSERT(!isolate()->heap()->InNewSpace(*info));
code->set_type_feedback_info(*info);
}

deps/v8/src/full-codegen.h (8)

@ -90,10 +90,15 @@ class FullCodeGenerator: public AstVisitor {
stack_checks_(2), // There's always at least one.
type_feedback_cells_(info->HasDeoptimizationSupport()
? info->function()->ast_node_count() : 0),
ic_total_count_(0) { }
ic_total_count_(0),
has_self_optimization_header_(false) { }
static bool MakeCode(CompilationInfo* info);
// Returns the platform-specific size in bytes of the self-optimization
// header.
static int self_optimization_header_size();
// Encode state and pc-offset as a BitField<type, start, size>.
// Only use 30 bits because we encode the result as a smi.
class StateField : public BitField<State, 0, 1> { };
@ -786,6 +791,7 @@ class FullCodeGenerator: public AstVisitor {
ZoneList<BailoutEntry> stack_checks_;
ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
bool has_self_optimization_header_;
Handle<FixedArray> handler_table_;
Handle<JSGlobalPropertyCell> profiling_counter_;

deps/v8/src/hashmap.cc (224)

@ -1,224 +0,0 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../include/v8stdint.h"
#include "globals.h"
#include "checks.h"
#include "utils.h"
#include "allocation.h"
#include "hashmap.h"
namespace v8 {
namespace internal {
Allocator* HashMap::DefaultAllocator = ::new Allocator();
HashMap::HashMap(MatchFun match,
Allocator* allocator,
uint32_t initial_capacity) {
allocator_ = allocator;
match_ = match;
Initialize(initial_capacity);
}
HashMap::~HashMap() {
if (allocator_) {
allocator_->Delete(map_);
}
}
HashMap::Entry* HashMap::Lookup(void* key, uint32_t hash, bool insert) {
// Find a matching entry.
Entry* p = Probe(key, hash);
if (p->key != NULL) {
return p;
}
// No entry found; insert one if necessary.
if (insert) {
p->key = key;
p->value = NULL;
p->hash = hash;
occupancy_++;
// Grow the map if we reached >= 80% occupancy.
if (occupancy_ + occupancy_/4 >= capacity_) {
Resize();
p = Probe(key, hash);
}
return p;
}
// No entry found and none inserted.
return NULL;
}
void HashMap::Remove(void* key, uint32_t hash) {
// Lookup the entry for the key to remove.
Entry* p = Probe(key, hash);
if (p->key == NULL) {
// Key not found nothing to remove.
return;
}
// To remove an entry we need to ensure that it does not create an empty
// entry that will cause the search for another entry to stop too soon. If all
// the entries between the entry to remove and the next empty slot have their
// initial position inside this interval, clearing the entry to remove will
// not break the search. If, while searching for the next empty entry, an
// entry is encountered which does not have its initial position between the
// entry to remove and the position looked at, then this entry can be moved to
// the place of the entry to remove without breaking the search for it. The
// entry made vacant by this move is now the entry to remove and the process
// starts over.
// Algorithm from http://en.wikipedia.org/wiki/Open_addressing.
// This guarantees loop termination as there is at least one empty entry so
// eventually the removed entry will have an empty entry after it.
ASSERT(occupancy_ < capacity_);
// p is the candidate entry to clear. q is used to scan forwards.
Entry* q = p; // Start at the entry to remove.
while (true) {
// Move q to the next entry.
q = q + 1;
if (q == map_end()) {
q = map_;
}
// All entries between p and q have their initial position between p and q
// and the entry p can be cleared without breaking the search for these
// entries.
if (q->key == NULL) {
break;
}
// Find the initial position for the entry at position q.
Entry* r = map_ + (q->hash & (capacity_ - 1));
// If the entry at position q has its initial position outside the range
// between p and q it can be moved forward to position p and will still be
// found. There is now a new candidate entry for clearing.
if ((q > p && (r <= p || r > q)) ||
(q < p && (r <= p && r > q))) {
*p = *q;
p = q;
}
}
// Clear the entry which is allowed to en emptied.
p->key = NULL;
occupancy_--;
}
void HashMap::Clear() {
// Mark all entries as empty.
const Entry* end = map_end();
for (Entry* p = map_; p < end; p++) {
p->key = NULL;
}
occupancy_ = 0;
}
HashMap::Entry* HashMap::Start() const {
return Next(map_ - 1);
}
HashMap::Entry* HashMap::Next(Entry* p) const {
const Entry* end = map_end();
ASSERT(map_ - 1 <= p && p < end);
for (p++; p < end; p++) {
if (p->key != NULL) {
return p;
}
}
return NULL;
}
HashMap::Entry* HashMap::Probe(void* key, uint32_t hash) {
ASSERT(key != NULL);
ASSERT(IsPowerOf2(capacity_));
Entry* p = map_ + (hash & (capacity_ - 1));
const Entry* end = map_end();
ASSERT(map_ <= p && p < end);
ASSERT(occupancy_ < capacity_); // Guarantees loop termination.
while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
p++;
if (p >= end) {
p = map_;
}
}
return p;
}
void HashMap::Initialize(uint32_t capacity) {
ASSERT(IsPowerOf2(capacity));
map_ = reinterpret_cast<Entry*>(allocator_->New(capacity * sizeof(Entry)));
if (map_ == NULL) {
v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
return;
}
capacity_ = capacity;
Clear();
}
void HashMap::Resize() {
Entry* map = map_;
uint32_t n = occupancy_;
// Allocate larger map.
Initialize(capacity_ * 2);
// Rehash all current entries.
for (Entry* p = map; n > 0; p++) {
if (p->key != NULL) {
Lookup(p->key, p->hash, true)->value = p->value;
n--;
}
}
// Delete old map.
allocator_->Delete(map);
}
} } // namespace v8::internal

deps/v8/src/hashmap.h (217)

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -29,34 +29,22 @@
#define V8_HASHMAP_H_
#include "allocation.h"
#include "checks.h"
#include "utils.h"
namespace v8 {
namespace internal {
// Allocator defines the memory allocator interface
// used by HashMap and implements a default allocator.
class Allocator BASE_EMBEDDED {
public:
virtual ~Allocator() {}
virtual void* New(size_t size) { return Malloced::New(size); }
virtual void Delete(void* p) { Malloced::Delete(p); }
};
class HashMap {
template<class AllocationPolicy>
class TemplateHashMap {
public:
static Allocator* DefaultAllocator;
typedef bool (*MatchFun) (void* key1, void* key2);
// initial_capacity is the size of the initial hash map;
// it must be a power of 2 (and thus must not be 0).
explicit HashMap(MatchFun match,
Allocator* allocator = DefaultAllocator,
uint32_t initial_capacity = 8);
TemplateHashMap(MatchFun match, uint32_t initial_capacity = 8);
~HashMap();
~TemplateHashMap();
// HashMap entries are (key, value, hash) triplets.
// Some clients may not need to use the value slot
@ -100,7 +88,6 @@ class HashMap {
Entry* Next(Entry* p) const;
private:
Allocator* allocator_;
MatchFun match_;
Entry* map_;
uint32_t capacity_;
@ -112,6 +99,196 @@ class HashMap {
void Resize();
};
typedef TemplateHashMap<FreeStoreAllocationPolicy> HashMap;
template<class P>
TemplateHashMap<P>::TemplateHashMap(MatchFun match,
uint32_t initial_capacity) {
match_ = match;
Initialize(initial_capacity);
}
template<class P>
TemplateHashMap<P>::~TemplateHashMap() {
P::Delete(map_);
}
template<class P>
typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Lookup(
void* key, uint32_t hash, bool insert) {
// Find a matching entry.
Entry* p = Probe(key, hash);
if (p->key != NULL) {
return p;
}
// No entry found; insert one if necessary.
if (insert) {
p->key = key;
p->value = NULL;
p->hash = hash;
occupancy_++;
// Grow the map if we reached >= 80% occupancy.
if (occupancy_ + occupancy_/4 >= capacity_) {
Resize();
p = Probe(key, hash);
}
return p;
}
// No entry found and none inserted.
return NULL;
}
template<class P>
void TemplateHashMap<P>::Remove(void* key, uint32_t hash) {
// Lookup the entry for the key to remove.
Entry* p = Probe(key, hash);
if (p->key == NULL) {
// Key not found nothing to remove.
return;
}
// To remove an entry we need to ensure that it does not create an empty
// entry that will cause the search for another entry to stop too soon. If all
// the entries between the entry to remove and the next empty slot have their
// initial position inside this interval, clearing the entry to remove will
// not break the search. If, while searching for the next empty entry, an
// entry is encountered which does not have its initial position between the
// entry to remove and the position looked at, then this entry can be moved to
// the place of the entry to remove without breaking the search for it. The
// entry made vacant by this move is now the entry to remove and the process
// starts over.
// Algorithm from http://en.wikipedia.org/wiki/Open_addressing.
// This guarantees loop termination as there is at least one empty entry so
// eventually the removed entry will have an empty entry after it.
ASSERT(occupancy_ < capacity_);
// p is the candidate entry to clear. q is used to scan forwards.
Entry* q = p; // Start at the entry to remove.
while (true) {
// Move q to the next entry.
q = q + 1;
if (q == map_end()) {
q = map_;
}
// All entries between p and q have their initial position between p and q
// and the entry p can be cleared without breaking the search for these
// entries.
if (q->key == NULL) {
break;
}
// Find the initial position for the entry at position q.
Entry* r = map_ + (q->hash & (capacity_ - 1));
// If the entry at position q has its initial position outside the range
// between p and q it can be moved forward to position p and will still be
// found. There is now a new candidate entry for clearing.
if ((q > p && (r <= p || r > q)) ||
(q < p && (r <= p && r > q))) {
*p = *q;
p = q;
}
}
// Clear the entry which is allowed to en emptied.
p->key = NULL;
occupancy_--;
}
template<class P>
void TemplateHashMap<P>::Clear() {
// Mark all entries as empty.
const Entry* end = map_end();
for (Entry* p = map_; p < end; p++) {
p->key = NULL;
}
occupancy_ = 0;
}
template<class P>
typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Start() const {
return Next(map_ - 1);
}
template<class P>
typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Next(Entry* p) const {
const Entry* end = map_end();
ASSERT(map_ - 1 <= p && p < end);
for (p++; p < end; p++) {
if (p->key != NULL) {
return p;
}
}
return NULL;
}
template<class P>
typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Probe(void* key,
uint32_t hash) {
ASSERT(key != NULL);
ASSERT(IsPowerOf2(capacity_));
Entry* p = map_ + (hash & (capacity_ - 1));
const Entry* end = map_end();
ASSERT(map_ <= p && p < end);
ASSERT(occupancy_ < capacity_); // Guarantees loop termination.
while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
p++;
if (p >= end) {
p = map_;
}
}
return p;
}
template<class P>
void TemplateHashMap<P>::Initialize(uint32_t capacity) {
ASSERT(IsPowerOf2(capacity));
map_ = reinterpret_cast<Entry*>(P::New(capacity * sizeof(Entry)));
if (map_ == NULL) {
v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
return;
}
capacity_ = capacity;
Clear();
}
template<class P>
void TemplateHashMap<P>::Resize() {
Entry* map = map_;
uint32_t n = occupancy_;
// Allocate larger map.
Initialize(capacity_ * 2);
// Rehash all current entries.
for (Entry* p = map; n > 0; p++) {
if (p->key != NULL) {
Lookup(p->key, p->hash, true)->value = p->value;
n--;
}
}
// Delete old map.
P::Delete(map);
}
} } // namespace v8::internal
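This hunk replaces the concrete HashMap (which carried a virtual Allocator pointer and lived partly in hashmap.cc) with a header-only TemplateHashMap parameterized on an allocation policy, with the old name preserved via `typedef TemplateHashMap<FreeStoreAllocationPolicy> HashMap`. A minimal standalone sketch of that pattern follows; MallocPolicy and TinyMap are illustrative names, not types from V8:

```cpp
#include <cstddef>
#include <cstdlib>
#include <cstring>

// A policy only needs static New/Delete, mirroring the P::New / P::Delete
// calls in TemplateHashMap above.
struct MallocPolicy {
  static void* New(size_t size) { return malloc(size); }
  static void Delete(void* p) { free(p); }
};

template <class AllocationPolicy>
class TinyMap {
 public:
  explicit TinyMap(size_t capacity) : capacity_(capacity) {
    // Allocate backing storage through the policy instead of a virtual
    // allocator object, so the container needs no per-instance state for it.
    slots_ = static_cast<void**>(
        AllocationPolicy::New(capacity_ * sizeof(void*)));
    memset(slots_, 0, capacity_ * sizeof(void*));
  }
  ~TinyMap() { AllocationPolicy::Delete(slots_); }

  void** slot(size_t i) { return &slots_[i % capacity_]; }

 private:
  void** slots_;
  size_t capacity_;
};

// The concrete map is just an instantiation, which is why the separate
// hashmap.cc translation unit can be deleted in this change.
typedef TinyMap<MallocPolicy> DefaultTinyMap;
```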

deps/v8/src/heap-inl.h (11)

@ -49,7 +49,7 @@ void PromotionQueue::insert(HeapObject* target, int size) {
NewSpacePage* rear_page =
NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
ASSERT(!rear_page->prev_page()->is_anchor());
rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->body_limit());
rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
ActivateGuardIfOnTheSamePage();
}
@ -81,11 +81,6 @@ void PromotionQueue::ActivateGuardIfOnTheSamePage() {
}
int Heap::MaxObjectSizeInPagedSpace() {
return Page::kMaxHeapObjectSize;
}
MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
PretenureFlag pretenure) {
// Check for ASCII first since this is the common case.
@ -119,7 +114,7 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
{ MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
@ -153,7 +148,7 @@ MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
{ MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;

deps/v8/src/heap.cc (68)

@ -1092,7 +1092,7 @@ void PromotionQueue::RelocateQueueHead() {
Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
intptr_t* head_start = rear_;
intptr_t* head_end =
Min(front_, reinterpret_cast<intptr_t*>(p->body_limit()));
Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
int entries_count =
static_cast<int>(head_end - head_start) / kEntrySizeInWords;
@ -1435,7 +1435,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
NewSpaceScavenger::IterateBody(object->map(), object);
} else {
new_space_front =
NewSpacePage::FromLimit(new_space_front)->next_page()->body();
NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
}
}
@ -1597,7 +1597,7 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object,
int object_size) {
SLOW_ASSERT((size_restriction != SMALL) ||
(object_size <= Page::kMaxHeapObjectSize));
(object_size <= Page::kMaxNonCodeHeapObjectSize));
SLOW_ASSERT(object->Size() == object_size);
Heap* heap = map->GetHeap();
@ -1605,7 +1605,7 @@ class ScavengingVisitor : public StaticVisitorBase {
MaybeObject* maybe_result;
if ((size_restriction != SMALL) &&
(object_size > Page::kMaxHeapObjectSize)) {
(object_size > Page::kMaxNonCodeHeapObjectSize)) {
maybe_result = heap->lo_space()->AllocateRaw(object_size,
NOT_EXECUTABLE);
} else {
@ -1951,6 +1951,16 @@ MaybeObject* Heap::AllocateTypeFeedbackInfo() {
}
MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
AliasedArgumentsEntry* entry;
{ MaybeObject* maybe_result = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
if (!maybe_result->To(&entry)) return maybe_result;
}
entry->set_aliased_context_slot(aliased_context_slot);
return entry;
}
const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
{type, size, k##camel_name##MapRootIndex},
@ -2264,7 +2274,7 @@ bool Heap::CreateInitialMaps() {
MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
Object* result;
@ -2285,7 +2295,7 @@ MaybeObject* Heap::AllocateHeapNumber(double value) {
// This version of AllocateHeapNumber is optimized for
// allocation in new space.
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
Object* result;
{ MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
@ -2856,7 +2866,7 @@ MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate foreigns in paged spaces.
STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
Foreign* result;
MaybeObject* maybe_result = Allocate(foreign_map(), space);
@ -3274,7 +3284,7 @@ MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
}
int size = ByteArray::SizeFor(length);
Object* result;
{ MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
{ MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
? old_data_space_->AllocateRaw(size)
: lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
if (!maybe_result->ToObject(&result)) return maybe_result;
@ -3293,7 +3303,7 @@ MaybeObject* Heap::AllocateByteArray(int length) {
}
int size = ByteArray::SizeFor(length);
AllocationSpace space =
(size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
(size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
Object* result;
{ MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
@ -3359,7 +3369,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
MaybeObject* maybe_result;
// Large code objects and code objects which should stay at a fixed address
// are allocated in large object space.
if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
if (obj_size > code_space()->AreaSize() || immovable) {
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
@ -3408,7 +3418,7 @@ MaybeObject* Heap::CopyCode(Code* code) {
// Allocate an object the same size as the code object.
int obj_size = code->Size();
MaybeObject* maybe_result;
if (obj_size > MaxObjectSizeInPagedSpace()) {
if (obj_size > code_space()->AreaSize()) {
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
@ -3451,7 +3461,7 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
static_cast<size_t>(code->instruction_end() - old_addr);
MaybeObject* maybe_result;
if (new_obj_size > MaxObjectSizeInPagedSpace()) {
if (new_obj_size > code_space()->AreaSize()) {
maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(new_obj_size);
@ -3772,7 +3782,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
// Allocate the JSObject.
AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
Object* obj;
{ MaybeObject* maybe_obj = Allocate(map, space);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@ -4280,7 +4290,7 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
{ MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
@ -4317,11 +4327,12 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
if (size > kMaxObjectSizeInNewSpace) {
// Allocate in large object space, retry space will be ignored.
space = LO_SPACE;
} else if (size > MaxObjectSizeInPagedSpace()) {
} else if (size > Page::kMaxNonCodeHeapObjectSize) {
// Allocate in new space, retry in large object space.
retry_space = LO_SPACE;
}
} else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
} else if (space == OLD_DATA_SPACE &&
size > Page::kMaxNonCodeHeapObjectSize) {
space = LO_SPACE;
}
Object* result;
@ -4352,11 +4363,12 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length,
if (size > kMaxObjectSizeInNewSpace) {
// Allocate in large object space, retry space will be ignored.
space = LO_SPACE;
} else if (size > MaxObjectSizeInPagedSpace()) {
} else if (size > Page::kMaxNonCodeHeapObjectSize) {
// Allocate in new space, retry in large object space.
retry_space = LO_SPACE;
}
} else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
} else if (space == OLD_DATA_SPACE &&
size > Page::kMaxNonCodeHeapObjectSize) {
space = LO_SPACE;
}
Object* result;
@ -4495,13 +4507,13 @@ MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
// Too big for new space.
space = LO_SPACE;
} else if (space == OLD_POINTER_SPACE &&
size > MaxObjectSizeInPagedSpace()) {
size > Page::kMaxNonCodeHeapObjectSize) {
// Too big for old pointer space.
space = LO_SPACE;
}
AllocationSpace retry_space =
(size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
(size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
return AllocateRaw(size, space, retry_space);
}
@ -4628,13 +4640,13 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
// Too big for new space.
space = LO_SPACE;
} else if (space == OLD_DATA_SPACE &&
size > MaxObjectSizeInPagedSpace()) {
size > Page::kMaxNonCodeHeapObjectSize) {
// Too big for old data space.
space = LO_SPACE;
}
AllocationSpace retry_space =
(size <= MaxObjectSizeInPagedSpace()) ? OLD_DATA_SPACE : LO_SPACE;
(size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
return AllocateRaw(size, space, retry_space);
}
@ -4763,7 +4775,7 @@ STRUCT_LIST(MAKE_CASE)
}
int size = map->instance_size();
AllocationSpace space =
(size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
(size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
Object* result;
{ MaybeObject* maybe_result = Allocate(map, space);
if (!maybe_result->ToObject(&result)) return maybe_result;
@ -5210,7 +5222,7 @@ void Heap::ZapFromSpace() {
new_space_.FromSpaceEnd());
while (it.has_next()) {
NewSpacePage* page = it.next();
for (Address cursor = page->body(), limit = page->body_limit();
for (Address cursor = page->area_start(), limit = page->area_end();
cursor < limit;
cursor += kPointerSize) {
Memory::Address_at(cursor) = kFromSpaceZapValue;
@ -5349,9 +5361,9 @@ void Heap::OldPointerSpaceCheckStoreBuffer() {
while (pages.has_next()) {
Page* page = pages.next();
Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
Object** current = reinterpret_cast<Object**>(page->area_start());
Address end = page->ObjectAreaEnd();
Address end = page->area_end();
Object*** store_buffer_position = store_buffer()->Start();
Object*** store_buffer_top = store_buffer()->Top();
@ -5377,9 +5389,9 @@ void Heap::MapSpaceCheckStoreBuffer() {
while (pages.has_next()) {
Page* page = pages.next();
Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
Object** current = reinterpret_cast<Object**>(page->area_start());
Address end = page->ObjectAreaEnd();
Address end = page->area_end();
Object*** store_buffer_position = store_buffer()->Start();
Object*** store_buffer_top = store_buffer()->Top();

deps/v8/src/heap.h (9)

@ -177,6 +177,7 @@ namespace internal {
V(eval_symbol, "eval") \
V(function_symbol, "function") \
V(length_symbol, "length") \
V(module_symbol, "module") \
V(name_symbol, "name") \
V(native_symbol, "native") \
V(null_symbol, "null") \
@ -345,7 +346,7 @@ class PromotionQueue {
NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
ASSERT(!front_page->prev_page()->is_anchor());
front_ =
reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit());
reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
}
*target = reinterpret_cast<HeapObject*>(*(--front_));
*size = static_cast<int>(*(--front_));
@ -484,9 +485,6 @@ class Heap {
// all available bytes. Check MaxHeapObjectSize() instead.
intptr_t Available();
// Returns the maximum object size in paged space.
inline int MaxObjectSizeInPagedSpace();
// Returns of size of all objects residing in the heap.
intptr_t SizeOfObjects();
@ -644,6 +642,9 @@ class Heap {
// Allocates an empty TypeFeedbackInfo.
MUST_USE_RESULT MaybeObject* AllocateTypeFeedbackInfo();
// Allocates an AliasedArgumentsEntry.
MUST_USE_RESULT MaybeObject* AllocateAliasedArgumentsEntry(int slot);
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();

deps/v8/src/hydrogen-instructions.cc (23)

@ -276,6 +276,15 @@ bool HValue::IsDefinedAfter(HBasicBlock* other) const {
}
HUseListNode* HUseListNode::tail() {
// Skip and remove dead items in the use list.
while (tail_ != NULL && tail_->value()->CheckFlag(HValue::kIsDead)) {
tail_ = tail_->tail_;
}
return tail_;
}
HUseIterator::HUseIterator(HUseListNode* head) : next_(head) {
Advance();
}
@ -374,7 +383,7 @@ void HValue::DeleteAndReplaceWith(HValue* other) {
// We replace all uses first, so Delete can assert that there are none.
if (other != NULL) ReplaceAllUsesWith(other);
ASSERT(HasNoUses());
ClearOperands();
Kill();
DeleteFromGraph();
}
@ -392,9 +401,17 @@ void HValue::ReplaceAllUsesWith(HValue* other) {
}
void HValue::ClearOperands() {
void HValue::Kill() {
// Instead of going through the entire use list of each operand, we only
// check the first item in each use list and rely on the tail() method to
// skip dead items, removing them lazily next time we traverse the list.
SetFlag(kIsDead);
for (int i = 0; i < OperandCount(); ++i) {
SetOperandAt(i, NULL);
HValue* operand = OperandAt(i);
HUseListNode* first = operand->use_list_;
if (first != NULL && first->value() == this && first->index() == i) {
operand->use_list_ = first->tail();
}
}
}

deps/v8/src/hydrogen-instructions.h (16)

@ -448,7 +448,7 @@ class HUseListNode: public ZoneObject {
: tail_(tail), value_(value), index_(index) {
}
HUseListNode* tail() const { return tail_; }
HUseListNode* tail();
HValue* value() const { return value_; }
int index() const { return index_; }
@ -530,7 +530,8 @@ class HValue: public ZoneObject {
kDeoptimizeOnUndefined,
kIsArguments,
kTruncatingToInt32,
kLastFlag = kTruncatingToInt32
kIsDead,
kLastFlag = kIsDead
};
STATIC_ASSERT(kLastFlag < kBitsPerInt);
@ -630,7 +631,9 @@ class HValue: public ZoneObject {
return use_list_ != NULL && use_list_->tail() != NULL;
}
int UseCount() const;
void ClearOperands();
// Mark this HValue as dead and to be removed from other HValues' use lists.
void Kill();
int flags() const { return flags_; }
void SetFlag(Flag f) { flags_ |= (1 << f); }
@ -2454,7 +2457,12 @@ class HConstant: public HTemplateInstruction<0> {
virtual intptr_t Hashcode() {
ASSERT(!HEAP->allow_allocation(false));
return reinterpret_cast<intptr_t>(*handle());
intptr_t hash = reinterpret_cast<intptr_t>(*handle());
// Prevent smis from having fewer hash values when truncated to
// the least significant bits.
const int kShiftSize = kSmiShiftSize + kSmiTagSize;
STATIC_ASSERT(kShiftSize != 0);
return hash ^ (hash >> kShiftSize);
}
#ifdef DEBUG
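The new Hashcode folds the upper bits into the lower bits because, once a tagged smi is reinterpreted as an integer, its payload sits above the tag and shift bits and is lost when the hash is truncated to its least significant bits. A standalone toy illustration; the constants below assume a 64-bit smi layout (payload in the upper 32 bits) and are not read from V8's headers:

```cpp
#include <cstdint>
#include <cstdio>

const int kSmiTagSize = 1;
const int kSmiShiftSize = 31;
const int kShiftSize = kSmiTagSize + kSmiShiftSize;

int64_t MakeSmi(int32_t value) {
  // Payload shifted into the upper bits, tag bits left clear.
  return static_cast<int64_t>(value) << kShiftSize;
}

int64_t MixedHash(int64_t hash) {
  // Same expression as the patch: fold the high bits into the low bits.
  return hash ^ (hash >> kShiftSize);
}

int main() {
  int64_t a = MakeSmi(1);
  int64_t b = MakeSmi(2);
  // Truncated to 32 bits, the raw hashes of distinct smis collide (both 0)...
  printf("raw:   %08x %08x\n",
         static_cast<unsigned>(a), static_cast<unsigned>(b));
  // ...while the mixed hashes stay distinct (1 and 2).
  printf("mixed: %08x %08x\n",
         static_cast<unsigned>(MixedHash(a)),
         static_cast<unsigned>(MixedHash(b)));
  return 0;
}
```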

deps/v8/src/hydrogen.cc (6)

@ -97,7 +97,7 @@ void HBasicBlock::RemovePhi(HPhi* phi) {
ASSERT(phi->block() == this);
ASSERT(phis_.Contains(phi));
ASSERT(phi->HasNoUses() || !phi->is_live());
phi->ClearOperands();
phi->Kill();
phis_.RemoveElement(phi);
phi->SetBlock(NULL);
}
@ -3242,6 +3242,10 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (!FLAG_optimize_for_in) {
return Bailout("ForInStatement optimization is disabled");
}
if (!stmt->each()->IsVariableProxy() ||
!stmt->each()->AsVariableProxy()->var()->IsStackLocal()) {
return Bailout("ForInStatement with non-local each variable");

deps/v8/src/ia32/full-codegen-ia32.cc (62)

@ -100,6 +100,11 @@ class JumpPatchSite BASE_EMBEDDED {
};
int FullCodeGenerator::self_optimization_header_size() {
return 13;
}
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
@ -122,13 +127,6 @@ void FullCodeGenerator::Generate() {
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
// We can optionally optimize based on counters rather than statistical
// sampling.
if (info->ShouldSelfOptimize()) {
@ -136,6 +134,7 @@ void FullCodeGenerator::Generate() {
PrintF("[adding self-optimization header to %s]\n",
*info->function()->debug_name()->ToCString());
}
has_self_optimization_header_ = true;
MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
JSGlobalPropertyCell* cell;
@ -146,9 +145,17 @@ void FullCodeGenerator::Generate() {
isolate()->builtins()->builtin(Builtins::kLazyRecompile));
STATIC_ASSERT(kSmiTag == 0);
__ j(zero, compile_stub);
ASSERT(masm_->pc_offset() == self_optimization_header_size());
}
}
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
// Strict mode functions and builtins need to replace the receiver
// with undefined when called as functions (without an explicit
// receiver object). ecx is zero for method calls and non-zero for
@ -335,7 +342,15 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(127, Max(1, distance / 100));
}
__ sub(Operand::Cell(profiling_counter_), Immediate(Smi::FromInt(weight)));
if (Serializer::enabled()) {
__ mov(ebx, Immediate(profiling_counter_));
__ sub(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(weight)));
} else {
// This version is slightly faster, but not snapshot safe.
__ sub(Operand::Cell(profiling_counter_),
Immediate(Smi::FromInt(weight)));
}
__ j(positive, &ok, Label::kNear);
InterruptStub stub;
__ CallStub(&stub);
@ -365,8 +380,14 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
if (FLAG_count_based_interrupts) {
// Reset the countdown.
__ mov(Operand::Cell(profiling_counter_),
Immediate(Smi::FromInt(FLAG_interrupt_budget)));
if (Serializer::enabled()) {
__ mov(ebx, Immediate(profiling_counter_));
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(FLAG_interrupt_budget)));
} else {
__ mov(Operand::Cell(profiling_counter_),
Immediate(Smi::FromInt(FLAG_interrupt_budget)));
}
}
__ bind(&ok);
@ -396,8 +417,15 @@ void FullCodeGenerator::EmitReturnSequence() {
int distance = masm_->pc_offset();
weight = Min(127, Max(1, distance / 100));
}
__ sub(Operand::Cell(profiling_counter_),
Immediate(Smi::FromInt(weight)));
if (Serializer::enabled()) {
__ mov(ebx, Immediate(profiling_counter_));
__ sub(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(weight)));
} else {
// This version is slightly faster, but not snapshot safe.
__ sub(Operand::Cell(profiling_counter_),
Immediate(Smi::FromInt(weight)));
}
Label ok;
__ j(positive, &ok, Label::kNear);
__ push(eax);
@ -405,8 +433,14 @@ void FullCodeGenerator::EmitReturnSequence() {
__ CallStub(&stub);
__ pop(eax);
// Reset the countdown.
__ mov(Operand::Cell(profiling_counter_),
Immediate(Smi::FromInt(FLAG_interrupt_budget)));
if (Serializer::enabled()) {
__ mov(ebx, Immediate(profiling_counter_));
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(FLAG_interrupt_budget)));
} else {
__ mov(Operand::Cell(profiling_counter_),
Immediate(Smi::FromInt(FLAG_interrupt_budget)));
}
__ bind(&ok);
}
#ifdef DEBUG

deps/v8/src/ia32/lithium-ia32.cc (3)

@ -1935,13 +1935,14 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
HLoadNamedFieldPolymorphic* instr) {
ASSERT(instr->representation().IsTagged());
LOperand* context = UseFixed(instr->context(), esi);
if (instr->need_generic()) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* obj = UseFixed(instr->object(), eax);
LLoadNamedFieldPolymorphic* result =
new(zone()) LLoadNamedFieldPolymorphic(context, obj);
return MarkAsCall(DefineFixed(result, eax), instr);
} else {
LOperand* context = UseAny(instr->context()); // Not actually used.
LOperand* obj = UseRegisterAtStart(instr->object());
LLoadNamedFieldPolymorphic* result =
new(zone()) LLoadNamedFieldPolymorphic(context, obj);

deps/v8/src/ic.cc (15)

@ -315,10 +315,13 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
if (delta != 0) {
Code* host = target->GetHeap()->isolate()->
inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
TypeFeedbackInfo* info =
TypeFeedbackInfo::cast(host->type_feedback_info());
info->set_ic_with_typeinfo_count(
info->ic_with_typeinfo_count() + delta);
// Not all Code objects have TypeFeedbackInfo.
if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
TypeFeedbackInfo* info =
TypeFeedbackInfo::cast(host->type_feedback_info());
info->set_ic_with_typeinfo_count(
info->ic_with_typeinfo_count() + delta);
}
}
}
}
@ -1329,7 +1332,7 @@ MaybeObject* StoreIC::Store(State state,
uint32_t index;
if (name->AsArrayIndex(&index)) {
Handle<Object> result =
JSObject::SetElement(receiver, index, value, strict_mode);
JSObject::SetElement(receiver, index, value, NONE, strict_mode);
RETURN_IF_EMPTY_HANDLE(isolate(), result);
return *value;
}
@ -1786,7 +1789,7 @@ MaybeObject* KeyedStoreIC::Store(State state,
uint32_t index;
if (name->AsArrayIndex(&index)) {
Handle<Object> result =
JSObject::SetElement(receiver, index, value, strict_mode);
JSObject::SetElement(receiver, index, value, NONE, strict_mode);
RETURN_IF_EMPTY_HANDLE(isolate(), result);
return *value;
}

deps/v8/src/isolate.h (19)

@ -38,6 +38,7 @@
#include "frames.h"
#include "global-handles.h"
#include "handles.h"
#include "hashmap.h"
#include "heap.h"
#include "regexp-stack.h"
#include "runtime-profiler.h"
@ -280,23 +281,6 @@ class ThreadLocalTop BASE_EMBEDDED {
Address try_catch_handler_address_;
};
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
#define ISOLATE_PLATFORM_INIT_LIST(V) \
/* VirtualFrame::SpilledScope state */ \
V(bool, is_virtual_frame_in_spilled_scope, false) \
/* CodeGenerator::EmitNamedStore state */ \
V(int, inlined_write_barrier_size, -1)
#if !defined(__arm__) && !defined(__mips__)
class HashMap;
#endif
#else
#define ISOLATE_PLATFORM_INIT_LIST(V)
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -367,7 +351,6 @@ typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;
V(uint64_t, enabled_cpu_features, 0) \
V(CpuProfiler*, cpu_profiler, NULL) \
V(HeapProfiler*, heap_profiler, NULL) \
ISOLATE_PLATFORM_INIT_LIST(V) \
ISOLATE_DEBUGGER_INIT_LIST(V)
class Isolate {

deps/v8/src/liveedit.cc (4)

@ -53,8 +53,8 @@ void SetElementNonStrict(Handle<JSObject> object,
// Ignore return value from SetElement. It can only be a failure if there
// are element setters causing exceptions and the debugger context has none
// of these.
Handle<Object> no_failure;
no_failure = JSObject::SetElement(object, index, value, kNonStrictMode);
Handle<Object> no_failure =
JSObject::SetElement(object, index, value, NONE, kNonStrictMode);
ASSERT(!no_failure.is_null());
USE(no_failure);
}

deps/v8/src/log.h (3)

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -71,7 +71,6 @@ namespace internal {
// tick profiler requires code events, so --prof implies --log-code.
// Forward declarations.
class HashMap;
class LogMessageBuilder;
class Profiler;
class Semaphore;

deps/v8/src/mark-compact.cc (110)

@ -107,14 +107,14 @@ static void VerifyMarking(NewSpace* space) {
Address end = space->top();
NewSpacePageIterator it(space->bottom(), end);
// The bottom position is at the start of its page. Allows us to use
// page->body() as start of range on all pages.
// page->area_start() as start of range on all pages.
ASSERT_EQ(space->bottom(),
NewSpacePage::FromAddress(space->bottom())->body());
NewSpacePage::FromAddress(space->bottom())->area_start());
while (it.has_next()) {
NewSpacePage* page = it.next();
Address limit = it.has_next() ? page->body_limit() : end;
Address limit = it.has_next() ? page->area_end() : end;
ASSERT(limit == end || !page->Contains(end));
VerifyMarking(page->body(), limit);
VerifyMarking(page->area_start(), limit);
}
}
@ -124,7 +124,7 @@ static void VerifyMarking(PagedSpace* space) {
while (it.has_next()) {
Page* p = it.next();
VerifyMarking(p->ObjectAreaStart(), p->ObjectAreaEnd());
VerifyMarking(p->area_start(), p->area_end());
}
}
@ -187,8 +187,8 @@ static void VerifyEvacuation(NewSpace* space) {
while (it.has_next()) {
NewSpacePage* page = it.next();
Address current = page->body();
Address limit = it.has_next() ? page->body_limit() : space->top();
Address current = page->area_start();
Address limit = it.has_next() ? page->area_end() : space->top();
ASSERT(limit == space->top() || !page->Contains(space->top()));
while (current < limit) {
HeapObject* object = HeapObject::FromAddress(current);
@ -205,7 +205,7 @@ static void VerifyEvacuation(PagedSpace* space) {
while (it.has_next()) {
Page* p = it.next();
if (p->IsEvacuationCandidate()) continue;
VerifyEvacuation(p->ObjectAreaStart(), p->ObjectAreaEnd());
VerifyEvacuation(p->area_start(), p->area_end());
}
}
@ -232,7 +232,7 @@ void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
static void TraceFragmentation(PagedSpace* space) {
int number_of_pages = space->CountTotalPages();
intptr_t reserved = (number_of_pages * Page::kObjectAreaSize);
intptr_t reserved = (number_of_pages * space->AreaSize());
intptr_t free = reserved - space->SizeOfObjects();
PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
AllocationSpaceName(space->identity()),
@ -453,13 +453,14 @@ static int FreeListFragmentation(PagedSpace* space, Page* p) {
intptr_t ratio;
intptr_t ratio_threshold;
intptr_t area_size = space->AreaSize();
if (space->identity() == CODE_SPACE) {
ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
Page::kObjectAreaSize;
area_size;
ratio_threshold = 10;
} else {
ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
Page::kObjectAreaSize;
area_size;
ratio_threshold = 15;
}
@ -469,20 +470,20 @@ static int FreeListFragmentation(PagedSpace* space, Page* p) {
AllocationSpaceName(space->identity()),
static_cast<int>(sizes.small_size_),
static_cast<double>(sizes.small_size_ * 100) /
Page::kObjectAreaSize,
area_size,
static_cast<int>(sizes.medium_size_),
static_cast<double>(sizes.medium_size_ * 100) /
Page::kObjectAreaSize,
area_size,
static_cast<int>(sizes.large_size_),
static_cast<double>(sizes.large_size_ * 100) /
Page::kObjectAreaSize,
area_size,
static_cast<int>(sizes.huge_size_),
static_cast<double>(sizes.huge_size_ * 100) /
Page::kObjectAreaSize,
area_size,
(ratio > ratio_threshold) ? "[fragmented]" : "");
}
if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
if (FLAG_always_compact && sizes.Total() != area_size) {
return 1;
}
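
For a feel of the weighting above, a standalone arithmetic sketch with made-up free-list totals (not the real FreeList::SizeStats), using the non-code-space weights of 5x for small and 1x for medium blocks:

#include <cstdint>
#include <cstdio>

int main() {
  // Made-up free-list totals for one old-space page, in bytes.
  intptr_t small_size = 4 * 1024;
  intptr_t medium_size = 16 * 1024;
  // Assumed usable area of a 1 MB page after its header.
  intptr_t area_size = 1024 * 1024 - 4 * 1024;

  // Non-code spaces weight small blocks 5x and medium blocks 1x, expressed
  // as a percentage of the usable area; above 15% the page is reported
  // as fragmented.
  intptr_t ratio = (small_size * 5 + medium_size) * 100 / area_size;
  intptr_t ratio_threshold = 15;

  std::printf("ratio=%d%% %s\n", static_cast<int>(ratio),
              ratio > ratio_threshold ? "[fragmented]" : "");
  return 0;
}
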
@ -528,11 +529,11 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
CompactionMode mode = COMPACT_FREE_LISTS;
intptr_t reserved = number_of_pages * Page::kObjectAreaSize;
intptr_t reserved = number_of_pages * space->AreaSize();
intptr_t over_reserved = reserved - space->SizeOfObjects();
static const intptr_t kFreenessThreshold = 50;
if (over_reserved >= 2 * Page::kObjectAreaSize &&
if (over_reserved >= 2 * space->AreaSize() &&
reduce_memory_footprint_) {
mode = REDUCE_MEMORY_FOOTPRINT;
@ -575,18 +576,17 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
intptr_t free_bytes = 0;
if (!p->WasSwept()) {
free_bytes = (Page::kObjectAreaSize - p->LiveBytes());
free_bytes = (p->area_size() - p->LiveBytes());
} else {
FreeList::SizeStats sizes;
space->CountFreeListItems(p, &sizes);
free_bytes = sizes.Total();
}
int free_pct = static_cast<int>(free_bytes * 100 / Page::kObjectAreaSize);
int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
if (free_pct >= kFreenessThreshold) {
estimated_release += Page::kObjectAreaSize +
(Page::kObjectAreaSize - free_bytes);
estimated_release += 2 * p->area_size() - free_bytes;
fragmentation = free_pct;
} else {
fragmentation = 0;
@ -597,7 +597,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
reinterpret_cast<void*>(p),
AllocationSpaceName(space->identity()),
static_cast<int>(free_bytes),
static_cast<double>(free_bytes * 100) / Page::kObjectAreaSize,
static_cast<double>(free_bytes * 100) / p->area_size(),
(fragmentation > 0) ? "[fragmented]" : "");
}
} else {
@ -1977,12 +1977,15 @@ static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
int last_cell_index =
Bitmap::IndexToCell(
Bitmap::CellAlignIndex(
p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
p->AddressToMarkbitIndex(p->area_end())));
Address cell_base = p->area_start();
int cell_index = Bitmap::IndexToCell(
Bitmap::CellAlignIndex(
p->AddressToMarkbitIndex(cell_base)));
int cell_index = Page::kFirstUsedCell;
Address cell_base = p->ObjectAreaStart();
for (cell_index = Page::kFirstUsedCell;
for (;
cell_index < last_cell_index;
cell_index++, cell_base += 32 * kPointerSize) {
ASSERT((unsigned)cell_index ==
@ -2786,7 +2789,7 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
int object_size) {
Object* result;
if (object_size > heap()->MaxObjectSizeInPagedSpace()) {
if (object_size > Page::kMaxNonCodeHeapObjectSize) {
MaybeObject* maybe_result =
heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
if (maybe_result->ToObject(&result)) {
@ -2904,13 +2907,16 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
int last_cell_index =
Bitmap::IndexToCell(
Bitmap::CellAlignIndex(
p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
p->AddressToMarkbitIndex(p->area_end())));
Address cell_base = p->area_start();
int cell_index = Bitmap::IndexToCell(
Bitmap::CellAlignIndex(
p->AddressToMarkbitIndex(cell_base)));
int cell_index = Page::kFirstUsedCell;
Address cell_base = p->ObjectAreaStart();
int offsets[16];
for (cell_index = Page::kFirstUsedCell;
for (;
cell_index < last_cell_index;
cell_index++, cell_base += 32 * kPointerSize) {
ASSERT((unsigned)cell_index ==
@ -3065,12 +3071,16 @@ static void SweepPrecisely(PagedSpace* space,
int last_cell_index =
Bitmap::IndexToCell(
Bitmap::CellAlignIndex(
p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
p->AddressToMarkbitIndex(p->area_end())));
Address free_start = p->area_start();
int cell_index =
Bitmap::IndexToCell(
Bitmap::CellAlignIndex(
p->AddressToMarkbitIndex(free_start)));
int cell_index = Page::kFirstUsedCell;
Address free_start = p->ObjectAreaStart();
ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
Address object_address = p->ObjectAreaStart();
Address object_address = free_start;
int offsets[16];
SkipList* skip_list = p->skip_list();
@ -3079,7 +3089,7 @@ static void SweepPrecisely(PagedSpace* space,
skip_list->Clear();
}
for (cell_index = Page::kFirstUsedCell;
for (;
cell_index < last_cell_index;
cell_index++, object_address += 32 * kPointerSize) {
ASSERT((unsigned)cell_index ==
@ -3116,8 +3126,8 @@ static void SweepPrecisely(PagedSpace* space,
// Clear marking bits for current cell.
cells[cell_index] = 0;
}
if (free_start != p->ObjectAreaEnd()) {
space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
if (free_start != p->area_end()) {
space->Free(free_start, static_cast<int>(p->area_end() - free_start));
}
p->ResetLiveBytes();
}
@ -3412,7 +3422,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
Page* p = evacuation_candidates_[i];
if (!p->IsEvacuationCandidate()) continue;
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
space->Free(p->area_start(), p->area_size());
p->set_scan_on_scavenge(false);
slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
p->ClearEvacuationCandidate();
@ -3715,23 +3725,27 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
int last_cell_index =
Bitmap::IndexToCell(
Bitmap::CellAlignIndex(
p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
p->AddressToMarkbitIndex(p->area_end())));
int cell_index =
Bitmap::IndexToCell(
Bitmap::CellAlignIndex(
p->AddressToMarkbitIndex(p->area_start())));
int cell_index = Page::kFirstUsedCell;
intptr_t freed_bytes = 0;
// This is the start of the 32 word block that we are currently looking at.
Address block_address = p->ObjectAreaStart();
Address block_address = p->area_start();
// Skip over all the dead objects at the start of the page and mark them free.
for (cell_index = Page::kFirstUsedCell;
for (;
cell_index < last_cell_index;
cell_index++, block_address += 32 * kPointerSize) {
if (cells[cell_index] != 0) break;
}
size_t size = block_address - p->ObjectAreaStart();
size_t size = block_address - p->area_start();
if (cell_index == last_cell_index) {
freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
freed_bytes += static_cast<int>(space->Free(p->area_start(),
static_cast<int>(size)));
ASSERT_EQ(0, p->LiveBytes());
return freed_bytes;
@ -3740,8 +3754,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
// first live object.
Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
// Free the first free space.
size = free_end - p->ObjectAreaStart();
freed_bytes += space->Free(p->ObjectAreaStart(),
size = free_end - p->area_start();
freed_bytes += space->Free(p->area_start(),
static_cast<int>(size));
// The start of the current free area is represented in undigested form by
// the address of the last 32-word section that contained a live object and
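
Each mark-bitmap cell covers 32 words of the page, so skipping the leading dead objects is a scan for the first non-zero cell. A minimal standalone sketch with made-up cell contents (not the real Bitmap or FreeList types):

#include <cstdint>
#include <cstdio>

int main() {
  const int kPointerSize = 8;                   // assumed 64-bit build
  const int kBytesPerCell = 32 * kPointerSize;  // one bitmap cell covers 32 words

  // Hypothetical marking cells at the start of a page: three empty cells,
  // then a cell whose lowest mark bit is set (a live object starts there).
  uint32_t cells[] = {0, 0, 0, 1, 0};
  const int last_cell_index = sizeof(cells) / sizeof(cells[0]);

  int cell_index = 0;
  intptr_t block_offset = 0;  // offset of the current 32-word block
  for (; cell_index < last_cell_index;
       cell_index++, block_offset += kBytesPerCell) {
    if (cells[cell_index] != 0) break;  // first block containing a live object
  }

  // Everything before this block is dead and would be handed to the free list.
  std::printf("free run: %ld bytes, first live cell: %d\n",
              static_cast<long>(block_offset), cell_index);
  return 0;
}
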

21
deps/v8/src/mips/full-codegen-mips.cc

@ -119,6 +119,11 @@ class JumpPatchSite BASE_EMBEDDED {
};
int FullCodeGenerator::self_optimization_header_size() {
return 0; // TODO(jkummerow): determine correct value.
}
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
@ -140,13 +145,6 @@ void FullCodeGenerator::Generate() {
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop-at");
}
#endif
// We can optionally optimize based on counters rather than statistical
// sampling.
if (info->ShouldSelfOptimize()) {
@ -154,6 +152,7 @@ void FullCodeGenerator::Generate() {
PrintF("[adding self-optimization header to %s]\n",
*info->function()->debug_name()->ToCString());
}
has_self_optimization_header_ = true;
MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
JSGlobalPropertyCell* cell;
@ -165,9 +164,17 @@ void FullCodeGenerator::Generate() {
Handle<Code> compile_stub(
isolate()->builtins()->builtin(Builtins::kLazyRecompile));
__ Jump(compile_stub, RelocInfo::CODE_TARGET, eq, a3, Operand(zero_reg));
ASSERT(masm_->pc_offset() == self_optimization_header_size());
}
}
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop-at");
}
#endif
// Strict mode functions and builtins need to replace the receiver
// with undefined when called as functions (without an explicit
// receiver object). t1 is zero for method calls and non-zero for

5
deps/v8/src/objects-debug.cc

@ -333,6 +333,11 @@ void TypeFeedbackInfo::TypeFeedbackInfoVerify() {
}
void AliasedArgumentsEntry::AliasedArgumentsEntryVerify() {
VerifySmiField(kAliasedContextSlot);
}
void FixedArray::FixedArrayVerify() {
for (int i = 0; i < length(); i++) {
Object* e = get(i);

22
deps/v8/src/objects-inl.h

@ -3089,6 +3089,21 @@ void Code::set_compiled_optimizable(bool value) {
}
bool Code::has_self_optimization_header() {
ASSERT(kind() == FUNCTION);
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
return FullCodeFlagsHasSelfOptimizationHeader::decode(flags);
}
void Code::set_self_optimization_header(bool value) {
ASSERT(kind() == FUNCTION);
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
flags = FullCodeFlagsHasSelfOptimizationHeader::update(flags, value);
WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
}
int Code::allow_osr_at_loop_nesting_level() {
ASSERT(kind() == FUNCTION);
return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
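
A minimal sketch of the encode/decode pattern behind these accessors, using a simplified stand-in for the BitField template rather than V8's real class:

#include <cstdint>
#include <cstdio>

// Simplified stand-in for BitField<bool, shift, 1>.
template <int shift>
struct BoolBit {
  static bool decode(uint8_t flags) { return (flags >> shift) & 1; }
  static uint8_t update(uint8_t flags, bool value) {
    return static_cast<uint8_t>((flags & ~(1 << shift)) |
                                (value ? (1 << shift) : 0));
  }
};

// Bit 3 of the full-code flags byte, as declared for
// FullCodeFlagsHasSelfOptimizationHeader.
using HasSelfOptimizationHeader = BoolBit<3>;

int main() {
  uint8_t flags = 0;
  flags = HasSelfOptimizationHeader::update(flags, true);
  std::printf("header bit: %d\n", HasSelfOptimizationHeader::decode(flags));
  return 0;
}
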
@ -3331,7 +3346,7 @@ void Map::set_prototype(Object* value, WriteBarrierMode mode) {
DescriptorArray* Map::instance_descriptors() {
Object* object = READ_FIELD(this, kInstanceDescriptorsOrBitField3Offset);
if (object->IsSmi()) {
return HEAP->empty_descriptor_array();
return GetHeap()->empty_descriptor_array();
} else {
return DescriptorArray::cast(object);
}
@ -3645,7 +3660,7 @@ BOOL_ACCESSORS(SharedFunctionInfo,
bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() {
return initial_map() != HEAP->undefined_value();
return initial_map() != GetHeap()->undefined_value();
}
@ -4806,6 +4821,9 @@ ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells,
kTypeFeedbackCellsOffset)
SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
Relocatable::Relocatable(Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
isolate_ = isolate;

6
deps/v8/src/objects-printer.cc

@ -563,6 +563,12 @@ void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
}
void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(FILE* out) {
HeapObject::PrintHeader(out, "AliasedArgumentsEntry");
PrintF(out, "\n - aliased_context_slot: %d", aliased_context_slot());
}
void FixedArray::FixedArrayPrint(FILE* out) {
HeapObject::PrintHeader(out, "FixedArray");
PrintF(out, " - length: %d", length());

2
deps/v8/src/objects-visiting.h

@ -135,7 +135,7 @@ class StaticVisitorBase : public AllStatic {
(base == kVisitJSObject));
ASSERT(IsAligned(object_size, kPointerSize));
ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
ASSERT(object_size < Page::kMaxHeapObjectSize);
ASSERT(object_size < Page::kMaxNonCodeHeapObjectSize);
const VisitorId specialization = static_cast<VisitorId>(
base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);

183
deps/v8/src/objects.cc

@ -4450,10 +4450,7 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
}
accessors->set(is_getter, fun);
{ MaybeObject* maybe_ok = SetElementCallback(index, accessors, attributes);
if (maybe_ok->IsFailure()) return maybe_ok;
}
return GetHeap()->undefined_value();
return SetElementCallback(index, accessors, attributes);
}
@ -4471,12 +4468,14 @@ MaybeObject* JSObject::DefinePropertyAccessor(String* name,
Object* obj = result.GetCallbackObject();
// Need to preserve old getters/setters.
if (obj->IsAccessorPair()) {
AccessorPair::cast(obj)->set(is_getter, fun);
// Use set to update attributes.
{ MaybeObject* maybe_ok = SetPropertyCallback(name, obj, attributes);
if (maybe_ok->IsFailure()) return maybe_ok;
AccessorPair* copy;
{ MaybeObject* maybe_copy =
AccessorPair::cast(obj)->CopyWithoutTransitions();
if (!maybe_copy->To(&copy)) return maybe_copy;
}
return GetHeap()->undefined_value();
copy->set(is_getter, fun);
// Use set to update attributes.
return SetPropertyCallback(name, copy, attributes);
}
}
}
@ -4487,10 +4486,7 @@ MaybeObject* JSObject::DefinePropertyAccessor(String* name,
}
accessors->set(is_getter, fun);
{ MaybeObject* maybe_ok = SetPropertyCallback(name, accessors, attributes);
if (maybe_ok->IsFailure()) return maybe_ok;
}
return GetHeap()->undefined_value();
return SetPropertyCallback(name, accessors, attributes);
}
@ -9248,8 +9244,10 @@ bool JSObject::HasElementWithReceiver(JSReceiver* receiver, uint32_t index) {
MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype) {
bool check_prototype,
SetPropertyMode set_mode) {
Isolate* isolate = GetIsolate();
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
@ -9277,8 +9275,10 @@ MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
MaybeObject* raw_result =
this_handle->SetElementWithoutInterceptor(index,
*value_handle,
attributes,
strict_mode,
check_prototype);
check_prototype,
set_mode);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return raw_result;
}
@ -9476,7 +9476,8 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
if (convert_to_slow) {
MaybeObject* result = NormalizeElements();
if (result->IsFailure()) return result;
return SetDictionaryElement(index, value, strict_mode, check_prototype);
return SetDictionaryElement(index, value, NONE, strict_mode,
check_prototype);
}
}
// Convert to fast double elements if appropriate.
@ -9526,8 +9527,10 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype) {
bool check_prototype,
SetPropertyMode set_mode) {
ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
Isolate* isolate = GetIsolate();
Heap* heap = isolate->heap();
@ -9547,24 +9550,40 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
if (entry != SeededNumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
if (details.type() == CALLBACKS && set_mode == SET_PROPERTY) {
return SetElementWithCallback(element, index, value, this, strict_mode);
} else {
dictionary->UpdateMaxNumberKey(index);
// If a value has not been initialized we allow writing to it even if it
// is read-only (a declared const that has not been initialized).
if (!dictionary->DetailsAt(entry).IsReadOnly() ||
dictionary->ValueAt(entry)->IsTheHole()) {
dictionary->ValueAtPut(entry, value);
} else if (strict_mode == kStrictMode) {
Handle<Object> holder(this);
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<Object> args[2] = { number, holder };
Handle<Object> error =
isolate->factory()->NewTypeError("strict_read_only_property",
HandleVector(args, 2));
return isolate->Throw(*error);
// is read-only (a declared const that has not been initialized). If a
// value is being defined we skip attribute checks completely.
if (set_mode == DEFINE_PROPERTY) {
details = PropertyDetails(attributes, NORMAL, details.index());
dictionary->DetailsAtPut(entry, details);
} else if (details.IsReadOnly() && !element->IsTheHole()) {
if (strict_mode == kNonStrictMode) {
return isolate->heap()->undefined_value();
} else {
Handle<Object> holder(this);
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<Object> args[2] = { number, holder };
Handle<Object> error =
isolate->factory()->NewTypeError("strict_read_only_property",
HandleVector(args, 2));
return isolate->Throw(*error);
}
}
// Elements of the arguments object in slow mode might be slow aliases.
if (is_arguments && element->IsAliasedArgumentsEntry()) {
AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(element);
Context* context = Context::cast(elements->get(0));
int context_index = entry->aliased_context_slot();
ASSERT(!context->get(context_index)->IsTheHole());
context->set(context_index, value);
// For elements that are still writable we keep slow aliasing.
if (!details.IsReadOnly()) value = element;
}
dictionary->ValueAtPut(entry, value);
}
} else {
// Index not already used. Look for an accessor in the prototype chain.
@ -9591,7 +9610,8 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
}
}
FixedArrayBase* new_dictionary;
MaybeObject* maybe = dictionary->AtNumberPut(index, value);
PropertyDetails details = PropertyDetails(attributes, NORMAL);
MaybeObject* maybe = dictionary->AddNumberEntry(index, value, details);
if (!maybe->To<FixedArrayBase>(&new_dictionary)) return maybe;
if (dictionary != SeededNumberDictionary::cast(new_dictionary)) {
if (is_arguments) {
@ -9732,18 +9752,22 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
ASSERT(HasDictionaryElements());
return SetElement(index, value, strict_mode, check_prototype);
return SetElement(index, value, NONE, strict_mode, check_prototype);
}
MaybeObject* JSReceiver::SetElement(uint32_t index,
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_proto) {
return IsJSProxy()
? JSProxy::cast(this)->SetElementWithHandler(index, value, strict_mode)
: JSObject::cast(this)->SetElement(index, value, strict_mode, check_proto)
;
if (IsJSProxy()) {
return JSProxy::cast(this)->SetElementWithHandler(
index, value, strict_mode);
} else {
return JSObject::cast(this)->SetElement(
index, value, attributes, strict_mode, check_proto);
}
}
@ -9752,16 +9776,19 @@ Handle<Object> JSObject::SetOwnElement(Handle<JSObject> object,
Handle<Object> value,
StrictModeFlag strict_mode) {
ASSERT(!object->HasExternalArrayElements());
CALL_HEAP_FUNCTION(object->GetIsolate(),
object->SetElement(index, *value, strict_mode, false),
Object);
CALL_HEAP_FUNCTION(
object->GetIsolate(),
object->SetElement(index, *value, NONE, strict_mode, false),
Object);
}
Handle<Object> JSObject::SetElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
StrictModeFlag strict_mode) {
PropertyAttributes attr,
StrictModeFlag strict_mode,
SetPropertyMode set_mode) {
if (object->HasExternalArrayElements()) {
if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
bool has_exception;
@ -9770,16 +9797,19 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
value = number;
}
}
CALL_HEAP_FUNCTION(object->GetIsolate(),
object->SetElement(index, *value, strict_mode, true),
Object);
CALL_HEAP_FUNCTION(
object->GetIsolate(),
object->SetElement(index, *value, attr, strict_mode, true, set_mode),
Object);
}
MaybeObject* JSObject::SetElement(uint32_t index,
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype) {
bool check_prototype,
SetPropertyMode set_mode) {
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
Heap* heap = GetHeap();
@ -9797,29 +9827,59 @@ MaybeObject* JSObject::SetElement(uint32_t index,
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->SetElement(index,
value,
attributes,
strict_mode,
check_prototype);
check_prototype,
set_mode);
}
// Don't allow element properties to be redefined for external arrays.
if (HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
Isolate* isolate = GetHeap()->isolate();
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<Object> args[] = { Handle<Object>(this), number };
Handle<Object> error = isolate->factory()->NewTypeError(
"redef_external_array_element", HandleVector(args, ARRAY_SIZE(args)));
return isolate->Throw(*error);
}
// Normalize the elements to enable attributes on the property.
if ((attributes & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) {
SeededNumberDictionary* dictionary;
MaybeObject* maybe_object = NormalizeElements();
if (!maybe_object->To(&dictionary)) return maybe_object;
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
}
// Check for lookup interceptor
if (HasIndexedInterceptor()) {
return SetElementWithInterceptor(index,
value,
attributes,
strict_mode,
check_prototype);
check_prototype,
set_mode);
}
return SetElementWithoutInterceptor(index,
value,
attributes,
strict_mode,
check_prototype);
check_prototype,
set_mode);
}
MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
Object* value,
PropertyAttributes attr,
StrictModeFlag strict_mode,
bool check_prototype) {
bool check_prototype,
SetPropertyMode set_mode) {
ASSERT(HasDictionaryElements() ||
HasDictionaryArgumentsElements() ||
(attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
Isolate* isolate = GetIsolate();
switch (GetElementsKind()) {
case FAST_SMI_ONLY_ELEMENTS:
@ -9867,7 +9927,8 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
return array->SetValue(index, value);
}
case DICTIONARY_ELEMENTS:
return SetDictionaryElement(index, value, strict_mode, check_prototype);
return SetDictionaryElement(index, value, attr, strict_mode,
check_prototype, set_mode);
case NON_STRICT_ARGUMENTS_ELEMENTS: {
FixedArray* parameter_map = FixedArray::cast(elements());
uint32_t length = parameter_map->length();
@ -9878,17 +9939,23 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
int context_index = Smi::cast(probe)->value();
ASSERT(!context->get(context_index)->IsTheHole());
context->set(context_index, value);
return value;
} else {
// Object is not mapped, defer to the arguments.
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
if (arguments->IsDictionary()) {
return SetDictionaryElement(index, value, strict_mode,
check_prototype);
} else {
return SetFastElement(index, value, strict_mode, check_prototype);
// Redefining attributes of an aliased element destroys fast aliasing.
if (set_mode == SET_PROPERTY || attr == NONE) return value;
parameter_map->set_the_hole(index + 2);
// For elements that are still writable we re-establish slow aliasing.
if ((attr & READ_ONLY) == 0) {
MaybeObject* maybe_entry =
isolate->heap()->AllocateAliasedArgumentsEntry(context_index);
if (!maybe_entry->ToObject(&value)) return maybe_entry;
}
}
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
if (arguments->IsDictionary()) {
return SetDictionaryElement(index, value, attr, strict_mode,
check_prototype, set_mode);
} else {
return SetFastElement(index, value, strict_mode, check_prototype);
}
}
}
// All possible cases have been handled above. Add a return to avoid the

96
deps/v8/src/objects.h

@ -440,7 +440,8 @@ const int kVariableSizeSentinel = 0;
V(SCRIPT, Script, script) \
V(CODE_CACHE, CodeCache, code_cache) \
V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache) \
V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info)
V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info) \
V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry)
#ifdef ENABLE_DEBUGGER_SUPPORT
#define STRUCT_LIST_DEBUGGER(V) \
@ -596,6 +597,7 @@ enum InstanceType {
CODE_CACHE_TYPE,
POLYMORPHIC_CODE_CACHE_TYPE,
TYPE_FEEDBACK_INFO_TYPE,
ALIASED_ARGUMENTS_ENTRY_TYPE,
// The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT
// is defined. However as include/v8.h contain some of the instance type
// constants always having them avoids them getting different numbers
@ -1348,6 +1350,16 @@ enum EnsureElementsMode {
};
// Indicates whether a property should be set or (re)defined. Setting of a
// property causes attributes to remain unchanged, writability to be checked
// and callbacks to be called. Defining of a property causes attributes to
// be updated and callbacks to be overridden.
enum SetPropertyMode {
SET_PROPERTY,
DEFINE_PROPERTY
};
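
A toy, self-contained sketch of the distinction (not V8's element machinery): a plain set respects an existing read-only attribute, while a define replaces the attributes outright.

#include <cstdio>
#include <map>

enum SetPropertyMode { SET_PROPERTY, DEFINE_PROPERTY };

struct Slot { int value; bool read_only; };

// Returns false when a plain set hits a read-only slot.
bool Write(std::map<int, Slot>& elements, int index, int value,
           bool read_only, SetPropertyMode mode) {
  auto it = elements.find(index);
  if (it != elements.end()) {
    if (mode == SET_PROPERTY && it->second.read_only) return false;
    it->second.value = value;
    if (mode == DEFINE_PROPERTY) it->second.read_only = read_only;  // attributes updated
    return true;
  }
  elements[index] = Slot{value, read_only};
  return true;
}

int main() {
  std::map<int, Slot> elements;
  Write(elements, 0, 1, /*read_only=*/true, DEFINE_PROPERTY);
  bool set_ok = Write(elements, 0, 2, false, SET_PROPERTY);     // rejected: read-only
  bool def_ok = Write(elements, 0, 3, false, DEFINE_PROPERTY);  // redefined as writable
  std::printf("set=%d define=%d value=%d\n", set_ok, def_ok, elements[0].value);
  return 0;
}
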
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
class JSReceiver: public HeapObject {
@ -1386,6 +1398,7 @@ class JSReceiver: public HeapObject {
// Can cause GC, or return failure if GC is required.
MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype);
@ -1739,10 +1752,13 @@ class JSObject: public JSReceiver {
StrictModeFlag strict_mode,
bool check_prototype);
MUST_USE_RESULT MaybeObject* SetDictionaryElement(uint32_t index,
Object* value,
StrictModeFlag strict_mode,
bool check_prototype);
MUST_USE_RESULT MaybeObject* SetDictionaryElement(
uint32_t index,
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype,
SetPropertyMode set_mode = SET_PROPERTY);
MUST_USE_RESULT MaybeObject* SetFastDoubleElement(
uint32_t index,
@ -1750,23 +1766,28 @@ class JSObject: public JSReceiver {
StrictModeFlag strict_mode,
bool check_prototype = true);
static Handle<Object> SetOwnElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
StrictModeFlag strict_mode);
// Empty handle is returned if the element cannot be set to the given value.
static MUST_USE_RESULT Handle<Object> SetElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
StrictModeFlag strict_mode);
static MUST_USE_RESULT Handle<Object> SetElement(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attr,
StrictModeFlag strict_mode,
SetPropertyMode set_mode = SET_PROPERTY);
// A Failure object is returned if GC is needed.
MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
Object* value,
StrictModeFlag strict_mode,
bool check_prototype);
MUST_USE_RESULT MaybeObject* SetElement(
uint32_t index,
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype = true,
SetPropertyMode set_mode = SET_PROPERTY);
// Returns the index'th element.
// The undefined object if index is out of bounds.
@ -2087,13 +2108,17 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(
uint32_t index,
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype);
bool check_prototype,
SetPropertyMode set_mode);
MUST_USE_RESULT MaybeObject* SetElementWithoutInterceptor(
uint32_t index,
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype);
bool check_prototype,
SetPropertyMode set_mode);
// Searches the prototype chain for a callback setter and sets the property
// with the setter if it finds one. The '*found' flag indicates whether
@ -4182,6 +4207,11 @@ class Code: public HeapObject {
inline bool is_compiled_optimizable();
inline void set_compiled_optimizable(bool value);
// [has_self_optimization_header]: For FUNCTION kind, tells if it has
// a self-optimization header.
inline bool has_self_optimization_header();
inline void set_self_optimization_header(bool value);
// [allow_osr_at_loop_nesting_level]: For FUNCTION kind, tells for
// how long the function has been marked for OSR and therefore which
// level of loop nesting we are willing to do on-stack replacement
@ -4401,6 +4431,7 @@ class Code: public HeapObject {
public BitField<bool, 0, 1> {}; // NOLINT
class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
class FullCodeFlagsHasSelfOptimizationHeader: public BitField<bool, 3, 1> {};
static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
@ -6412,6 +6443,39 @@ class TypeFeedbackInfo: public Struct {
};
// Representation of a slow alias as part of a non-strict arguments object.
// For fast aliases (if HasNonStrictArgumentsElements()):
// - the parameter map contains an index into the context
// - all attributes of the element have default values
// For slow aliases (if HasDictionaryArgumentsElements()):
// - the parameter map contains no fast alias mapping (i.e. the hole)
// - this struct (in the slow backing store) contains an index into the context
// - all attributes are available as part of the property details
class AliasedArgumentsEntry: public Struct {
public:
inline int aliased_context_slot();
inline void set_aliased_context_slot(int count);
static inline AliasedArgumentsEntry* cast(Object* obj);
#ifdef OBJECT_PRINT
inline void AliasedArgumentsEntryPrint() {
AliasedArgumentsEntryPrint(stdout);
}
void AliasedArgumentsEntryPrint(FILE* out);
#endif
#ifdef DEBUG
void AliasedArgumentsEntryVerify();
#endif
static const int kAliasedContextSlot = HeapObject::kHeaderSize;
static const int kSize = kAliasedContextSlot + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AliasedArgumentsEntry);
};
enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};

36
deps/v8/src/parser.cc

@ -1188,14 +1188,28 @@ Statement* Parser::ParseModuleElement(ZoneStringList* labels,
case Token::LET:
case Token::CONST:
return ParseVariableStatement(kModuleElement, ok);
case Token::MODULE:
return ParseModuleDeclaration(ok);
case Token::IMPORT:
return ParseImportDeclaration(ok);
case Token::EXPORT:
return ParseExportDeclaration(ok);
default:
return ParseStatement(labels, ok);
default: {
Statement* stmt = ParseStatement(labels, CHECK_OK);
// Handle 'module' as a context-sensitive keyword.
if (FLAG_harmony_modules &&
peek() == Token::IDENTIFIER &&
!scanner().HasAnyLineTerminatorBeforeNext() &&
stmt != NULL) {
ExpressionStatement* estmt = stmt->AsExpressionStatement();
if (estmt != NULL &&
estmt->expression()->AsVariableProxy() != NULL &&
estmt->expression()->AsVariableProxy()->name()->Equals(
isolate()->heap()->module_symbol()) &&
!scanner().literal_contains_escapes()) {
return ParseModuleDeclaration(ok);
}
}
return stmt;
}
}
}
@ -1206,7 +1220,6 @@ Block* Parser::ParseModuleDeclaration(bool* ok) {
// Create new block with one expected declaration.
Block* block = factory()->NewBlock(NULL, 1, true);
Expect(Token::MODULE, CHECK_OK);
Handle<String> name = ParseIdentifier(CHECK_OK);
// top_scope_->AddDeclaration(
// factory()->NewModuleDeclaration(proxy, module, top_scope_));
@ -2172,8 +2185,17 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
return ParseNativeDeclaration(ok);
}
// Parsed expression statement.
ExpectSemicolon(CHECK_OK);
// Parsed expression statement, or the context-sensitive 'module' keyword.
// Only expect semicolon in the former case.
if (!FLAG_harmony_modules ||
peek() != Token::IDENTIFIER ||
scanner().HasAnyLineTerminatorBeforeNext() ||
expr->AsVariableProxy() == NULL ||
!expr->AsVariableProxy()->name()->Equals(
isolate()->heap()->module_symbol()) ||
scanner().literal_contains_escapes()) {
ExpectSemicolon(CHECK_OK);
}
return factory()->NewExpressionStatement(expr);
}

11
deps/v8/src/platform-cygwin.cc

@ -355,6 +355,17 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
bool VirtualMemory::Guard(void* address) {
if (NULL == VirtualAlloc(address,
OS::CommitPageSize(),
MEM_COMMIT,
PAGE_READONLY | PAGE_GUARD)) {
return false;
}
return true;
}
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}

6
deps/v8/src/platform-freebsd.cc

@ -411,6 +411,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,

6
deps/v8/src/platform-linux.cc

@ -666,6 +666,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
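
On the POSIX ports, guarding a page amounts to revoking access to one already-committed page. A minimal standalone sketch of the same idea with plain mmap/mprotect, independent of V8's VirtualMemory class:

#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>

int main() {
  size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  // Reserve and commit three read/write pages.
  void* base = mmap(nullptr, 3 * page, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return 1;

  // Turn the middle page into a guard page: any access to it now faults.
  char* guard = static_cast<char*>(base) + page;
  if (mprotect(guard, page, PROT_NONE) != 0) return 1;

  std::printf("guard page at %p\n", static_cast<void*>(guard));
  munmap(base, 3 * page);
  return 0;
}
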
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,

6
deps/v8/src/platform-macos.cc

@ -429,6 +429,12 @@ bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
bool VirtualMemory::CommitRegion(void* address,
size_t size,
bool is_executable) {

6
deps/v8/src/platform-nullos.cc

@ -295,6 +295,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
bool VirtualMemory::Guard(void* address) {
UNIMPLEMENTED();
return false;
}
class Thread::PlatformData : public Malloced {
public:
PlatformData() {

6
deps/v8/src/platform-openbsd.cc

@ -458,6 +458,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(GetRandomMmapAddr(),
size,

6
deps/v8/src/platform-solaris.cc

@ -401,6 +401,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
bool VirtualMemory::Guard(void* address) {
OS::Guard(address, OS::CommitPageSize());
return true;
}
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,

11
deps/v8/src/platform-win32.cc

@ -1511,6 +1511,17 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
}
bool VirtualMemory::Guard(void* address) {
if (NULL == VirtualAlloc(address,
OS::CommitPageSize(),
MEM_COMMIT,
PAGE_READONLY | PAGE_GUARD)) {
return false;
}
return true;
}
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return VirtualFree(base, size, MEM_DECOMMIT) != 0;
}

3
deps/v8/src/platform.h

@ -356,6 +356,9 @@ class VirtualMemory {
// Uncommit real memory. Returns whether the operation succeeded.
bool Uncommit(void* address, size_t size);
// Creates a single guard page at the given address.
bool Guard(void* address);
void Release() {
ASSERT(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live

3
deps/v8/src/preparser.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -28,6 +28,7 @@
#ifndef V8_PREPARSER_H
#define V8_PREPARSER_H
#include "hashmap.h"
#include "token.h"
#include "scanner.h"

48
deps/v8/src/profile-generator.cc

@ -3166,7 +3166,7 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
debug_heap->Verify();
#endif
SetProgressTotal(4); // 2 passes + dominators + sizes.
SetProgressTotal(2); // 2 passes.
#ifdef DEBUG
debug_heap->Verify();
@ -3293,29 +3293,25 @@ bool HeapSnapshotGenerator::BuildDominatorTree(
for (int i = 0; i < root_index; ++i) (*dominators)[i] = kNoDominator;
(*dominators)[root_index] = root_index;
// We use time_stamps array to stamp entries with the iteration number
// when the dominance for the entry has been updated.
ScopedVector<int> time_stamps(entries_length);
for (int i = 0; i < entries_length; ++i) time_stamps[i] = -1;
// The affected array is used to mark those entries that may
// be affected because of dominators change among their retainers.
ScopedVector<bool> affected(entries_length);
for (int i = 0; i < entries_length; ++i) affected[i] = false;
Vector<HeapGraphEdge> children = entries[root_index]->children();
for (int i = 0; i < children.length(); ++i) {
// Mark the root direct children as affected on iteration zero.
time_stamps[children[i].to()->ordered_index()] = 0;
// Mark the root direct children as affected.
affected[children[i].to()->ordered_index()] = true;
}
int changed = 1;
int iteration = 0;
const int base_progress_counter = progress_counter_;
while (changed != 0) {
++iteration;
changed = 0;
bool changed = true;
while (changed) {
changed = false;
for (int i = root_index - 1; i >= 0; --i) {
// If dominator of the entry has already been set to root,
// then it can't propagate any further.
if ((*dominators)[i] == root_index) continue;
// If no retainers of the entry had been updated on current
// or previous iteration, then this entry is not affected.
if (time_stamps[i] < iteration - 1) continue;
if (!affected[i]) continue;
affected[i] = false;
int new_idom_index = kNoDominator;
Vector<HeapGraphEdge*> rets = entries[i]->retainers();
for (int j = 0; j < rets.length(); ++j) {
@ -3333,17 +3329,13 @@ bool HeapSnapshotGenerator::BuildDominatorTree(
if (new_idom_index != kNoDominator
&& dominators->at(i) != new_idom_index) {
(*dominators)[i] = new_idom_index;
++changed;
changed = true;
Vector<HeapGraphEdge> children = entries[i]->children();
for (int j = 0; j < children.length(); ++j) {
time_stamps[children[j].to()->ordered_index()] = iteration;
affected[children[j].to()->ordered_index()] = true;
}
}
}
int remaining = entries_length - changed;
ASSERT(remaining >= 0);
progress_counter_ = base_progress_counter + remaining;
if (!ProgressReport(true)) return false;
}
return true;
}
@ -3367,21 +3359,19 @@ bool HeapSnapshotGenerator::ApproximateRetainedSizes() {
// As for the dominators tree, we only know parent nodes, not
// children, so to sum up total sizes we "bubble" each node's self
// size, adding it to all of its parents.
for (int i = 0; i < snapshot_->entries()->length(); ++i) {
HeapEntry* entry = snapshot_->entries()->at(i);
List<HeapEntry*>& entries = *snapshot_->entries();
for (int i = 0; i < entries.length(); ++i) {
HeapEntry* entry = entries[i];
entry->set_retained_size(entry->self_size());
}
for (int i = 0;
i < snapshot_->entries()->length();
++i, ProgressStep()) {
HeapEntry* entry = snapshot_->entries()->at(i);
for (int i = 0; i < entries.length(); ++i) {
HeapEntry* entry = entries[i];
int entry_size = entry->self_size();
for (HeapEntry* dominator = entry->dominator();
dominator != entry;
entry = dominator, dominator = entry->dominator()) {
dominator->add_retained_size(entry_size);
}
if (!ProgressReport()) return false;
}
return true;
}
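
A minimal sketch of this bubbling pass over a dominator tree given as a parent-index array (the indices and sizes are made up; this is not the HeapEntry API):

#include <cstdio>
#include <vector>

int main() {
  // dominator[i] is the index of i's immediate dominator; the root (0) dominates itself.
  std::vector<int> dominator = {0, 0, 0, 1, 1};
  std::vector<int> self_size = {10, 20, 30, 40, 50};
  std::vector<int> retained_size = self_size;  // start from each node's self size

  // Add each node's self size to every dominator on its path to the root.
  for (size_t i = 0; i < dominator.size(); ++i) {
    int node = static_cast<int>(i);
    for (int dom = dominator[node]; dom != node;
         node = dom, dom = dominator[node]) {
      retained_size[dom] += self_size[i];
    }
  }

  for (size_t i = 0; i < retained_size.size(); ++i)
    std::printf("node %zu retained %d\n", i, retained_size[i]);
  return 0;
}
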

108
deps/v8/src/runtime.cc

@ -1039,7 +1039,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
elms->set(IS_ACCESSOR_INDEX, heap->false_value());
elms->set(VALUE_INDEX, *substr);
elms->set(WRITABLE_INDEX, heap->false_value());
elms->set(ENUMERABLE_INDEX, heap->false_value());
elms->set(ENUMERABLE_INDEX, heap->true_value());
elms->set(CONFIGURABLE_INDEX, heap->false_value());
return *desc;
}
@ -4355,53 +4355,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
// Check if this is an element.
uint32_t index;
bool is_element = name->AsArrayIndex(&index);
// Special case for elements if any of the flags might be involved.
// If elements are in fast case we always implicitly assume that:
// DONT_DELETE: false, DONT_ENUM: false, READ_ONLY: false.
if (is_element && (attr != NONE ||
js_object->HasLocalElement(index) == JSObject::DICTIONARY_ELEMENT)) {
// Normalize the elements to enable attributes on the property.
if (js_object->IsJSGlobalProxy()) {
// We do not need to do access checks here since these have already
// been performed by the call to GetOwnProperty.
Handle<Object> proto(js_object->GetPrototype());
// If proxy is detached, ignore the assignment. Alternatively,
// we could throw an exception.
if (proto->IsNull()) return *obj_value;
js_object = Handle<JSObject>::cast(proto);
}
// Don't allow element properties to be redefined on objects with external
// array elements.
if (js_object->HasExternalArrayElements()) {
Handle<Object> args[2] = { js_object, name };
Handle<Object> error =
isolate->factory()->NewTypeError("redef_external_array_element",
HandleVector(args, 2));
return isolate->Throw(*error);
}
Handle<SeededNumberDictionary> dictionary =
JSObject::NormalizeElements(js_object);
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
PropertyDetails details = PropertyDetails(attr, NORMAL);
Handle<SeededNumberDictionary> extended_dictionary =
SeededNumberDictionary::Set(dictionary, index, obj_value, details);
if (*extended_dictionary != *dictionary) {
if (js_object->GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
FixedArray::cast(js_object->elements())->set(1, *extended_dictionary);
} else {
js_object->set_elements(*extended_dictionary);
}
}
return *obj_value;
}
LookupResult result(isolate);
js_object->LocalLookupRealNamedProperty(*name, &result);
@ -4457,35 +4410,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
}
// Special case for elements if any of the flags are true.
// If elements are in fast case we always implicitly assume that:
// DONT_DELETE: false, DONT_ENUM: false, READ_ONLY: false.
static MaybeObject* NormalizeObjectSetElement(Isolate* isolate,
Handle<JSObject> js_object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attr) {
// Normalize the elements to enable attributes on the property.
Handle<SeededNumberDictionary> dictionary =
JSObject::NormalizeElements(js_object);
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
PropertyDetails details = PropertyDetails(attr, NORMAL);
Handle<SeededNumberDictionary> extended_dictionary =
SeededNumberDictionary::Set(dictionary, index, value, details);
if (*extended_dictionary != *dictionary) {
js_object->set_elements(*extended_dictionary);
}
return *value;
}
MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attr,
StrictModeFlag strict_mode) {
SetPropertyMode set_mode = attr == NONE ? SET_PROPERTY : DEFINE_PROPERTY;
HandleScope scope(isolate);
if (object->IsUndefined() || object->IsNull()) {
@ -4523,12 +4454,8 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
return *value;
}
if (((attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0)) {
return NormalizeObjectSetElement(isolate, js_object, index, value, attr);
}
Handle<Object> result =
JSObject::SetElement(js_object, index, value, strict_mode);
Handle<Object> result = JSObject::SetElement(
js_object, index, value, attr, strict_mode, set_mode);
if (result.is_null()) return Failure::Exception();
return *value;
}
@ -4536,15 +4463,8 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
if (key->IsString()) {
Handle<Object> result;
if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
if (((attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0)) {
return NormalizeObjectSetElement(isolate,
js_object,
index,
value,
attr);
}
result =
JSObject::SetElement(js_object, index, value, strict_mode);
result = JSObject::SetElement(
js_object, index, value, attr, strict_mode, set_mode);
} else {
Handle<String> key_string = Handle<String>::cast(key);
key_string->TryFlatten();
@ -4562,7 +4482,8 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
return js_object->SetElement(index, *value, strict_mode, true);
return js_object->SetElement(
index, *value, attr, strict_mode, true, set_mode);
} else {
return js_object->SetProperty(*name, *value, attr, strict_mode);
}
@ -4590,12 +4511,14 @@ MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
return *value;
}
return js_object->SetElement(index, *value, kNonStrictMode, true);
return js_object->SetElement(
index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
}
if (key->IsString()) {
if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
return js_object->SetElement(index, *value, kNonStrictMode, true);
return js_object->SetElement(
index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
} else {
Handle<String> key_string = Handle<String>::cast(key);
key_string->TryFlatten();
@ -4612,7 +4535,8 @@ MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
return js_object->SetElement(index, *value, kNonStrictMode, true);
return js_object->SetElement(
index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
} else {
return js_object->SetLocalPropertyIgnoreAttributes(*name, *value, attr);
}
@ -10316,9 +10240,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SwapElements) {
RETURN_IF_EMPTY_HANDLE(isolate, tmp2);
RETURN_IF_EMPTY_HANDLE(
isolate, JSObject::SetElement(jsobject, index1, tmp2, kStrictMode));
isolate, JSObject::SetElement(jsobject, index1, tmp2, NONE, kStrictMode));
RETURN_IF_EMPTY_HANDLE(
isolate, JSObject::SetElement(jsobject, index2, tmp1, kStrictMode));
isolate, JSObject::SetElement(jsobject, index2, tmp1, NONE, kStrictMode));
return isolate->heap()->undefined_value();
}

3
deps/v8/src/scanner.cc

@ -850,9 +850,6 @@ uc32 Scanner::ScanIdentifierUnicodeEscape() {
KEYWORD_GROUP('l') \
KEYWORD("let", harmony_scoping \
? Token::LET : Token::FUTURE_STRICT_RESERVED_WORD) \
KEYWORD_GROUP('m') \
KEYWORD("module", harmony_modules \
? Token::MODULE : Token::IDENTIFIER) \
KEYWORD_GROUP('n') \
KEYWORD("new", Token::NEW) \
KEYWORD("null", Token::NULL_LITERAL) \

26
deps/v8/src/scopes.cc

@ -39,26 +39,6 @@
namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
// A Zone allocator for use with LocalsMap.
// TODO(isolates): It is probably worth it to change the Allocator class to
// take a pointer to an isolate.
class ZoneAllocator: public Allocator {
public:
/* nothing to do */
virtual ~ZoneAllocator() {}
virtual void* New(size_t size) { return ZONE->New(static_cast<int>(size)); }
/* ignored - Zone is freed in one fell swoop */
virtual void Delete(void* p) {}
};
static ZoneAllocator* LocalsMapAllocator = ::new ZoneAllocator();
// ----------------------------------------------------------------------------
// Implementation of LocalsMap
//
@ -77,7 +57,7 @@ static bool Match(void* key1, void* key2) {
}
VariableMap::VariableMap() : HashMap(Match, LocalsMapAllocator, 8) {}
VariableMap::VariableMap() : ZoneHashMap(Match, 8) {}
VariableMap::~VariableMap() {}
@ -88,7 +68,7 @@ Variable* VariableMap::Declare(
bool is_valid_lhs,
Variable::Kind kind,
InitializationFlag initialization_flag) {
HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true);
Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), true);
if (p->value == NULL) {
// The variable has not been declared yet -> insert it.
ASSERT(p->key == name.location());
@ -104,7 +84,7 @@ Variable* VariableMap::Declare(
Variable* VariableMap::Lookup(Handle<String> name) {
HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), false);
Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), false);
if (p != NULL) {
ASSERT(*reinterpret_cast<String**>(p->key) == *name);
ASSERT(p->value != NULL);

4
deps/v8/src/scopes.h

@ -29,7 +29,7 @@
#define V8_SCOPES_H_
#include "ast.h"
#include "hashmap.h"
#include "zone.h"
namespace v8 {
namespace internal {
@ -38,7 +38,7 @@ class CompilationInfo;
// A hash map to support fast variable declaration and lookup.
class VariableMap: public HashMap {
class VariableMap: public ZoneHashMap {
public:
VariableMap();

16
deps/v8/src/serialize.cc

@ -1088,9 +1088,10 @@ Serializer::Serializer(SnapshotByteSink* sink)
external_reference_encoder_(new ExternalReferenceEncoder),
large_object_total_(0),
root_index_wave_front_(0) {
isolate_ = Isolate::Current();
// The serializer is meant to be used only to generate initial heap images
// from a context in which there is only one isolate.
ASSERT(Isolate::Current()->IsDefaultIsolate());
ASSERT(isolate_->IsDefaultIsolate());
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
@ -1642,8 +1643,8 @@ int Serializer::Allocate(int space, int size, bool* new_page) {
// serialized address.
CHECK(IsPowerOf2(Page::kPageSize));
int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
CHECK(size <= Page::kObjectAreaSize);
if (used_in_this_page + size > Page::kObjectAreaSize) {
CHECK(size <= SpaceAreaSize(space));
if (used_in_this_page + size > SpaceAreaSize(space)) {
*new_page = true;
fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
}
@ -1654,4 +1655,13 @@ int Serializer::Allocate(int space, int size, bool* new_page) {
}
int Serializer::SpaceAreaSize(int space) {
if (space == CODE_SPACE) {
return isolate_->memory_allocator()->CodePageAreaSize();
} else {
return Page::kPageSize - Page::kObjectStartOffset;
}
}
} } // namespace v8::internal

3
deps/v8/src/serialize.h

@ -556,6 +556,9 @@ class Serializer : public SerializerDeserializer {
return external_reference_encoder_->Encode(addr);
}
int SpaceAreaSize(int space);
Isolate* isolate_;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references. Large objects are
// just numbered sequentially since relative addresses make no

6
deps/v8/src/spaces-inl.h

@ -166,10 +166,8 @@ Page* Page::Initialize(Heap* heap,
Page* page = reinterpret_cast<Page*>(chunk);
ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
ASSERT(chunk->owner() == owner);
owner->IncreaseCapacity(Page::kObjectAreaSize);
owner->Free(page->ObjectAreaStart(),
static_cast<int>(page->ObjectAreaEnd() -
page->ObjectAreaStart()));
owner->IncreaseCapacity(page->area_size());
owner->Free(page->area_start(), page->area_size());
heap->incremental_marking()->SetOldSpacePageFlags(chunk);

178
deps/v8/src/spaces.cc

@ -75,8 +75,8 @@ HeapObjectIterator::HeapObjectIterator(Page* page,
owner == HEAP->cell_space() ||
owner == HEAP->code_space());
Initialize(reinterpret_cast<PagedSpace*>(owner),
page->ObjectAreaStart(),
page->ObjectAreaEnd(),
page->area_start(),
page->area_end(),
kOnePageOnly,
size_func);
ASSERT(page->WasSweptPrecisely());
@ -108,12 +108,12 @@ bool HeapObjectIterator::AdvanceToNextPage() {
cur_page = space_->anchor();
} else {
cur_page = Page::FromAddress(cur_addr_ - 1);
ASSERT(cur_addr_ == cur_page->ObjectAreaEnd());
ASSERT(cur_addr_ == cur_page->area_end());
}
cur_page = cur_page->next_page();
if (cur_page == space_->anchor()) return false;
cur_addr_ = cur_page->ObjectAreaStart();
cur_end_ = cur_page->ObjectAreaEnd();
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
ASSERT(cur_page->WasSweptPrecisely());
return true;
}
@ -227,7 +227,9 @@ Address CodeRange::AllocateRawMemory(const size_t requested,
}
ASSERT(*allocated <= current.size);
ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
if (!code_range_->Commit(current.start, *allocated, true)) {
if (!MemoryAllocator::CommitCodePage(code_range_,
current.start,
*allocated)) {
*allocated = 0;
return NULL;
}
@ -358,11 +360,17 @@ Address MemoryAllocator::AllocateAlignedMemory(size_t size,
VirtualMemory reservation;
Address base = ReserveAlignedMemory(size, alignment, &reservation);
if (base == NULL) return NULL;
if (!reservation.Commit(base,
size,
executable == EXECUTABLE)) {
return NULL;
if (executable == EXECUTABLE) {
CommitCodePage(&reservation, base, size);
} else {
if (!reservation.Commit(base,
size,
executable == EXECUTABLE)) {
return NULL;
}
}
controller->TakeControl(&reservation);
return base;
}
@ -378,9 +386,14 @@ void Page::InitializeAsAnchor(PagedSpace* owner) {
NewSpacePage* NewSpacePage::Initialize(Heap* heap,
Address start,
SemiSpace* semi_space) {
Address area_start = start + NewSpacePage::kObjectStartOffset;
Address area_end = start + Page::kPageSize;
MemoryChunk* chunk = MemoryChunk::Initialize(heap,
start,
Page::kPageSize,
area_start,
area_end,
NOT_EXECUTABLE,
semi_space);
chunk->set_next_chunk(NULL);
@ -410,6 +423,8 @@ void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
MemoryChunk* MemoryChunk::Initialize(Heap* heap,
Address base,
size_t size,
Address area_start,
Address area_end,
Executability executable,
Space* owner) {
MemoryChunk* chunk = FromAddress(base);
@ -418,6 +433,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->heap_ = heap;
chunk->size_ = size;
chunk->area_start_ = area_start;
chunk->area_end_ = area_end;
chunk->flags_ = 0;
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
@ -431,9 +448,13 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE);
if (executable == EXECUTABLE) {
chunk->SetFlag(IS_EXECUTABLE);
}
if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA);
if (owner == heap->old_data_space()) {
chunk->SetFlag(CONTAINS_ONLY_DATA);
}
return chunk;
}
@ -462,11 +483,16 @@ void MemoryChunk::Unlink() {
MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
Executability executable,
Space* owner) {
size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
size_t chunk_size;
Heap* heap = isolate_->heap();
Address base = NULL;
VirtualMemory reservation;
Address area_start = NULL;
Address area_end = NULL;
if (executable == EXECUTABLE) {
chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
OS::CommitPageSize()) + CodePageGuardSize();
// Check executable memory limit.
if (size_executable_ + chunk_size > capacity_executable_) {
LOG(isolate_,
@ -494,18 +520,30 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
// Update executable memory size.
size_executable_ += reservation.size();
}
#ifdef DEBUG
ZapBlock(base, CodePageGuardStartOffset());
ZapBlock(base + CodePageAreaStartOffset(), body_size);
#endif
area_start = base + CodePageAreaStartOffset();
area_end = area_start + body_size;
} else {
chunk_size = MemoryChunk::kObjectStartOffset + body_size;
base = AllocateAlignedMemory(chunk_size,
MemoryChunk::kAlignment,
executable,
&reservation);
if (base == NULL) return NULL;
}
#ifdef DEBUG
ZapBlock(base, chunk_size);
ZapBlock(base, chunk_size);
#endif
area_start = base + Page::kObjectStartOffset;
area_end = base + chunk_size;
}
isolate_->counters()->memory_allocated()->
Increment(static_cast<int>(chunk_size));
@ -518,6 +556,8 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
MemoryChunk* result = MemoryChunk::Initialize(heap,
base,
chunk_size,
area_start,
area_end,
executable,
owner);
result->set_reserved_memory(&reservation);
@ -527,7 +567,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
Executability executable) {
MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);
MemoryChunk* chunk = AllocateChunk(owner->AreaSize(),
executable,
owner);
if (chunk == NULL) return NULL;
@ -648,6 +690,65 @@ void MemoryAllocator::ReportStatistics() {
}
#endif
int MemoryAllocator::CodePageGuardStartOffset() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
}
int MemoryAllocator::CodePageGuardSize() {
return static_cast<int>(OS::CommitPageSize());
}
int MemoryAllocator::CodePageAreaStartOffset() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
return CodePageGuardStartOffset() + CodePageGuardSize();
}
int MemoryAllocator::CodePageAreaEndOffset() {
// We are guarding code pages: the last OS page will be protected as
// non-writable.
return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
}
bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
Address start,
size_t size) {
// Commit page header (not executable).
if (!vm->Commit(start,
CodePageGuardStartOffset(),
false)) {
return false;
}
// Create guard page after the header.
if (!vm->Guard(start + CodePageGuardStartOffset())) {
return false;
}
// Commit page body (executable).
size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
if (!vm->Commit(start + CodePageAreaStartOffset(),
area_size,
true)) {
return false;
}
// Create guard page after the allocatable area.
if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
return false;
}
return true;
}
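The helpers above lay a code page out as a non-executable header, a leading guard page, an executable body, and a trailing guard page, with every boundary rounded to the OS commit-page size. The following standalone C++ sketch (not V8 code) reproduces that offset arithmetic with assumed sizes — a 4 KB commit page, a 1 MB chunk, and a header that fits in the first OS page — purely to show how the offsets relate:

// Sketch of the code-page layout arithmetic; all constants are assumptions.
#include <cassert>
#include <cstdio>

static const int kCommitPageSize = 4096;      // assumed OS::CommitPageSize()
static const int kPageSize = 1 << 20;         // assumed Page::kPageSize
static const int kObjectStartOffset = 256;    // assumed Page::kObjectStartOffset

static int RoundUp(int value, int alignment) {
  return ((value + alignment - 1) / alignment) * alignment;
}

// Guard page begins at the first OS-page boundary at or after the header.
static int CodePageGuardStartOffset() {
  return RoundUp(kObjectStartOffset, kCommitPageSize);
}
static int CodePageGuardSize() { return kCommitPageSize; }

// Executable area sits between the leading and trailing guard pages.
static int CodePageAreaStartOffset() {
  return CodePageGuardStartOffset() + CodePageGuardSize();
}
static int CodePageAreaEndOffset() { return kPageSize - kCommitPageSize; }

int main() {
  // Layout: header | guard | executable area | guard
  int area_size = CodePageAreaEndOffset() - CodePageAreaStartOffset();
  printf("guard at %d, area [%d, %d), %d usable bytes\n",
         CodePageGuardStartOffset(), CodePageAreaStartOffset(),
         CodePageAreaEndOffset(), area_size);
  assert(area_size > 0);
  return 0;
}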
// -----------------------------------------------------------------------------
// MemoryChunk implementation
@@ -671,8 +772,14 @@ PagedSpace::PagedSpace(Heap* heap,
was_swept_conservatively_(false),
first_unswept_page_(Page::FromAddress(NULL)),
unswept_free_bytes_(0) {
if (id == CODE_SPACE) {
area_size_ = heap->isolate()->memory_allocator()->
CodePageAreaSize();
} else {
area_size_ = Page::kPageSize - Page::kObjectStartOffset;
}
max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
* Page::kObjectAreaSize;
* AreaSize();
accounting_stats_.Clear();
allocation_info_.top = NULL;
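With the constructor change above, each paged space derives its maximum capacity from its own allocatable area instead of the fixed Page::kObjectAreaSize. A minimal sketch of that rounding, using assumed sizes (1 MB pages, a 256-byte header) rather than V8's real constants:

// Illustrative only: capacity is rounded down to whole pages, then converted
// to usable (area) bytes; the constants here are assumptions.
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t kPageSize = 1 << 20;          // assumed Page::kPageSize
  const int64_t kAreaSize = kPageSize - 256;  // assumed non-code area size

  int64_t requested = 5 * kPageSize + 12345;  // arbitrary requested capacity
  int64_t pages = requested / kPageSize;      // RoundDown(requested, kPageSize) / kPageSize
  int64_t max_capacity = pages * kAreaSize;   // usable bytes, not raw page bytes

  printf("%lld pages -> %lld usable bytes\n",
         static_cast<long long>(pages), static_cast<long long>(max_capacity));
  return 0;
}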
@@ -722,8 +829,8 @@ MaybeObject* PagedSpace::FindObject(Address addr) {
}
bool PagedSpace::CanExpand() {
ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
ASSERT(Capacity() % Page::kObjectAreaSize == 0);
ASSERT(max_capacity_ % AreaSize() == 0);
ASSERT(Capacity() % AreaSize() == 0);
if (Capacity() == max_capacity_) return false;
@@ -763,6 +870,7 @@ int PagedSpace::CountTotalPages() {
void PagedSpace::ReleasePage(Page* page) {
ASSERT(page->LiveBytes() == 0);
ASSERT(AreaSize() == page->area_size());
// Adjust list of unswept pages if the page is the head of the list.
if (first_unswept_page_ == page) {
@@ -775,7 +883,7 @@ void PagedSpace::ReleasePage(Page* page) {
if (page->WasSwept()) {
intptr_t size = free_list_.EvictFreeListItems(page);
accounting_stats_.AllocateBytes(size);
ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size));
ASSERT_EQ(AreaSize(), static_cast<int>(size));
} else {
DecreaseUnsweptFreeBytes(page);
}
@@ -792,8 +900,8 @@ void PagedSpace::ReleasePage(Page* page) {
}
ASSERT(Capacity() > 0);
ASSERT(Capacity() % Page::kObjectAreaSize == 0);
accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
ASSERT(Capacity() % AreaSize() == 0);
accounting_stats_.ShrinkSpace(AreaSize());
}
@@ -804,9 +912,9 @@ void PagedSpace::ReleaseAllUnusedPages() {
if (!page->WasSwept()) {
if (page->LiveBytes() == 0) ReleasePage(page);
} else {
HeapObject* obj = HeapObject::FromAddress(page->body());
HeapObject* obj = HeapObject::FromAddress(page->area_start());
if (obj->IsFreeSpace() &&
FreeSpace::cast(obj)->size() == Page::kObjectAreaSize) {
FreeSpace::cast(obj)->size() == AreaSize()) {
// Sometimes we allocate memory from free list but don't
// immediately initialize it (e.g. see PagedSpace::ReserveSpace
// called from Heap::ReserveSpace that can cause GC before
@@ -817,7 +925,7 @@ void PagedSpace::ReleaseAllUnusedPages() {
// by free list items.
FreeList::SizeStats sizes;
free_list_.CountFreeListItems(page, &sizes);
if (sizes.Total() == Page::kObjectAreaSize) {
if (sizes.Total() == AreaSize()) {
ReleasePage(page);
}
}
@@ -848,8 +956,8 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
}
ASSERT(page->WasSweptPrecisely());
HeapObjectIterator it(page, NULL);
Address end_of_previous_object = page->ObjectAreaStart();
Address top = page->ObjectAreaEnd();
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
int black_size = 0;
for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
ASSERT(end_of_previous_object <= object->address());
@@ -1061,7 +1169,7 @@ bool NewSpace::AddFreshPage() {
}
// Clear remainder of current page.
Address limit = NewSpacePage::FromLimit(top)->body_limit();
Address limit = NewSpacePage::FromLimit(top)->area_end();
if (heap()->gc_state() == Heap::SCAVENGE) {
heap()->promotion_queue()->SetNewLimit(limit);
heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
@@ -1111,7 +1219,7 @@ void NewSpace::Verify() {
// There should be objects packed in from the low address up to the
// allocation pointer.
Address current = to_space_.first_page()->body();
Address current = to_space_.first_page()->area_start();
CHECK_EQ(current, to_space_.space_start());
while (current != top()) {
@@ -1146,7 +1254,7 @@ void NewSpace::Verify() {
NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
// Next page should be valid.
CHECK(!page->is_anchor());
current = page->body();
current = page->area_start();
}
}
@@ -1932,7 +2040,7 @@ static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
if (sizes->huge_size_ < Page::kObjectAreaSize) {
if (sizes->huge_size_ < p->area_size()) {
sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
@@ -1962,7 +2070,7 @@ static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
intptr_t FreeList::EvictFreeListItems(Page* p) {
intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
if (sum < Page::kObjectAreaSize) {
if (sum < p->area_size()) {
sum += EvictFreeListItemsInList(&small_list_, p) +
EvictFreeListItemsInList(&medium_list_, p) +
EvictFreeListItemsInList(&large_list_, p);
@@ -2084,7 +2192,7 @@ void PagedSpace::PrepareForMarkCompact() {
bool PagedSpace::ReserveSpace(int size_in_bytes) {
ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize);
ASSERT(size_in_bytes <= AreaSize());
ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
Address current_top = allocation_info_.top;
Address new_top = current_top + size_in_bytes;
@@ -2464,7 +2572,7 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
LargePage* page = heap()->isolate()->memory_allocator()->
AllocateLargePage(object_size, executable, this);
if (page == NULL) return Failure::RetryAfterGC(identity());
ASSERT(page->body_size() >= object_size);
ASSERT(page->area_size() >= object_size);
size_ += static_cast<int>(page->size());
objects_size_ += object_size;
@@ -2580,7 +2688,7 @@ void LargeObjectSpace::Verify() {
// object area start.
HeapObject* object = chunk->GetObject();
Page* page = Page::FromAddress(object->address());
ASSERT(object->address() == page->ObjectAreaStart());
ASSERT(object->address() == page->area_start());
// The first word should be a map, and we expect all map pointers to be
// in map space.

105
deps/v8/src/spaces.h

@@ -103,7 +103,7 @@ class Isolate;
ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
#define ASSERT_OBJECT_SIZE(size) \
ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
#define ASSERT_PAGE_OFFSET(offset) \
ASSERT((Page::kObjectStartOffset <= offset) \
@@ -361,21 +361,15 @@ class MemoryChunk {
store_buffer_counter_ = counter;
}
Address body() { return address() + kObjectStartOffset; }
Address body_limit() { return address() + size(); }
int body_size() { return static_cast<int>(size() - kObjectStartOffset); }
bool Contains(Address addr) {
return addr >= body() && addr < address() + size();
return addr >= area_start() && addr < area_end();
}
// Checks whether addr can be a limit of addresses in this page.
// It's a limit if it's in the page, or if it's just after the
// last byte of the page.
bool ContainsLimit(Address addr) {
return addr >= body() && addr <= address() + size();
return addr >= area_start() && addr <= area_end();
}
enum MemoryChunkFlags {
@@ -487,8 +481,9 @@ class MemoryChunk {
static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
static const intptr_t kLiveBytesOffset =
kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
kPointerSize + kPointerSize + kPointerSize + kIntSize;
kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
kPointerSize + kPointerSize +
kPointerSize + kPointerSize + kPointerSize + kIntSize;
static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
@@ -594,12 +589,22 @@ class MemoryChunk {
ClearFlag(EVACUATION_CANDIDATE);
}
Address area_start() { return area_start_; }
Address area_end() { return area_end_; }
int area_size() {
return static_cast<int>(area_end() - area_start());
}
protected:
MemoryChunk* next_chunk_;
MemoryChunk* prev_chunk_;
size_t size_;
intptr_t flags_;
// Start and end of allocatable memory on this chunk.
Address area_start_;
Address area_end_;
// If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_;
// The identity of the owning space. This is tagged as a failure pointer, but
@@ -618,6 +623,8 @@ class MemoryChunk {
static MemoryChunk* Initialize(Heap* heap,
Address base,
size_t size,
Address area_start,
Address area_end,
Executability executable,
Space* owner);
@@ -657,12 +664,6 @@ class Page : public MemoryChunk {
inline void set_next_page(Page* page);
inline void set_prev_page(Page* page);
// Returns the start address of the object area in this page.
Address ObjectAreaStart() { return address() + kObjectStartOffset; }
// Returns the end address (exclusive) of the object area in this page.
Address ObjectAreaEnd() { return address() + Page::kPageSize; }
// Checks whether an address is page aligned.
static bool IsAlignedToPageSize(Address a) {
return 0 == (OffsetFrom(a) & kPageAlignmentMask);
@@ -685,21 +686,14 @@ class Page : public MemoryChunk {
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
// Object area size in bytes.
static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
// Maximum object size that fits in a page.
static const int kMaxHeapObjectSize = kObjectAreaSize;
static const int kFirstUsedCell =
(kObjectStartOffset/kPointerSize) >> Bitmap::kBitsPerCellLog2;
static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
static const int kLastUsedCell =
((kPageSize - kPointerSize)/kPointerSize) >>
Bitmap::kBitsPerCellLog2;
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
inline void ClearGCFields();
@@ -734,7 +728,7 @@ STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
class LargePage : public MemoryChunk {
public:
HeapObject* GetObject() {
return HeapObject::FromAddress(body());
return HeapObject::FromAddress(area_start());
}
inline LargePage* next_page() const {
@@ -975,7 +969,7 @@ class MemoryAllocator {
// Returns maximum available bytes that the old space can have.
intptr_t MaxAvailable() {
return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
}
#ifdef DEBUG
@@ -1028,6 +1022,20 @@ class MemoryAllocator {
bool MemoryAllocationCallbackRegistered(
MemoryAllocationCallback callback);
static int CodePageGuardStartOffset();
static int CodePageGuardSize();
static int CodePageAreaStartOffset();
static int CodePageAreaEndOffset();
static int CodePageAreaSize() {
return CodePageAreaEndOffset() - CodePageAreaStartOffset();
}
static bool CommitCodePage(VirtualMemory* vm, Address start, size_t size);
private:
Isolate* isolate_;
@@ -1380,7 +1388,7 @@ class FreeList BASE_EMBEDDED {
private:
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
@@ -1572,12 +1580,12 @@ class PagedSpace : public Space {
void IncreaseUnsweptFreeBytes(Page* p) {
ASSERT(ShouldBeSweptLazily(p));
unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes());
unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
}
void DecreaseUnsweptFreeBytes(Page* p) {
ASSERT(ShouldBeSweptLazily(p));
unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes());
unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
}
bool AdvanceSweeper(intptr_t bytes_to_sweep);
@@ -1600,7 +1608,14 @@ class PagedSpace : public Space {
// Returns the number of total pages in this space.
int CountTotalPages();
// Return size of allocatable area on a page in this space.
inline int AreaSize() {
return area_size_;
}
protected:
int area_size_;
// Maximum capacity of this space.
intptr_t max_capacity_;
@@ -1702,6 +1717,8 @@ class NewSpacePage : public MemoryChunk {
(1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
(1 << MemoryChunk::SCAN_ON_SCAVENGE);
static const int kAreaSize = Page::kNonCodeObjectAreaSize;
inline NewSpacePage* next_page() const {
return static_cast<NewSpacePage*>(next_chunk());
}
@@ -1814,22 +1831,22 @@ class SemiSpace : public Space {
// Returns the start address of the first page of the space.
Address space_start() {
ASSERT(anchor_.next_page() != &anchor_);
return anchor_.next_page()->body();
return anchor_.next_page()->area_start();
}
// Returns the start address of the current page of the space.
Address page_low() {
return current_page_->body();
return current_page_->area_start();
}
// Returns one past the end address of the space.
Address space_end() {
return anchor_.prev_page()->body_limit();
return anchor_.prev_page()->area_end();
}
// Returns one past the end address of the current page of the space.
Address page_high() {
return current_page_->body_limit();
return current_page_->area_end();
}
bool AdvancePage() {
@@ -1965,7 +1982,7 @@ class SemiSpaceIterator : public ObjectIterator {
NewSpacePage* page = NewSpacePage::FromLimit(current_);
page = page->next_page();
ASSERT(!page->is_anchor());
current_ = page->body();
current_ = page->area_start();
if (current_ == limit_) return NULL;
}
@@ -2073,7 +2090,7 @@ class NewSpace : public Space {
// Return the allocated bytes in the active semispace.
virtual intptr_t Size() {
return pages_used_ * Page::kObjectAreaSize +
return pages_used_ * NewSpacePage::kAreaSize +
static_cast<int>(top() - to_space_.page_low());
}
@@ -2085,7 +2102,7 @@ class NewSpace : public Space {
// Return the current capacity of a semispace.
intptr_t EffectiveCapacity() {
SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
return (to_space_.Capacity() / Page::kPageSize) * Page::kObjectAreaSize;
return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
}
// Return the current capacity of a semispace.
@@ -2302,7 +2319,7 @@ class OldSpace : public PagedSpace {
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) {
return page->ObjectAreaEnd();
return page->area_end();
}
public:
@@ -2331,12 +2348,12 @@ class FixedSpace : public PagedSpace {
: PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
object_size_in_bytes_(object_size_in_bytes),
name_(name) {
page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
}
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) {
return page->ObjectAreaEnd() - page_extra_;
return page->area_end() - page_extra_;
}
int object_size_in_bytes() { return object_size_in_bytes_; }
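For fixed-size-object spaces, page_extra_ is the tail of each page that cannot hold another whole object, now measured against the non-code object area. A short sketch of that remainder arithmetic under assumed sizes (the real Map::kSize and area size may differ):

// Illustrative remainder computation for a fixed-size-object space;
// both constants below are assumptions.
#include <cstdio>

int main() {
  const int kNonCodeObjectAreaSize = (1 << 20) - 256;  // assumed area size
  const int kObjectSize = 88;                          // assumed Map::kSize

  // Bytes at the end of the page that cannot hold another whole object;
  // the per-page allocation limit stops this many bytes before area_end().
  int page_extra = kNonCodeObjectAreaSize % kObjectSize;
  printf("objects per page: %d, page_extra: %d bytes\n",
         kNonCodeObjectAreaSize / kObjectSize, page_extra);
  return 0;
}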
@@ -2387,7 +2404,7 @@ class MapSpace : public FixedSpace {
#endif
private:
static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
// Do map space compaction if there is a page gap.
int CompactionThreshold() {

8
deps/v8/src/store-buffer.cc

@@ -453,14 +453,14 @@ void StoreBuffer::FindPointersToNewSpaceInRegion(
// Compute start address of the first map following given addr.
static inline Address MapStartAlign(Address addr) {
Address page = Page::FromAddress(addr)->ObjectAreaStart();
Address page = Page::FromAddress(addr)->area_start();
return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
}
// Compute end address of the first map preceding given addr.
static inline Address MapEndAlign(Address addr) {
Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
Address page = Page::FromAllocationTop(addr)->area_start();
return page + ((addr - page) / Map::kSize * Map::kSize);
}
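Both helpers round an address to a Map-sized boundary measured from the page's area_start(): MapStartAlign rounds up to the first map at or after addr, MapEndAlign rounds down to the last map boundary at or before it. A standalone sketch of that rounding with assumed values:

// Sketch of MapStartAlign / MapEndAlign-style rounding; kMapSize and the
// addresses are assumptions, not V8's actual values.
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kMapSize = 88;         // assumed Map::kSize
  uintptr_t area_start = 0x100000;       // assumed page->area_start()
  uintptr_t addr = area_start + 1000;    // some address inside the page

  // First map boundary at or after addr (round up).
  uintptr_t start_align =
      area_start + ((addr - area_start + kMapSize - 1) / kMapSize) * kMapSize;
  // Last map boundary at or before addr (round down).
  uintptr_t end_align =
      area_start + ((addr - area_start) / kMapSize) * kMapSize;

  printf("addr +%lu -> start +%lu, end +%lu\n",
         (unsigned long)(addr - area_start),
         (unsigned long)(start_align - area_start),
         (unsigned long)(end_align - area_start));
  return 0;
}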
@@ -523,8 +523,8 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
Page* page,
RegionCallback region_callback,
ObjectSlotCallback slot_callback) {
Address visitable_start = page->ObjectAreaStart();
Address end_of_page = page->ObjectAreaEnd();
Address visitable_start = page->area_start();
Address end_of_page = page->area_end();
Address visitable_end = visitable_start;

1
deps/v8/src/token.h

@@ -173,7 +173,6 @@ namespace internal {
K(EXPORT, "export", 0) \
K(IMPORT, "import", 0) \
K(LET, "let", 0) \
K(MODULE, "module", 0) \
\
/* Illegal token - not able to scan. */ \
T(ILLEGAL, "ILLEGAL", 0) \

2
deps/v8/src/version.cc

@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 9
#define BUILD_NUMBER 9
#define BUILD_NUMBER 11
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)

21
deps/v8/src/x64/full-codegen-x64.cc

@@ -100,6 +100,11 @@ class JumpPatchSite BASE_EMBEDDED {
};
int FullCodeGenerator::self_optimization_header_size() {
return 20;
}
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
@@ -120,13 +125,6 @@ void FullCodeGenerator::Generate() {
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
// We can optionally optimize based on counters rather than statistical
// sampling.
if (info->ShouldSelfOptimize()) {
@@ -134,6 +132,7 @@ void FullCodeGenerator::Generate() {
PrintF("[adding self-optimization header to %s]\n",
*info->function()->debug_name()->ToCString());
}
has_self_optimization_header_ = true;
MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
JSGlobalPropertyCell* cell;
@@ -145,9 +144,17 @@ void FullCodeGenerator::Generate() {
Handle<Code> compile_stub(
isolate()->builtins()->builtin(Builtins::kLazyRecompile));
__ j(zero, compile_stub, RelocInfo::CODE_TARGET);
ASSERT(masm_->pc_offset() == self_optimization_header_size());
}
}
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
// Strict mode functions and builtins need to replace the receiver
// with undefined when called as functions (without an explicit
// receiver object). rcx is zero for method calls and non-zero for
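The reordered prologue above also adds a counter-based self-optimization header: a JSGlobalPropertyCell holds a countdown that the prologue decrements, and when it reaches zero control jumps to the lazy-recompile builtin. A rough C++ analogue of that countdown (the threshold and names are assumptions; the real header is about 20 bytes of generated x64 code):

// Conceptual analogue of the self-optimization countdown, not generated code.
#include <cstdio>

struct CounterCell { int value; };  // stands in for the JSGlobalPropertyCell

static bool ShouldRecompile(CounterCell* cell) {
  // Decrement on every call; trigger optimization when the countdown hits zero.
  return --cell->value == 0;
}

int main() {
  CounterCell cell = {3};  // assumed kCallsUntilPrimitiveOpt
  for (int call = 1; call <= 4; ++call) {
    if (ShouldRecompile(&cell)) {
      printf("call %d: jump to the lazy-recompile stub\n", call);
    } else {
      printf("call %d: run the unoptimized code\n", call);
    }
  }
  return 0;
}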

3
deps/v8/src/zone.h

@@ -30,6 +30,7 @@
#include "allocation.h"
#include "checks.h"
#include "hashmap.h"
#include "globals.h"
#include "list.h"
#include "splay-tree.h"
@@ -239,6 +240,8 @@ class ZoneSplayTree: public SplayTree<Config, ZoneListAllocationPolicy> {
};
typedef TemplateHashMap<ZoneListAllocationPolicy> ZoneHashMap;
} } // namespace v8::internal
#endif // V8_ZONE_H_

10
deps/v8/test/cctest/test-alloc.cc

@@ -88,7 +88,7 @@ static MaybeObject* AllocateAfterFailures() {
static const int kLargeObjectSpaceFillerLength = 300000;
static const int kLargeObjectSpaceFillerSize = FixedArray::SizeFor(
kLargeObjectSpaceFillerLength);
ASSERT(kLargeObjectSpaceFillerSize > heap->MaxObjectSizeInPagedSpace());
ASSERT(kLargeObjectSpaceFillerSize > heap->old_pointer_space()->AreaSize());
while (heap->OldGenerationSpaceAvailable() > kLargeObjectSpaceFillerSize) {
CHECK(!heap->AllocateFixedArray(kLargeObjectSpaceFillerLength, TENURED)->
IsFailure());
@@ -214,11 +214,13 @@ TEST(CodeRange) {
while (total_allocated < 5 * code_range_size) {
if (current_allocated < code_range_size / 10) {
// Allocate a block.
// Geometrically distributed sizes, greater than Page::kMaxHeapObjectSize.
// Geometrically distributed sizes, greater than
// Page::kMaxNonCodeHeapObjectSize (which is greater than code page area).
// TODO(gc): instead of using 3 use some constant based on code_range_size
// kMaxHeapObjectSize.
size_t requested = (Page::kMaxHeapObjectSize << (Pseudorandom() % 3)) +
Pseudorandom() % 5000 + 1;
size_t requested =
(Page::kMaxNonCodeHeapObjectSize << (Pseudorandom() % 3)) +
Pseudorandom() % 5000 + 1;
size_t allocated = 0;
Address base = code_range->AllocateRawMemory(requested, &allocated);
CHECK(base != NULL);

9
deps/v8/test/cctest/test-api.cc

@@ -12331,18 +12331,21 @@ THREADED_TEST(PixelArray) {
i::Handle<i::Smi> value(i::Smi::FromInt(2));
i::Handle<i::Object> no_failure;
no_failure = i::JSObject::SetElement(jsobj, 1, value, i::kNonStrictMode);
no_failure =
i::JSObject::SetElement(jsobj, 1, value, NONE, i::kNonStrictMode);
ASSERT(!no_failure.is_null());
i::USE(no_failure);
CHECK_EQ(2, i::Smi::cast(jsobj->GetElement(1)->ToObjectChecked())->value());
*value.location() = i::Smi::FromInt(256);
no_failure = i::JSObject::SetElement(jsobj, 1, value, i::kNonStrictMode);
no_failure =
i::JSObject::SetElement(jsobj, 1, value, NONE, i::kNonStrictMode);
ASSERT(!no_failure.is_null());
i::USE(no_failure);
CHECK_EQ(255,
i::Smi::cast(jsobj->GetElement(1)->ToObjectChecked())->value());
*value.location() = i::Smi::FromInt(-1);
no_failure = i::JSObject::SetElement(jsobj, 1, value, i::kNonStrictMode);
no_failure =
i::JSObject::SetElement(jsobj, 1, value, NONE, i::kNonStrictMode);
ASSERT(!no_failure.is_null());
i::USE(no_failure);
CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(1)->ToObjectChecked())->value());

2
deps/v8/test/cctest/test-heap-profiler.cc

@@ -677,7 +677,7 @@ TEST(TakeHeapSnapshotAborting) {
LocalContext env;
const int snapshots_count = v8::HeapProfiler::GetSnapshotsCount();
TestActivityControl aborting_control(3);
TestActivityControl aborting_control(1);
const v8::HeapSnapshot* no_snapshot =
v8::HeapProfiler::TakeSnapshot(v8_str("abort"),
v8::HeapSnapshot::kFull,

14
deps/v8/test/cctest/test-heap.cc

@@ -676,7 +676,7 @@ TEST(JSArray) {
CHECK(array->HasFastTypeElements());
// array[length] = name.
array->SetElement(0, *name, kNonStrictMode, true)->ToObjectChecked();
array->SetElement(0, *name, NONE, kNonStrictMode)->ToObjectChecked();
CHECK_EQ(Smi::FromInt(1), array->length());
CHECK_EQ(array->GetElement(0), *name);
@@ -691,7 +691,7 @@ TEST(JSArray) {
CHECK(array->HasDictionaryElements()); // Must be in slow mode.
// array[length] = name.
array->SetElement(int_length, *name, kNonStrictMode, true)->ToObjectChecked();
array->SetElement(int_length, *name, NONE, kNonStrictMode)->ToObjectChecked();
uint32_t new_int_length = 0;
CHECK(array->length()->ToArrayIndex(&new_int_length));
CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
@@ -718,8 +718,8 @@ TEST(JSObjectCopy) {
obj->SetProperty(
*second, Smi::FromInt(2), NONE, kNonStrictMode)->ToObjectChecked();
obj->SetElement(0, *first, kNonStrictMode, true)->ToObjectChecked();
obj->SetElement(1, *second, kNonStrictMode, true)->ToObjectChecked();
obj->SetElement(0, *first, NONE, kNonStrictMode)->ToObjectChecked();
obj->SetElement(1, *second, NONE, kNonStrictMode)->ToObjectChecked();
// Make the clone.
Handle<JSObject> clone = Copy(obj);
@@ -737,8 +737,8 @@ TEST(JSObjectCopy) {
clone->SetProperty(
*second, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
clone->SetElement(0, *second, kNonStrictMode, true)->ToObjectChecked();
clone->SetElement(1, *first, kNonStrictMode, true)->ToObjectChecked();
clone->SetElement(0, *second, NONE, kNonStrictMode)->ToObjectChecked();
clone->SetElement(1, *first, NONE, kNonStrictMode)->ToObjectChecked();
CHECK_EQ(obj->GetElement(1), clone->GetElement(0));
CHECK_EQ(obj->GetElement(0), clone->GetElement(1));
@@ -820,7 +820,7 @@ TEST(Iteration) {
FACTORY->NewStringFromAscii(CStrVector("abcdefghij"), TENURED);
// Allocate a large string (for large object space).
int large_size = HEAP->MaxObjectSizeInPagedSpace() + 1;
int large_size = Page::kMaxNonCodeHeapObjectSize + 1;
char* str = new char[large_size];
for (int i = 0; i < large_size - 1; ++i) str[i] = 'a';
str[large_size - 1] = '\0';

6
deps/v8/test/cctest/test-mark-compact.cc

@@ -94,7 +94,7 @@ TEST(Promotion) {
// Allocate a fixed array in the new space.
int array_size =
(HEAP->MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
(Page::kMaxNonCodeHeapObjectSize - FixedArray::kHeaderSize) /
(kPointerSize * 4);
Object* obj = HEAP->AllocateFixedArray(array_size)->ToObjectChecked();
@@ -125,7 +125,7 @@ TEST(NoPromotion) {
// Allocate a big Fixed array in the new space.
int max_size =
Min(HEAP->MaxObjectSizeInPagedSpace(), HEAP->MaxObjectSizeInNewSpace());
Min(Page::kMaxNonCodeHeapObjectSize, HEAP->MaxObjectSizeInNewSpace());
int length = (max_size - FixedArray::kHeaderSize) / (2*kPointerSize);
Object* obj = i::Isolate::Current()->heap()->AllocateFixedArray(length)->
@@ -542,7 +542,7 @@ TEST(BootUpMemoryUse) {
if (v8::internal::Snapshot::IsEnabled()) {
CHECK_LE(booted_memory - initial_memory, 6532 * 1024); // 6388.
} else {
CHECK_LE(booted_memory - initial_memory, 6686 * 1024); // 6456
CHECK_LE(booted_memory - initial_memory, 6940 * 1024); // 6456
}
}
}

13
deps/v8/test/cctest/test-serialize.cc

@@ -558,7 +558,8 @@ DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
TEST(LinearAllocation) {
v8::V8::Initialize();
int new_space_max = 512 * KB;
int paged_space_max = Page::kMaxHeapObjectSize;
int paged_space_max = Page::kMaxNonCodeHeapObjectSize;
int code_space_max = HEAP->code_space()->AreaSize();
for (int size = 1000; size < 5 * MB; size += size >> 1) {
size &= ~8; // Round.
@@ -568,7 +569,7 @@ TEST(LinearAllocation) {
new_space_size,
paged_space_size, // Old pointer space.
paged_space_size, // Old data space.
HEAP->code_space()->RoundSizeDownToObjectAlignment(paged_space_size),
HEAP->code_space()->RoundSizeDownToObjectAlignment(code_space_max),
HEAP->map_space()->RoundSizeDownToObjectAlignment(paged_space_size),
HEAP->cell_space()->RoundSizeDownToObjectAlignment(paged_space_size),
size); // Large object space.
@@ -604,7 +605,7 @@ TEST(LinearAllocation) {
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kSmallFixedArraySize) % Page::kPageSize;
if (page_fullness < old_page_fullness ||
page_fullness > Page::kObjectAreaSize) {
page_fullness > HEAP->old_pointer_space()->AreaSize()) {
i = RoundUp(i, Page::kPageSize);
pointer_last = NULL;
}
@@ -624,7 +625,7 @@ TEST(LinearAllocation) {
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kSmallStringSize) % Page::kPageSize;
if (page_fullness < old_page_fullness ||
page_fullness > Page::kObjectAreaSize) {
page_fullness > HEAP->old_data_space()->AreaSize()) {
i = RoundUp(i, Page::kPageSize);
data_last = NULL;
}
@@ -642,7 +643,7 @@ TEST(LinearAllocation) {
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kMapSize) % Page::kPageSize;
if (page_fullness < old_page_fullness ||
page_fullness > Page::kObjectAreaSize) {
page_fullness > HEAP->map_space()->AreaSize()) {
i = RoundUp(i, Page::kPageSize);
map_last = NULL;
}
@@ -653,7 +654,7 @@ TEST(LinearAllocation) {
map_last = obj;
}
if (size > Page::kObjectAreaSize) {
if (size > Page::kMaxNonCodeHeapObjectSize) {
// Support for reserving space in large object space is not there yet,
// but using an always-allocate scope is fine for now.
AlwaysAllocateScope always;

7
deps/v8/test/cctest/test-spaces.cc

@@ -191,9 +191,10 @@ TEST(NewSpace) {
HEAP->ReservedSemiSpaceSize()));
CHECK(new_space.HasBeenSetUp());
while (new_space.Available() >= Page::kMaxHeapObjectSize) {
while (new_space.Available() >= Page::kMaxNonCodeHeapObjectSize) {
Object* obj =
new_space.AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
new_space.AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->
ToObjectUnchecked();
CHECK(new_space.Contains(HeapObject::cast(obj)));
}
@@ -223,7 +224,7 @@ TEST(OldSpace) {
CHECK(s->SetUp());
while (s->Available() > 0) {
s->AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
s->AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->ToObjectUnchecked();
}
s->TearDown();

2
deps/v8/test/mjsunit/get-own-property-descriptor.js

@@ -73,7 +73,7 @@ assertEquals(descObjectElement.value, 42);
var a = new String('foobar');
for (var i = 0; i < a.length; i++) {
var descStringObject = Object.getOwnPropertyDescriptor(a, i);
assertFalse(descStringObject.enumerable);
assertTrue(descStringObject.enumerable);
assertFalse(descStringObject.configurable);
assertFalse(descStringObject.writable);
assertEquals(descStringObject.value, a.substring(i, i+1));

22
deps/v8/test/mjsunit/harmony/module-parsing.js

@@ -63,18 +63,28 @@ module E3 = E1.F
// Check that ASI does not interfere.
module
X
module X
{
let x
}
module
Y
module Y
=
X
module
Z
module Z
at
"file://local"
// Check that 'module' still works as an identifier.
var module
module = {}
module["a"] = 6
function module() {}
function f(module) { return module }
try {} catch (module) {}
module
v = 20

5
deps/v8/test/mjsunit/mjsunit.status

@@ -1,4 +1,4 @@
# Copyright 2011 the V8 project authors. All rights reserved.
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -65,6 +65,9 @@ regress/regress-524: (PASS || TIMEOUT), SKIP if $mode == debug
debug-liveedit-check-stack: SKIP
debug-liveedit-patch-positions-replace: SKIP
# Test Crankshaft compilation time. Expected to take too long in debug mode.
regress/regress-1969: PASS, SKIP if $mode == debug
##############################################################################
[ $isolates ]

23
deps/v8/test/mjsunit/object-define-property.js

@@ -1053,4 +1053,25 @@ for (var i = 0; i < 1000; i++) {
// Non-enumerable property forces dictionary mode.
Object.defineProperty(o, i, {value: i, enumerable: false});
}
assertEquals(999, o[999]);
assertEquals(999, o[999]);
// Regression test: Bizarre behavior on non-strict arguments object.
(function test(arg0) {
// Here arguments[0] is a fast alias on arg0.
Object.defineProperty(arguments, "0", {
value:1,
enumerable:false
});
// Here arguments[0] is a slow alias on arg0.
Object.defineProperty(arguments, "0", {
value:2,
writable:false
});
// Here arguments[0] is no alias at all.
Object.defineProperty(arguments, "0", {
value:3
});
assertEquals(2, arg0);
assertEquals(3, arguments[0]);
})(0);

5045
deps/v8/test/mjsunit/regress/regress-1969.js

File diff suppressed because it is too large

19
deps/v8/test/test262/test262.status

@@ -42,20 +42,6 @@ S10.4.2.1_A1: FAIL
15.2.3.6-4-415: FAIL
15.2.3.6-4-420: FAIL
# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1772
15.2.3.6-4-292-1: FAIL
15.2.3.6-4-293-2: FAIL
15.2.3.6-4-293-3: FAIL
15.2.3.6-4-294-1: FAIL
15.2.3.6-4-295-1: FAIL
15.2.3.6-4-296-1: FAIL
15.2.3.6-4-333-11: FAIL
15.2.3.7-6-a-281: FAIL
15.2.3.7-6-a-282: FAIL
15.2.3.7-6-a-283: FAIL
15.2.3.7-6-a-284: FAIL
15.2.3.7-6-a-285: FAIL
##################### DELIBERATE INCOMPATIBILITIES #####################
# We deliberately treat arguments to parseInt() with a leading zero as
@@ -69,11 +55,6 @@ S15.8.2.16_A7: PASS || FAIL_OK
S15.8.2.18_A7: PASS || FAIL_OK
S15.8.2.13_A23: PASS || FAIL_OK
# We are silent in some regexp cases where the spec wants us to give
# errors, for compatibility.
S15.10.2.11_A1_T2: FAIL
S15.10.2.11_A1_T3: FAIL
# We are more lenient in which string character escapes we allow than
# the spec (7.8.4 p. 19) wants us to be. This is for compatibility.
S7.8.4_A6.1_T4: FAIL_OK

2
deps/v8/tools/gyp/v8.gyp

@@ -325,7 +325,6 @@
'../../src/handles-inl.h',
'../../src/handles.cc',
'../../src/handles.h',
'../../src/hashmap.cc',
'../../src/hashmap.h',
'../../src/heap-inl.h',
'../../src/heap.cc',
@@ -923,7 +922,6 @@
'../../src/fixed-dtoa.cc',
'../../src/fixed-dtoa.h',
'../../src/globals.h',
'../../src/hashmap.cc',
'../../src/hashmap.h',
'../../src/list-inl.h',
'../../src/list.h',
