mirror of https://github.com/lukechilds/node.git
Ryan Dahl
14 years ago
352 changed files with 53660 additions and 2999 deletions
@@ -0,0 +1,123 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef PREPARSER_H
#define PREPARSER_H

#include "v8stdint.h"

#ifdef _WIN32

// Setup for Windows DLL export/import. When building the V8 DLL the
// BUILDING_V8_SHARED needs to be defined. When building a program which uses
// the V8 DLL USING_V8_SHARED needs to be defined. When either building the V8
// static library or building a program which uses the V8 static library neither
// BUILDING_V8_SHARED nor USING_V8_SHARED should be defined.
#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
  build configuration to ensure that at most one of these is set
#endif

#ifdef BUILDING_V8_SHARED
#define V8EXPORT __declspec(dllexport)
#elif USING_V8_SHARED
#define V8EXPORT __declspec(dllimport)
#else
#define V8EXPORT
#endif  // BUILDING_V8_SHARED

#else  // _WIN32

// Setup for Linux shared library export. There is no need to distinguish
// between building or using the V8 shared library, but we should not
// export symbols when we are building a static library.
#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
#define V8EXPORT __attribute__ ((visibility("default")))
#else  // defined(__GNUC__) && (__GNUC__ >= 4)
#define V8EXPORT
#endif  // defined(__GNUC__) && (__GNUC__ >= 4)

#endif  // _WIN32


namespace v8 {


class PreParserData {
 public:
  PreParserData(size_t size, const uint8_t* data)
      : data_(data), size_(size) { }

  // Create a PreParserData value where stack_overflow reports true.
  static PreParserData StackOverflow() { return PreParserData(NULL, 0); }
  // Whether the pre-parser stopped due to a stack overflow.
  // If this is the case, size() and data() should not be used.

  bool stack_overflow() { return size_ == 0u; }

  // The size of the data in bytes.
  size_t size() const { return size_; }

  // Pointer to the data.
  const uint8_t* data() const { return data_; }

 private:
  const uint8_t* const data_;
  const size_t size_;
};


// Interface for a stream of Unicode characters.
class UnicodeInputStream {
 public:
  virtual ~UnicodeInputStream();

  // Returns the next Unicode code-point in the input, or a negative value when
  // there is no more input in the stream.
  virtual int32_t Next() = 0;

  // Pushes a read character back into the stream, so that it will be the next
  // to be read by Next(). The character pushed back must be the most
  // recently read character that hasn't already been pushed back (i.e., if
  // pushing back more than one character, they must occur in the opposite
  // order of the one they were read in).
  virtual void PushBack(int32_t ch) = 0;
};


// Preparse a JavaScript program. The source code is provided as a
// UnicodeInputStream. The max_stack_size limits the amount of stack
// space that the preparser is allowed to use. If the preparser uses
// more stack space than the limit provided, the result's stack_overflow()
// method will return true. Otherwise the result contains preparser
// data that can be used by the V8 parser to speed up parsing.
PreParserData V8EXPORT Preparse(UnicodeInputStream* input,
                                size_t max_stack_size);

}  // namespace v8.

#endif  // PREPARSER_H
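As the header comments above describe, a caller feeds the preparser a UnicodeInputStream plus a stack limit and gets back either preparse data or a stack-overflow marker. Below is a minimal sketch of such a caller, assuming only the declarations in this header; the StringInputStream adapter, the include path, and the 64 KB limit are illustrative assumptions, not part of the commit.

// Sketch only: an ASCII-backed stream and a single Preparse() call.
#include <string>
#include "v8-preparser.h"   // assumed install name; use wherever this header lives

// Adapter that feeds a std::string to the preparser one code point at a time.
class StringInputStream : public v8::UnicodeInputStream {
 public:
  explicit StringInputStream(const std::string& source)
      : source_(source), pos_(0) { }
  virtual int32_t Next() {
    if (pos_ >= source_.size()) return -1;       // negative value: end of input
    return static_cast<int32_t>(source_[pos_++]);
  }
  virtual void PushBack(int32_t ch) { --pos_; }  // only the most recently read char
 private:
  std::string source_;
  size_t pos_;
};

bool PreparseSource(const std::string& js) {
  StringInputStream stream(js);
  v8::PreParserData data = v8::Preparse(&stream, 64 * 1024);  // 64 KB stack cap
  if (data.stack_overflow()) return false;  // size()/data() must not be used now
  // data.data()/data.size() would be handed to the full parser to speed it up.
  return data.size() > 0;
}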
@@ -0,0 +1,99 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_V8_TEST_H_
#define V8_V8_TEST_H_

#include "v8.h"

#ifdef _WIN32
// Setup for Windows DLL export/import. See v8.h in this directory for
// information on how to build/use V8 as a DLL.
#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
  build configuration to ensure that at most one of these is set
#endif

#ifdef BUILDING_V8_SHARED
#define V8EXPORT __declspec(dllexport)
#elif USING_V8_SHARED
#define V8EXPORT __declspec(dllimport)
#else
#define V8EXPORT
#endif

#else  // _WIN32

// Setup for Linux shared library export. See v8.h in this directory for
// information on how to build/use V8 as shared library.
#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
#define V8EXPORT __attribute__ ((visibility("default")))
#else  // defined(__GNUC__) && (__GNUC__ >= 4)
#define V8EXPORT
#endif  // defined(__GNUC__) && (__GNUC__ >= 4)

#endif  // _WIN32


/**
 * Testing support for the V8 JavaScript engine.
 */
namespace v8 {

class V8EXPORT Testing {
 public:
  enum StressType {
    kStressTypeOpt,
    kStressTypeDeopt
  };

  /**
   * Set the type of stressing to do. The default if not set is kStressTypeOpt.
   */
  static void SetStressRunType(StressType type);

  /**
   * Get the number of runs of a given test that is required to get the full
   * stress coverage.
   */
  static int GetStressRuns();

  /**
   * Indicate the number of the run which is about to start. The value of run
   * should be between 0 and one less than the result from GetStressRuns().
   */
  static void PrepareStressRun(int run);
};


}  // namespace v8


#undef V8EXPORT


#endif  // V8_V8_TEST_H_
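The comments in this header sketch the intended workflow: pick a stress type, ask how many runs are needed for full coverage, and announce each run number before executing the test body. A hedged sketch of a test driver built only on this class follows; the include name and RunMyTest are placeholders, not part of the commit.

#include "v8-test.h"   // assumed install name of this header

void RunMyTest();      // placeholder for the embedder's actual test body

void RunTestUnderStress() {
  // Stress deoptimization instead of the default kStressTypeOpt.
  v8::Testing::SetStressRunType(v8::Testing::kStressTypeDeopt);
  int runs = v8::Testing::GetStressRuns();
  for (int i = 0; i < runs; i++) {   // valid run numbers are 0 .. runs - 1
    v8::Testing::PrepareStressRun(i);
    RunMyTest();
  }
}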
@@ -0,0 +1,503 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "safepoint-table.h"

namespace v8 {
namespace internal {

int Deoptimizer::table_entry_size_ = 16;

void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
  AssertNoAllocation no_allocation;

  if (!function->IsOptimized()) return;

  // Get the optimized code.
  Code* code = function->code();

  // Invalidate the relocation information, as it will become invalid by the
  // code patching below, and is not needed any more.
  code->InvalidateRelocation();

  // For each return after a safepoint insert an absolute call to the
  // corresponding deoptimization entry.
  unsigned last_pc_offset = 0;
  SafepointTable table(function->code());
  for (unsigned i = 0; i < table.length(); i++) {
    unsigned pc_offset = table.GetPcOffset(i);
    int deoptimization_index = table.GetDeoptimizationIndex(i);
    int gap_code_size = table.GetGapCodeSize(i);
    // Check that we did not shoot past next safepoint.
    // TODO(srdjan): How do we guarantee that safepoint code does not
    // overlap other safepoint patching code?
    CHECK(pc_offset >= last_pc_offset);
#ifdef DEBUG
    // Destroy the code which is not supposed to be run again.
    int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize;
    CodePatcher destroyer(code->instruction_start() + last_pc_offset,
                          instructions);
    for (int x = 0; x < instructions; x++) {
      destroyer.masm()->bkpt(0);
    }
#endif
    last_pc_offset = pc_offset;
    if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
      const int kCallInstructionSizeInWords = 3;
      CodePatcher patcher(code->instruction_start() + pc_offset + gap_code_size,
                          kCallInstructionSizeInWords);
      Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry(
          deoptimization_index, Deoptimizer::LAZY);
      patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE);
      last_pc_offset +=
          gap_code_size + kCallInstructionSizeInWords * Assembler::kInstrSize;
    }
  }


#ifdef DEBUG
  // Destroy the code which is not supposed to be run again.
  int instructions =
      (code->safepoint_table_start() - last_pc_offset) / Assembler::kInstrSize;
  CodePatcher destroyer(code->instruction_start() + last_pc_offset,
                        instructions);
  for (int x = 0; x < instructions; x++) {
    destroyer.masm()->bkpt(0);
  }
#endif

  // Add the deoptimizing code to the list.
  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
  node->set_next(deoptimizing_code_list_);
  deoptimizing_code_list_ = node;

  // Set the code for the function to non-optimized version.
  function->ReplaceCode(function->shared()->code());

  if (FLAG_trace_deopt) {
    PrintF("[forced deoptimization: ");
    function->PrintName();
    PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
  }
}


void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
                                      Code* replacement_code) {
  UNIMPLEMENTED();
}


void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
  UNIMPLEMENTED();
}


void Deoptimizer::DoComputeOsrOutputFrame() {
  UNIMPLEMENTED();
}


// This code is very similar to ia32 code, but relies on register names (fp, sp)
// and how the frame is laid out.
void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
                                 int frame_index) {
  // Read the ast node id, function, and frame height for this output frame.
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  USE(opcode);
  ASSERT(Translation::FRAME == opcode);
  int node_id = iterator->Next();
  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
  unsigned height = iterator->Next();
  unsigned height_in_bytes = height * kPointerSize;
  if (FLAG_trace_deopt) {
    PrintF("  translating ");
    function->PrintName();
    PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
  }

  // The 'fixed' part of the frame consists of the incoming parameters and
  // the part described by JavaScriptFrameConstants.
  unsigned fixed_frame_size = ComputeFixedSize(function);
  unsigned input_frame_size = input_->GetFrameSize();
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, function);

  bool is_bottommost = (0 == frame_index);
  bool is_topmost = (output_count_ - 1 == frame_index);
  ASSERT(frame_index >= 0 && frame_index < output_count_);
  ASSERT(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;

  // The top address for the bottommost output frame can be computed from
  // the input frame pointer and the output frame's height. For all
  // subsequent output frames, it can be computed from the previous one's
  // top address and the current frame's size.
  uint32_t top_address;
  if (is_bottommost) {
    // 2 = context and function in the frame.
    top_address =
        input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
  } else {
    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  }
  output_frame->SetTop(top_address);

  // Compute the incoming parameter translation.
  int parameter_count = function->shared()->formal_parameter_count() + 1;
  unsigned output_offset = output_frame_size;
  unsigned input_offset = input_frame_size;
  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }
  input_offset -= (parameter_count * kPointerSize);

  // There are no translation commands for the caller's pc and fp, the
  // context, and the function. Synthesize their values and set them up
  // explicitly.
  //
  // The caller's pc for the bottommost output frame is the same as in the
  // input frame. For all subsequent output frames, it can be read from the
  // previous one. This frame's pc can be computed from the non-optimized
  // function code and AST id of the bailout.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  intptr_t value;
  if (is_bottommost) {
    value = input_->GetFrameSlot(input_offset);
  } else {
    value = output_[frame_index - 1]->GetPc();
  }
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
           top_address + output_offset, output_offset, value);
  }

  // The caller's frame pointer for the bottommost output frame is the same
  // as in the input frame. For all subsequent output frames, it can be
  // read from the previous one. Also compute and set this frame's frame
  // pointer.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  if (is_bottommost) {
    value = input_->GetFrameSlot(input_offset);
  } else {
    value = output_[frame_index - 1]->GetFp();
  }
  output_frame->SetFrameSlot(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
  output_frame->SetFp(fp_value);
  if (is_topmost) {
    output_frame->SetRegister(fp.code(), fp_value);
  }
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
           fp_value, output_offset, value);
  }

  // The context can be gotten from the function so long as we don't
  // optimize functions that need local contexts.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(function->context());
  // The context for the bottommost output frame should also agree with the
  // input frame.
  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
  output_frame->SetFrameSlot(output_offset, value);
  if (is_topmost) {
    output_frame->SetRegister(cp.code(), value);
  }
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
           top_address + output_offset, output_offset, value);
  }

  // The function was mentioned explicitly in the BEGIN_FRAME.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  value = reinterpret_cast<uint32_t>(function);
  // The function for the bottommost output frame should also agree with the
  // input frame.
  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
           top_address + output_offset, output_offset, value);
  }

  // Translate the rest of the frame.
  for (unsigned i = 0; i < height; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }
  ASSERT(0 == output_offset);

  // Compute this frame's PC, state, and continuation.
  Code* non_optimized_code = function->shared()->code();
  FixedArray* raw_data = non_optimized_code->deoptimization_data();
  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
  Address start = non_optimized_code->instruction_start();
  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
  uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
  output_frame->SetPc(pc_value);
  if (is_topmost) {
    output_frame->SetRegister(pc.code(), pc_value);
  }

  FullCodeGenerator::State state =
      FullCodeGenerator::StateField::decode(pc_and_state);
  output_frame->SetState(Smi::FromInt(state));

  // Set the continuation for the topmost frame.
  if (is_topmost) {
    Code* continuation = (bailout_type_ == EAGER)
        ? Builtins::builtin(Builtins::NotifyDeoptimized)
        : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
    output_frame->SetContinuation(
        reinterpret_cast<uint32_t>(continuation->entry()));
  }

  if (output_count_ - 1 == frame_index) iterator->Done();
}


#define __ masm()->


// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::EntryGenerator::Generate() {
  GeneratePrologue();
  // TOS: bailout-id; TOS+1: return address if not EAGER.
  CpuFeatures::Scope scope(VFP3);
  // Save all general purpose registers before messing with them.
  const int kNumberOfRegisters = Register::kNumRegisters;

  // Everything but pc, lr and ip which will be saved but not restored.
  RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();

  const int kDoubleRegsSize =
      kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;

  // Save all general purpose registers before messing with them.
  __ sub(sp, sp, Operand(kDoubleRegsSize));
  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
    DwVfpRegister vfp_reg = DwVfpRegister::FromAllocationIndex(i);
    int offset = i * kDoubleSize;
    __ vstr(vfp_reg, sp, offset);
  }

  // Push all 16 registers (needed to populate FrameDescription::registers_).
  __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());

  const int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;

  // Get the bailout id from the stack.
  __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));

  // Get the address of the location in the code object if possible (r3) (return
  // address for lazy deoptimization) and compute the fp-to-sp delta in
  // register r4.
  if (type() == EAGER) {
    __ mov(r3, Operand(0));
    // Correct one word for bailout id.
    __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
  } else {
    __ mov(r3, lr);
    // Correct two words for bailout id and return address.
    __ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
  }
  __ sub(r4, fp, r4);

  // Allocate a new deoptimizer object.
  // Pass four arguments in r0 to r3 and fifth argument on stack.
  __ PrepareCallCFunction(5, r5);
  __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ mov(r1, Operand(type()));  // bailout type,
  // r2: bailout id already loaded.
  // r3: code address or 0 already loaded.
  __ str(r4, MemOperand(sp, 0 * kPointerSize));  // Fp-to-sp delta.
  // Call Deoptimizer::New().
  __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);

  // Preserve "deoptimizer" object in register r0 and get the input
  // frame descriptor pointer to r1 (deoptimizer->input_);
  __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));


  // Copy core registers into FrameDescription::registers_[kNumRegisters].
  ASSERT(Register::kNumRegisters == kNumberOfRegisters);
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kIntSize) + FrameDescription::registers_offset();
    __ ldr(r2, MemOperand(sp, i * kPointerSize));
    __ str(r2, MemOperand(r1, offset));
  }

  // Copy VFP registers to
  // double_registers_[DoubleRegister::kNumAllocatableRegisters]
  int double_regs_offset = FrameDescription::double_registers_offset();
  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
    int dst_offset = i * kDoubleSize + double_regs_offset;
    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
    __ vldr(d0, sp, src_offset);
    __ vstr(d0, r1, dst_offset);
  }

  // Remove the bailout id, eventually return address, and the saved registers
  // from the stack.
  if (type() == EAGER) {
    __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
  } else {
    __ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
  }

  // Compute a pointer to the unwinding limit in register r2; that is
  // the first stack slot not part of the input frame.
  __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
  __ add(r2, r2, sp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
  Label pop_loop;
  __ bind(&pop_loop);
  __ pop(r4);
  __ str(r4, MemOperand(r3, 0));
  __ add(r3, r3, Operand(sizeof(uint32_t)));
  __ cmp(r2, sp);
  __ b(ne, &pop_loop);

  // Compute the output frame in the deoptimizer.
  __ push(r0);  // Preserve deoptimizer object across call.
  // r0: deoptimizer object; r1: scratch.
  __ PrepareCallCFunction(1, r1);
  // Call Deoptimizer::ComputeOutputFrames().
  __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
  __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop;
  // Outer loop state: r0 = current "FrameDescription** output_",
  // r1 = one past the last FrameDescription**.
  __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
  __ ldr(r0, MemOperand(r0, Deoptimizer::output_offset()));  // r0 is output_.
  __ add(r1, r0, Operand(r1, LSL, 2));
  __ bind(&outer_push_loop);
  // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
  __ ldr(r2, MemOperand(r0, 0));  // output_[ix]
  __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
  __ bind(&inner_push_loop);
  __ sub(r3, r3, Operand(sizeof(uint32_t)));
  // __ add(r6, r2, Operand(r3, LSL, 1));
  __ add(r6, r2, Operand(r3));
  __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
  __ push(r7);
  __ cmp(r3, Operand(0));
  __ b(ne, &inner_push_loop);  // test for gt?
  __ add(r0, r0, Operand(kPointerSize));
  __ cmp(r0, r1);
  __ b(lt, &outer_push_loop);

  // In case of OSR, we have to restore the XMM registers.
  if (type() == OSR) {
    UNIMPLEMENTED();
  }

  // Push state, pc, and continuation from the last output frame.
  if (type() != OSR) {
    __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
    __ push(r6);
  }

  __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
  __ push(r6);
  __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
  __ push(r6);

  // Push the registers from the last output frame.
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kIntSize) + FrameDescription::registers_offset();
    __ ldr(r6, MemOperand(r2, offset));
    __ push(r6);
  }

  // Restore the registers from the stack.
  __ ldm(ia_w, sp, restored_regs);  // all but pc registers.
  __ pop(ip);  // remove sp
  __ pop(ip);  // remove lr

  // Set up the roots register.
  ExternalReference roots_address = ExternalReference::roots_address();
  __ mov(r10, Operand(roots_address));

  __ pop(ip);  // remove pc
  __ pop(r7);  // get continuation, leave pc on stack
  __ pop(lr);
  __ Jump(r7);
  __ stop("Unreachable.");
}


void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  // Create a sequence of deoptimization entries. Note that any
  // registers may be still live.
  Label done;
  for (int i = 0; i < count(); i++) {
    int start = masm()->pc_offset();
    USE(start);
    if (type() == EAGER) {
      __ nop();
    } else {
      // Emulate ia32 like call by pushing return address to stack.
      __ push(lr);
    }
    __ mov(ip, Operand(i));
    __ push(ip);
    __ b(&done);
    ASSERT(masm()->pc_offset() - start == table_entry_size_);
  }
  __ bind(&done);
}

#undef __

} }  // namespace v8::internal
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,265 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_
#define V8_ARM_LITHIUM_CODEGEN_ARM_H_

#include "arm/lithium-arm.h"

#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;


class LCodeGen BASE_EMBEDDED {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : chunk_(chunk),
        masm_(assembler),
        info_(info),
        current_block_(-1),
        current_instruction_(-1),
        instructions_(chunk->instructions()),
        deoptimizations_(4),
        deoptimization_literals_(8),
        inlined_function_count_(0),
        scope_(chunk->graph()->info()->scope()),
        status_(UNUSED),
        deferred_(8),
        osr_pc_offset_(-1) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  // Deferred code support.
  void DoDeferredNumberTagD(LNumberTagD* instr);
  void DoDeferredNumberTagI(LNumberTagI* instr);
  void DoDeferredTaggedToI(LTaggedToI* instr);
  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
  void DoDeferredStackCheck(LGoto* instr);

  // Parallel move support.
  void DoParallelMove(LParallelMove* move);

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
  enum Status {
    UNUSED,
    GENERATING,
    DONE,
    ABORTED
  };

  bool is_unused() const { return status_ == UNUSED; }
  bool is_generating() const { return status_ == GENERATING; }
  bool is_done() const { return status_ == DONE; }
  bool is_aborted() const { return status_ == ABORTED; }

  LChunk* chunk() const { return chunk_; }
  Scope* scope() const { return scope_; }
  HGraph* graph() const { return chunk_->graph(); }
  MacroAssembler* masm() const { return masm_; }

  int GetNextEmittedBlock(int block);
  LInstruction* GetNextInstruction();

  void EmitClassOfTest(Label* if_true,
                       Label* if_false,
                       Handle<String> class_name,
                       Register input,
                       Register temporary,
                       Register temporary2);

  int StackSlotCount() const { return chunk()->spill_slot_count(); }
  int ParameterCount() const { return scope()->num_parameters(); }

  void Abort(const char* format, ...);
  void Comment(const char* format, ...);

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }

  // Code generation passes. Returns true if code generation should
  // continue.
  bool GeneratePrologue();
  bool GenerateBody();
  bool GenerateDeferredCode();
  bool GenerateSafepointTable();

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);
  void CallRuntime(Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr);
  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }

  // Generate a direct call to a known function. Expects the function
  // to be in edi.
  void CallKnownFunction(Handle<JSFunction> function,
                         int arity,
                         LInstruction* instr);

  void LoadPrototype(Register result, Handle<JSObject> prototype);

  void RegisterLazyDeoptimization(LInstruction* instr);
  void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
  void DeoptimizeIf(Condition cc, LEnvironment* environment);

  void AddToTranslation(Translation* translation,
                        LOperand* op,
                        bool is_tagged);
  void PopulateDeoptimizationData(Handle<Code> code);
  int DefineDeoptimizationLiteral(Handle<Object> literal);

  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  Register ToRegister(int index) const;
  DoubleRegister ToDoubleRegister(int index) const;

  // LOperand must be a register.
  Register ToRegister(LOperand* op) const;

  // LOperand is loaded into scratch, unless already a register.
  Register EmitLoadRegister(LOperand* op, Register scratch);

  // LOperand must be a double register.
  DoubleRegister ToDoubleRegister(LOperand* op) const;

  // LOperand is loaded into dbl_scratch, unless already a double register.
  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
                                        SwVfpRegister flt_scratch,
                                        DoubleRegister dbl_scratch);

  int ToInteger32(LConstantOperand* op) const;
  Operand ToOperand(LOperand* op);
  MemOperand ToMemOperand(LOperand* op) const;

  // Specific math operations - used from DoUnaryMathOperation.
  void DoMathAbs(LUnaryMathOperation* instr);
  void DoMathFloor(LUnaryMathOperation* instr);
  void DoMathSqrt(LUnaryMathOperation* instr);

  // Support for recording safepoint and position information.
  void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    int deoptimization_index);
  void RecordPosition(int position);

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
  void EmitBranch(int left_block, int right_block, Condition cc);
  void EmitCmpI(LOperand* left, LOperand* right);
  void EmitNumberUntagD(Register input,
                        DoubleRegister result,
                        LEnvironment* env);

  // Emits optimized code for typeof x == "y". Modifies input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitTypeofIs(Label* true_label, Label* false_label,
                         Register input, Handle<String> type_name);

  LChunk* const chunk_;
  MacroAssembler* const masm_;
  CompilationInfo* const info_;

  int current_block_;
  int current_instruction_;
  const ZoneList<LInstruction*>* instructions_;
  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  Status status_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  int osr_pc_offset_;

  // Builder that keeps track of safepoints in the code. The table
  // itself is emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  friend class LDeferredCode;
  friend class LEnvironment;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen), external_exit_(NULL) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;

  void SetExit(Label *exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
};

} }  // namespace v8::internal

#endif  // V8_ARM_LITHIUM_CODEGEN_ARM_H_
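LCodeGen and LDeferredCode together form the deferred-code pattern used by the Lithium backends: an instruction's fast path jumps to entry(), the out-of-line body is produced later by Generate(), and control rejoins through exit(). The sketch below only illustrates the shape of a concrete subclass under that contract; DeferredExample and the commented-out slow-path call are hypothetical names, not declared in this header.

// Illustrative only: the shape of an LDeferredCode subclass.
class DeferredExample : public v8::internal::LDeferredCode {
 public:
  DeferredExample(v8::internal::LCodeGen* codegen,
                  v8::internal::LInstruction* instr)
      : LDeferredCode(codegen),   // base constructor registers this via AddDeferredCode()
        instr_(instr) { }
  virtual void Generate() {
    // Out-of-line slow path emitted when GenerateDeferredCode() runs; a real
    // subclass would call one of the DoDeferred* helpers declared above, e.g.
    // codegen()->DoDeferredTaggedToI(...) for the matching instruction type.
  }
 private:
  v8::internal::LInstruction* instr_;
};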
@@ -0,0 +1,165 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If you
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
//   NoBarrier_Store()
//   NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
//

#ifndef V8_ATOMICOPS_H_
#define V8_ATOMICOPS_H_

#include "../include/v8.h"
#include "globals.h"

namespace v8 {
namespace internal {

typedef int32_t Atomic32;
#ifdef V8_HOST_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__APPLE__)
// MacOS is an exception to the implicit conversion rule above,
// because it uses long for intptr_t.
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
#endif
#endif

// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
typedef intptr_t AtomicWord;

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);

Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                 Atomic32 increment);

// These following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);

void MemoryBarrier();
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);

Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);

// 64-bit atomic operations (only available on 64-bit processors).
#ifdef V8_HOST_ARCH_64_BIT
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);

Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif  // V8_HOST_ARCH_64_BIT

} }  // namespace v8::internal

// Include our platform specific implementation.
#if defined(_MSC_VER) && \
    (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && \
    (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_macosx.h"
#elif defined(__GNUC__) && \
    (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
#include "atomicops_internals_arm_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif

#endif  // V8_ATOMICOPS_H_
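The Acquire/Release contract spelled out above is what makes the classic publish/consume pattern safe: the producer writes its payload and then Release_Store()s a flag, so the payload write cannot sink below the flag; the consumer Acquire_Load()s the flag before touching the payload, so the payload read cannot hoist above the check. A minimal sketch in terms of this header's declarations follows; the payload/flag globals are illustrative, not part of the commit.

// Sketch: one-shot publication of a payload guarded by an atomic flag,
// using only Acquire_Load/Release_Store from atomicops.h.
namespace {

int payload = 0;                        // ordinary data, written before publishing
v8::internal::Atomic32 published = 0;   // 0 = not ready, 1 = ready

void Publish(int value) {
  payload = value;                              // 1. write the data
  v8::internal::Release_Store(&published, 1);   // 2. earlier writes stay before this store
}

bool TryConsume(int* out) {
  if (v8::internal::Acquire_Load(&published) == 0) {
    return false;                               // later reads stay after this load
  }
  *out = payload;                               // safe: the payload write is visible
  return true;
}

}  // namespace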
@ -0,0 +1,145 @@ |
|||||
|
// Copyright 2010 the V8 project authors. All rights reserved.
|
||||
|
// Redistribution and use in source and binary forms, with or without
|
||||
|
// modification, are permitted provided that the following conditions are
|
||||
|
// met:
|
||||
|
//
|
||||
|
// * Redistributions of source code must retain the above copyright
|
||||
|
// notice, this list of conditions and the following disclaimer.
|
||||
|
// * Redistributions in binary form must reproduce the above
|
||||
|
// copyright notice, this list of conditions and the following
|
||||
|
// disclaimer in the documentation and/or other materials provided
|
||||
|
// with the distribution.
|
||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||
|
// contributors may be used to endorse or promote products derived
|
||||
|
// from this software without specific prior written permission.
|
||||
|
//
|
||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
||||
|
// This file is an internal atomic implementation, use atomicops.h instead.
|
||||
|
//
|
||||
|
// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
|
||||
|
|
||||
|
#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ |
||||
|
#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ |
||||
|
|
||||
|
namespace v8 { |
||||
|
namespace internal { |
||||
|
|
||||
|
// 0xffff0fc0 is the hard coded address of a function provided by
|
||||
|
// the kernel which implements an atomic compare-exchange. On older
|
||||
|
// ARM architecture revisions (pre-v6) this may be implemented using
|
||||
|
// a syscall. This address is stable, and in active use (hard coded)
|
||||
|
// by at least glibc-2.7 and the Android C library.
|
||||
|
typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value, |
||||
|
Atomic32 new_value, |
||||
|
volatile Atomic32* ptr); |
||||
|
LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) = |
||||
|
(LinuxKernelCmpxchgFunc) 0xffff0fc0; |
||||
|
|
||||
|
typedef void (*LinuxKernelMemoryBarrierFunc)(void); |
||||
|
LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) = |
||||
|
(LinuxKernelMemoryBarrierFunc) 0xffff0fa0; |
||||
|
|
||||
|
|
||||
|
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
||||
|
Atomic32 old_value, |
||||
|
Atomic32 new_value) { |
||||
|
Atomic32 prev_value = *ptr; |
||||
|
do { |
||||
|
if (!pLinuxKernelCmpxchg(old_value, new_value, |
||||
|
const_cast<Atomic32*>(ptr))) { |
||||
|
return old_value; |
||||
|
} |
||||
|
prev_value = *ptr; |
||||
|
} while (prev_value == old_value); |
||||
|
return prev_value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
||||
|
Atomic32 new_value) { |
||||
|
Atomic32 old_value; |
||||
|
do { |
||||
|
old_value = *ptr; |
||||
|
} while (pLinuxKernelCmpxchg(old_value, new_value, |
||||
|
const_cast<Atomic32*>(ptr))); |
||||
|
return old_value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
||||
|
Atomic32 increment) { |
||||
|
return Barrier_AtomicIncrement(ptr, increment); |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
||||
|
Atomic32 increment) { |
||||
|
for (;;) { |
||||
|
// Atomic exchange the old value with an incremented one.
|
||||
|
Atomic32 old_value = *ptr; |
||||
|
Atomic32 new_value = old_value + increment; |
||||
|
if (pLinuxKernelCmpxchg(old_value, new_value, |
||||
|
const_cast<Atomic32*>(ptr)) == 0) { |
||||
|
// The exchange took place as expected.
|
||||
|
return new_value; |
||||
|
} |
||||
|
// Otherwise, *ptr changed mid-loop and we need to retry.
|
||||
|
} |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
||||
|
Atomic32 old_value, |
||||
|
Atomic32 new_value) { |
||||
|
return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
||||
|
Atomic32 old_value, |
||||
|
Atomic32 new_value) { |
||||
|
return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
||||
|
} |
||||
|
|
||||
|
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
||||
|
*ptr = value; |
||||
|
} |
||||
|
|
||||
|
inline void MemoryBarrier() { |
||||
|
pLinuxKernelMemoryBarrier(); |
||||
|
} |
||||
|
|
||||
|
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
||||
|
*ptr = value; |
||||
|
MemoryBarrier(); |
||||
|
} |
||||
|
|
||||
|
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
||||
|
MemoryBarrier(); |
||||
|
*ptr = value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
||||
|
return *ptr; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
||||
|
Atomic32 value = *ptr; |
||||
|
MemoryBarrier(); |
||||
|
return value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
||||
|
MemoryBarrier(); |
||||
|
return *ptr; |
||||
|
} |
||||
|
|
||||
|
} } // namespace v8::internal
|
||||
|
|
||||
|
#endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
|
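// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original commit).  A minimal
// spinlock built on the primitives declared above, assuming this snippet is
// compiled inside the V8 tree so that "atomicops.h" (added in this commit)
// pulls in one of the platform implementations.  The SpinLock name is
// hypothetical.
// ---------------------------------------------------------------------------
#include "atomicops.h"

class SpinLock {
 public:
  SpinLock() : state_(0) {}

  void Lock() {
    // Acquire semantics: accesses inside the critical section may not be
    // reordered above the successful compare-and-swap.  The CAS returns the
    // value it observed, so 0 means the lock was free and we now own it.
    while (v8::internal::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Spin until the lock is observed free and then won.
    }
  }

  void Unlock() {
    // Release semantics: writes made while holding the lock become visible
    // before the flag returns to 0.
    v8::internal::Release_Store(&state_, 0);
  }

 private:
  volatile v8::internal::Atomic32 state_;
};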
@ -0,0 +1,126 @@ |
|||||
|
// Copyright 2010 the V8 project authors. All rights reserved.
|
||||
|
// Redistribution and use in source and binary forms, with or without
|
||||
|
// modification, are permitted provided that the following conditions are
|
||||
|
// met:
|
||||
|
//
|
||||
|
// * Redistributions of source code must retain the above copyright
|
||||
|
// notice, this list of conditions and the following disclaimer.
|
||||
|
// * Redistributions in binary form must reproduce the above
|
||||
|
// copyright notice, this list of conditions and the following
|
||||
|
// disclaimer in the documentation and/or other materials provided
|
||||
|
// with the distribution.
|
||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||
|
// contributors may be used to endorse or promote products derived
|
||||
|
// from this software without specific prior written permission.
|
||||
|
//
|
||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
||||
|
// This module gets enough CPU information to optimize the
|
||||
|
// atomicops module on x86.
|
||||
|
|
||||
|
#include <string.h> |
||||
|
|
||||
|
#include "atomicops.h" |
||||
|
|
||||
|
// This file only makes sense with atomicops_internals_x86_gcc.h -- it
|
||||
|
// depends on structs that are defined in that file. If atomicops.h
|
||||
|
// doesn't sub-include that file, then we aren't needed, and shouldn't
|
||||
|
// try to do anything.
|
||||
|
#ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_ |
||||
|
|
||||
|
// Inline cpuid instruction. In PIC compilations, %ebx contains the address
|
||||
|
// of the global offset table. To avoid breaking such executables, this code
|
||||
|
// must preserve that register's value across cpuid instructions.
|
||||
|
#if defined(__i386__) |
||||
|
#define cpuid(a, b, c, d, inp) \ |
||||
|
asm("mov %%ebx, %%edi\n" \ |
||||
|
"cpuid\n" \ |
||||
|
"xchg %%edi, %%ebx\n" \ |
||||
|
: "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp)) |
||||
|
#elif defined(__x86_64__) |
||||
|
#define cpuid(a, b, c, d, inp) \ |
||||
|
asm("mov %%rbx, %%rdi\n" \ |
||||
|
"cpuid\n" \ |
||||
|
"xchg %%rdi, %%rbx\n" \ |
||||
|
: "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp)) |
||||
|
#endif |
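// Added note: with inp == 0 the CPU returns its vendor string split across
// the three output registers in the order ebx, edx, ecx (e.g. "Genu", "ineI",
// "ntel" on an Intel part), which is why the memcpy calls below copy ebx
// first, then edx, then ecx.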
||||
|
|
||||
|
#if defined(cpuid) // initialize the struct only on x86
|
||||
|
|
||||
|
// Set the flags so that code will run correctly and conservatively, so even
|
||||
|
// if we haven't been initialized yet, we're probably single threaded, and our
|
||||
|
// default values should hopefully be pretty safe.
|
||||
|
struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = { |
||||
|
false, // bug can't exist before process spawns multiple threads
|
||||
|
false, // no SSE2
|
||||
|
}; |
||||
|
|
||||
|
// Initialize the AtomicOps_Internalx86CPUFeatures struct.
|
||||
|
static void AtomicOps_Internalx86CPUFeaturesInit() { |
||||
|
uint32_t eax; |
||||
|
uint32_t ebx; |
||||
|
uint32_t ecx; |
||||
|
uint32_t edx; |
||||
|
|
||||
|
// Get vendor string (issue CPUID with eax = 0)
|
||||
|
cpuid(eax, ebx, ecx, edx, 0); |
||||
|
char vendor[13]; |
||||
|
memcpy(vendor, &ebx, 4); |
||||
|
memcpy(vendor + 4, &edx, 4); |
||||
|
memcpy(vendor + 8, &ecx, 4); |
||||
|
vendor[12] = 0; |
||||
|
|
||||
|
// get feature flags in ecx/edx, and family/model in eax
|
||||
|
cpuid(eax, ebx, ecx, edx, 1); |
||||
|
|
||||
|
int family = (eax >> 8) & 0xf; // family and model fields
|
||||
|
int model = (eax >> 4) & 0xf; |
||||
|
if (family == 0xf) { // use extended family and model fields
|
||||
|
family += (eax >> 20) & 0xff; |
||||
|
model += ((eax >> 16) & 0xf) << 4; |
||||
|
} |
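// Added worked example (hypothetical eax value): for eax == 0x00020F32 the
// base fields give family 0xf and model 3; the extended fields then add 0 to
// the family and 2 << 4 to the model, so family == 15 and model == 35, which
// lands inside the 32..63 Opteron Rev E window tested below.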
||||
|
|
||||
|
// Opteron Rev E has a bug in which on very rare occasions a locked
|
||||
|
// instruction doesn't act as a read-acquire barrier if followed by a
|
||||
|
// non-locked read-modify-write instruction. Rev F has this bug in
|
||||
|
// pre-release versions, but not in versions released to customers,
|
||||
|
// so we test only for Rev E, which is family 15, model 32..63 inclusive.
|
||||
|
if (strcmp(vendor, "AuthenticAMD") == 0 && // AMD
|
||||
|
family == 15 && |
||||
|
32 <= model && model <= 63) { |
||||
|
AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true; |
||||
|
} else { |
||||
|
AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false; |
||||
|
} |
||||
|
|
||||
|
// edx bit 26 is SSE2 which we use to tell us whether we can use mfence
|
||||
|
AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1); |
||||
|
} |
||||
|
|
||||
|
namespace { |
||||
|
|
||||
|
class AtomicOpsx86Initializer { |
||||
|
public: |
||||
|
AtomicOpsx86Initializer() { |
||||
|
AtomicOps_Internalx86CPUFeaturesInit(); |
||||
|
} |
||||
|
}; |
||||
|
|
||||
|
// A global to get us initialized on startup via static initialization :/
|
||||
|
AtomicOpsx86Initializer g_initer; |
||||
|
|
||||
|
} // namespace
|
||||
|
|
||||
|
#endif // if x86
|
||||
|
|
||||
|
#endif // ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
|
@ -0,0 +1,287 @@ |
|||||
|
// Copyright 2010 the V8 project authors. All rights reserved.
|
||||
|
// Redistribution and use in source and binary forms, with or without
|
||||
|
// modification, are permitted provided that the following conditions are
|
||||
|
// met:
|
||||
|
//
|
||||
|
// * Redistributions of source code must retain the above copyright
|
||||
|
// notice, this list of conditions and the following disclaimer.
|
||||
|
// * Redistributions in binary form must reproduce the above
|
||||
|
// copyright notice, this list of conditions and the following
|
||||
|
// disclaimer in the documentation and/or other materials provided
|
||||
|
// with the distribution.
|
||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||
|
// contributors may be used to endorse or promote products derived
|
||||
|
// from this software without specific prior written permission.
|
||||
|
//
|
||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
||||
|
// This file is an internal atomic implementation, use atomicops.h instead.
|
||||
|
|
||||
|
#ifndef V8_ATOMICOPS_INTERNALS_X86_GCC_H_ |
||||
|
#define V8_ATOMICOPS_INTERNALS_X86_GCC_H_ |
||||
|
|
||||
|
// This struct is not part of the public API of this module; clients may not
|
||||
|
// use it.
|
||||
|
// Features of this x86. Values may not be correct before main() is run,
|
||||
|
// but are set conservatively.
|
||||
|
struct AtomicOps_x86CPUFeatureStruct { |
||||
|
bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
|
||||
|
// after acquire compare-and-swap.
|
||||
|
bool has_sse2; // Processor has SSE2.
|
||||
|
}; |
||||
|
extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures; |
||||
|
|
||||
|
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") |
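// Added note: the empty asm statement with a "memory" clobber emits no
// machine instruction; it only prevents the compiler from caching values in
// registers or reordering memory accesses across this point.  Hardware
// ordering still relies on the locked instructions and fences used below.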
||||
|
|
||||
|
namespace v8 { |
||||
|
namespace internal { |
||||
|
|
||||
|
// 32-bit low-level operations on any platform.
|
||||
|
|
||||
|
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
||||
|
Atomic32 old_value, |
||||
|
Atomic32 new_value) { |
||||
|
Atomic32 prev; |
||||
|
__asm__ __volatile__("lock; cmpxchgl %1,%2" |
||||
|
: "=a" (prev) |
||||
|
: "q" (new_value), "m" (*ptr), "0" (old_value) |
||||
|
: "memory"); |
||||
|
return prev; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
||||
|
Atomic32 new_value) { |
||||
|
__asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg.
|
||||
|
: "=r" (new_value) |
||||
|
: "m" (*ptr), "0" (new_value) |
||||
|
: "memory"); |
||||
|
return new_value; // Now it's the previous value.
|
||||
|
} |
||||
|
|
||||
|
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
||||
|
Atomic32 increment) { |
||||
|
Atomic32 temp = increment; |
||||
|
__asm__ __volatile__("lock; xaddl %0,%1" |
||||
|
: "+r" (temp), "+m" (*ptr) |
||||
|
: : "memory"); |
||||
|
// temp now holds the old value of *ptr
|
||||
|
return temp + increment; |
||||
|
} |
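// Added worked example: if *ptr is 5 and increment is 3, "lock xadd" stores
// 8 into *ptr and leaves the old value 5 in temp, so the function returns
// 5 + 3 == 8, the post-increment value.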
||||
|
|
||||
|
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
||||
|
Atomic32 increment) { |
||||
|
Atomic32 temp = increment; |
||||
|
__asm__ __volatile__("lock; xaddl %0,%1" |
||||
|
: "+r" (temp), "+m" (*ptr) |
||||
|
: : "memory"); |
||||
|
// temp now holds the old value of *ptr
|
||||
|
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { |
||||
|
__asm__ __volatile__("lfence" : : : "memory"); |
||||
|
} |
||||
|
return temp + increment; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
||||
|
Atomic32 old_value, |
||||
|
Atomic32 new_value) { |
||||
|
Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
||||
|
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { |
||||
|
__asm__ __volatile__("lfence" : : : "memory"); |
||||
|
} |
||||
|
return x; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
||||
|
Atomic32 old_value, |
||||
|
Atomic32 new_value) { |
||||
|
return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
||||
|
} |
||||
|
|
||||
|
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
||||
|
*ptr = value; |
||||
|
} |
||||
|
|
||||
|
#if defined(__x86_64__) |
||||
|
|
||||
|
// 64-bit implementations of memory barrier can be simpler, because
|
||||
|
// "mfence" is guaranteed to exist.
|
||||
|
inline void MemoryBarrier() { |
||||
|
__asm__ __volatile__("mfence" : : : "memory"); |
||||
|
} |
||||
|
|
||||
|
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
||||
|
*ptr = value; |
||||
|
MemoryBarrier(); |
||||
|
} |
||||
|
|
||||
|
#else |
||||
|
|
||||
|
inline void MemoryBarrier() { |
||||
|
if (AtomicOps_Internalx86CPUFeatures.has_sse2) { |
||||
|
__asm__ __volatile__("mfence" : : : "memory"); |
||||
|
} else { // mfence is faster but not present on PIII
|
||||
|
Atomic32 x = 0; |
||||
|
NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII
|
||||
|
} |
||||
|
} |
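// Added note: on x86 a locked read-modify-write (xchg with a memory operand
// is implicitly locked) orders all earlier loads and stores against all later
// ones, so exchanging a dummy local variable is a valid full barrier on
// pre-SSE2 CPUs that lack mfence.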
||||
|
|
||||
|
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
||||
|
if (AtomicOps_Internalx86CPUFeatures.has_sse2) { |
||||
|
*ptr = value; |
||||
|
__asm__ __volatile__("mfence" : : : "memory"); |
||||
|
} else { |
||||
|
NoBarrier_AtomicExchange(ptr, value); |
||||
|
// acts as a barrier on PIII
|
||||
|
} |
||||
|
} |
||||
|
#endif |
||||
|
|
||||
|
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
||||
|
ATOMICOPS_COMPILER_BARRIER(); |
||||
|
*ptr = value; // An x86 store acts as a release barrier.
|
||||
|
// See comments in Atomic64 version of Release_Store(), below.
|
||||
|
} |
||||
|
|
||||
|
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
||||
|
return *ptr; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
||||
|
Atomic32 value = *ptr; // An x86 load acts as an acquire barrier.
|
||||
|
// See comments in Atomic64 version of Release_Store(), below.
|
||||
|
ATOMICOPS_COMPILER_BARRIER(); |
||||
|
return value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
||||
|
MemoryBarrier(); |
||||
|
return *ptr; |
||||
|
} |
||||
|
|
||||
|
#if defined(__x86_64__) |
||||
|
|
||||
|
// 64-bit low-level operations on 64-bit platform.
|
||||
|
|
||||
|
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |
||||
|
Atomic64 old_value, |
||||
|
Atomic64 new_value) { |
||||
|
Atomic64 prev; |
||||
|
__asm__ __volatile__("lock; cmpxchgq %1,%2" |
||||
|
: "=a" (prev) |
||||
|
: "q" (new_value), "m" (*ptr), "0" (old_value) |
||||
|
: "memory"); |
||||
|
return prev; |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, |
||||
|
Atomic64 new_value) { |
||||
|
__asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg.
|
||||
|
: "=r" (new_value) |
||||
|
: "m" (*ptr), "0" (new_value) |
||||
|
: "memory"); |
||||
|
return new_value; // Now it's the previous value.
|
||||
|
} |
||||
|
|
||||
|
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, |
||||
|
Atomic64 increment) { |
||||
|
Atomic64 temp = increment; |
||||
|
__asm__ __volatile__("lock; xaddq %0,%1" |
||||
|
: "+r" (temp), "+m" (*ptr) |
||||
|
: : "memory"); |
||||
|
// temp now contains the previous value of *ptr
|
||||
|
return temp + increment; |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, |
||||
|
Atomic64 increment) { |
||||
|
Atomic64 temp = increment; |
||||
|
__asm__ __volatile__("lock; xaddq %0,%1" |
||||
|
: "+r" (temp), "+m" (*ptr) |
||||
|
: : "memory"); |
||||
|
// temp now contains the previous value of *ptr
|
||||
|
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { |
||||
|
__asm__ __volatile__("lfence" : : : "memory"); |
||||
|
} |
||||
|
return temp + increment; |
||||
|
} |
||||
|
|
||||
|
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { |
||||
|
*ptr = value; |
||||
|
} |
||||
|
|
||||
|
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { |
||||
|
*ptr = value; |
||||
|
MemoryBarrier(); |
||||
|
} |
||||
|
|
||||
|
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { |
||||
|
ATOMICOPS_COMPILER_BARRIER(); |
||||
|
|
||||
|
*ptr = value; // An x86 store acts as a release barrier
|
||||
|
// for current AMD/Intel chips as of Jan 2008.
|
||||
|
// See also Acquire_Load(), below.
|
||||
|
|
||||
|
// When new chips come out, check:
|
||||
|
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
|
||||
|
// System Programming Guide, Chapter 7: Multiple-processor management,
|
||||
|
// Section 7.2, Memory Ordering.
|
||||
|
// Last seen at:
|
||||
|
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
|
||||
|
//
|
||||
|
// x86 stores/loads fail to act as barriers for a few instructions (clflush
|
||||
|
// maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
|
||||
|
// not generated by the compiler, and are rare. Users of these instructions
|
||||
|
// need to know about cache behaviour in any case since all of these involve
|
||||
|
// either flushing cache lines or non-temporal cache hints.
|
||||
|
} |
||||
|
|
||||
|
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { |
||||
|
return *ptr; |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { |
||||
|
Atomic64 value = *ptr; // An x86 load acts as an acquire barrier,
|
||||
|
// for current AMD/Intel chips as of Jan 2008.
|
||||
|
// See also Release_Store(), above.
|
||||
|
ATOMICOPS_COMPILER_BARRIER(); |
||||
|
return value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |
||||
|
MemoryBarrier(); |
||||
|
return *ptr; |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, |
||||
|
Atomic64 old_value, |
||||
|
Atomic64 new_value) { |
||||
|
Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
||||
|
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { |
||||
|
__asm__ __volatile__("lfence" : : : "memory"); |
||||
|
} |
||||
|
return x; |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, |
||||
|
Atomic64 old_value, |
||||
|
Atomic64 new_value) { |
||||
|
return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
||||
|
} |
||||
|
|
||||
|
#endif // defined(__x86_64__)
|
||||
|
|
||||
|
} } // namespace v8::internal
|
||||
|
|
||||
|
#undef ATOMICOPS_COMPILER_BARRIER |
||||
|
|
||||
|
#endif // V8_ATOMICOPS_INTERNALS_X86_GCC_H_
|
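// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original commit).  It shows the
// intended pairing of Release_Store and Acquire_Load, assuming compilation
// inside the V8 tree so "atomicops.h" resolves to the implementation above.
// The payload/ready names are hypothetical.
// ---------------------------------------------------------------------------
#include "atomicops.h"

static int payload = 0;                      // plain data written by the producer
static v8::internal::Atomic32 ready = 0;     // publication flag

void Publish() {
  payload = 42;                              // 1. write the data
  v8::internal::Release_Store(&ready, 1);    // 2. then publish the flag
}

int Consume() {
  // Acquire_Load pairs with Release_Store: once the flag reads 1, the
  // earlier write to payload is guaranteed to be visible here.
  while (v8::internal::Acquire_Load(&ready) == 0) {
    // Spin until the producer publishes.
  }
  return payload;                            // observes 42
}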
@ -0,0 +1,301 @@ |
|||||
|
// Copyright 2010 the V8 project authors. All rights reserved.
|
||||
|
// Redistribution and use in source and binary forms, with or without
|
||||
|
// modification, are permitted provided that the following conditions are
|
||||
|
// met:
|
||||
|
//
|
||||
|
// * Redistributions of source code must retain the above copyright
|
||||
|
// notice, this list of conditions and the following disclaimer.
|
||||
|
// * Redistributions in binary form must reproduce the above
|
||||
|
// copyright notice, this list of conditions and the following
|
||||
|
// disclaimer in the documentation and/or other materials provided
|
||||
|
// with the distribution.
|
||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||
|
// contributors may be used to endorse or promote products derived
|
||||
|
// from this software without specific prior written permission.
|
||||
|
//
|
||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
||||
|
// This file is an internal atomic implementation, use atomicops.h instead.
|
||||
|
|
||||
|
#ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_ |
||||
|
#define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_ |
||||
|
|
||||
|
#include <libkern/OSAtomic.h> |
||||
|
|
||||
|
namespace v8 { |
||||
|
namespace internal { |
||||
|
|
||||
|
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr, |
||||
|
Atomic32 old_value, |
||||
|
Atomic32 new_value) { |
||||
|
Atomic32 prev_value; |
||||
|
do { |
||||
|
if (OSAtomicCompareAndSwap32(old_value, new_value, |
||||
|
const_cast<Atomic32*>(ptr))) { |
||||
|
return old_value; |
||||
|
} |
||||
|
prev_value = *ptr; |
||||
|
} while (prev_value == old_value); |
||||
|
return prev_value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr, |
||||
|
Atomic32 new_value) { |
||||
|
Atomic32 old_value; |
||||
|
do { |
||||
|
old_value = *ptr; |
||||
|
} while (!OSAtomicCompareAndSwap32(old_value, new_value, |
||||
|
const_cast<Atomic32*>(ptr))); |
||||
|
return old_value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr, |
||||
|
Atomic32 increment) { |
||||
|
return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr)); |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr, |
||||
|
Atomic32 increment) { |
||||
|
return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr)); |
||||
|
} |
||||
|
|
||||
|
inline void MemoryBarrier() { |
||||
|
OSMemoryBarrier(); |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr, |
||||
|
Atomic32 old_value, |
||||
|
Atomic32 new_value) { |
||||
|
Atomic32 prev_value; |
||||
|
do { |
||||
|
if (OSAtomicCompareAndSwap32Barrier(old_value, new_value, |
||||
|
const_cast<Atomic32*>(ptr))) { |
||||
|
return old_value; |
||||
|
} |
||||
|
prev_value = *ptr; |
||||
|
} while (prev_value == old_value); |
||||
|
return prev_value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr, |
||||
|
Atomic32 old_value, |
||||
|
Atomic32 new_value) { |
||||
|
return Acquire_CompareAndSwap(ptr, old_value, new_value); |
||||
|
} |
||||
|
|
||||
|
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
||||
|
*ptr = value; |
||||
|
} |
||||
|
|
||||
|
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) { |
||||
|
*ptr = value; |
||||
|
MemoryBarrier(); |
||||
|
} |
||||
|
|
||||
|
inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) { |
||||
|
MemoryBarrier(); |
||||
|
*ptr = value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
||||
|
return *ptr; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) { |
||||
|
Atomic32 value = *ptr; |
||||
|
MemoryBarrier(); |
||||
|
return value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Release_Load(volatile const Atomic32 *ptr) { |
||||
|
MemoryBarrier(); |
||||
|
return *ptr; |
||||
|
} |
||||
|
|
||||
|
#ifdef __LP64__ |
||||
|
|
||||
|
// 64-bit implementation on 64-bit platform
|
||||
|
|
||||
|
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr, |
||||
|
Atomic64 old_value, |
||||
|
Atomic64 new_value) { |
||||
|
Atomic64 prev_value; |
||||
|
do { |
||||
|
if (OSAtomicCompareAndSwap64(old_value, new_value, |
||||
|
const_cast<Atomic64*>(ptr))) { |
||||
|
return old_value; |
||||
|
} |
||||
|
prev_value = *ptr; |
||||
|
} while (prev_value == old_value); |
||||
|
return prev_value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr, |
||||
|
Atomic64 new_value) { |
||||
|
Atomic64 old_value; |
||||
|
do { |
||||
|
old_value = *ptr; |
||||
|
} while (!OSAtomicCompareAndSwap64(old_value, new_value, |
||||
|
const_cast<Atomic64*>(ptr))); |
||||
|
return old_value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr, |
||||
|
Atomic64 increment) { |
||||
|
return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr)); |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr, |
||||
|
Atomic64 increment) { |
||||
|
return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr)); |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr, |
||||
|
Atomic64 old_value, |
||||
|
Atomic64 new_value) { |
||||
|
Atomic64 prev_value; |
||||
|
do { |
||||
|
if (OSAtomicCompareAndSwap64Barrier(old_value, new_value, |
||||
|
const_cast<Atomic64*>(ptr))) { |
||||
|
return old_value; |
||||
|
} |
||||
|
prev_value = *ptr; |
||||
|
} while (prev_value == old_value); |
||||
|
return prev_value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr, |
||||
|
Atomic64 old_value, |
||||
|
Atomic64 new_value) { |
||||
|
// The lib kern interface does not distinguish between
|
||||
|
// Acquire and Release memory barriers; they are equivalent.
|
||||
|
return Acquire_CompareAndSwap(ptr, old_value, new_value); |
||||
|
} |
||||
|
|
||||
|
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { |
||||
|
*ptr = value; |
||||
|
} |
||||
|
|
||||
|
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) { |
||||
|
*ptr = value; |
||||
|
MemoryBarrier(); |
||||
|
} |
||||
|
|
||||
|
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) { |
||||
|
MemoryBarrier(); |
||||
|
*ptr = value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { |
||||
|
return *ptr; |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) { |
||||
|
Atomic64 value = *ptr; |
||||
|
MemoryBarrier(); |
||||
|
return value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) { |
||||
|
MemoryBarrier(); |
||||
|
return *ptr; |
||||
|
} |
||||
|
|
||||
|
#endif // defined(__LP64__)
|
||||
|
|
||||
|
// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always different
|
||||
|
// on the Mac, even when they are the same size. We need to explicitly cast
|
||||
|
// from AtomicWord to Atomic32/64 to implement the AtomicWord interface.
|
||||
|
#ifdef __LP64__ |
||||
|
#define AtomicWordCastType Atomic64 |
||||
|
#else |
||||
|
#define AtomicWordCastType Atomic32 |
||||
|
#endif |
||||
|
|
||||
|
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr, |
||||
|
AtomicWord old_value, |
||||
|
AtomicWord new_value) { |
||||
|
return NoBarrier_CompareAndSwap( |
||||
|
reinterpret_cast<volatile AtomicWordCastType*>(ptr), |
||||
|
old_value, new_value); |
||||
|
} |
||||
|
|
||||
|
inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr, |
||||
|
AtomicWord new_value) { |
||||
|
return NoBarrier_AtomicExchange( |
||||
|
reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value); |
||||
|
} |
||||
|
|
||||
|
inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr, |
||||
|
AtomicWord increment) { |
||||
|
return NoBarrier_AtomicIncrement( |
||||
|
reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment); |
||||
|
} |
||||
|
|
||||
|
inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr, |
||||
|
AtomicWord increment) { |
||||
|
return Barrier_AtomicIncrement( |
||||
|
reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment); |
||||
|
} |
||||
|
|
||||
|
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr, |
||||
|
AtomicWord old_value, |
||||
|
AtomicWord new_value) { |
||||
|
return v8::internal::Acquire_CompareAndSwap( |
||||
|
reinterpret_cast<volatile AtomicWordCastType*>(ptr), |
||||
|
old_value, new_value); |
||||
|
} |
||||
|
|
||||
|
inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr, |
||||
|
AtomicWord old_value, |
||||
|
AtomicWord new_value) { |
||||
|
return v8::internal::Release_CompareAndSwap( |
||||
|
reinterpret_cast<volatile AtomicWordCastType*>(ptr), |
||||
|
old_value, new_value); |
||||
|
} |
||||
|
|
||||
|
inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) { |
||||
|
NoBarrier_Store( |
||||
|
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value); |
||||
|
} |
||||
|
|
||||
|
inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) { |
||||
|
return v8::internal::Acquire_Store( |
||||
|
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value); |
||||
|
} |
||||
|
|
||||
|
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) { |
||||
|
return v8::internal::Release_Store( |
||||
|
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value); |
||||
|
} |
||||
|
|
||||
|
inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) { |
||||
|
return NoBarrier_Load( |
||||
|
reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); |
||||
|
} |
||||
|
|
||||
|
inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) { |
||||
|
return v8::internal::Acquire_Load( |
||||
|
reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); |
||||
|
} |
||||
|
|
||||
|
inline AtomicWord Release_Load(volatile const AtomicWord* ptr) { |
||||
|
return v8::internal::Release_Load( |
||||
|
reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); |
||||
|
} |
||||
|
|
||||
|
#undef AtomicWordCastType |
||||
|
|
||||
|
} } // namespace v8::internal
|
||||
|
|
||||
|
#endif // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
|
@ -0,0 +1,203 @@ |
|||||
|
// Copyright 2010 the V8 project authors. All rights reserved.
|
||||
|
// Redistribution and use in source and binary forms, with or without
|
||||
|
// modification, are permitted provided that the following conditions are
|
||||
|
// met:
|
||||
|
//
|
||||
|
// * Redistributions of source code must retain the above copyright
|
||||
|
// notice, this list of conditions and the following disclaimer.
|
||||
|
// * Redistributions in binary form must reproduce the above
|
||||
|
// copyright notice, this list of conditions and the following
|
||||
|
// disclaimer in the documentation and/or other materials provided
|
||||
|
// with the distribution.
|
||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||
|
// contributors may be used to endorse or promote products derived
|
||||
|
// from this software without specific prior written permission.
|
||||
|
//
|
||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
||||
|
// This file is an internal atomic implementation, use atomicops.h instead.
|
||||
|
|
||||
|
#ifndef V8_ATOMICOPS_INTERNALS_X86_MSVC_H_ |
||||
|
#define V8_ATOMICOPS_INTERNALS_X86_MSVC_H_ |
||||
|
|
||||
|
#include "checks.h" |
||||
|
#include "win32-headers.h" |
||||
|
|
||||
|
namespace v8 { |
||||
|
namespace internal { |
||||
|
|
||||
|
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
||||
|
Atomic32 old_value, |
||||
|
Atomic32 new_value) { |
||||
|
LONG result = InterlockedCompareExchange( |
||||
|
reinterpret_cast<volatile LONG*>(ptr), |
||||
|
static_cast<LONG>(new_value), |
||||
|
static_cast<LONG>(old_value)); |
||||
|
return static_cast<Atomic32>(result); |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
||||
|
Atomic32 new_value) { |
||||
|
LONG result = InterlockedExchange( |
||||
|
reinterpret_cast<volatile LONG*>(ptr), |
||||
|
static_cast<LONG>(new_value)); |
||||
|
return static_cast<Atomic32>(result); |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
||||
|
Atomic32 increment) { |
||||
|
return InterlockedExchangeAdd( |
||||
|
reinterpret_cast<volatile LONG*>(ptr), |
||||
|
static_cast<LONG>(increment)) + increment; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
||||
|
Atomic32 increment) { |
||||
|
return Barrier_AtomicIncrement(ptr, increment); |
||||
|
} |
||||
|
|
||||
|
#if !(defined(_MSC_VER) && _MSC_VER >= 1400) |
||||
|
#error "We require at least vs2005 for MemoryBarrier" |
||||
|
#endif |
||||
|
inline void MemoryBarrier() { |
||||
|
// We use MemoryBarrier from WinNT.h
|
||||
|
::MemoryBarrier(); |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
||||
|
Atomic32 old_value, |
||||
|
Atomic32 new_value) { |
||||
|
return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
||||
|
Atomic32 old_value, |
||||
|
Atomic32 new_value) { |
||||
|
return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
||||
|
} |
||||
|
|
||||
|
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
||||
|
*ptr = value; |
||||
|
} |
||||
|
|
||||
|
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
||||
|
NoBarrier_AtomicExchange(ptr, value); |
||||
|
// acts as a barrier in this implementation
|
||||
|
} |
||||
|
|
||||
|
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
||||
|
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
|
||||
|
// See comments in Atomic64 version of Release_Store() below.
|
||||
|
} |
||||
|
|
||||
|
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
||||
|
return *ptr; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
||||
|
Atomic32 value = *ptr; |
||||
|
return value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
||||
|
MemoryBarrier(); |
||||
|
return *ptr; |
||||
|
} |
||||
|
|
||||
|
#if defined(_WIN64) |
||||
|
|
||||
|
// 64-bit low-level operations on 64-bit platform.
|
||||
|
|
||||
|
STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID)); |
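// Added note: the assert documents why the Interlocked*Pointer family can
// stand in for 64-bit integer operations here: on _WIN64 a PVOID is 8 bytes,
// so reinterpreting Atomic64* as volatile PVOID* below is size-correct.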
||||
|
|
||||
|
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |
||||
|
Atomic64 old_value, |
||||
|
Atomic64 new_value) { |
||||
|
PVOID result = InterlockedCompareExchangePointer( |
||||
|
reinterpret_cast<volatile PVOID*>(ptr), |
||||
|
reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value)); |
||||
|
return reinterpret_cast<Atomic64>(result); |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, |
||||
|
Atomic64 new_value) { |
||||
|
PVOID result = InterlockedExchangePointer( |
||||
|
reinterpret_cast<volatile PVOID*>(ptr), |
||||
|
reinterpret_cast<PVOID>(new_value)); |
||||
|
return reinterpret_cast<Atomic64>(result); |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, |
||||
|
Atomic64 increment) { |
||||
|
return InterlockedExchangeAdd64( |
||||
|
reinterpret_cast<volatile LONGLONG*>(ptr), |
||||
|
static_cast<LONGLONG>(increment)) + increment; |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, |
||||
|
Atomic64 increment) { |
||||
|
return Barrier_AtomicIncrement(ptr, increment); |
||||
|
} |
||||
|
|
||||
|
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { |
||||
|
*ptr = value; |
||||
|
} |
||||
|
|
||||
|
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { |
||||
|
NoBarrier_AtomicExchange(ptr, value); |
||||
|
// acts as a barrier in this implementation
|
||||
|
} |
||||
|
|
||||
|
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { |
||||
|
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
|
||||
|
|
||||
|
// When new chips come out, check:
|
||||
|
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
|
||||
|
// System Programming Guide, Chapter 7: Multiple-processor management,
|
||||
|
// Section 7.2, Memory Ordering.
|
||||
|
// Last seen at:
|
||||
|
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
|
||||
|
} |
||||
|
|
||||
|
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { |
||||
|
return *ptr; |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { |
||||
|
Atomic64 value = *ptr; |
||||
|
return value; |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |
||||
|
MemoryBarrier(); |
||||
|
return *ptr; |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, |
||||
|
Atomic64 old_value, |
||||
|
Atomic64 new_value) { |
||||
|
return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
||||
|
} |
||||
|
|
||||
|
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, |
||||
|
Atomic64 old_value, |
||||
|
Atomic64 new_value) { |
||||
|
return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
||||
|
} |
||||
|
|
||||
|
|
||||
|
#endif // defined(_WIN64)
|
||||
|
|
||||
|
} } // namespace v8::internal
|
||||
|
|
||||
|
#endif // V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
|
File diff suppressed because it is too large
@ -0,0 +1,511 @@ |
|||||
|
// Copyright 2010 the V8 project authors. All rights reserved.
|
||||
|
// Redistribution and use in source and binary forms, with or without
|
||||
|
// modification, are permitted provided that the following conditions are
|
||||
|
// met:
|
||||
|
//
|
||||
|
// * Redistributions of source code must retain the above copyright
|
||||
|
// notice, this list of conditions and the following disclaimer.
|
||||
|
// * Redistributions in binary form must reproduce the above
|
||||
|
// copyright notice, this list of conditions and the following
|
||||
|
// disclaimer in the documentation and/or other materials provided
|
||||
|
// with the distribution.
|
||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||
|
// contributors may be used to endorse or promote products derived
|
||||
|
// from this software without specific prior written permission.
|
||||
|
//
|
||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
||||
|
#ifndef V8_DEOPTIMIZER_H_ |
||||
|
#define V8_DEOPTIMIZER_H_ |
||||
|
|
||||
|
#include "v8.h" |
||||
|
|
||||
|
#include "macro-assembler.h" |
||||
|
#include "zone-inl.h" |
||||
|
|
||||
|
|
||||
|
namespace v8 { |
||||
|
namespace internal { |
||||
|
|
||||
|
class FrameDescription; |
||||
|
class TranslationIterator; |
||||
|
class DeoptimizingCodeListNode; |
||||
|
|
||||
|
|
||||
|
class ValueDescription BASE_EMBEDDED { |
||||
|
public: |
||||
|
explicit ValueDescription(int index) : stack_index_(index) { } |
||||
|
int stack_index() const { return stack_index_; } |
||||
|
|
||||
|
private: |
||||
|
// Offset relative to the top of the stack.
|
||||
|
int stack_index_; |
||||
|
}; |
||||
|
|
||||
|
|
||||
|
class ValueDescriptionInteger32: public ValueDescription { |
||||
|
public: |
||||
|
ValueDescriptionInteger32(int index, int32_t value) |
||||
|
: ValueDescription(index), int32_value_(value) { } |
||||
|
int32_t int32_value() const { return int32_value_; } |
||||
|
|
||||
|
private: |
||||
|
// Raw value.
|
||||
|
int32_t int32_value_; |
||||
|
}; |
||||
|
|
||||
|
|
||||
|
class ValueDescriptionDouble: public ValueDescription { |
||||
|
public: |
||||
|
ValueDescriptionDouble(int index, double value) |
||||
|
: ValueDescription(index), double_value_(value) { } |
||||
|
double double_value() const { return double_value_; } |
||||
|
|
||||
|
private: |
||||
|
// Raw value.
|
||||
|
double double_value_; |
||||
|
}; |
||||
|
|
||||
|
|
||||
|
class OptimizedFunctionVisitor BASE_EMBEDDED { |
||||
|
public: |
||||
|
virtual ~OptimizedFunctionVisitor() {} |
||||
|
|
||||
|
// Function which is called before iteration of any optimized functions
|
||||
|
// from the given global context.
|
||||
|
virtual void EnterContext(Context* context) = 0; |
||||
|
|
||||
|
virtual void VisitFunction(JSFunction* function) = 0; |
||||
|
|
||||
|
// Function which is called after iteration of all optimized functions
|
||||
|
// from the given global context.
|
||||
|
virtual void LeaveContext(Context* context) = 0; |
||||
|
}; |
||||
|
|
||||
|
|
||||
|
class Deoptimizer : public Malloced { |
||||
|
public: |
||||
|
enum BailoutType { |
||||
|
EAGER, |
||||
|
LAZY, |
||||
|
OSR |
||||
|
}; |
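// Added note (informal summary, not from the original header): EAGER bailouts
// deoptimize at the failing check inside optimized code, LAZY bailouts take
// effect once execution returns to code that has since been invalidated, and
// OSR supports on-stack replacement (see DoComputeOsrOutputFrame below).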
||||
|
|
||||
|
int output_count() const { return output_count_; } |
||||
|
|
||||
|
static Deoptimizer* New(JSFunction* function, |
||||
|
BailoutType type, |
||||
|
unsigned bailout_id, |
||||
|
Address from, |
||||
|
int fp_to_sp_delta); |
||||
|
static Deoptimizer* Grab(); |
||||
|
|
||||
|
// Deoptimize the function now. Its current optimized code will never be run
|
||||
|
// again and any activations of the optimized code will get deoptimized when
|
||||
|
// execution returns.
|
||||
|
static void DeoptimizeFunction(JSFunction* function); |
||||
|
|
||||
|
// Deoptimize all functions in the heap.
|
||||
|
static void DeoptimizeAll(); |
||||
|
|
||||
|
static void DeoptimizeGlobalObject(JSObject* object); |
||||
|
|
||||
|
static void VisitAllOptimizedFunctionsForContext( |
||||
|
Context* context, OptimizedFunctionVisitor* visitor); |
||||
|
|
||||
|
static void VisitAllOptimizedFunctionsForGlobalObject( |
||||
|
JSObject* object, OptimizedFunctionVisitor* visitor); |
||||
|
|
||||
|
static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor); |
||||
|
|
||||
|
// Given the relocation info of a call to the stack check stub, patch the
|
||||
|
// code so as to go unconditionally to the on-stack replacement builtin
|
||||
|
// instead.
|
||||
|
static void PatchStackCheckCode(RelocInfo* rinfo, Code* replacement_code); |
||||
|
|
||||
|
// Given the relocation info of a call to the on-stack replacement
|
||||
|
// builtin, patch the code back to the original stack check code.
|
||||
|
static void RevertStackCheckCode(RelocInfo* rinfo, Code* check_code); |
||||
|
|
||||
|
~Deoptimizer(); |
||||
|
|
||||
|
void InsertHeapNumberValues(int index, JavaScriptFrame* frame); |
||||
|
|
||||
|
static void ComputeOutputFrames(Deoptimizer* deoptimizer); |
||||
|
|
||||
|
static Address GetDeoptimizationEntry(int id, BailoutType type); |
||||
|
static int GetDeoptimizationId(Address addr, BailoutType type); |
||||
|
static unsigned GetOutputInfo(DeoptimizationOutputData* data, |
||||
|
unsigned node_id, |
||||
|
SharedFunctionInfo* shared); |
||||
|
|
||||
|
static void Setup(); |
||||
|
static void TearDown(); |
||||
|
|
||||
|
// Code generation support.
|
||||
|
static int input_offset() { return OFFSET_OF(Deoptimizer, input_); } |
||||
|
static int output_count_offset() { |
||||
|
return OFFSET_OF(Deoptimizer, output_count_); |
||||
|
} |
||||
|
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); } |
||||
|
|
||||
|
static int GetDeoptimizedCodeCount(); |
||||
|
|
||||
|
static const int kNotDeoptimizationEntry = -1; |
||||
|
|
||||
|
// Generators for the deoptimization entry code.
|
||||
|
class EntryGenerator BASE_EMBEDDED { |
||||
|
public: |
||||
|
EntryGenerator(MacroAssembler* masm, BailoutType type) |
||||
|
: masm_(masm), type_(type) { } |
||||
|
virtual ~EntryGenerator() { } |
||||
|
|
||||
|
void Generate(); |
||||
|
|
||||
|
protected: |
||||
|
MacroAssembler* masm() const { return masm_; } |
||||
|
BailoutType type() const { return type_; } |
||||
|
|
||||
|
virtual void GeneratePrologue() { } |
||||
|
|
||||
|
private: |
||||
|
MacroAssembler* masm_; |
||||
|
Deoptimizer::BailoutType type_; |
||||
|
}; |
||||
|
|
||||
|
class TableEntryGenerator : public EntryGenerator { |
||||
|
public: |
||||
|
TableEntryGenerator(MacroAssembler* masm, BailoutType type, int count) |
||||
|
: EntryGenerator(masm, type), count_(count) { } |
||||
|
|
||||
|
protected: |
||||
|
virtual void GeneratePrologue(); |
||||
|
|
||||
|
private: |
||||
|
int count() const { return count_; } |
||||
|
|
||||
|
int count_; |
||||
|
}; |
||||
|
|
||||
|
private: |
||||
|
static const int kNumberOfEntries = 4096; |
||||
|
|
||||
|
Deoptimizer(JSFunction* function, |
||||
|
BailoutType type, |
||||
|
unsigned bailout_id, |
||||
|
Address from, |
||||
|
int fp_to_sp_delta); |
||||
|
void DeleteFrameDescriptions(); |
||||
|
|
||||
|
void DoComputeOutputFrames(); |
||||
|
void DoComputeOsrOutputFrame(); |
||||
|
void DoComputeFrame(TranslationIterator* iterator, int frame_index); |
||||
|
void DoTranslateCommand(TranslationIterator* iterator, |
||||
|
int frame_index, |
||||
|
unsigned output_offset); |
||||
|
// Translate a command for OSR. Updates the input offset to be used for
|
||||
|
// the next command. Returns false if translation of the command failed
|
||||
|
// (e.g., a number conversion failed) and may or may not have updated the
|
||||
|
// input offset.
|
||||
|
bool DoOsrTranslateCommand(TranslationIterator* iterator, |
||||
|
int* input_offset); |
||||
|
|
||||
|
unsigned ComputeInputFrameSize() const; |
||||
|
unsigned ComputeFixedSize(JSFunction* function) const; |
||||
|
|
||||
|
unsigned ComputeIncomingArgumentSize(JSFunction* function) const; |
||||
|
unsigned ComputeOutgoingArgumentSize() const; |
||||
|
|
||||
|
Object* ComputeLiteral(int index) const; |
||||
|
|
||||
|
void InsertHeapNumberValue(JavaScriptFrame* frame, |
||||
|
int stack_index, |
||||
|
double val, |
||||
|
int extra_slot_count); |
||||
|
|
||||
|
void AddInteger32Value(int frame_index, int slot_index, int32_t value); |
||||
|
void AddDoubleValue(int frame_index, int slot_index, double value); |
||||
|
|
||||
|
static LargeObjectChunk* CreateCode(BailoutType type); |
||||
|
static void GenerateDeoptimizationEntries( |
||||
|
MacroAssembler* masm, int count, BailoutType type); |
||||
|
|
||||
|
// Weak handle callback for deoptimizing code objects.
|
||||
|
static void HandleWeakDeoptimizedCode( |
||||
|
v8::Persistent<v8::Value> obj, void* data); |
||||
|
static Code* FindDeoptimizingCodeFromAddress(Address addr); |
||||
|
static void RemoveDeoptimizingCode(Code* code); |
||||
|
|
||||
|
static LargeObjectChunk* eager_deoptimization_entry_code_; |
||||
|
static LargeObjectChunk* lazy_deoptimization_entry_code_; |
||||
|
static Deoptimizer* current_; |
||||
|
|
||||
|
// List of deoptimized code which still have references from active stack
|
||||
|
// frames. These code objects are needed by the deoptimizer when deoptimizing
|
||||
|
// a frame for which the code object for the function has been
|
||||
|
// changed from the code present when deoptimizing was done.
|
||||
|
static DeoptimizingCodeListNode* deoptimizing_code_list_; |
||||
|
|
||||
|
JSFunction* function_; |
||||
|
Code* optimized_code_; |
||||
|
unsigned bailout_id_; |
||||
|
BailoutType bailout_type_; |
||||
|
Address from_; |
||||
|
int fp_to_sp_delta_; |
||||
|
|
||||
|
// Input frame description.
|
||||
|
FrameDescription* input_; |
||||
|
// Number of output frames.
|
||||
|
int output_count_; |
||||
|
// Array of output frame descriptions.
|
||||
|
FrameDescription** output_; |
||||
|
|
||||
|
List<ValueDescriptionInteger32>* integer32_values_; |
||||
|
List<ValueDescriptionDouble>* double_values_; |
||||
|
|
||||
|
static int table_entry_size_; |
||||
|
|
||||
|
friend class FrameDescription; |
||||
|
friend class DeoptimizingCodeListNode; |
||||
|
}; |
||||
|
|
||||
|
|
||||
|
class FrameDescription { |
||||
|
public: |
||||
|
FrameDescription(uint32_t frame_size, |
||||
|
JSFunction* function); |
||||
|
|
||||
|
void* operator new(size_t size, uint32_t frame_size) { |
||||
|
return malloc(size + frame_size); |
||||
|
} |
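// Added note: this placement-style operator new over-allocates by frame_size
// so the frame's slots live directly behind the FrameDescription object;
// GetFrameSlotPointer() reaches them by adding frame_content_offset(), which
// is sizeof(FrameDescription), to "this".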
||||
|
|
||||
|
void operator delete(void* description) { |
||||
|
free(description); |
||||
|
} |
||||
|
|
||||
|
intptr_t GetFrameSize() const { return frame_size_; } |
||||
|
|
||||
|
JSFunction* GetFunction() const { return function_; } |
||||
|
|
||||
|
unsigned GetOffsetFromSlotIndex(Deoptimizer* deoptimizer, int slot_index); |
||||
|
|
||||
|
intptr_t GetFrameSlot(unsigned offset) { |
||||
|
return *GetFrameSlotPointer(offset); |
||||
|
} |
||||
|
|
||||
|
double GetDoubleFrameSlot(unsigned offset) { |
||||
|
return *reinterpret_cast<double*>(GetFrameSlotPointer(offset)); |
||||
|
} |
||||
|
|
||||
|
void SetFrameSlot(unsigned offset, intptr_t value) { |
||||
|
*GetFrameSlotPointer(offset) = value; |
||||
|
} |
||||
|
|
||||
|
intptr_t GetRegister(unsigned n) const { |
||||
|
ASSERT(n < ARRAY_SIZE(registers_)); |
||||
|
return registers_[n]; |
||||
|
} |
||||
|
|
||||
|
double GetDoubleRegister(unsigned n) const { |
||||
|
ASSERT(n < ARRAY_SIZE(double_registers_)); |
||||
|
return double_registers_[n]; |
||||
|
} |
||||
|
|
||||
|
void SetRegister(unsigned n, intptr_t value) { |
||||
|
ASSERT(n < ARRAY_SIZE(registers_)); |
||||
|
registers_[n] = value; |
||||
|
} |
||||
|
|
||||
|
void SetDoubleRegister(unsigned n, double value) { |
||||
|
ASSERT(n < ARRAY_SIZE(double_registers_)); |
||||
|
double_registers_[n] = value; |
||||
|
} |
||||
|
|
||||
|
intptr_t GetTop() const { return top_; } |
||||
|
void SetTop(intptr_t top) { top_ = top; } |
||||
|
|
||||
|
intptr_t GetPc() const { return pc_; } |
||||
|
void SetPc(intptr_t pc) { pc_ = pc; } |
||||
|
|
||||
|
intptr_t GetFp() const { return fp_; } |
||||
|
void SetFp(intptr_t fp) { fp_ = fp; } |
||||
|
|
||||
|
Smi* GetState() const { return state_; } |
||||
|
void SetState(Smi* state) { state_ = state; } |
||||
|
|
||||
|
void SetContinuation(intptr_t pc) { continuation_ = pc; } |
||||
|
|
||||
|
static int registers_offset() { |
||||
|
return OFFSET_OF(FrameDescription, registers_); |
||||
|
} |
||||
|
|
||||
|
static int double_registers_offset() { |
||||
|
return OFFSET_OF(FrameDescription, double_registers_); |
||||
|
} |
||||
|
|
||||
|
static int frame_size_offset() { |
||||
|
return OFFSET_OF(FrameDescription, frame_size_); |
||||
|
} |
||||
|
|
||||
|
static int pc_offset() { |
||||
|
return OFFSET_OF(FrameDescription, pc_); |
||||
|
} |
||||
|
|
||||
|
static int state_offset() { |
||||
|
return OFFSET_OF(FrameDescription, state_); |
||||
|
} |
||||
|
|
||||
|
static int continuation_offset() { |
||||
|
return OFFSET_OF(FrameDescription, continuation_); |
||||
|
} |
||||
|
|
||||
|
static int frame_content_offset() { |
||||
|
return sizeof(FrameDescription); |
||||
|
} |
||||
|
|
||||
|
private: |
||||
|
static const uint32_t kZapUint32 = 0xbeeddead; |
||||
|
|
||||
|
uintptr_t frame_size_; // Number of bytes.
|
||||
|
JSFunction* function_; |
||||
|
intptr_t registers_[Register::kNumRegisters]; |
||||
|
double double_registers_[DoubleRegister::kNumAllocatableRegisters]; |
||||
|
intptr_t top_; |
||||
|
intptr_t pc_; |
||||
|
intptr_t fp_; |
||||
|
Smi* state_; |
||||
|
|
||||
|
// Continuation is the PC where the execution continues after
|
||||
|
// deoptimizing.
|
||||
|
intptr_t continuation_; |
||||
|
|
||||
|
intptr_t* GetFrameSlotPointer(unsigned offset) { |
||||
|
ASSERT(offset < frame_size_); |
||||
|
return reinterpret_cast<intptr_t*>( |
||||
|
reinterpret_cast<Address>(this) + frame_content_offset() + offset); |
||||
|
} |
||||
|
}; |
||||
|
|
||||
|
|
||||
|
class TranslationBuffer BASE_EMBEDDED { |
||||
|
public: |
||||
|
TranslationBuffer() : contents_(256) { } |
||||
|
|
||||
|
int CurrentIndex() const { return contents_.length(); } |
||||
|
void Add(int32_t value); |
||||
|
|
||||
|
Handle<ByteArray> CreateByteArray(); |
||||
|
|
||||
|
private: |
||||
|
ZoneList<uint8_t> contents_; |
||||
|
}; |
||||
|
|
||||
|
|
||||
|
class TranslationIterator BASE_EMBEDDED { |
||||
|
public: |
||||
|
TranslationIterator(ByteArray* buffer, int index) |
||||
|
: buffer_(buffer), index_(index) { |
||||
|
ASSERT(index >= 0 && index < buffer->length()); |
||||
|
} |
||||
|
|
||||
|
int32_t Next(); |
||||
|
|
||||
|
bool HasNext() const { return index_ >= 0; } |
||||
|
|
||||
|
void Done() { index_ = -1; } |
||||
|
|
||||
|
void Skip(int n) { |
||||
|
for (int i = 0; i < n; i++) Next(); |
||||
|
} |
||||
|
|
||||
|
private: |
||||
|
ByteArray* buffer_; |
||||
|
int index_; |
||||
|
}; |
||||
|
|
||||
|
|
||||
|
class Translation BASE_EMBEDDED { |
||||
|
public: |
||||
|
enum Opcode { |
||||
|
BEGIN, |
||||
|
FRAME, |
||||
|
REGISTER, |
||||
|
INT32_REGISTER, |
||||
|
DOUBLE_REGISTER, |
||||
|
STACK_SLOT, |
||||
|
INT32_STACK_SLOT, |
||||
|
DOUBLE_STACK_SLOT, |
||||
|
LITERAL, |
||||
|
ARGUMENTS_OBJECT, |
||||
|
|
||||
|
// A prefix indicating that the next command is a duplicate of the one
|
||||
|
// that follows it.
|
||||
|
DUPLICATE |
||||
|
}; |
||||
|
|
||||
|
Translation(TranslationBuffer* buffer, int frame_count) |
||||
|
: buffer_(buffer), |
||||
|
index_(buffer->CurrentIndex()) { |
||||
|
buffer_->Add(BEGIN); |
||||
|
buffer_->Add(frame_count); |
||||
|
} |
||||
|
|
||||
|
int index() const { return index_; } |
||||
|
|
||||
|
// Commands.
|
||||
|
void BeginFrame(int node_id, int literal_id, unsigned height); |
||||
|
void StoreRegister(Register reg); |
||||
|
void StoreInt32Register(Register reg); |
||||
|
void StoreDoubleRegister(DoubleRegister reg); |
||||
|
void StoreStackSlot(int index); |
||||
|
void StoreInt32StackSlot(int index); |
||||
|
void StoreDoubleStackSlot(int index); |
||||
|
void StoreLiteral(int literal_id); |
||||
|
void StoreArgumentsObject(); |
||||
|
void MarkDuplicate(); |
||||
|
|
||||
|
static int NumberOfOperandsFor(Opcode opcode); |
||||
|
|
||||
|
#ifdef DEBUG |
||||
|
static const char* StringFor(Opcode opcode); |
||||
|
#endif |
||||
|
|
||||
|
private: |
||||
|
TranslationBuffer* buffer_; |
||||
|
int index_; |
||||
|
}; |
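// Added usage sketch (informal, hypothetical values) showing how the API
// declared above fits together; the register and index names are made up:
//
//   TranslationBuffer buffer;
//   Translation translation(&buffer, 1);          // writes BEGIN + frame count
//   translation.BeginFrame(ast_node_id, literal_id, height);
//   translation.StoreRegister(some_register);     // tagged value in a register
//   translation.StoreInt32StackSlot(slot_index);  // untagged int32 on the stack
//   Handle<ByteArray> data = buffer.CreateByteArray();
//
// The Deoptimizer later walks the same byte stream with TranslationIterator
// (see DoTranslateCommand above) to rebuild unoptimized frames.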
||||
|
|
||||
|
|
||||
|
// Linked list holding deoptimizing code objects. The deoptimizing code objects
|
||||
|
// are kept as weak handles until they are no longer activated on the stack.
|
||||
|
class DeoptimizingCodeListNode : public Malloced { |
||||
|
public: |
||||
|
explicit DeoptimizingCodeListNode(Code* code); |
||||
|
~DeoptimizingCodeListNode(); |
||||
|
|
||||
|
DeoptimizingCodeListNode* next() const { return next_; } |
||||
|
void set_next(DeoptimizingCodeListNode* next) { next_ = next; } |
||||
|
Handle<Code> code() const { return code_; } |
||||
|
|
||||
|
private: |
||||
|
// Global (weak) handle to the deoptimizing code object.
|
||||
|
Handle<Code> code_; |
||||
|
|
||||
|
// Next pointer for linked list.
|
||||
|
DeoptimizingCodeListNode* next_; |
||||
|
}; |
||||
|
|
||||
|
|
||||
|
} } // namespace v8::internal
|
||||
|
|
||||
|
#endif // V8_DEOPTIMIZER_H_
|
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large