diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 24af827b5effa3..ab885c0935a8e2 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,21 @@ +2012-01-26: Version 3.8.9 + + Flush number string cache on GC (issue 1605). + + Provide access to function inferred name with + v8::Function::GetInferredName in V8 public API. + + Fix building with Clang (issue 1912). + + Reduce the space used by the stack for the profiling thread. + + Fix misleading documentation of v8::Locker (issue 542). + + Introduce readbinary function in d8 to read binary files. + + Performance and stability improvements on all platforms. + + 2012-01-23: Version 3.8.8 Limited number of loop iterations in Heap::ReserveSpace diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi index 22c5fdd16cabec..7d5e8c852c4424 100644 --- a/deps/v8/build/common.gypi +++ b/deps/v8/build/common.gypi @@ -295,7 +295,7 @@ '-O3', ], 'conditions': [ - [ 'gcc_version==44', { + [ 'gcc_version==44 and clang==0', { 'cflags': [ # Avoid crashes with gcc 4.4 in the v8 test suite. '-fno-tree-vrp', diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 294b14d386fff2..08c2fa2b7eb9e9 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -1731,6 +1731,14 @@ class Function : public Object { V8EXPORT void SetName(Handle name); V8EXPORT Handle GetName() const; + /** + * Name inferred from variable or property assignment of this function. + * Used to facilitate debugging and profiling of JavaScript code written + * in an OO style, where many functions are anonymous but are assigned + * to object properties. + */ + V8EXPORT Handle GetInferredName() const; + /** * Returns zero based line number of function body and * kLineOffsetNotFound if no information available. @@ -2717,7 +2725,7 @@ class RetainedObjectInfo; * default isolate is implicitly created and entered. The embedder * can create additional isolates and use them in parallel in multiple * threads. An isolate can be entered by at most one thread at any - * given time. The Locker/Unlocker API can be used to synchronize. + * given time. The Locker/Unlocker API must be used to synchronize. */ class V8EXPORT Isolate { public: @@ -3559,7 +3567,9 @@ class V8EXPORT Context { * accessing handles or holding onto object pointers obtained * from V8 handles while in the particular V8 isolate. It is up * to the user of V8 to ensure (perhaps with locking) that this - * constraint is not violated. + * constraint is not violated. In addition to any other synchronization + * mechanism that may be used, the v8::Locker and v8::Unlocker classes + * must be used to signal thead switches to V8. * * v8::Locker is a scoped lock object. While it's * active (i.e. between its construction and destruction) the current thread is diff --git a/deps/v8/include/v8stdint.h b/deps/v8/include/v8stdint.h index 50b4f29a64ff24..7c12e1f4907993 100644 --- a/deps/v8/include/v8stdint.h +++ b/deps/v8/include/v8stdint.h @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
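The v8.h edits above do two things: they declare the new Function::GetInferredName() accessor and they harden the Locker documentation from "can be used" to "must be used" for multi-threaded embedders. As a reminder of what that contract looks like in practice, here is a minimal sketch of entering V8 from a worker thread against the 3.8-era public API; the function name and the persistent context are assumptions of the example, not part of the patch.

```cpp
// Sketch: serializing access to the (default) isolate from an embedder
// thread, as the revised v8::Locker comments require. Error handling and
// isolate selection are omitted for brevity.
#include <v8.h>

void RunOnWorkerThread(v8::Persistent<v8::Context> context,
                       const char* source) {
  v8::Locker locker;                         // block until this thread owns V8
  v8::HandleScope handle_scope;              // local handles for this thread
  v8::Context::Scope context_scope(context);
  v8::Handle<v8::Script> script =
      v8::Script::Compile(v8::String::New(source));
  if (!script.IsEmpty()) script->Run();
}                                            // ~Locker() releases the isolate
```

A nested v8::Unlocker can temporarily give the isolate up during long-running native work that touches no handles.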
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -30,6 +30,7 @@ #ifndef V8STDINT_H_ #define V8STDINT_H_ +#include #include #if defined(_WIN32) && !defined(__MINGW32__) diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index e60f568a278908..9b16525b4c10e7 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -26,15 +26,16 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "v8.h" - #include "accessors.h" -#include "ast.h" + +#include "contexts.h" #include "deoptimizer.h" #include "execution.h" #include "factory.h" +#include "frames-inl.h" +#include "isolate.h" #include "list-inl.h" -#include "safepoint-table.h" -#include "scopeinfo.h" +#include "property-details.h" namespace v8 { namespace internal { @@ -574,11 +575,12 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction( Handle inlined_function, int inlined_frame_index) { Factory* factory = Isolate::Current()->factory(); - int args_count = inlined_function->shared()->formal_parameter_count(); - ScopedVector args_slots(args_count); - SlotRef::ComputeSlotMappingForArguments(frame, - inlined_frame_index, - &args_slots); + Vector args_slots = + SlotRef::ComputeSlotMappingForArguments( + frame, + inlined_frame_index, + inlined_function->shared()->formal_parameter_count()); + int args_count = args_slots.length(); Handle arguments = factory->NewArgumentsObject(inlined_function, args_count); Handle array = factory->NewFixedArray(args_count); @@ -587,6 +589,7 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction( array->set(i, *value); } arguments->set_elements(*array); + args_slots.Dispose(); // Return the freshly allocated arguments object. return *arguments; diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h index 385536d22ee16a..36b9a9984a31d2 100644 --- a/deps/v8/src/accessors.h +++ b/deps/v8/src/accessors.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -29,6 +29,7 @@ #define V8_ACCESSORS_H_ #include "allocation.h" +#include "v8globals.h" namespace v8 { namespace internal { diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc index 119b087c1963a4..6c7a08cec8008b 100644 --- a/deps/v8/src/allocation.cc +++ b/deps/v8/src/allocation.cc @@ -1,4 +1,4 @@ -// Copyright 2008 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -25,10 +25,11 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "../include/v8stdint.h" -#include "globals.h" -#include "checks.h" #include "allocation.h" + +#include // For free, malloc. +#include // For memcpy. 
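The accessors.cc hunk above changes SlotRef::ComputeSlotMappingForArguments from filling a caller-owned ScopedVector to returning a newly allocated Vector, which is why ConstructArgumentsObjectForInlinedFunction now ends with args_slots.Dispose(). The ownership contract looks roughly like the sketch below; VectorSketch and Elem are illustrative stand-ins for v8::internal::Vector and SlotRef, not code from the patch.

```cpp
// Sketch: returning a heap-backed vector and releasing it at the call site,
// mirroring the new ComputeSlotMappingForArguments contract.
struct Elem { int dummy; };

template <typename T>
class VectorSketch {
 public:
  VectorSketch(T* start, int length) : start_(start), length_(length) {}
  int length() const { return length_; }
  T& operator[](int i) { return start_[i]; }
  void Dispose() { delete[] start_; start_ = 0; length_ = 0; }
 private:
  T* start_;
  int length_;
};

VectorSketch<Elem> ComputeSlots(int count) {
  return VectorSketch<Elem>(new Elem[count], count);  // caller owns the memory
}

void UseSlots() {
  VectorSketch<Elem> slots = ComputeSlots(4);
  // ... consume slots[i] ...
  slots.Dispose();  // explicit release, as at the end of the hunk above
}
```

Returning the vector lets the callee pick the length from the formal parameter count instead of making every caller size a buffer up front.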
+#include "checks.h" #include "utils.h" namespace v8 { diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h index 00c5664ea912e2..69e72bdbad38e3 100644 --- a/deps/v8/src/allocation.h +++ b/deps/v8/src/allocation.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -28,7 +28,6 @@ #ifndef V8_ALLOCATION_H_ #define V8_ALLOCATION_H_ -#include "checks.h" #include "globals.h" namespace v8 { diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 4146bd4c1d8469..ff7ab2d4f2f7d7 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -25,34 +25,36 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "v8.h" - #include "api.h" -#include "arguments.h" +#include // For isnan. +#include // For memcpy, strlen. +#include "../include/v8-debug.h" +#include "../include/v8-profiler.h" +#include "../include/v8-testing.h" #include "bootstrapper.h" #include "compiler.h" +#include "conversions-inl.h" +#include "counters.h" #include "debug.h" #include "deoptimizer.h" #include "execution.h" -#include "flags.h" #include "global-handles.h" #include "heap-profiler.h" #include "messages.h" -#include "natives.h" #include "parser.h" #include "platform.h" #include "profile-generator-inl.h" +#include "property-details.h" +#include "property.h" #include "runtime-profiler.h" #include "scanner-character-streams.h" -#include "serialize.h" #include "snapshot.h" +#include "unicode-inl.h" #include "v8threads.h" #include "version.h" #include "vm-state-inl.h" -#include "../include/v8-profiler.h" -#include "../include/v8-testing.h" #define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr)) @@ -3622,6 +3624,12 @@ Handle Function::GetName() const { } +Handle Function::GetInferredName() const { + i::Handle func = Utils::OpenHandle(this); + return Utils::ToLocal(i::Handle(func->shared()->inferred_name())); +} + + ScriptOrigin Function::GetScriptOrigin() const { i::Handle func = Utils::OpenHandle(this); if (func->shared()->script()->IsScript()) { diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index a825dd79708580..89cf0c864cc811 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
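Function::GetInferredName() above simply exposes the inferred_name() that the parser records on the SharedFunctionInfo for anonymous functions assigned to variables or properties. A hedged example of how a profiler front end or native callback might use it when GetName() comes back empty; the helper below is illustrative, not part of the patch.

```cpp
// Sketch: picking a human-readable label for a JS function received in C++.
// Falls back to the inferred name when the function is anonymous.
#include <v8.h>
#include <string>

std::string LabelForFunction(v8::Handle<v8::Function> fn) {
  v8::String::Utf8Value name(fn->GetName());
  if (name.length() > 0) return std::string(*name, name.length());
  v8::String::Utf8Value inferred(fn->GetInferredName());
  if (inferred.length() > 0) return std::string(*inferred, inferred.length());
  return "<anonymous>";
}
```

For a function created by an assignment such as `o.onload = function () { ... }`, GetName() is empty while the inferred name carries the assigned path.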
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -28,10 +28,14 @@ #ifndef V8_API_H_ #define V8_API_H_ -#include "apiutils.h" -#include "factory.h" +#include "v8.h" #include "../include/v8-testing.h" +#include "apiutils.h" +#include "contexts.h" +#include "factory.h" +#include "isolate.h" +#include "list-inl.h" namespace v8 { diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index 2ec6c7cfa7958b..dd8ffcd77c23fc 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -38,6 +38,7 @@ #define V8_ARM_ASSEMBLER_ARM_INL_H_ #include "arm/assembler-arm.h" + #include "cpu.h" #include "debug.h" diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index e88739e49790cb..11e39df682202e 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -300,11 +300,13 @@ const DwVfpRegister d13 = { 13 }; const DwVfpRegister d14 = { 14 }; const DwVfpRegister d15 = { 15 }; -// Aliases for double registers. -static const DwVfpRegister& kFirstCalleeSavedDoubleReg = d8; -static const DwVfpRegister& kLastCalleeSavedDoubleReg = d15; -static const DwVfpRegister& kDoubleRegZero = d14; -static const DwVfpRegister& kScratchDoubleReg = d15; +// Aliases for double registers. Defined using #define instead of +// "static const DwVfpRegister&" because Clang complains otherwise when a +// compilation unit that includes this header doesn't use the variables. +#define kFirstCalleeSavedDoubleReg d8 +#define kLastCalleeSavedDoubleReg d15 +#define kDoubleRegZero d14 +#define kScratchDoubleReg d15 // Coprocessor register diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 2a650a44a5ad0a..b461b45a57384d 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -1760,6 +1760,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ bind(&invoke); __ Call(r3); + masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset()); // Exit frame and return. LeaveArgumentsAdaptorFrame(masm); __ Jump(lr); diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index 3689a9f6b6b402..76d89541c592c5 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -211,12 +211,13 @@ void Deoptimizer::DoComputeOsrOutputFrame() { ASSERT(Translation::BEGIN == opcode); USE(opcode); int count = iterator.Next(); + iterator.Skip(1); // Drop JS frame count. 
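The assembler-arm.h comment above explains the fix for issue 1912: Clang warns about namespace-scope `static const DwVfpRegister&` aliases in every translation unit that includes the header without using them, and with -Werror that breaks the build. The toy snippet below reproduces the difference outside V8; the names are suffixed to keep them distinct from the real aliases.

```cpp
// Sketch: why the alias definition style matters under Clang's unused-variable
// warnings. A TU that includes this snippet but never reads the alias may get
// a warning for the reference, while the macro form defines nothing to flag.
struct DwVfpRegister { int code_; };
const DwVfpRegister d14 = { 14 };

// Old style: an internal-linkage reference is defined in every including TU.
static const DwVfpRegister& kDoubleRegZeroOld = d14;   // may warn: unused

// New style: a textual alias; no object is defined, hence nothing to warn on.
#define kDoubleRegZeroNew d14
```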
ASSERT(count == 1); USE(count); opcode = static_cast(iterator.Next()); USE(opcode); - ASSERT(Translation::FRAME == opcode); + ASSERT(Translation::JS_FRAME == opcode); unsigned node_id = iterator.Next(); USE(node_id); ASSERT(node_id == ast_id); @@ -252,9 +253,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { output_ = new FrameDescription*[1]; output_[0] = new(output_frame_size) FrameDescription( output_frame_size, function_); -#ifdef DEBUG - output_[0]->SetKind(Code::OPTIMIZED_FUNCTION); -#endif + output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT); // Clear the incoming parameters in the optimized frame to avoid // confusing the garbage collector. @@ -342,15 +341,115 @@ void Deoptimizer::DoComputeOsrOutputFrame() { } +void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator, + int frame_index) { + JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); + unsigned height = iterator->Next(); + unsigned height_in_bytes = height * kPointerSize; + if (FLAG_trace_deopt) { + PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes); + } + + unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize; + unsigned input_frame_size = input_->GetFrameSize(); + unsigned output_frame_size = height_in_bytes + fixed_frame_size; + + // Allocate and store the output frame description. + FrameDescription* output_frame = + new(output_frame_size) FrameDescription(output_frame_size, function); + output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR); + + // Arguments adaptor can not be topmost or bottommost. + ASSERT(frame_index > 0 && frame_index < output_count_ - 1); + ASSERT(output_[frame_index] == NULL); + output_[frame_index] = output_frame; + + // The top address of the frame is computed from the previous + // frame's top and this frame's size. + uint32_t top_address; + top_address = output_[frame_index - 1]->GetTop() - output_frame_size; + output_frame->SetTop(top_address); + + // Compute the incoming parameter translation. + int parameter_count = height; + unsigned output_offset = output_frame_size; + unsigned input_offset = input_frame_size; + for (int i = 0; i < parameter_count; ++i) { + output_offset -= kPointerSize; + DoTranslateCommand(iterator, frame_index, output_offset); + } + input_offset -= (parameter_count * kPointerSize); + + // Read caller's PC from the previous frame. + output_offset -= kPointerSize; + input_offset -= kPointerSize; + intptr_t callers_pc = output_[frame_index - 1]->GetPc(); + output_frame->SetFrameSlot(output_offset, callers_pc); + if (FLAG_trace_deopt) { + PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n", + top_address + output_offset, output_offset, callers_pc); + } + + // Read caller's FP from the previous frame, and set this frame's FP. + output_offset -= kPointerSize; + input_offset -= kPointerSize; + intptr_t value = output_[frame_index - 1]->GetFp(); + output_frame->SetFrameSlot(output_offset, value); + intptr_t fp_value = top_address + output_offset; + output_frame->SetFp(fp_value); + if (FLAG_trace_deopt) { + PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n", + fp_value, output_offset, value); + } + + // A marker value is used in place of the context. 
+ output_offset -= kPointerSize; + input_offset -= kPointerSize; + intptr_t context = reinterpret_cast( + Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); + output_frame->SetFrameSlot(output_offset, context); + if (FLAG_trace_deopt) { + PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n", + top_address + output_offset, output_offset, context); + } + + // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME. + output_offset -= kPointerSize; + input_offset -= kPointerSize; + value = reinterpret_cast(function); + output_frame->SetFrameSlot(output_offset, value); + if (FLAG_trace_deopt) { + PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n", + top_address + output_offset, output_offset, value); + } + + // Number of incoming arguments. + output_offset -= kPointerSize; + input_offset -= kPointerSize; + value = reinterpret_cast(Smi::FromInt(height - 1)); + output_frame->SetFrameSlot(output_offset, value); + if (FLAG_trace_deopt) { + PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n", + top_address + output_offset, output_offset, value, height - 1); + } + + ASSERT(0 == output_offset); + + Builtins* builtins = isolate_->builtins(); + Code* adaptor_trampoline = + builtins->builtin(Builtins::kArgumentsAdaptorTrampoline); + uint32_t pc = reinterpret_cast( + adaptor_trampoline->instruction_start() + + isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value()); + output_frame->SetPc(pc); +} + + // This code is very similar to ia32 code, but relies on register names (fp, sp) // and how the frame is laid out. -void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, - int frame_index) { +void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, + int frame_index) { // Read the ast node id, function, and frame height for this output frame. - Translation::Opcode opcode = - static_cast(iterator->Next()); - USE(opcode); - ASSERT(Translation::FRAME == opcode); int node_id = iterator->Next(); JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); unsigned height = iterator->Next(); @@ -370,9 +469,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, // Allocate and store the output frame description. FrameDescription* output_frame = new(output_frame_size) FrameDescription(output_frame_size, function); -#ifdef DEBUG - output_frame->SetKind(Code::FUNCTION); -#endif + output_frame->SetFrameType(StackFrame::JAVA_SCRIPT); bool is_bottommost = (0 == frame_index); bool is_topmost = (output_count_ - 1 == frame_index); diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index 16b568cbafc9db..96a7d3ce6b6dfb 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -662,6 +662,15 @@ void Decoder::Format(Instruction* instr, const char* format) { } +// The disassembler may end up decoding data inlined in the code. We do not want +// it to crash if the data does not ressemble any known instruction. +#define VERIFY(condition) \ +if(!(condition)) { \ + Unknown(instr); \ + return; \ +} + + // For currently unimplemented decodings the disassembler calls Unknown(instr) // which will just print "unknown" of the instruction bits. 
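DoComputeArgumentsAdaptorFrame above writes the synthesized frame from the highest address downwards: the translated parameters, the caller's return address and frame pointer, a sentinel in the context slot, the function, and finally the argument count, ending exactly at offset 0. The struct below is a purely illustrative restatement of that slot order; the real frame is written slot by slot with SetFrameSlot.

```cpp
// Sketch of the arguments-adaptor frame the deoptimizer materializes,
// listed from higher to lower addresses. kFrameSize covers the five
// fixed slots below the (variable-length) parameter area.
#include <stdint.h>

struct AdaptorFrameSketch {
  // intptr_t parameters[height];  // translated incoming args come first
  intptr_t callers_pc;             // return address into the caller's code
  intptr_t callers_fp;             // caller's saved frame pointer
  intptr_t context_sentinel;       // Smi(StackFrame::ARGUMENTS_ADAPTOR)
  intptr_t function;               // the JSFunction being adapted
  intptr_t argc;                   // Smi(height - 1)
};
```

The frame's PC is then set to the offset inside ArgumentsAdaptorTrampoline recorded by SetArgumentsAdaptorDeoptPCOffset in builtins-arm.cc, so execution resumes right after the adapted call.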
void Decoder::Unknown(Instruction* instr) { @@ -947,13 +956,13 @@ void Decoder::DecodeType2(Instruction* instr) { void Decoder::DecodeType3(Instruction* instr) { switch (instr->PUField()) { case da_x: { - ASSERT(!instr->HasW()); + VERIFY(!instr->HasW()); Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm"); break; } case ia_x: { if (instr->HasW()) { - ASSERT(instr->Bits(5, 4) == 0x1); + VERIFY(instr->Bits(5, 4) == 0x1); if (instr->Bit(22) == 0x1) { Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat"); } else { @@ -1074,8 +1083,8 @@ int Decoder::DecodeType7(Instruction* instr) { // vmsr // Dd = vsqrt(Dm) void Decoder::DecodeTypeVFP(Instruction* instr) { - ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) ); - ASSERT(instr->Bits(11, 9) == 0x5); + VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) ); + VERIFY(instr->Bits(11, 9) == 0x5); if (instr->Bit(4) == 0) { if (instr->Opc1Value() == 0x7) { @@ -1166,7 +1175,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) { void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters( Instruction* instr) { - ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) && + VERIFY((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) && (instr->VAValue() == 0x0)); bool to_arm_register = (instr->VLValue() == 0x1); @@ -1180,8 +1189,8 @@ void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters( void Decoder::DecodeVCMP(Instruction* instr) { - ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7)); - ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) && + VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7)); + VERIFY(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) && (instr->Opc3Value() & 0x1)); // Comparison. @@ -1203,8 +1212,8 @@ void Decoder::DecodeVCMP(Instruction* instr) { void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) { - ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7)); - ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)); + VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7)); + VERIFY((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)); bool double_to_single = (instr->SzValue() == 1); @@ -1217,8 +1226,8 @@ void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) { void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) { - ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7)); - ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) || + VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7)); + VERIFY(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) || (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1))); bool to_integer = (instr->Bit(18) == 1); @@ -1265,7 +1274,7 @@ void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) { // Ddst = MEM(Rbase + 4*offset). // MEM(Rbase + 4*offset) = Dsrc. 
void Decoder::DecodeType6CoprocessorIns(Instruction* instr) { - ASSERT(instr->TypeValue() == 6); + VERIFY(instr->TypeValue() == 6); if (instr->CoprocessorValue() == 0xA) { switch (instr->OpcodeValue()) { @@ -1347,6 +1356,7 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) { } } +#undef VERIFIY bool Decoder::IsConstantPoolAt(byte* instr_ptr) { int instruction_bits = *(reinterpret_cast(instr_ptr)); diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h index 1844149861be4d..a10acd06871cf9 100644 --- a/deps/v8/src/arm/frames-arm.h +++ b/deps/v8/src/arm/frames-arm.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -136,6 +136,9 @@ class ExitFrameConstants : public AllStatic { class StandardFrameConstants : public AllStatic { public: + // Fixed part of the frame consists of return address, caller fp, + // context and function. + static const int kFixedFrameSize = 4 * kPointerSize; static const int kExpressionsOffset = -3 * kPointerSize; static const int kMarkerOffset = -2 * kPointerSize; static const int kContextOffset = -1 * kPointerSize; @@ -161,6 +164,8 @@ class JavaScriptFrameConstants : public AllStatic { class ArgumentsAdaptorFrameConstants : public AllStatic { public: static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset; + static const int kFrameSize = + StandardFrameConstants::kFixedFrameSize + kPointerSize; }; diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 583453ab2ca2f6..dfd4d2e39648ca 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -1036,21 +1036,29 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Load the key (consisting of map and symbol) from the cache and // check for match. - Label try_second_entry, hit_on_first_entry, load_in_object_property; + Label load_in_object_property; + static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; + Label hit_on_nth_entry[kEntriesPerBucket]; ExternalReference cache_keys = ExternalReference::keyed_lookup_cache_keys(isolate); + __ mov(r4, Operand(cache_keys)); __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1)); - // Move r4 to second entry. - __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex)); - __ cmp(r2, r5); - __ b(ne, &try_second_entry); - __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load symbol - __ cmp(r0, r5); - __ b(eq, &hit_on_first_entry); - __ bind(&try_second_entry); - __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); // Move r4 to symbol. + for (int i = 0; i < kEntriesPerBucket - 1; i++) { + Label try_next_entry; + // Load map and move r4 to next entry. + __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex)); + __ cmp(r2, r5); + __ b(ne, &try_next_entry); + __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load symbol + __ cmp(r0, r5); + __ b(eq, &hit_on_nth_entry[i]); + __ bind(&try_next_entry); + } + + // Last entry: Load map and move r4 to symbol. + __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); __ cmp(r2, r5); __ b(ne, &slow); __ ldr(r5, MemOperand(r4)); @@ -1065,22 +1073,21 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { ExternalReference cache_field_offsets = ExternalReference::keyed_lookup_cache_field_offsets(isolate); - // Hit on second entry. 
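The ic-arm.cc change below replaces the hand-unrolled two-entry probe with a loop over KeyedLookupCache::kEntriesPerBucket, so the generated stub tries each (map, symbol) pair in the bucket before giving up. The emitted control flow corresponds roughly to this C++ sketch; the types are placeholders, since the real stub works on raw tagged words.

```cpp
// Sketch of the probe sequence GenerateGeneric now emits for each bucket:
// compare the receiver map and key symbol against every cached pair, and
// report which entry hit so the field-offset table can be indexed with +i.
struct CacheEntrySketch { const void* map; const void* symbol; };

int ProbeBucket(const CacheEntrySketch* bucket, int entries_per_bucket,
                const void* receiver_map, const void* key_symbol) {
  for (int i = 0; i < entries_per_bucket; i++) {
    if (bucket[i].map == receiver_map && bucket[i].symbol == key_symbol) {
      return i;     // hit_on_nth_entry[i]
    }
  }
  return -1;        // no match: jump to the slow path
}
```

On a hit the stub adds i to the hash-derived index before loading from keyed_lookup_cache_field_offsets, which is exactly the `__ add(r3, r3, Operand(i))` in the unrolled loop.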
- __ mov(r4, Operand(cache_field_offsets)); - __ add(r3, r3, Operand(1)); - __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2)); - __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset)); - __ sub(r5, r5, r6, SetCC); - __ b(ge, &property_array_property); - __ jmp(&load_in_object_property); - - // Hit on first entry. - __ bind(&hit_on_first_entry); - __ mov(r4, Operand(cache_field_offsets)); - __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2)); - __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset)); - __ sub(r5, r5, r6, SetCC); - __ b(ge, &property_array_property); + // Hit on nth entry. + for (int i = kEntriesPerBucket - 1; i >= 0; i--) { + __ bind(&hit_on_nth_entry[i]); + __ mov(r4, Operand(cache_field_offsets)); + if (i != 0) { + __ add(r3, r3, Operand(i)); + } + __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2)); + __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset)); + __ sub(r5, r5, r6, SetCC); + __ b(ge, &property_array_property); + if (i != 0) { + __ jmp(&load_in_object_property); + } + } // Load in-object property. __ bind(&load_in_object_property); diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index e063ef113265ff..846680f4f665ad 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -1005,14 +1005,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment( LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator); int ast_id = hydrogen_env->ast_id(); - ASSERT(ast_id != AstNode::kNoNumber); + ASSERT(ast_id != AstNode::kNoNumber || hydrogen_env->is_arguments_adaptor()); int value_count = hydrogen_env->length(); LEnvironment* result = new LEnvironment(hydrogen_env->closure(), + hydrogen_env->is_arguments_adaptor(), ast_id, hydrogen_env->parameter_count(), argument_count_, value_count, outer); + int argument_index = *argument_index_accumulator; for (int i = 0; i < value_count; ++i) { if (hydrogen_env->is_special_index(i)) continue; @@ -1021,13 +1023,17 @@ LEnvironment* LChunkBuilder::CreateEnvironment( if (value->IsArgumentsObject()) { op = NULL; } else if (value->IsPushArgument()) { - op = new LArgument((*argument_index_accumulator)++); + op = new LArgument(argument_index++); } else { op = UseAny(value); } result->AddValue(op, value->representation()); } + if (!hydrogen_env->is_arguments_adaptor()) { + *argument_index_accumulator = argument_index; + } + return result; } @@ -1917,12 +1923,11 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement( LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement( HLoadKeyedSpecializedArrayElement* instr) { ElementsKind elements_kind = instr->elements_kind(); - Representation representation(instr->representation()); ASSERT( - (representation.IsInteger32() && + (instr->representation().IsInteger32() && (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || - (representation.IsDouble() && + (instr->representation().IsDouble() && ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); ASSERT(instr->key()->representation().IsInteger32()); @@ -1982,13 +1987,12 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement( LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement( HStoreKeyedSpecializedArrayElement* instr) { - Representation representation(instr->value()->representation()); ElementsKind elements_kind = instr->elements_kind(); ASSERT( - (representation.IsInteger32() && + 
(instr->value()->representation().IsInteger32() && (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || - (representation.IsDouble() && + (instr->value()->representation().IsDouble() && ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); ASSERT(instr->external_pointer()->representation().IsExternal()); @@ -2244,6 +2248,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { HEnvironment* outer = current_block_->last_environment(); HConstant* undefined = graph()->GetConstantUndefined(); HEnvironment* inner = outer->CopyForInlining(instr->closure(), + instr->arguments_count(), instr->function(), undefined, instr->call_kind()); @@ -2254,7 +2259,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { - HEnvironment* outer = current_block_->last_environment()->outer(); + HEnvironment* outer = current_block_->last_environment()-> + DiscardInlined(false); current_block_->UpdateEnvironment(outer); return NULL; } diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index 080785725fb09c..76c8443e7c30cc 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -479,7 +479,11 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, WriteTranslation(environment->outer(), translation); int closure_id = DefineDeoptimizationLiteral(environment->closure()); - translation->BeginFrame(environment->ast_id(), closure_id, height); + if (environment->is_arguments_adaptor()) { + translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); + } else { + translation->BeginJSFrame(environment->ast_id(), closure_id, height); + } for (int i = 0; i < translation_size; ++i) { LOperand* value = environment->values()->at(i); // spilled_registers_ and spilled_double_registers_ are either @@ -612,10 +616,14 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, // |>------------ translation_size ------------<| int frame_count = 0; + int jsframe_count = 0; for (LEnvironment* e = environment; e != NULL; e = e->outer()) { ++frame_count; + if (!e->is_arguments_adaptor()) { + ++jsframe_count; + } } - Translation translation(&translations_, frame_count); + Translation translation(&translations_, frame_count, jsframe_count); WriteTranslation(environment, &translation); int deoptimization_index = deoptimizations_.length(); int pc_offset = masm()->pc_offset(); diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index 2de7b92f5ef2f0..0bec57752f31ef 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -30,25 +30,42 @@ // The original source code covered by the above license above has been // modified significantly by Google Inc. -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. -#include "v8.h" +#include "assembler.h" -#include "arguments.h" +#include // For cos, log, pow, sin, tan, etc. 
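In the lithium-codegen-arm.cc hunk above, RegisterEnvironmentForDeoptimization now derives two counts from the environment chain: every environment becomes an output frame, but only the non-adaptor ones count as JS frames for the Translation header. The loop below is a standalone restatement of that walk; EnvSketch stands in for LEnvironment and is not part of the patch.

```cpp
// Sketch: total output frames vs. JS frames in a chain of deoptimization
// environments, where arguments-adaptor environments are interleaved.
struct EnvSketch {
  bool is_arguments_adaptor;
  const EnvSketch* outer;
};

void CountFrames(const EnvSketch* env, int* frame_count, int* jsframe_count) {
  *frame_count = 0;
  *jsframe_count = 0;
  for (const EnvSketch* e = env; e != 0; e = e->outer) {
    ++*frame_count;                                   // every frame is emitted
    if (!e->is_arguments_adaptor) ++*jsframe_count;   // adaptors are not JS frames
  }
}
```

Both counts are passed to the Translation constructor so the deoptimizer can size output_ and, separately, map the debugger's JS frame indices back onto it (see ConvertJSFrameIndexToFrameIndex later in this patch).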
+#include "api.h" +#include "builtins.h" +#include "counters.h" +#include "cpu.h" +#include "debug.h" #include "deoptimizer.h" #include "execution.h" -#include "ic-inl.h" -#include "incremental-marking.h" -#include "factory.h" +#include "ic.h" +#include "isolate.h" +#include "jsregexp.h" +#include "platform.h" +#include "regexp-macro-assembler.h" +#include "regexp-stack.h" #include "runtime.h" -#include "runtime-profiler.h" #include "serialize.h" +#include "store-buffer-inl.h" #include "stub-cache.h" -#include "regexp-stack.h" -#include "ast.h" -#include "regexp-macro-assembler.h" -#include "platform.h" -#include "store-buffer.h" +#include "token.h" + +#if V8_TARGET_ARCH_IA32 +#include "ia32/assembler-ia32-inl.h" +#elif V8_TARGET_ARCH_X64 +#include "x64/assembler-x64-inl.h" +#elif V8_TARGET_ARCH_ARM +#include "arm/assembler-arm-inl.h" +#elif V8_TARGET_ARCH_MIPS +#include "mips/assembler-mips-inl.h" +#else +#error "Unknown architecture." +#endif + // Include native regexp-macro-assembler. #ifndef V8_INTERPRETED_REGEXP #if V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index 540c15a4568efd..e7c92b451c09df 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -30,19 +30,27 @@ // The original source code covered by the above license above has been // modified significantly by Google Inc. -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. #ifndef V8_ASSEMBLER_H_ #define V8_ASSEMBLER_H_ +#include "v8.h" + #include "allocation.h" +#include "builtins.h" #include "gdb-jit.h" +#include "isolate.h" #include "runtime.h" #include "token.h" namespace v8 { + +class ApiFunction; + namespace internal { +struct StatsCounter; const unsigned kNoASTId = -1; // ----------------------------------------------------------------------------- // Platform independent assembler base class. diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index 580a4850781758..811193b49bde08 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -25,10 +25,15 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "v8.h" - #include "ast.h" + +#include // For isfinite. +#include "builtins.h" +#include "conversions.h" +#include "hashmap.h" #include "parser.h" +#include "property-details.h" +#include "property.h" #include "scopes.h" #include "string-stream.h" #include "type-info.h" diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index 9b90d816d93ab3..34fadab62d8ec5 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -28,14 +28,19 @@ #ifndef V8_AST_H_ #define V8_AST_H_ -#include "allocation.h" -#include "execution.h" +#include "v8.h" + +#include "assembler.h" #include "factory.h" +#include "isolate.h" #include "jsregexp.h" +#include "list-inl.h" #include "runtime.h" #include "small-pointer-list.h" +#include "smart-array-pointer.h" #include "token.h" #include "variables.h" +#include "zone-inl.h" namespace v8 { namespace internal { @@ -98,12 +103,28 @@ namespace internal { EXPRESSION_NODE_LIST(V) // Forward declarations -class BitVector; -class DefinitionInfo; +class AstVisitor; +class BreakableStatement; +class Expression; +class IterationStatement; class MaterializedLiteral; +class Statement; class TargetCollector; class TypeFeedbackOracle; +class RegExpAlternative; +class RegExpAssertion; +class RegExpAtom; +class RegExpBackReference; +class RegExpCapture; +class RegExpCharacterClass; +class RegExpCompiler; +class RegExpDisjunction; +class RegExpEmpty; +class RegExpLookahead; +class RegExpQuantifier; +class RegExpText; + #define DEF_FORWARD_DECLARATION(type) class type; AST_NODE_LIST(DEF_FORWARD_DECLARATION) #undef DEF_FORWARD_DECLARATION @@ -115,11 +136,6 @@ typedef ZoneList > ZoneStringList; typedef ZoneList > ZoneObjectList; -#define DECLARE_NODE_TYPE(type) \ - virtual void Accept(AstVisitor* v); \ - virtual AstNode::Type node_type() const { return AstNode::k##type; } \ - - class AstNode: public ZoneObject { public: #define DECLARE_TYPE_ENUM(type) k##type, @@ -190,6 +206,11 @@ class AstNode: public ZoneObject { }; +#define DECLARE_NODE_TYPE(type) \ + virtual void Accept(AstVisitor* v); \ + virtual AstNode::Type node_type() const { return AstNode::k##type; } \ + + class Statement: public AstNode { public: Statement() : statement_pos_(RelocInfo::kNoPosition) {} diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc index 2bd62ad39042e9..953952a91db092 100644 --- a/deps/v8/src/cpu-profiler.cc +++ b/deps/v8/src/cpu-profiler.cc @@ -39,13 +39,14 @@ namespace v8 { namespace internal { -static const int kEventsBufferSize = 256*KB; -static const int kTickSamplesBufferChunkSize = 64*KB; +static const int kEventsBufferSize = 256 * KB; +static const int kTickSamplesBufferChunkSize = 64 * KB; static const int kTickSamplesBufferChunksCount = 16; +static const int kProfilerStackSize = 32 * KB; ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator) - : Thread("v8:ProfEvntProc"), + : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)), generator_(generator), running_(true), ticks_buffer_(sizeof(TickSampleEventRecord), diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc index bfb99440272688..e555c154847c81 100644 --- a/deps/v8/src/d8.cc +++ b/deps/v8/src/d8.cc @@ -120,6 +120,9 @@ ShellOptions Shell::options; const char* Shell::kPrompt = "d8> "; +const int MB = 1024 * 1024; + + #ifndef V8_SHARED bool CounterMap::Match(void* key1, void* key2) { const char* name1 = reinterpret_cast(key1); @@ -803,6 +806,8 @@ Handle Shell::CreateGlobalTemplate() { global_template->Set(String::New("print"), FunctionTemplate::New(Print)); global_template->Set(String::New("write"), FunctionTemplate::New(Write)); global_template->Set(String::New("read"), FunctionTemplate::New(Read)); + global_template->Set(String::New("readbinary"), + FunctionTemplate::New(ReadBinary)); global_template->Set(String::New("readline"), 
FunctionTemplate::New(ReadLine)); global_template->Set(String::New("load"), FunctionTemplate::New(Load)); @@ -1021,6 +1026,23 @@ static char* ReadChars(const char* name, int* size_out) { } +Handle Shell::ReadBinary(const Arguments& args) { + String::Utf8Value filename(args[0]); + int size; + if (*filename == NULL) { + return ThrowException(String::New("Error loading file")); + } + char* chars = ReadChars(*filename, &size); + if (chars == NULL) { + return ThrowException(String::New("Error reading file")); + } + // We skip checking the string for UTF8 characters and use it raw as + // backing store for the external string with 8-bit characters. + BinaryResource* resource = new BinaryResource(chars, size); + return String::NewExternal(resource); +} + + #ifndef V8_SHARED static char* ReadToken(char* data, char token) { char* next = i::OS::StrChr(data, token); @@ -1191,14 +1213,11 @@ Handle SourceGroup::ReadFile(const char* name) { #ifndef V8_SHARED i::Thread::Options SourceGroup::GetThreadOptions() { - i::Thread::Options options; - options.name = "IsolateThread"; // On some systems (OSX 10.6) the stack size default is 0.5Mb or less // which is not enough to parse the big literal expressions used in tests. // The stack size should be at least StackGuard::kLimitSize + some - // OS-specific padding for thread startup code. - options.stack_size = 2 << 20; // 2 Mb seems to be enough - return options; + // OS-specific padding for thread startup code. 2Mbytes seems to be enough. + return i::Thread::Options("IsolateThread", 2 * MB); } diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h index 60a8c1bf760b72..c872f90958f48d 100644 --- a/deps/v8/src/d8.h +++ b/deps/v8/src/d8.h @@ -195,6 +195,27 @@ class SourceGroup { }; +class BinaryResource : public v8::String::ExternalAsciiStringResource { + public: + BinaryResource(const char* string, int length) + : data_(string), + length_(length) { } + + ~BinaryResource() { + delete[] data_; + data_ = NULL; + length_ = 0; + } + + virtual const char* data() const { return data_; } + virtual size_t length() const { return length_; } + + private: + const char* data_; + size_t length_; +}; + + class ShellOptions { public: ShellOptions() : @@ -286,6 +307,7 @@ class Shell : public i::AllStatic { static Handle EnableProfiler(const Arguments& args); static Handle DisableProfiler(const Arguments& args); static Handle Read(const Arguments& args); + static Handle ReadBinary(const Arguments& args); static Handle ReadFromStdin(); static Handle ReadLine(const Arguments& args) { return ReadFromStdin(); diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index ffba7821cac966..01c4dba3cfeba4 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -1758,6 +1758,135 @@ static bool CompileFullCodeForDebugging(Handle shared, } +static void CollectActiveFunctionsFromThread( + Isolate* isolate, + ThreadLocalTop* top, + List >* active_functions, + Object* active_code_marker) { + // Find all non-optimized code functions with activation frames + // on the stack. This includes functions which have optimized + // activations (including inlined functions) on the stack as the + // non-optimized code is needed for the lazy deoptimization. 
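The cpu-profiler.cc and d8.cc changes above switch to the Thread::Options(name, stack_size) constructor so each thread states its stack requirement explicitly: 32 KB is plenty for the sampling thread, while d8's isolate threads keep 2 MB because parsing large literals recurses deeply. A sketch of the same pattern for some other worker thread follows; the subclass is illustrative and assumes V8's in-tree internal platform.h, not the public API.

```cpp
// Sketch: declaring an explicit stack size for a v8::internal::Thread
// subclass, mirroring the ProfilerEventsProcessor change above.
#include "platform.h"  // v8::internal::Thread and Thread::Options (in-tree)

using v8::internal::Thread;

static const int kWorkerStackSize = 64 * 1024;  // illustrative choice

class WorkerThread : public Thread {
 public:
  WorkerThread() : Thread(Thread::Options("v8:Worker", kWorkerStackSize)) {}
  virtual void Run() {
    // ... worker body ...
  }
};
```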
+ for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) { + JavaScriptFrame* frame = it.frame(); + if (frame->is_optimized()) { + List functions(Compiler::kMaxInliningLevels + 1); + frame->GetFunctions(&functions); + for (int i = 0; i < functions.length(); i++) { + JSFunction* function = functions[i]; + active_functions->Add(Handle(function)); + function->shared()->code()->set_gc_metadata(active_code_marker); + } + } else if (frame->function()->IsJSFunction()) { + JSFunction* function = JSFunction::cast(frame->function()); + ASSERT(frame->LookupCode()->kind() == Code::FUNCTION); + active_functions->Add(Handle(function)); + function->shared()->code()->set_gc_metadata(active_code_marker); + } + } +} + + +static void RedirectActivationsToRecompiledCodeOnThread( + Isolate* isolate, + ThreadLocalTop* top) { + for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) { + JavaScriptFrame* frame = it.frame(); + + if (frame->is_optimized() || !frame->function()->IsJSFunction()) continue; + + JSFunction* function = JSFunction::cast(frame->function()); + + ASSERT(frame->LookupCode()->kind() == Code::FUNCTION); + + Handle frame_code(frame->LookupCode()); + if (frame_code->has_debug_break_slots()) continue; + + Handle new_code(function->shared()->code()); + if (new_code->kind() != Code::FUNCTION || + !new_code->has_debug_break_slots()) { + continue; + } + + intptr_t delta = frame->pc() - frame_code->instruction_start(); + int debug_break_slot_count = 0; + int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT); + for (RelocIterator it(*new_code, mask); !it.done(); it.next()) { + // Check if the pc in the new code with debug break + // slots is before this slot. + RelocInfo* info = it.rinfo(); + int debug_break_slot_bytes = + debug_break_slot_count * Assembler::kDebugBreakSlotLength; + intptr_t new_delta = + info->pc() - + new_code->instruction_start() - + debug_break_slot_bytes; + if (new_delta > delta) { + break; + } + + // Passed a debug break slot in the full code with debug + // break slots. + debug_break_slot_count++; + } + int debug_break_slot_bytes = + debug_break_slot_count * Assembler::kDebugBreakSlotLength; + if (FLAG_trace_deopt) { + PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) " + "with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) " + "for debugging, " + "changing pc from %08" V8PRIxPTR " to %08" V8PRIxPTR "\n", + reinterpret_cast( + frame_code->instruction_start()), + reinterpret_cast( + frame_code->instruction_start()) + + frame_code->instruction_size(), + frame_code->instruction_size(), + reinterpret_cast(new_code->instruction_start()), + reinterpret_cast(new_code->instruction_start()) + + new_code->instruction_size(), + new_code->instruction_size(), + reinterpret_cast(frame->pc()), + reinterpret_cast(new_code->instruction_start()) + + delta + debug_break_slot_bytes); + } + + // Patch the return address to return into the code with + // debug break slots. 
+ frame->set_pc( + new_code->instruction_start() + delta + debug_break_slot_bytes); + } +} + + +class ActiveFunctionsCollector : public ThreadVisitor { + public: + explicit ActiveFunctionsCollector(List >* active_functions, + Object* active_code_marker) + : active_functions_(active_functions), + active_code_marker_(active_code_marker) { } + + void VisitThread(Isolate* isolate, ThreadLocalTop* top) { + CollectActiveFunctionsFromThread(isolate, + top, + active_functions_, + active_code_marker_); + } + + private: + List >* active_functions_; + Object* active_code_marker_; +}; + + +class ActiveFunctionsRedirector : public ThreadVisitor { + public: + void VisitThread(Isolate* isolate, ThreadLocalTop* top) { + RedirectActivationsToRecompiledCodeOnThread(isolate, top); + } +}; + + void Debug::PrepareForBreakPoints() { // If preparing for the first break point make sure to deoptimize all // functions as debugging does not work with optimized code. @@ -1776,71 +1905,59 @@ void Debug::PrepareForBreakPoints() { // debug break slots. isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask); - // Ensure no GC in this scope as we are comparing raw pointer - // values and performing a heap iteration. + // Ensure no GC in this scope as we are going to use gc_metadata + // field in the Code object to mark active functions. AssertNoAllocation no_allocation; - // Find all non-optimized code functions with activation frames - // on the stack. This includes functions which have optimized - // activations (including inlined functions) on the stack as the - // non-optimized code is needed for the lazy deoptimization. - for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) { - JavaScriptFrame* frame = it.frame(); - if (frame->is_optimized()) { - List functions(Compiler::kMaxInliningLevels + 1); - frame->GetFunctions(&functions); - for (int i = 0; i < functions.length(); i++) { - if (!functions[i]->shared()->code()->has_debug_break_slots()) { - active_functions.Add(Handle(functions[i])); - } - } - } else if (frame->function()->IsJSFunction()) { - JSFunction* function = JSFunction::cast(frame->function()); - ASSERT(frame->LookupCode()->kind() == Code::FUNCTION); - if (!frame->LookupCode()->has_debug_break_slots() || - !function->shared()->code()->has_debug_break_slots()) { - active_functions.Add(Handle(function)); - } - } - } + Object* active_code_marker = isolate_->heap()->the_hole_value(); - // Sort the functions on the object pointer value to prepare for - // the binary search below. - active_functions.Sort(HandleObjectPointerCompare); + CollectActiveFunctionsFromThread(isolate_, + isolate_->thread_local_top(), + &active_functions, + active_code_marker); + ActiveFunctionsCollector active_functions_collector(&active_functions, + active_code_marker); + isolate_->thread_manager()->IterateArchivedThreads( + &active_functions_collector); - // Scan the heap for all non-optimized functions which has no - // debug break slots. + // Scan the heap for all non-optimized functions which have no + // debug break slots and are not active or inlined into an active + // function and mark them for lazy compilation. 
HeapIterator iterator; HeapObject* obj = NULL; while (((obj = iterator.next()) != NULL)) { if (obj->IsJSFunction()) { JSFunction* function = JSFunction::cast(obj); - if (function->shared()->allows_lazy_compilation() && - function->shared()->script()->IsScript() && + SharedFunctionInfo* shared = function->shared(); + if (shared->allows_lazy_compilation() && + shared->script()->IsScript() && function->code()->kind() == Code::FUNCTION && - !function->code()->has_debug_break_slots()) { - bool has_activation = - SortedListBSearch >( - active_functions, - Handle(function), - HandleObjectPointerCompare) != -1; - if (!has_activation) { - function->set_code(*lazy_compile); - function->shared()->set_code(*lazy_compile); - } + !function->code()->has_debug_break_slots() && + shared->code()->gc_metadata() != active_code_marker) { + function->set_code(*lazy_compile); + function->shared()->set_code(*lazy_compile); } } } - } - // Now the non-GC scope is left, and the sorting of the functions - // in active_function is not ensured any more. The code below does - // not rely on it. + // Clear gc_metadata field. + for (int i = 0; i < active_functions.length(); i++) { + Handle function = active_functions[i]; + function->shared()->code()->set_gc_metadata(Smi::FromInt(0)); + } + } // Now recompile all functions with activation frames and and // patch the return address to run in the new compiled code. for (int i = 0; i < active_functions.length(); i++) { Handle function = active_functions[i]; + + if (function->code()->kind() == Code::FUNCTION && + function->code()->has_debug_break_slots()) { + // Nothing to do. Function code already had debug break slots. + continue; + } + Handle shared(function->shared()); // If recompilation is not possible just skip it. if (shared->is_toplevel() || @@ -1851,9 +1968,6 @@ void Debug::PrepareForBreakPoints() { // Make sure that the shared full code is compiled with debug // break slots. - if (function->code() == *lazy_compile) { - function->set_code(shared->code()); - } if (!shared->code()->has_debug_break_slots()) { // Try to compile the full code with debug break slots. If it // fails just keep the current code. @@ -1872,70 +1986,17 @@ void Debug::PrepareForBreakPoints() { continue; } } - Handle new_code(shared->code()); - - // Find the function and patch the return address. - for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) { - JavaScriptFrame* frame = it.frame(); - // If the current frame is for this function in its - // non-optimized form rewrite the return address to continue - // in the newly compiled full code with debug break slots. - if (!frame->is_optimized() && - frame->function()->IsJSFunction() && - frame->function() == *function) { - ASSERT(frame->LookupCode()->kind() == Code::FUNCTION); - Handle frame_code(frame->LookupCode()); - if (frame_code->has_debug_break_slots()) continue; - intptr_t delta = frame->pc() - frame_code->instruction_start(); - int debug_break_slot_count = 0; - int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT); - for (RelocIterator it(*new_code, mask); !it.done(); it.next()) { - // Check if the pc in the new code with debug break - // slots is before this slot. - RelocInfo* info = it.rinfo(); - int debug_break_slot_bytes = - debug_break_slot_count * Assembler::kDebugBreakSlotLength; - intptr_t new_delta = - info->pc() - - new_code->instruction_start() - - debug_break_slot_bytes; - if (new_delta > delta) { - break; - } - - // Passed a debug break slot in the full code with debug - // break slots. 
- debug_break_slot_count++; - } - int debug_break_slot_bytes = - debug_break_slot_count * Assembler::kDebugBreakSlotLength; - if (FLAG_trace_deopt) { - PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) " - "with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) " - "for debugging, " - "changing pc from %08" V8PRIxPTR " to %08" V8PRIxPTR "\n", - reinterpret_cast( - frame_code->instruction_start()), - reinterpret_cast( - frame_code->instruction_start()) + - frame_code->instruction_size(), - frame_code->instruction_size(), - reinterpret_cast(new_code->instruction_start()), - reinterpret_cast(new_code->instruction_start()) + - new_code->instruction_size(), - new_code->instruction_size(), - reinterpret_cast(frame->pc()), - reinterpret_cast(new_code->instruction_start()) + - delta + debug_break_slot_bytes); - } - // Patch the return address to return into the code with - // debug break slots. - frame->set_pc( - new_code->instruction_start() + delta + debug_break_slot_bytes); - } - } + // Keep function code in sync with shared function info. + function->set_code(shared->code()); } + + RedirectActivationsToRecompiledCodeOnThread(isolate_, + isolate_->thread_local_top()); + + ActiveFunctionsRedirector active_functions_redirector; + isolate_->thread_manager()->IterateArchivedThreads( + &active_functions_redirector); } } diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc index aab69c3422abb5..56ff4549da25b5 100644 --- a/deps/v8/src/deoptimizer.cc +++ b/deps/v8/src/deoptimizer.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -104,10 +104,27 @@ Deoptimizer* Deoptimizer::Grab(Isolate* isolate) { return result; } + +int Deoptimizer::ConvertJSFrameIndexToFrameIndex(int jsframe_index) { + if (jsframe_index == 0) return 0; + + int frame_index = 0; + while (jsframe_index >= 0) { + FrameDescription* frame = output_[frame_index]; + if (frame->GetFrameType() == StackFrame::JAVA_SCRIPT) { + jsframe_index--; + } + frame_index++; + } + + return frame_index - 1; +} + + #ifdef ENABLE_DEBUGGER_SUPPORT DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame( JavaScriptFrame* frame, - int frame_index, + int jsframe_index, Isolate* isolate) { ASSERT(isolate == Isolate::Current()); ASSERT(frame->is_optimized()); @@ -143,22 +160,40 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame( // Create the GC safe output frame information and register it for GC // handling. - ASSERT_LT(frame_index, deoptimizer->output_count()); + ASSERT_LT(jsframe_index, deoptimizer->jsframe_count()); + + // Convert JS frame index into frame index. + int frame_index = deoptimizer->ConvertJSFrameIndexToFrameIndex(jsframe_index); + + bool has_arguments_adaptor = + frame_index > 0 && + deoptimizer->output_[frame_index - 1]->GetFrameType() == + StackFrame::ARGUMENTS_ADAPTOR; + DeoptimizedFrameInfo* info = - new DeoptimizedFrameInfo(deoptimizer, frame_index); + new DeoptimizedFrameInfo(deoptimizer, frame_index, has_arguments_adaptor); isolate->deoptimizer_data()->deoptimized_frame_info_ = info; // Get the "simulated" top and size for the requested frame. - Address top = - reinterpret_cast
<Address>(deoptimizer->output_[frame_index]->GetTop()); - uint32_t size = deoptimizer->output_[frame_index]->GetFrameSize(); + FrameDescription* parameters_frame = + deoptimizer->output_[ + has_arguments_adaptor ? (frame_index - 1) : frame_index]; + + uint32_t parameters_size = (info->parameters_count() + 1) * kPointerSize; + Address parameters_top = reinterpret_cast<Address>
( + parameters_frame->GetTop() + (parameters_frame->GetFrameSize() - + parameters_size)); + + uint32_t expressions_size = info->expression_count() * kPointerSize; + Address expressions_top = reinterpret_cast<Address>
( + deoptimizer->output_[frame_index]->GetTop()); // Done with the GC-unsafe frame descriptions. This re-enables allocation. deoptimizer->DeleteFrameDescriptions(); // Allocate a heap number for the doubles belonging to this frame. deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame( - top, size, info); + parameters_top, parameters_size, expressions_top, expressions_size, info); // Finished using the deoptimizer instance. delete deoptimizer; @@ -313,6 +348,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, fp_to_sp_delta_(fp_to_sp_delta), input_(NULL), output_count_(0), + jsframe_count_(0), output_(NULL), frame_alignment_marker_(isolate->heap()->frame_alignment_marker()), has_alignment_padding_(0), @@ -377,9 +413,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, ASSERT(HEAP->allow_allocation(false)); unsigned size = ComputeInputFrameSize(); input_ = new(size) FrameDescription(size, function); -#ifdef DEBUG - input_->SetKind(Code::OPTIMIZED_FUNCTION); -#endif + input_->SetFrameType(StackFrame::JAVA_SCRIPT); } @@ -515,6 +549,7 @@ void Deoptimizer::DoComputeOutputFrames() { // Read the number of output frames and allocate an array for their // descriptions. int count = iterator.Next(); + iterator.Next(); // Drop JS frames count. ASSERT(output_ == NULL); output_ = new FrameDescription*[count]; for (int i = 0; i < count; ++i) { @@ -524,7 +559,21 @@ void Deoptimizer::DoComputeOutputFrames() { // Translate each output frame. for (int i = 0; i < count; ++i) { - DoComputeFrame(&iterator, i); + // Read the ast node id, function, and frame height for this output frame. + Translation::Opcode opcode = + static_cast(iterator.Next()); + switch (opcode) { + case Translation::JS_FRAME: + DoComputeJSFrame(&iterator, i); + jsframe_count_++; + break; + case Translation::ARGUMENTS_ADAPTOR_FRAME: + DoComputeArgumentsAdaptorFrame(&iterator, i); + break; + default: + UNREACHABLE(); + break; + } } // Print some helpful diagnostic information. @@ -565,39 +614,52 @@ void Deoptimizer::MaterializeHeapNumbers() { #ifdef ENABLE_DEBUGGER_SUPPORT void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame( - Address top, uint32_t size, DeoptimizedFrameInfo* info) { + Address parameters_top, + uint32_t parameters_size, + Address expressions_top, + uint32_t expressions_size, + DeoptimizedFrameInfo* info) { ASSERT_EQ(DEBUGGER, bailout_type_); + Address parameters_bottom = parameters_top + parameters_size; + Address expressions_bottom = expressions_top + expressions_size; for (int i = 0; i < deferred_heap_numbers_.length(); i++) { HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i]; // Check of the heap number to materialize actually belong to the frame // being extracted. Address slot = d.slot_address(); - if (top <= slot && slot < top + size) { + if (parameters_top <= slot && slot < parameters_bottom) { Handle num = isolate_->factory()->NewNumber(d.value()); - // Calculate the index with the botton of the expression stack - // at index 0, and the fixed part (including incoming arguments) - // at negative indexes. 
- int index = static_cast( - info->expression_count_ - (slot - top) / kPointerSize - 1); + + int index = (info->parameters_count() - 1) - + static_cast(slot - parameters_top) / kPointerSize; + if (FLAG_trace_deopt) { PrintF("Materializing a new heap number %p [%e] in slot %p" - "for stack index %d\n", + "for parameter slot #%d\n", reinterpret_cast(*num), d.value(), d.slot_address(), index); } - if (index >=0) { - info->SetExpression(index, *num); - } else { - // Calculate parameter index subtracting one for the receiver. - int parameter_index = - index + - static_cast(size) / kPointerSize - - info->expression_count_ - 1; - info->SetParameter(parameter_index, *num); + + info->SetParameter(index, *num); + } else if (expressions_top <= slot && slot < expressions_bottom) { + Handle num = isolate_->factory()->NewNumber(d.value()); + + int index = info->expression_count() - 1 - + static_cast(slot - expressions_top) / kPointerSize; + + if (FLAG_trace_deopt) { + PrintF("Materializing a new heap number %p [%e] in slot %p" + "for expression slot #%d\n", + reinterpret_cast(*num), + d.value(), + d.slot_address(), + index); } + + info->SetExpression(index, *num); } } } @@ -622,7 +684,8 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, switch (opcode) { case Translation::BEGIN: - case Translation::FRAME: + case Translation::JS_FRAME: + case Translation::ARGUMENTS_ADAPTOR_FRAME: case Translation::DUPLICATE: UNREACHABLE(); return; @@ -691,7 +754,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, case Translation::STACK_SLOT: { int input_slot_index = iterator->Next(); unsigned input_offset = - input_->GetOffsetFromSlotIndex(this, input_slot_index); + input_->GetOffsetFromSlotIndex(input_slot_index); intptr_t input_value = input_->GetFrameSlot(input_offset); if (FLAG_trace_deopt) { PrintF(" 0x%08" V8PRIxPTR ": ", @@ -710,7 +773,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, case Translation::INT32_STACK_SLOT: { int input_slot_index = iterator->Next(); unsigned input_offset = - input_->GetOffsetFromSlotIndex(this, input_slot_index); + input_->GetOffsetFromSlotIndex(input_slot_index); intptr_t value = input_->GetFrameSlot(input_offset); bool is_smi = Smi::IsValid(value); if (FLAG_trace_deopt) { @@ -739,7 +802,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, case Translation::DOUBLE_STACK_SLOT: { int input_slot_index = iterator->Next(); unsigned input_offset = - input_->GetOffsetFromSlotIndex(this, input_slot_index); + input_->GetOffsetFromSlotIndex(input_slot_index); double value = input_->GetDoubleFrameSlot(input_offset); if (FLAG_trace_deopt) { PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n", @@ -808,7 +871,8 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator, switch (opcode) { case Translation::BEGIN: - case Translation::FRAME: + case Translation::JS_FRAME: + case Translation::ARGUMENTS_ADAPTOR_FRAME: case Translation::DUPLICATE: UNREACHABLE(); // Malformed input. 
return false; @@ -871,7 +935,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator, case Translation::STACK_SLOT: { int output_index = iterator->Next(); unsigned output_offset = - output->GetOffsetFromSlotIndex(this, output_index); + output->GetOffsetFromSlotIndex(output_index); if (FLAG_trace_osr) { PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ", output_offset, @@ -890,7 +954,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator, int output_index = iterator->Next(); unsigned output_offset = - output->GetOffsetFromSlotIndex(this, output_index); + output->GetOffsetFromSlotIndex(output_index); int int32_value = input_object->IsSmi() ? Smi::cast(input_object)->value() : DoubleToInt32(input_object->Number()); @@ -922,7 +986,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator, int output_index = iterator->Next(); unsigned output_offset = - output->GetOffsetFromSlotIndex(this, output_index); + output->GetOffsetFromSlotIndex(output_index); double double_value = input_object->Number(); uint64_t int_value = BitCast(double_value); int32_t lower = static_cast(int_value); @@ -1033,8 +1097,8 @@ unsigned Deoptimizer::ComputeInputFrameSize() const { unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const { // The fixed part of the frame consists of the return address, frame // pointer, function, context, and all the incoming arguments. - static const unsigned kFixedSlotSize = 4 * kPointerSize; - return ComputeIncomingArgumentSize(function) + kFixedSlotSize; + return ComputeIncomingArgumentSize(function) + + StandardFrameConstants::kFixedFrameSize; } @@ -1154,49 +1218,62 @@ FrameDescription::FrameDescription(uint32_t frame_size, } -unsigned FrameDescription::GetOffsetFromSlotIndex(Deoptimizer* deoptimizer, - int slot_index) { +int FrameDescription::ComputeFixedSize() { + return StandardFrameConstants::kFixedFrameSize + + (ComputeParametersCount() + 1) * kPointerSize; +} + + +unsigned FrameDescription::GetOffsetFromSlotIndex(int slot_index) { if (slot_index >= 0) { // Local or spill slots. Skip the fixed part of the frame // including all arguments. - unsigned base = - GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction()); + unsigned base = GetFrameSize() - ComputeFixedSize(); return base - ((slot_index + 1) * kPointerSize); } else { // Incoming parameter. - unsigned base = GetFrameSize() - - deoptimizer->ComputeIncomingArgumentSize(GetFunction()); + int arg_size = (ComputeParametersCount() + 1) * kPointerSize; + unsigned base = GetFrameSize() - arg_size; return base - ((slot_index + 1) * kPointerSize); } } int FrameDescription::ComputeParametersCount() { - return function_->shared()->formal_parameter_count(); + switch (type_) { + case StackFrame::JAVA_SCRIPT: + return function_->shared()->formal_parameter_count(); + case StackFrame::ARGUMENTS_ADAPTOR: { + // Last slot contains number of incomming arguments as a smi. + // Can't use GetExpression(0) because it would cause infinite recursion. + return reinterpret_cast(*GetFrameSlotPointer(0))->value(); + } + default: + UNREACHABLE(); + return 0; + } } -Object* FrameDescription::GetParameter(Deoptimizer* deoptimizer, int index) { - ASSERT_EQ(Code::FUNCTION, kind_); +Object* FrameDescription::GetParameter(int index) { ASSERT(index >= 0); ASSERT(index < ComputeParametersCount()); // The slot indexes for incoming arguments are negative. 
- unsigned offset = GetOffsetFromSlotIndex(deoptimizer, - index - ComputeParametersCount()); + unsigned offset = GetOffsetFromSlotIndex(index - ComputeParametersCount()); return reinterpret_cast(*GetFrameSlotPointer(offset)); } -unsigned FrameDescription::GetExpressionCount(Deoptimizer* deoptimizer) { - ASSERT_EQ(Code::FUNCTION, kind_); - unsigned size = GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction()); +unsigned FrameDescription::GetExpressionCount() { + ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_); + unsigned size = GetFrameSize() - ComputeFixedSize(); return size / kPointerSize; } -Object* FrameDescription::GetExpression(Deoptimizer* deoptimizer, int index) { - ASSERT_EQ(Code::FUNCTION, kind_); - unsigned offset = GetOffsetFromSlotIndex(deoptimizer, index); +Object* FrameDescription::GetExpression(int index) { + ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_); + unsigned offset = GetOffsetFromSlotIndex(index); return reinterpret_cast(*GetFrameSlotPointer(offset)); } @@ -1242,8 +1319,15 @@ Handle TranslationBuffer::CreateByteArray() { } -void Translation::BeginFrame(int node_id, int literal_id, unsigned height) { - buffer_->Add(FRAME); +void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) { + buffer_->Add(ARGUMENTS_ADAPTOR_FRAME); + buffer_->Add(literal_id); + buffer_->Add(height); +} + + +void Translation::BeginJSFrame(int node_id, int literal_id, unsigned height) { + buffer_->Add(JS_FRAME); buffer_->Add(node_id); buffer_->Add(literal_id); buffer_->Add(height); @@ -1307,7 +1391,6 @@ int Translation::NumberOfOperandsFor(Opcode opcode) { case ARGUMENTS_OBJECT: case DUPLICATE: return 0; - case BEGIN: case REGISTER: case INT32_REGISTER: case DOUBLE_REGISTER: @@ -1316,7 +1399,10 @@ int Translation::NumberOfOperandsFor(Opcode opcode) { case DOUBLE_STACK_SLOT: case LITERAL: return 1; - case FRAME: + case BEGIN: + case ARGUMENTS_ADAPTOR_FRAME: + return 2; + case JS_FRAME: return 3; } UNREACHABLE(); @@ -1330,8 +1416,10 @@ const char* Translation::StringFor(Opcode opcode) { switch (opcode) { case BEGIN: return "BEGIN"; - case FRAME: - return "FRAME"; + case JS_FRAME: + return "JS_FRAME"; + case ARGUMENTS_ADAPTOR_FRAME: + return "ARGUMENTS_ADAPTOR_FRAME"; case REGISTER: return "REGISTER"; case INT32_REGISTER: @@ -1385,7 +1473,8 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator, switch (opcode) { case Translation::BEGIN: - case Translation::FRAME: + case Translation::JS_FRAME: + case Translation::ARGUMENTS_ADAPTOR_FRAME: // Peeled off before getting here. break; @@ -1431,9 +1520,27 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator, } -void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame, - int inlined_frame_index, - Vector* args_slots) { +void SlotRef::ComputeSlotsForArguments(Vector* args_slots, + TranslationIterator* it, + DeoptimizationInputData* data, + JavaScriptFrame* frame) { + // Process the translation commands for the arguments. + + // Skip the translation command for the receiver. + it->Skip(Translation::NumberOfOperandsFor( + static_cast(it->Next()))); + + // Compute slots for arguments. 
+ for (int i = 0; i < args_slots->length(); ++i) { + (*args_slots)[i] = ComputeSlotForNextArgument(it, data, frame); + } +} + + +Vector SlotRef::ComputeSlotMappingForArguments( + JavaScriptFrame* frame, + int inlined_jsframe_index, + int formal_parameter_count) { AssertNoAllocation no_gc; int deopt_index = AstNode::kNoNumber; DeoptimizationInputData* data = @@ -1442,51 +1549,73 @@ void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame, data->TranslationIndex(deopt_index)->value()); Translation::Opcode opcode = static_cast(it.Next()); ASSERT(opcode == Translation::BEGIN); - int frame_count = it.Next(); - USE(frame_count); - ASSERT(frame_count > inlined_frame_index); - int frames_to_skip = inlined_frame_index; + it.Next(); // Drop frame count. + int jsframe_count = it.Next(); + USE(jsframe_count); + ASSERT(jsframe_count > inlined_jsframe_index); + int jsframes_to_skip = inlined_jsframe_index; while (true) { opcode = static_cast(it.Next()); - // Skip over operands to advance to the next opcode. - it.Skip(Translation::NumberOfOperandsFor(opcode)); - if (opcode == Translation::FRAME) { - if (frames_to_skip == 0) { + if (opcode == Translation::ARGUMENTS_ADAPTOR_FRAME) { + if (jsframes_to_skip == 0) { + ASSERT(Translation::NumberOfOperandsFor(opcode) == 2); + + it.Skip(1); // literal id + int height = it.Next(); + + // We reached the arguments adaptor frame corresponding to the + // inlined function in question. Number of arguments is height - 1. + Vector args_slots = + Vector::New(height - 1); // Minus receiver. + ComputeSlotsForArguments(&args_slots, &it, data, frame); + return args_slots; + } + } else if (opcode == Translation::JS_FRAME) { + if (jsframes_to_skip == 0) { + // Skip over operands to advance to the next opcode. + it.Skip(Translation::NumberOfOperandsFor(opcode)); + // We reached the frame corresponding to the inlined function // in question. Process the translation commands for the - // arguments. - // - // Skip the translation command for the receiver. - it.Skip(Translation::NumberOfOperandsFor( - static_cast(it.Next()))); - // Compute slots for arguments. - for (int i = 0; i < args_slots->length(); ++i) { - (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame); - } - return; + // arguments. Number of arguments is equal to the number of + // format parameter count. + Vector args_slots = + Vector::New(formal_parameter_count); + ComputeSlotsForArguments(&args_slots, &it, data, frame); + return args_slots; } - frames_to_skip--; + jsframes_to_skip--; } + + // Skip over operands to advance to the next opcode. 
+ it.Skip(Translation::NumberOfOperandsFor(opcode)); } UNREACHABLE(); + return Vector(); } #ifdef ENABLE_DEBUGGER_SUPPORT DeoptimizedFrameInfo::DeoptimizedFrameInfo( - Deoptimizer* deoptimizer, int frame_index) { + Deoptimizer* deoptimizer, int frame_index, bool has_arguments_adaptor) { FrameDescription* output_frame = deoptimizer->output_[frame_index]; SetFunction(output_frame->GetFunction()); - expression_count_ = output_frame->GetExpressionCount(deoptimizer); + expression_count_ = output_frame->GetExpressionCount(); + expression_stack_ = new Object*[expression_count_]; + for (int i = 0; i < expression_count_; i++) { + SetExpression(i, output_frame->GetExpression(i)); + } + + if (has_arguments_adaptor) { + output_frame = deoptimizer->output_[frame_index - 1]; + ASSERT(output_frame->GetFrameType() == StackFrame::ARGUMENTS_ADAPTOR); + } + parameters_count_ = output_frame->ComputeParametersCount(); parameters_ = new Object*[parameters_count_]; for (int i = 0; i < parameters_count_; i++) { - SetParameter(i, output_frame->GetParameter(deoptimizer, i)); - } - expression_stack_ = new Object*[expression_count_]; - for (int i = 0; i < expression_count_; i++) { - SetExpression(i, output_frame->GetExpression(deoptimizer, i)); + SetParameter(i, output_frame->GetParameter(i)); } } diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h index 284676c36eeef1..e2cf05367bdfc5 100644 --- a/deps/v8/src/deoptimizer.h +++ b/deps/v8/src/deoptimizer.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -119,6 +119,9 @@ class Deoptimizer : public Malloced { int output_count() const { return output_count_; } + // Number of created JS frames. Not all created frames are necessarily JS. + int jsframe_count() const { return jsframe_count_; } + static Deoptimizer* New(JSFunction* function, BailoutType type, unsigned bailout_id, @@ -131,7 +134,7 @@ class Deoptimizer : public Malloced { // The returned object with information on the optimized frame needs to be // freed before another one can be generated. 
static DeoptimizedFrameInfo* DebuggerInspectableFrame(JavaScriptFrame* frame, - int frame_index, + int jsframe_index, Isolate* isolate); static void DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info, Isolate* isolate); @@ -196,7 +199,11 @@ class Deoptimizer : public Malloced { void MaterializeHeapNumbers(); #ifdef ENABLE_DEBUGGER_SUPPORT void MaterializeHeapNumbersForDebuggerInspectableFrame( - Address top, uint32_t size, DeoptimizedFrameInfo* info); + Address parameters_top, + uint32_t parameters_size, + Address expressions_top, + uint32_t expressions_size, + DeoptimizedFrameInfo* info); #endif static void ComputeOutputFrames(Deoptimizer* deoptimizer); @@ -257,8 +264,10 @@ class Deoptimizer : public Malloced { int count_; }; + int ConvertJSFrameIndexToFrameIndex(int jsframe_index); + private: - static const int kNumberOfEntries = 4096; + static const int kNumberOfEntries = 8192; Deoptimizer(Isolate* isolate, JSFunction* function, @@ -271,7 +280,9 @@ class Deoptimizer : public Malloced { void DoComputeOutputFrames(); void DoComputeOsrOutputFrame(); - void DoComputeFrame(TranslationIterator* iterator, int frame_index); + void DoComputeJSFrame(TranslationIterator* iterator, int frame_index); + void DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator, + int frame_index); void DoTranslateCommand(TranslationIterator* iterator, int frame_index, unsigned output_offset); @@ -319,6 +330,8 @@ class Deoptimizer : public Malloced { FrameDescription* input_; // Number of output frames. int output_count_; + // Number of output js frames. + int jsframe_count_; // Array of output frame descriptions. FrameDescription** output_; @@ -362,7 +375,7 @@ class FrameDescription { JSFunction* GetFunction() const { return function_; } - unsigned GetOffsetFromSlotIndex(Deoptimizer* deoptimizer, int slot_index); + unsigned GetOffsetFromSlotIndex(int slot_index); intptr_t GetFrameSlot(unsigned offset) { return *GetFrameSlotPointer(offset); @@ -423,22 +436,20 @@ class FrameDescription { void SetContinuation(intptr_t pc) { continuation_ = pc; } -#ifdef DEBUG - Code::Kind GetKind() const { return kind_; } - void SetKind(Code::Kind kind) { kind_ = kind; } -#endif + StackFrame::Type GetFrameType() const { return type_; } + void SetFrameType(StackFrame::Type type) { type_ = type; } // Get the incoming arguments count. int ComputeParametersCount(); // Get a parameter value for an unoptimized frame. - Object* GetParameter(Deoptimizer* deoptimizer, int index); + Object* GetParameter(int index); // Get the expression stack height for a unoptimized frame. - unsigned GetExpressionCount(Deoptimizer* deoptimizer); + unsigned GetExpressionCount(); // Get the expression stack value for an unoptimized frame. - Object* GetExpression(Deoptimizer* deoptimizer, int index); + Object* GetExpression(int index); static int registers_offset() { return OFFSET_OF(FrameDescription, registers_); @@ -481,6 +492,7 @@ class FrameDescription { intptr_t top_; intptr_t pc_; intptr_t fp_; + StackFrame::Type type_; Smi* state_; #ifdef DEBUG Code::Kind kind_; @@ -499,6 +511,8 @@ class FrameDescription { return reinterpret_cast( reinterpret_cast
(this) + frame_content_offset() + offset); } + + int ComputeFixedSize(); }; @@ -541,7 +555,8 @@ class Translation BASE_EMBEDDED { public: enum Opcode { BEGIN, - FRAME, + JS_FRAME, + ARGUMENTS_ADAPTOR_FRAME, REGISTER, INT32_REGISTER, DOUBLE_REGISTER, @@ -556,17 +571,19 @@ class Translation BASE_EMBEDDED { DUPLICATE }; - Translation(TranslationBuffer* buffer, int frame_count) + Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count) : buffer_(buffer), index_(buffer->CurrentIndex()) { buffer_->Add(BEGIN); buffer_->Add(frame_count); + buffer_->Add(jsframe_count); } int index() const { return index_; } // Commands. - void BeginFrame(int node_id, int literal_id, unsigned height); + void BeginJSFrame(int node_id, int literal_id, unsigned height); + void BeginArgumentsAdaptorFrame(int literal_id, unsigned height); void StoreRegister(Register reg); void StoreInt32Register(Register reg); void StoreDoubleRegister(DoubleRegister reg); @@ -656,9 +673,10 @@ class SlotRef BASE_EMBEDDED { } } - static void ComputeSlotMappingForArguments(JavaScriptFrame* frame, - int inlined_frame_index, - Vector* args_slots); + static Vector ComputeSlotMappingForArguments( + JavaScriptFrame* frame, + int inlined_frame_index, + int formal_parameter_count); private: Address addr_; @@ -678,6 +696,12 @@ class SlotRef BASE_EMBEDDED { static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator, DeoptimizationInputData* data, JavaScriptFrame* frame); + + static void ComputeSlotsForArguments( + Vector* args_slots, + TranslationIterator* iterator, + DeoptimizationInputData* data, + JavaScriptFrame* frame); }; @@ -686,9 +710,13 @@ class SlotRef BASE_EMBEDDED { // needs to inspect a frame that is part of an optimized frame. The // internally used FrameDescription objects are not GC safe so for use // by the debugger frame information is copied to an object of this type. +// Represents parameters in unadapted form so their number might mismatch +// formal parameter count. class DeoptimizedFrameInfo : public Malloced { public: - DeoptimizedFrameInfo(Deoptimizer* deoptimizer, int frame_index); + DeoptimizedFrameInfo(Deoptimizer* deoptimizer, + int frame_index, + bool has_arguments_adaptor); virtual ~DeoptimizedFrameInfo(); // GC support. diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc index 3a46e0869f45d6..4402496f182458 100644 --- a/deps/v8/src/frames.cc +++ b/deps/v8/src/frames.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -813,17 +813,18 @@ void OptimizedFrame::Summarize(List* frames) { data->TranslationIndex(deopt_index)->value()); Translation::Opcode opcode = static_cast(it.Next()); ASSERT(opcode == Translation::BEGIN); - int frame_count = it.Next(); + it.Next(); // Drop frame count. + int jsframe_count = it.Next(); // We create the summary in reverse order because the frames // in the deoptimization translation are ordered bottom-to-top. - int i = frame_count; + int i = jsframe_count; while (i > 0) { opcode = static_cast(it.Next()); - if (opcode == Translation::FRAME) { + if (opcode == Translation::JS_FRAME) { // We don't inline constructor calls, so only the first, outermost // frame can be a constructor frame in case of inlining. 
- bool is_constructor = (i == frame_count) && IsConstructor(); + bool is_constructor = (i == jsframe_count) && IsConstructor(); i--; int ast_id = it.Next(); @@ -918,8 +919,9 @@ int OptimizedFrame::GetInlineCount() { Translation::Opcode opcode = static_cast(it.Next()); ASSERT(opcode == Translation::BEGIN); USE(opcode); - int frame_count = it.Next(); - return frame_count; + it.Next(); // Drop frame count. + int jsframe_count = it.Next(); + return jsframe_count; } @@ -934,14 +936,15 @@ void OptimizedFrame::GetFunctions(List* functions) { data->TranslationIndex(deopt_index)->value()); Translation::Opcode opcode = static_cast(it.Next()); ASSERT(opcode == Translation::BEGIN); - int frame_count = it.Next(); + it.Next(); // Drop frame count. + int jsframe_count = it.Next(); // We insert the frames in reverse order because the frames // in the deoptimization translation are ordered bottom-to-top. - while (frame_count > 0) { + while (jsframe_count > 0) { opcode = static_cast(it.Next()); - if (opcode == Translation::FRAME) { - frame_count--; + if (opcode == Translation::JS_FRAME) { + jsframe_count--; it.Next(); // Skip ast id. int function_id = it.Next(); it.Next(); // Skip height. diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc index 5c7a23d54de942..4d1707d7bef1de 100644 --- a/deps/v8/src/full-codegen.cc +++ b/deps/v8/src/full-codegen.cc @@ -370,12 +370,14 @@ void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) { StateField::encode(state) | PcField::encode(masm_->pc_offset()); BailoutEntry entry = { id, pc_and_state }; #ifdef DEBUG - // Assert that we don't have multiple bailout entries for the same node. - for (int i = 0; i < bailout_entries_.length(); i++) { - if (bailout_entries_.at(i).id == entry.id) { - AstPrinter printer; - PrintF("%s", printer.PrintProgram(info_->function())); - UNREACHABLE(); + if (FLAG_enable_slow_asserts) { + // Assert that we don't have multiple bailout entries for the same node. + for (int i = 0; i < bailout_entries_.length(); i++) { + if (bailout_entries_.at(i).id == entry.id) { + AstPrinter printer; + PrintF("%s", printer.PrintProgram(info_->function())); + UNREACHABLE(); + } } } #endif // DEBUG diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h index 4d98fbad1078bc..23fe3060a6adeb 100644 --- a/deps/v8/src/heap-inl.h +++ b/deps/v8/src/heap-inl.h @@ -505,7 +505,6 @@ Isolate* Heap::isolate() { #define GC_GREEDY_CHECK() { } #endif - // Calls the FUNCTION_CALL function and retries it up to three times // to guarantee that any allocations performed during the call will // succeed if there's enough memory. 
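The heap.h and heap.cc hunks that follow widen KeyedLookupCache from 2-entry to 4-entry buckets: Lookup() now probes every entry in the bucket, and Update() fills a free slot if one exists (slots become free after the cache is cleared by GC) or demotes the existing entries by one position and writes the new pair at position zero. The sketch below is a minimal standalone illustration of that bucketed-cache pattern under stated assumptions, not V8's actual implementation; the class name SimpleLookupCache, the hash function, and the pointer-equality key comparison are simplifications introduced here.

#include <cstddef>

// Illustrative sketch only; see the note above.
class SimpleLookupCache {
 public:
  static const int kLength = 256;
  static const int kEntriesPerBucket = 4;           // Must be a power of two.
  static const int kHashMask = -kEntriesPerBucket;  // Zeroes the low bits.
  static const int kNotFound = -1;

  SimpleLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].map = 0;
      keys_[i].name = 0;
      field_offsets_[i] = kNotFound;
    }
  }

  // Probe every entry of the bucket the (map, name) pair hashes to.
  int Lookup(const void* map, const void* name) const {
    int index = Hash(map, name) & kHashMask;
    for (int i = 0; i < kEntriesPerBucket; ++i) {
      if (keys_[index + i].map == map && keys_[index + i].name == name) {
        return field_offsets_[index + i];
      }
    }
    return kNotFound;
  }

  // Insert into a free slot if the bucket has one; otherwise shift all
  // entries down one position and write at position zero, evicting the
  // oldest entry.
  void Update(const void* map, const void* name, int field_offset) {
    int index = Hash(map, name) & kHashMask;
    for (int i = 0; i < kEntriesPerBucket; ++i) {
      if (keys_[index + i].map == 0) {
        keys_[index + i].map = map;
        keys_[index + i].name = name;
        field_offsets_[index + i] = field_offset;
        return;
      }
    }
    for (int i = kEntriesPerBucket - 1; i > 0; --i) {
      keys_[index + i] = keys_[index + i - 1];
      field_offsets_[index + i] = field_offsets_[index + i - 1];
    }
    keys_[index].map = map;
    keys_[index].name = name;
    field_offsets_[index] = field_offset;
  }

 private:
  struct Key {
    const void* map;
    const void* name;
  };

  // Placeholder hash; V8 mixes the map and symbol addresses differently.
  static int Hash(const void* map, const void* name) {
    std::size_t h = reinterpret_cast<std::size_t>(map) ^
                    (reinterpret_cast<std::size_t>(name) >> 2);
    return static_cast<int>(h % kLength);
  }

  Key keys_[kLength];
  int field_offsets_[kLength];
};

The ic-ia32.cc hunk further down unrolls the same per-bucket probe loop in the generated keyed-load stub, so the assembly and the C++ cache layout stay in agreement about kEntriesPerBucket.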
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index d97f3379776dc7..4bd125e6012152 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -695,12 +695,18 @@ void Heap::UpdateSurvivalRateTrend(int start_new_space_size) { (static_cast(young_survivors_after_last_gc_) * 100) / start_new_space_size; - if (survival_rate > kYoungSurvivalRateThreshold) { + if (survival_rate > kYoungSurvivalRateHighThreshold) { high_survival_rate_period_length_++; } else { high_survival_rate_period_length_ = 0; } + if (survival_rate < kYoungSurvivalRateLowThreshold) { + low_survival_rate_period_length_++; + } else { + low_survival_rate_period_length_ = 0; + } + double survival_rate_diff = survival_rate_ - survival_rate; if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) { @@ -760,32 +766,6 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector, UpdateSurvivalRateTrend(start_new_space_size); - if (!new_space_high_promotion_mode_active_ && - new_space_.Capacity() == new_space_.MaximumCapacity() && - IsStableOrIncreasingSurvivalTrend() && - IsHighSurvivalRate()) { - // Stable high survival rates even though young generation is at - // maximum capacity indicates that most objects will be promoted. - // To decrease scavenger pauses and final mark-sweep pauses, we - // have to limit maximal capacity of the young generation. - new_space_high_promotion_mode_active_ = true; - if (FLAG_trace_gc) { - PrintF("Limited new space size due to high promotion rate: %d MB\n", - new_space_.InitialCapacity() / MB); - } - } else if (new_space_high_promotion_mode_active_ && - IsDecreasingSurvivalTrend() && - !IsHighSurvivalRate()) { - // Decreasing low survival rates might indicate that the above high - // promotion mode is over and we should allow the young generation - // to grow again. - new_space_high_promotion_mode_active_ = false; - if (FLAG_trace_gc) { - PrintF("Unlimited new space size due to low promotion rate: %d MB\n", - new_space_.MaximumCapacity() / MB); - } - } - size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize(); if (high_survival_rate_during_scavenges && @@ -815,6 +795,32 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector, UpdateSurvivalRateTrend(start_new_space_size); } + if (!new_space_high_promotion_mode_active_ && + new_space_.Capacity() == new_space_.MaximumCapacity() && + IsStableOrIncreasingSurvivalTrend() && + IsHighSurvivalRate()) { + // Stable high survival rates even though young generation is at + // maximum capacity indicates that most objects will be promoted. + // To decrease scavenger pauses and final mark-sweep pauses, we + // have to limit maximal capacity of the young generation. + new_space_high_promotion_mode_active_ = true; + if (FLAG_trace_gc) { + PrintF("Limited new space size due to high promotion rate: %d MB\n", + new_space_.InitialCapacity() / MB); + } + } else if (new_space_high_promotion_mode_active_ && + IsStableOrDecreasingSurvivalTrend() && + IsLowSurvivalRate()) { + // Decreasing low survival rates might indicate that the above high + // promotion mode is over and we should allow the young generation + // to grow again. 
+ new_space_high_promotion_mode_active_ = false; + if (FLAG_trace_gc) { + PrintF("Unlimited new space size due to low promotion rate: %d MB\n", + new_space_.MaximumCapacity() / MB); + } + } + if (new_space_high_promotion_mode_active_ && new_space_.Capacity() > new_space_.InitialCapacity()) { new_space_.Shrink(); @@ -1099,7 +1105,7 @@ void Heap::Scavenge() { isolate_->descriptor_lookup_cache()->Clear(); // Used for updating survived_since_last_expansion_ at function end. - intptr_t survived_watermark = PromotedSpaceSize(); + intptr_t survived_watermark = PromotedSpaceSizeOfObjects(); CheckNewSpaceExpansionCriteria(); @@ -1191,7 +1197,7 @@ void Heap::Scavenge() { // Update how much has survived scavenge. IncrementYoungSurvivorsCounter(static_cast( - (PromotedSpaceSize() - survived_watermark) + new_space_.Size())); + (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size())); LOG(isolate_, ResourceEvent("scavenge", "end")); @@ -3302,7 +3308,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc, } code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER); code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER); - code->set_next_code_flushing_candidate(undefined_value()); + code->set_gc_metadata(Smi::FromInt(0)); // Allow self references to created code object by patching the handle to // point to the newly allocated Code object. if (!self_reference.is_null()) { @@ -5422,6 +5428,16 @@ intptr_t Heap::PromotedSpaceSize() { } +intptr_t Heap::PromotedSpaceSizeOfObjects() { + return old_pointer_space_->SizeOfObjects() + + old_data_space_->SizeOfObjects() + + code_space_->SizeOfObjects() + + map_space_->SizeOfObjects() + + cell_space_->SizeOfObjects() + + lo_space_->SizeOfObjects(); +} + + int Heap::PromotedExternalMemorySize() { if (amount_of_external_allocated_memory_ <= amount_of_external_allocated_memory_at_last_global_gc_) return 0; @@ -6523,15 +6539,11 @@ int KeyedLookupCache::Hash(Map* map, String* name) { int KeyedLookupCache::Lookup(Map* map, String* name) { int index = (Hash(map, name) & kHashMask); - Key& key = keys_[index]; - if ((key.map == map) && key.name->Equals(name)) { - return field_offsets_[index]; - } - ASSERT(kEntriesPerBucket == 2); // There are two entries to check. - // First entry in the bucket missed, check the second. - Key& key2 = keys_[index + 1]; - if ((key2.map == map) && key2.name->Equals(name)) { - return field_offsets_[index + 1]; + for (int i = 0; i < kEntriesPerBucket; i++) { + Key& key = keys_[index + i]; + if ((key.map == map) && key.name->Equals(name)) { + return field_offsets_[index + i]; + } } return kNotFound; } @@ -6541,13 +6553,29 @@ void KeyedLookupCache::Update(Map* map, String* name, int field_offset) { String* symbol; if (HEAP->LookupSymbolIfExists(name, &symbol)) { int index = (Hash(map, symbol) & kHashMask); - Key& key = keys_[index]; - Key& key2 = keys_[index + 1]; // Second entry in the bucket. - // Demote the first entry to the second in the bucket. - key2.map = key.map; - key2.name = key.name; - field_offsets_[index + 1] = field_offsets_[index]; + // After a GC there will be free slots, so we use them in order (this may + // help to get the most frequently used one in position 0). 
+ for (int i = 0; i< kEntriesPerBucket; i++) { + Key& key = keys_[index]; + Object* free_entry_indicator = NULL; + if (key.map == free_entry_indicator) { + key.map = map; + key.name = symbol; + field_offsets_[index + i] = field_offset; + return; + } + } + // No free entry found in this bucket, so we move them all down one and + // put the new entry at position zero. + for (int i = kEntriesPerBucket - 1; i > 0; i--) { + Key& key = keys_[index + i]; + Key& key2 = keys_[index + i - 1]; + key = key2; + field_offsets_[index + i] = field_offsets_[index + i - 1]; + } + // Write the new first entry. + Key& key = keys_[index]; key.map = map; key.name = symbol; field_offsets_[index] = field_offset; diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index 85df55e4b5e0c7..c8ac927cd254ee 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -156,6 +156,7 @@ inline Heap* _inline_get_heap_(); V(Script, empty_script, EmptyScript) \ V(Smi, real_stack_limit, RealStackLimit) \ V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames) \ + V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) #define ROOT_LIST(V) \ STRONG_ROOT_LIST(V) \ @@ -1067,7 +1068,7 @@ class Heap { // Heap root getters. We have versions with and without type::cast() here. // You can't use type::cast during GC because the assert fails. // TODO(1490): Try removing the unchecked accessors, now that GC marking does - // not corrupt the stack. + // not corrupt the map. #define ROOT_ACCESSOR(type, name, camel_name) \ type* name() { \ return type::cast(roots_[k##camel_name##RootIndex]); \ @@ -1381,6 +1382,7 @@ class Heap { void CheckNewSpaceExpansionCriteria(); inline void IncrementYoungSurvivorsCounter(int survived) { + ASSERT(survived >= 0); young_survivors_after_last_gc_ = survived; survived_since_last_expansion_ += survived; } @@ -1430,6 +1432,7 @@ class Heap { // Returns the size of objects residing in non new spaces. 
intptr_t PromotedSpaceSize(); + intptr_t PromotedSpaceSizeOfObjects(); double total_regexp_code_generated() { return total_regexp_code_generated_; } void IncreaseTotalRegexpCodeGenerated(int size) { @@ -1517,6 +1520,11 @@ class Heap { return seed; } + void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) { + ASSERT(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0)); + set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset)); + } + private: Heap(); @@ -1799,11 +1807,13 @@ class Heap { enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING }; - static const int kYoungSurvivalRateThreshold = 90; + static const int kYoungSurvivalRateHighThreshold = 90; + static const int kYoungSurvivalRateLowThreshold = 10; static const int kYoungSurvivalRateAllowedDeviation = 15; int young_survivors_after_last_gc_; int high_survival_rate_period_length_; + int low_survival_rate_period_length_; double survival_rate_; SurvivalRateTrend previous_survival_rate_trend_; SurvivalRateTrend survival_rate_trend_; @@ -1836,18 +1846,28 @@ class Heap { } } - bool IsIncreasingSurvivalTrend() { - return survival_rate_trend() == INCREASING; + bool IsStableOrDecreasingSurvivalTrend() { + switch (survival_rate_trend()) { + case STABLE: + case DECREASING: + return true; + default: + return false; + } } - bool IsDecreasingSurvivalTrend() { - return survival_rate_trend() == DECREASING; + bool IsIncreasingSurvivalTrend() { + return survival_rate_trend() == INCREASING; } bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; } + bool IsLowSurvivalRate() { + return low_survival_rate_period_length_ > 0; + } + void SelectScavengingVisitorsTable(); void StartIdleRound() { @@ -2135,13 +2155,17 @@ class KeyedLookupCache { // Clear the cache. void Clear(); - static const int kLength = 128; + static const int kLength = 256; static const int kCapacityMask = kLength - 1; static const int kMapHashShift = 5; - static const int kHashMask = -2; // Zero the last bit. - static const int kEntriesPerBucket = 2; + static const int kHashMask = -4; // Zero the last two bits. + static const int kEntriesPerBucket = 4; static const int kNotFound = -1; + // kEntriesPerBucket should be a power of 2. 
+ STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0); + STATIC_ASSERT(kEntriesPerBucket == -kHashMask); + private: KeyedLookupCache() { for (int i = 0; i < kLength; ++i) { diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc index 2d32ad1fed8a67..7ae0b44a27634b 100644 --- a/deps/v8/src/hydrogen-instructions.cc +++ b/deps/v8/src/hydrogen-instructions.cc @@ -1339,6 +1339,23 @@ Range* HShl::InferRange() { } +Range* HLoadKeyedSpecializedArrayElement::InferRange() { + switch (elements_kind()) { + case EXTERNAL_PIXEL_ELEMENTS: + return new Range(0, 255); + case EXTERNAL_BYTE_ELEMENTS: + return new Range(-128, 127); + case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + return new Range(0, 255); + case EXTERNAL_SHORT_ELEMENTS: + return new Range(-32768, 32767); + case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + return new Range(0, 65535); + default: + return HValue::InferRange(); + } +} + void HCompareGeneric::PrintDataTo(StringStream* stream) { stream->Add(Token::Name(token())); diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h index 9f661d6f032764..38277e955767d8 100644 --- a/deps/v8/src/hydrogen-instructions.h +++ b/deps/v8/src/hydrogen-instructions.h @@ -764,6 +764,7 @@ class HValue: public ZoneObject { int flags_; GVNFlagSet gvn_flags_; + private: DISALLOW_COPY_AND_ASSIGN(HValue); }; @@ -1340,9 +1341,11 @@ class HStackCheck: public HTemplateInstruction<1> { class HEnterInlined: public HTemplateInstruction<0> { public: HEnterInlined(Handle closure, + int arguments_count, FunctionLiteral* function, CallKind call_kind) : closure_(closure), + arguments_count_(arguments_count), function_(function), call_kind_(call_kind) { } @@ -1350,6 +1353,7 @@ class HEnterInlined: public HTemplateInstruction<0> { virtual void PrintDataTo(StringStream* stream); Handle closure() const { return closure_; } + int arguments_count() const { return arguments_count_; } FunctionLiteral* function() const { return function_; } CallKind call_kind() const { return call_kind_; } @@ -1361,6 +1365,7 @@ class HEnterInlined: public HTemplateInstruction<0> { private: Handle closure_; + int arguments_count_; FunctionLiteral* function_; CallKind call_kind_; }; @@ -3844,6 +3849,8 @@ class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> { HValue* key() { return OperandAt(1); } ElementsKind elements_kind() const { return elements_kind_; } + virtual Range* InferRange(); + DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement) protected: diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc index 1a63f1e7aaa7fe..862afe5d3a644b 100644 --- a/deps/v8/src/hydrogen.cc +++ b/deps/v8/src/hydrogen.cc @@ -167,8 +167,7 @@ void HBasicBlock::Finish(HControlInstruction* end) { void HBasicBlock::Goto(HBasicBlock* block, bool drop_extra) { if (block->IsInlineReturnTarget()) { AddInstruction(new(zone()) HLeaveInlined); - last_environment_ = last_environment()->outer(); - if (drop_extra) last_environment_->Drop(1); + last_environment_ = last_environment()->DiscardInlined(drop_extra); } AddSimulate(AstNode::kNoNumber); HGoto* instr = new(zone()) HGoto(block); @@ -182,8 +181,7 @@ void HBasicBlock::AddLeaveInlined(HValue* return_value, ASSERT(target->IsInlineReturnTarget()); ASSERT(return_value != NULL); AddInstruction(new(zone()) HLeaveInlined); - last_environment_ = last_environment()->outer(); - if (drop_extra) last_environment_->Drop(1); + last_environment_ = last_environment()->DiscardInlined(drop_extra); last_environment()->Push(return_value); 
AddSimulate(AstNode::kNoNumber); HGoto* instr = new(zone()) HGoto(target); @@ -2076,6 +2074,7 @@ AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind) for_typeof_(false) { owner->set_ast_context(this); // Push. #ifdef DEBUG + ASSERT(!owner->environment()->is_arguments_adaptor()); original_length_ = owner->environment()->length(); #endif } @@ -2089,14 +2088,16 @@ AstContext::~AstContext() { EffectContext::~EffectContext() { ASSERT(owner()->HasStackOverflow() || owner()->current_block() == NULL || - owner()->environment()->length() == original_length_); + (owner()->environment()->length() == original_length_ && + !owner()->environment()->is_arguments_adaptor())); } ValueContext::~ValueContext() { ASSERT(owner()->HasStackOverflow() || owner()->current_block() == NULL || - owner()->environment()->length() == original_length_ + 1); + (owner()->environment()->length() == original_length_ + 1 && + !owner()->environment()->is_arguments_adaptor())); } @@ -4828,7 +4829,9 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) { TraceInline(target, caller, "inline depth limit reached"); return false; } - current_level++; + if (!env->outer()->is_arguments_adaptor()) { + current_level++; + } env = env->outer(); } @@ -4876,11 +4879,8 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) { return false; } - // Don't inline functions that uses the arguments object or that - // have a mismatching number of parameters. - int arity = expr->arguments()->length(); - if (function->scope()->arguments() != NULL || - arity != target_shared->formal_parameter_count()) { + // Don't inline functions that uses the arguments object. + if (function->scope()->arguments() != NULL) { TraceInline(target, caller, "target requires special argument handling"); return false; } @@ -4944,6 +4944,7 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) { HConstant* undefined = graph()->GetConstantUndefined(); HEnvironment* inner_env = environment()->CopyForInlining(target, + expr->arguments()->length(), function, undefined, call_kind); @@ -4963,6 +4964,7 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) { body_entry->SetJoinId(expr->ReturnId()); set_current_block(body_entry); AddInstruction(new(zone()) HEnterInlined(target, + expr->arguments()->length(), function, call_kind)); VisitDeclarations(target_info.scope()->declarations()); @@ -6902,7 +6904,8 @@ HEnvironment::HEnvironment(HEnvironment* outer, outer_(outer), pop_count_(0), push_count_(0), - ast_id_(AstNode::kNoNumber) { + ast_id_(AstNode::kNoNumber), + arguments_adaptor_(false) { Initialize(scope->num_parameters() + 1, scope->num_stack_slots(), 0); } @@ -6916,11 +6919,28 @@ HEnvironment::HEnvironment(const HEnvironment* other) outer_(NULL), pop_count_(0), push_count_(0), - ast_id_(other->ast_id()) { + ast_id_(other->ast_id()), + arguments_adaptor_(false) { Initialize(other); } +HEnvironment::HEnvironment(HEnvironment* outer, + Handle closure, + int arguments) + : closure_(closure), + values_(arguments), + assigned_variables_(0), + parameter_count_(arguments), + local_count_(0), + outer_(outer), + pop_count_(0), + push_count_(0), + ast_id_(AstNode::kNoNumber), + arguments_adaptor_(true) { +} + + void HEnvironment::Initialize(int parameter_count, int local_count, int stack_height) { @@ -6944,6 +6964,7 @@ void HEnvironment::Initialize(const HEnvironment* other) { pop_count_ = other->pop_count_; push_count_ = other->push_count_; ast_id_ = other->ast_id_; + arguments_adaptor_ = other->arguments_adaptor_; } @@ -7047,20 +7068,36 @@ 
HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const { HEnvironment* HEnvironment::CopyForInlining( Handle target, + int arguments, FunctionLiteral* function, HConstant* undefined, CallKind call_kind) const { + ASSERT(!is_arguments_adaptor()); + + Zone* zone = closure()->GetIsolate()->zone(); + // Outer environment is a copy of this one without the arguments. int arity = function->scope()->num_parameters(); + HEnvironment* outer = Copy(); - outer->Drop(arity + 1); // Including receiver. + outer->Drop(arguments + 1); // Including receiver. outer->ClearHistory(); - Zone* zone = closure()->GetIsolate()->zone(); + + if (arity != arguments) { + // Create artificial arguments adaptation environment. + outer = new(zone) HEnvironment(outer, target, arguments + 1); + for (int i = 0; i <= arguments; ++i) { // Include receiver. + outer->Push(ExpressionStackAt(arguments - i)); + } + outer->ClearHistory(); + } + HEnvironment* inner = new(zone) HEnvironment(outer, function->scope(), target); // Get the argument values from the original environment. for (int i = 0; i <= arity; ++i) { // Include receiver. - HValue* push = ExpressionStackAt(arity - i); + HValue* push = (i <= arguments) ? + ExpressionStackAt(arguments - i) : undefined; inner->SetValueAt(i, push); } // If the function we are inlining is a strict mode function or a @@ -7070,7 +7107,7 @@ HEnvironment* HEnvironment::CopyForInlining( call_kind == CALL_AS_FUNCTION) { inner->SetValueAt(0, undefined); } - inner->SetValueAt(arity + 1, outer->LookupContext()); + inner->SetValueAt(arity + 1, LookupContext()); for (int i = arity + 2; i < inner->length(); ++i) { inner->SetValueAt(i, undefined); } @@ -7086,7 +7123,7 @@ void HEnvironment::PrintTo(StringStream* stream) { if (i == parameter_count()) stream->Add("specials\n"); if (i == parameter_count() + specials_count()) stream->Add("locals\n"); if (i == parameter_count() + specials_count() + local_count()) { - stream->Add("expressions"); + stream->Add("expressions\n"); } HValue* val = values_.at(i); stream->Add("%d: ", i); @@ -7097,6 +7134,7 @@ void HEnvironment::PrintTo(StringStream* stream) { } stream->Add("\n"); } + PrintF("\n"); } diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h index b77e97583a069d..c8cfc91b3cd05c 100644 --- a/deps/v8/src/hydrogen.h +++ b/deps/v8/src/hydrogen.h @@ -343,6 +343,17 @@ class HEnvironment: public ZoneObject { Scope* scope, Handle closure); + bool is_arguments_adaptor() const { + return arguments_adaptor_; + } + + HEnvironment* DiscardInlined(bool drop_extra) { + HEnvironment* outer = outer_->is_arguments_adaptor() ? + outer_->outer_ : outer_; + if (drop_extra) outer->Drop(1); + return outer; + } + // Simple accessors. Handle closure() const { return closure_; } const ZoneList* values() const { return &values_; } @@ -427,6 +438,7 @@ class HEnvironment: public ZoneObject { // environment is the outer environment but the top expression stack // elements are moved to an inner environment as parameters. HEnvironment* CopyForInlining(Handle target, + int arguments, FunctionLiteral* function, HConstant* undefined, CallKind call_kind) const; @@ -450,6 +462,10 @@ class HEnvironment: public ZoneObject { private: explicit HEnvironment(const HEnvironment* other); + // Create an argument adaptor environment. + HEnvironment(HEnvironment* outer, Handle closure, int arguments); + + // True if index is included in the expression stack part of the environment. 
bool HasExpressionAt(int index) const; @@ -478,6 +494,7 @@ class HEnvironment: public ZoneObject { int pop_count_; int push_count_; int ast_id_; + bool arguments_adaptor_; }; diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h index 5f67077ad014cf..ef109229a2b135 100644 --- a/deps/v8/src/ia32/assembler-ia32-inl.h +++ b/deps/v8/src/ia32/assembler-ia32-inl.h @@ -30,13 +30,15 @@ // The original source code covered by the above license above has been // modified significantly by Google Inc. -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // A light-weight IA32 Assembler. #ifndef V8_IA32_ASSEMBLER_IA32_INL_H_ #define V8_IA32_ASSEMBLER_IA32_INL_H_ +#include "ia32/assembler-ia32.h" + #include "cpu.h" #include "debug.h" diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc index 4666311af68f75..28c97f0e65c387 100644 --- a/deps/v8/src/ia32/builtins-ia32.cc +++ b/deps/v8/src/ia32/builtins-ia32.cc @@ -537,7 +537,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, { FrameScope scope(masm, StackFrame::INTERNAL); - // Pass the function and deoptimization type to the runtime system. + // Pass deoptimization type to the runtime system. __ push(Immediate(Smi::FromInt(static_cast(type)))); __ CallRuntime(Runtime::kNotifyDeoptimized, 1); @@ -1644,6 +1644,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); __ call(edx); + masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset()); // Leave frame and return. LeaveArgumentsAdaptorFrame(masm); __ ret(0); diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc index 292315d10ca33e..14f26757e9232f 100644 --- a/deps/v8/src/ia32/deoptimizer-ia32.cc +++ b/deps/v8/src/ia32/deoptimizer-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -299,12 +299,13 @@ void Deoptimizer::DoComputeOsrOutputFrame() { ASSERT(Translation::BEGIN == opcode); USE(opcode); int count = iterator.Next(); + iterator.Next(); // Drop JS frames count. ASSERT(count == 1); USE(count); opcode = static_cast(iterator.Next()); USE(opcode); - ASSERT(Translation::FRAME == opcode); + ASSERT(Translation::JS_FRAME == opcode); unsigned node_id = iterator.Next(); USE(node_id); ASSERT(node_id == ast_id); @@ -340,9 +341,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { output_ = new FrameDescription*[1]; output_[0] = new(output_frame_size) FrameDescription( output_frame_size, function_); -#ifdef DEBUG - output_[0]->SetKind(Code::OPTIMIZED_FUNCTION); -#endif + output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT); // Clear the incoming parameters in the optimized frame to avoid // confusing the garbage collector. @@ -437,13 +436,112 @@ void Deoptimizer::DoComputeOsrOutputFrame() { } -void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, - int frame_index) { - // Read the ast node id, function, and frame height for this output frame. 
- Translation::Opcode opcode = - static_cast(iterator->Next()); - USE(opcode); - ASSERT(Translation::FRAME == opcode); +void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator, + int frame_index) { + JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); + unsigned height = iterator->Next(); + unsigned height_in_bytes = height * kPointerSize; + if (FLAG_trace_deopt) { + PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes); + } + + unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize; + unsigned input_frame_size = input_->GetFrameSize(); + unsigned output_frame_size = height_in_bytes + fixed_frame_size; + + // Allocate and store the output frame description. + FrameDescription* output_frame = + new(output_frame_size) FrameDescription(output_frame_size, function); + output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR); + + // Arguments adaptor can not be topmost or bottommost. + ASSERT(frame_index > 0 && frame_index < output_count_ - 1); + ASSERT(output_[frame_index] == NULL); + output_[frame_index] = output_frame; + + // The top address of the frame is computed from the previous + // frame's top and this frame's size. + uint32_t top_address; + top_address = output_[frame_index - 1]->GetTop() - output_frame_size; + output_frame->SetTop(top_address); + + // Compute the incoming parameter translation. + int parameter_count = height; + unsigned output_offset = output_frame_size; + unsigned input_offset = input_frame_size; + for (int i = 0; i < parameter_count; ++i) { + output_offset -= kPointerSize; + DoTranslateCommand(iterator, frame_index, output_offset); + } + input_offset -= (parameter_count * kPointerSize); + + // Read caller's PC from the previous frame. + output_offset -= kPointerSize; + input_offset -= kPointerSize; + intptr_t callers_pc = output_[frame_index - 1]->GetPc(); + output_frame->SetFrameSlot(output_offset, callers_pc); + if (FLAG_trace_deopt) { + PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n", + top_address + output_offset, output_offset, callers_pc); + } + + // Read caller's FP from the previous frame, and set this frame's FP. + output_offset -= kPointerSize; + input_offset -= kPointerSize; + intptr_t value = output_[frame_index - 1]->GetFp(); + output_frame->SetFrameSlot(output_offset, value); + intptr_t fp_value = top_address + output_offset; + output_frame->SetFp(fp_value); + if (FLAG_trace_deopt) { + PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n", + fp_value, output_offset, value); + } + + // A marker value is used in place of the context. + output_offset -= kPointerSize; + input_offset -= kPointerSize; + intptr_t context = reinterpret_cast( + Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); + output_frame->SetFrameSlot(output_offset, context); + if (FLAG_trace_deopt) { + PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n", + top_address + output_offset, output_offset, context); + } + + // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME. + output_offset -= kPointerSize; + input_offset -= kPointerSize; + value = reinterpret_cast(function); + output_frame->SetFrameSlot(output_offset, value); + if (FLAG_trace_deopt) { + PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n", + top_address + output_offset, output_offset, value); + } + + // Number of incoming arguments. 
+ output_offset -= kPointerSize; + input_offset -= kPointerSize; + value = reinterpret_cast(Smi::FromInt(height - 1)); + output_frame->SetFrameSlot(output_offset, value); + if (FLAG_trace_deopt) { + PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n", + top_address + output_offset, output_offset, value, height - 1); + } + + ASSERT(0 == output_offset); + + Builtins* builtins = isolate_->builtins(); + Code* adaptor_trampoline = + builtins->builtin(Builtins::kArgumentsAdaptorTrampoline); + uint32_t pc = reinterpret_cast( + adaptor_trampoline->instruction_start() + + isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value()); + output_frame->SetPc(pc); +} + + +void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, + int frame_index) { int node_id = iterator->Next(); JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); unsigned height = iterator->Next(); @@ -463,9 +561,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, // Allocate and store the output frame description. FrameDescription* output_frame = new(output_frame_size) FrameDescription(output_frame_size, function); -#ifdef DEBUG - output_frame->SetKind(Code::FUNCTION); -#endif + output_frame->SetFrameType(StackFrame::JAVA_SCRIPT); bool is_bottommost = (0 == frame_index); bool is_topmost = (output_count_ - 1 == frame_index); diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h index 45b847aec4d11f..9e51857bdb1031 100644 --- a/deps/v8/src/ia32/frames-ia32.h +++ b/deps/v8/src/ia32/frames-ia32.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -95,9 +95,11 @@ class ExitFrameConstants : public AllStatic { class StandardFrameConstants : public AllStatic { public: + // Fixed part of the frame consists of return address, caller fp, + // context and function. // StandardFrame::IterateExpressions assumes that kContextOffset is the last // object pointer. - static const int kFixedFrameSize = 4; // Currently unused. + static const int kFixedFrameSize = 4 * kPointerSize; static const int kExpressionsOffset = -3 * kPointerSize; static const int kMarkerOffset = -2 * kPointerSize; static const int kContextOffset = -1 * kPointerSize; @@ -123,6 +125,8 @@ class JavaScriptFrameConstants : public AllStatic { class ArgumentsAdaptorFrameConstants : public AllStatic { public: static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset; + static const int kFrameSize = + StandardFrameConstants::kFixedFrameSize + kPointerSize; }; diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc index bb1d678e0bcea1..3a937900cc05af 100644 --- a/deps/v8/src/ia32/ic-ia32.cc +++ b/deps/v8/src/ia32/ic-ia32.cc @@ -538,20 +538,30 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Load the key (consisting of map and symbol) from the cache and // check for match. 
- Label try_second_entry, hit_on_first_entry, load_in_object_property; + Label load_in_object_property; + static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; + Label hit_on_nth_entry[kEntriesPerBucket]; ExternalReference cache_keys = ExternalReference::keyed_lookup_cache_keys(masm->isolate()); - __ mov(edi, ecx); - __ shl(edi, kPointerSizeLog2 + 1); - __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys)); - __ j(not_equal, &try_second_entry); - __ add(edi, Immediate(kPointerSize)); - __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys)); - __ j(equal, &hit_on_first_entry); - __ bind(&try_second_entry); + for (int i = 0; i < kEntriesPerBucket - 1; i++) { + Label try_next_entry; + __ mov(edi, ecx); + __ shl(edi, kPointerSizeLog2 + 1); + if (i != 0) { + __ add(edi, Immediate(kPointerSize * i * 2)); + } + __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys)); + __ j(not_equal, &try_next_entry); + __ add(edi, Immediate(kPointerSize)); + __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys)); + __ j(equal, &hit_on_nth_entry[i]); + __ bind(&try_next_entry); + } + __ lea(edi, Operand(ecx, 1)); __ shl(edi, kPointerSizeLog2 + 1); + __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2)); __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys)); __ j(not_equal, &slow); __ add(edi, Immediate(kPointerSize)); @@ -566,22 +576,21 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { ExternalReference cache_field_offsets = ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate()); - // Hit on second entry. - __ add(ecx, Immediate(1)); - __ mov(edi, - Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets)); - __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset)); - __ sub(edi, ecx); - __ j(above_equal, &property_array_property); - __ jmp(&load_in_object_property); - - // Hit on first entry. - __ bind(&hit_on_first_entry); - __ mov(edi, - Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets)); - __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset)); - __ sub(edi, ecx); - __ j(above_equal, &property_array_property); + // Hit on nth entry. + for (int i = kEntriesPerBucket - 1; i >= 0; i--) { + __ bind(&hit_on_nth_entry[i]); + if (i != 0) { + __ add(ecx, Immediate(i)); + } + __ mov(edi, + Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets)); + __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset)); + __ sub(edi, ecx); + __ j(above_equal, &property_array_property); + if (i != 0) { + __ jmp(&load_in_object_property); + } + } // Load in-object property. 
__ bind(&load_in_object_property); diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc index 976b23448df38c..a594c6628eb7b0 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.cc +++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc @@ -394,7 +394,11 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, WriteTranslation(environment->outer(), translation); int closure_id = DefineDeoptimizationLiteral(environment->closure()); - translation->BeginFrame(environment->ast_id(), closure_id, height); + if (environment->is_arguments_adaptor()) { + translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); + } else { + translation->BeginJSFrame(environment->ast_id(), closure_id, height); + } for (int i = 0; i < translation_size; ++i) { LOperand* value = environment->values()->at(i); // spilled_registers_ and spilled_double_registers_ are either @@ -543,10 +547,14 @@ void LCodeGen::RegisterEnvironmentForDeoptimization( // |>------------ translation_size ------------<| int frame_count = 0; + int jsframe_count = 0; for (LEnvironment* e = environment; e != NULL; e = e->outer()) { ++frame_count; + if (!e->is_arguments_adaptor()) { + ++jsframe_count; + } } - Translation translation(&translations_, frame_count); + Translation translation(&translations_, frame_count, jsframe_count); WriteTranslation(environment, &translation); int deoptimization_index = deoptimizations_.length(); int pc_offset = masm()->pc_offset(); diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc index 8435a3c2a1320b..c81aca8a20e64e 100644 --- a/deps/v8/src/ia32/lithium-ia32.cc +++ b/deps/v8/src/ia32/lithium-ia32.cc @@ -1013,15 +1013,17 @@ LEnvironment* LChunkBuilder::CreateEnvironment( LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator); int ast_id = hydrogen_env->ast_id(); - ASSERT(ast_id != AstNode::kNoNumber); + ASSERT(ast_id != AstNode::kNoNumber || hydrogen_env->is_arguments_adaptor()); int value_count = hydrogen_env->length(); LEnvironment* result = new(zone()) LEnvironment(hydrogen_env->closure(), + hydrogen_env->is_arguments_adaptor(), ast_id, hydrogen_env->parameter_count(), argument_count_, value_count, outer); + int argument_index = *argument_index_accumulator; for (int i = 0; i < value_count; ++i) { if (hydrogen_env->is_special_index(i)) continue; @@ -1030,13 +1032,17 @@ LEnvironment* LChunkBuilder::CreateEnvironment( if (value->IsArgumentsObject()) { op = NULL; } else if (value->IsPushArgument()) { - op = new(zone()) LArgument((*argument_index_accumulator)++); + op = new(zone()) LArgument(argument_index++); } else { op = UseAny(value); } result->AddValue(op, value->representation()); } + if (!hydrogen_env->is_arguments_adaptor()) { + *argument_index_accumulator = argument_index; + } + return result; } @@ -2002,12 +2008,11 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement( LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement( HLoadKeyedSpecializedArrayElement* instr) { ElementsKind elements_kind = instr->elements_kind(); - Representation representation(instr->representation()); ASSERT( - (representation.IsInteger32() && + (instr->representation().IsInteger32() && (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || - (representation.IsDouble() && + (instr->representation().IsDouble() && ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); ASSERT(instr->key()->representation().IsInteger32()); @@ 
-2070,13 +2075,12 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement( LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement( HStoreKeyedSpecializedArrayElement* instr) { - Representation representation(instr->value()->representation()); ElementsKind elements_kind = instr->elements_kind(); ASSERT( - (representation.IsInteger32() && + (instr->value()->representation().IsInteger32() && (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || - (representation.IsDouble() && + (instr->value()->representation().IsDouble() && ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); ASSERT(instr->external_pointer()->representation().IsExternal()); @@ -2380,6 +2384,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { HEnvironment* outer = current_block_->last_environment(); HConstant* undefined = graph()->GetConstantUndefined(); HEnvironment* inner = outer->CopyForInlining(instr->closure(), + instr->arguments_count(), instr->function(), undefined, instr->call_kind()); @@ -2390,7 +2395,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { - HEnvironment* outer = current_block_->last_environment()->outer(); + HEnvironment* outer = current_block_->last_environment()-> + DiscardInlined(false); current_block_->UpdateEnvironment(outer); return NULL; } diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h index 0e0184918b882e..42c76fbd67666a 100644 --- a/deps/v8/src/jsregexp.h +++ b/deps/v8/src/jsregexp.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -35,9 +35,11 @@ namespace v8 { namespace internal { - +class NodeVisitor; +class RegExpCompiler; class RegExpMacroAssembler; - +class RegExpNode; +class RegExpTree; class RegExpImpl { public: diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc index 1601dcf14ee597..0e5c992fb82f52 100644 --- a/deps/v8/src/lithium-allocator.cc +++ b/deps/v8/src/lithium-allocator.cc @@ -161,9 +161,8 @@ LiveRange::LiveRange(int id) next_(NULL), current_interval_(NULL), last_processed_use_(NULL), - spill_start_index_(kMaxInt) { - spill_operand_ = new LUnallocated(LUnallocated::IGNORE); -} + spill_operand_(new LOperand()), + spill_start_index_(kMaxInt) { } void LiveRange::set_assigned_register(int reg, RegisterKind register_kind) { @@ -184,14 +183,15 @@ void LiveRange::MakeSpilled() { bool LiveRange::HasAllocatedSpillOperand() const { - return spill_operand_ != NULL && !spill_operand_->IsUnallocated(); + ASSERT(spill_operand_ != NULL); + return !spill_operand_->IsIgnored(); } void LiveRange::SetSpillOperand(LOperand* operand) { ASSERT(!operand->IsUnallocated()); ASSERT(spill_operand_ != NULL); - ASSERT(spill_operand_->IsUnallocated()); + ASSERT(spill_operand_->IsIgnored()); spill_operand_->ConvertTo(operand->kind(), operand->index()); } @@ -1643,7 +1643,7 @@ void LAllocator::RecordUse(HValue* value, LUnallocated* operand) { int LAllocator::max_initial_value_ids() { - return LUnallocated::kMaxVirtualRegisters / 32; + return LUnallocated::kMaxVirtualRegisters / 16; } diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc index 31b16982d1aee1..5beca330670fa0 100644 --- a/deps/v8/src/lithium.cc 
+++ b/deps/v8/src/lithium.cc @@ -36,6 +36,7 @@ void LOperand::PrintTo(StringStream* stream) { LUnallocated* unalloc = NULL; switch (kind()) { case INVALID: + stream->Add("(0)"); break; case UNALLOCATED: unalloc = LUnallocated::cast(this); @@ -70,9 +71,6 @@ void LOperand::PrintTo(StringStream* stream) { case LUnallocated::ANY: stream->Add("(-)"); break; - case LUnallocated::IGNORE: - stream->Add("(0)"); - break; } break; case CONSTANT_OPERAND: diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h index 3253520090e834..c0d7d076d9c9b3 100644 --- a/deps/v8/src/lithium.h +++ b/deps/v8/src/lithium.h @@ -59,6 +59,7 @@ class LOperand: public ZoneObject { bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; } bool IsArgument() const { return kind() == ARGUMENT; } bool IsUnallocated() const { return kind() == UNALLOCATED; } + bool IsIgnored() const { return kind() == INVALID; } bool Equals(LOperand* other) const { return value_ == other->value_; } int VirtualRegister(); @@ -89,8 +90,7 @@ class LUnallocated: public LOperand { FIXED_SLOT, MUST_HAVE_REGISTER, WRITABLE_REGISTER, - SAME_AS_FIRST_INPUT, - IGNORE + SAME_AS_FIRST_INPUT }; // Lifetime of operand inside the instruction. @@ -121,9 +121,9 @@ class LUnallocated: public LOperand { // The superclass has a KindField. Some policies have a signed fixed // index in the upper bits. - static const int kPolicyWidth = 4; + static const int kPolicyWidth = 3; static const int kLifetimeWidth = 1; - static const int kVirtualRegisterWidth = 17; + static const int kVirtualRegisterWidth = 18; static const int kPolicyShift = kKindFieldWidth; static const int kLifetimeShift = kPolicyShift + kPolicyWidth; @@ -143,12 +143,10 @@ class LUnallocated: public LOperand { kVirtualRegisterWidth> { }; - static const int kMaxVirtualRegisters = 1 << (kVirtualRegisterWidth + 1); + static const int kMaxVirtualRegisters = 1 << kVirtualRegisterWidth; static const int kMaxFixedIndex = 63; static const int kMinFixedIndex = -64; - bool HasIgnorePolicy() const { return policy() == IGNORE; } - bool HasNoPolicy() const { return policy() == NONE; } bool HasAnyPolicy() const { return policy() == ANY; } @@ -234,9 +232,7 @@ class LMoveOperands BASE_EMBEDDED { } bool IsIgnored() const { - return destination_ != NULL && - destination_->IsUnallocated() && - LUnallocated::cast(destination_)->HasIgnorePolicy(); + return destination_ != NULL && destination_->IsIgnored(); } // We clear both operands to indicate move that's been eliminated. 
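With the IGNORE policy gone, LUnallocated can narrow the policy field from 4 bits to 3 and hand the spare bit to the virtual-register index, which is why kMaxVirtualRegisters is now derived directly from kVirtualRegisterWidth. A compile-time sketch of that packing; kKindFieldWidth = 3 is an assumption, the other widths come from the hunk above:

#include <cstdio>

static const int kKindFieldWidth = 3;        // assumed width of LOperand's KindField
static const int kPolicyWidth = 3;           // was 4 before IGNORE was removed
static const int kLifetimeWidth = 1;
static const int kVirtualRegisterWidth = 18; // was 17

static const int kPolicyShift = kKindFieldWidth;
static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;

// All fields must still fit in the 32-bit operand value.
static_assert(kVirtualRegisterShift + kVirtualRegisterWidth <= 32,
              "operand bit fields overflow 32 bits");

// The register limit now follows the field width instead of exceeding it.
static const int kMaxVirtualRegisters = 1 << kVirtualRegisterWidth;

int main() {
  std::printf("max virtual registers: %d\n", kMaxVirtualRegisters);  // 262144
  return 0;
}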
@@ -443,12 +439,14 @@ class LPointerMap: public ZoneObject { class LEnvironment: public ZoneObject { public: LEnvironment(Handle closure, + bool is_arguments_adaptor, int ast_id, int parameter_count, int argument_count, int value_count, LEnvironment* outer) : closure_(closure), + is_arguments_adaptor_(is_arguments_adaptor), arguments_stack_height_(argument_count), deoptimization_index_(Safepoint::kNoDeoptimizationIndex), translation_index_(-1), @@ -505,8 +503,11 @@ class LEnvironment: public ZoneObject { void PrintTo(StringStream* stream); + bool is_arguments_adaptor() const { return is_arguments_adaptor_; } + private: Handle closure_; + bool is_arguments_adaptor_; int arguments_stack_height_; int deoptimization_index_; int translation_index_; diff --git a/deps/v8/src/mark-compact-inl.h b/deps/v8/src/mark-compact-inl.h index 64faf82dad7d77..a9107bd277fdbd 100644 --- a/deps/v8/src/mark-compact-inl.h +++ b/deps/v8/src/mark-compact-inl.h @@ -66,6 +66,19 @@ void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) { } +bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* object) { + MarkBit mark = Marking::MarkBitFrom(object); + bool old_mark = mark.Get(); + if (!old_mark) SetMark(object, mark); + return old_mark; +} + + +void MarkCompactCollector::MarkObjectAndPush(HeapObject* object) { + if (!MarkObjectWithoutPush(object)) marking_deque_.PushBlack(object); +} + + void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) { ASSERT(!mark_bit.Get()); ASSERT(Marking::MarkBitFrom(obj) == mark_bit); diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc index 93614aceba2722..3636aa6fb5643e 100644 --- a/deps/v8/src/mark-compact.cc +++ b/deps/v8/src/mark-compact.cc @@ -710,16 +710,17 @@ class CodeFlusher { SharedFunctionInfo* candidate) { Code* code = candidate->code(); return reinterpret_cast( - code->address() + Code::kNextCodeFlushingCandidateOffset); + code->address() + Code::kGCMetadataOffset); } static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) { - return *GetNextCandidateField(candidate); + return reinterpret_cast( + candidate->code()->gc_metadata()); } static void SetNextCandidate(SharedFunctionInfo* candidate, SharedFunctionInfo* next_candidate) { - *GetNextCandidateField(candidate) = next_candidate; + candidate->code()->set_gc_metadata(next_candidate); } Isolate* isolate_; @@ -1672,6 +1673,16 @@ void MarkCompactCollector::MarkMapContents(Map* map) { } +void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors, + int offset) { + Object** slot = HeapObject::RawField(accessors, offset); + HeapObject* accessor = HeapObject::cast(*slot); + if (accessor->IsMap()) return; + RecordSlot(slot, slot, accessor); + MarkObjectAndPush(accessor); +} + + void MarkCompactCollector::MarkDescriptorArray( DescriptorArray* descriptors) { MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors); @@ -1699,27 +1710,37 @@ void MarkCompactCollector::MarkDescriptorArray( PropertyDetails details(Smi::cast(contents->get(i + 1))); Object** slot = contents->data_start() + i; - Object* value = *slot; - if (!value->IsHeapObject()) continue; + if (!(*slot)->IsHeapObject()) continue; + HeapObject* value = HeapObject::cast(*slot); RecordSlot(slot, slot, *slot); - if (details.IsProperty()) { - HeapObject* object = HeapObject::cast(value); - MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object)); - if (!mark.Get()) { - SetMark(HeapObject::cast(object), mark); - marking_deque_.PushBlack(object); - } - } else if (details.type() == 
ELEMENTS_TRANSITION && value->IsFixedArray()) { - // For maps with multiple elements transitions, the transition maps are - // stored in a FixedArray. Keep the fixed array alive but not the maps - // that it refers to. - HeapObject* object = HeapObject::cast(value); - MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object)); - if (!mark.Get()) { - SetMark(HeapObject::cast(object), mark); - } + switch (details.type()) { + case NORMAL: + case FIELD: + case CONSTANT_FUNCTION: + case HANDLER: + case INTERCEPTOR: + MarkObjectAndPush(value); + break; + case CALLBACKS: + if (!value->IsAccessorPair()) { + MarkObjectAndPush(value); + } else if (!MarkObjectWithoutPush(value)) { + MarkAccessorPairSlot(value, AccessorPair::kGetterOffset); + MarkAccessorPairSlot(value, AccessorPair::kSetterOffset); + } + break; + case ELEMENTS_TRANSITION: + // For maps with multiple elements transitions, the transition maps are + // stored in a FixedArray. Keep the fixed array alive but not the maps + // that it refers to. + if (value->IsFixedArray()) MarkObjectWithoutPush(value); + break; + case MAP_TRANSITION: + case CONSTANT_TRANSITION: + case NULL_DESCRIPTOR: + break; } } // The DescriptorArray descriptors contains a pointer to its contents array, @@ -2290,88 +2311,92 @@ void MarkCompactCollector::ClearNonLiveTransitions() { map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map); } - // Clear dead prototype transitions. - int number_of_transitions = map->NumberOfProtoTransitions(); - FixedArray* prototype_transitions = map->prototype_transitions(); - - int new_number_of_transitions = 0; - const int header = Map::kProtoTransitionHeaderSize; - const int proto_offset = - header + Map::kProtoTransitionPrototypeOffset; - const int map_offset = header + Map::kProtoTransitionMapOffset; - const int step = Map::kProtoTransitionElementsPerEntry; - for (int i = 0; i < number_of_transitions; i++) { - Object* prototype = prototype_transitions->get(proto_offset + i * step); - Object* cached_map = prototype_transitions->get(map_offset + i * step); - if (IsMarked(prototype) && IsMarked(cached_map)) { - int proto_index = proto_offset + new_number_of_transitions * step; - int map_index = map_offset + new_number_of_transitions * step; - if (new_number_of_transitions != i) { - prototype_transitions->set_unchecked( - heap_, - proto_index, - prototype, - UPDATE_WRITE_BARRIER); - prototype_transitions->set_unchecked( - heap_, - map_index, - cached_map, - SKIP_WRITE_BARRIER); - } - Object** slot = - HeapObject::RawField(prototype_transitions, - FixedArray::OffsetOfElementAt(proto_index)); - RecordSlot(slot, slot, prototype); - new_number_of_transitions++; + ClearNonLivePrototypeTransitions(map); + ClearNonLiveMapTransitions(map, map_mark); + } +} + + +void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) { + int number_of_transitions = map->NumberOfProtoTransitions(); + FixedArray* prototype_transitions = map->prototype_transitions(); + + int new_number_of_transitions = 0; + const int header = Map::kProtoTransitionHeaderSize; + const int proto_offset = header + Map::kProtoTransitionPrototypeOffset; + const int map_offset = header + Map::kProtoTransitionMapOffset; + const int step = Map::kProtoTransitionElementsPerEntry; + for (int i = 0; i < number_of_transitions; i++) { + Object* prototype = prototype_transitions->get(proto_offset + i * step); + Object* cached_map = prototype_transitions->get(map_offset + i * step); + if (IsMarked(prototype) && IsMarked(cached_map)) { + int proto_index = proto_offset 
+ new_number_of_transitions * step; + int map_index = map_offset + new_number_of_transitions * step; + if (new_number_of_transitions != i) { + prototype_transitions->set_unchecked( + heap_, + proto_index, + prototype, + UPDATE_WRITE_BARRIER); + prototype_transitions->set_unchecked( + heap_, + map_index, + cached_map, + SKIP_WRITE_BARRIER); } + Object** slot = + HeapObject::RawField(prototype_transitions, + FixedArray::OffsetOfElementAt(proto_index)); + RecordSlot(slot, slot, prototype); + new_number_of_transitions++; } + } - if (new_number_of_transitions != number_of_transitions) { - map->SetNumberOfProtoTransitions(new_number_of_transitions); - } + if (new_number_of_transitions != number_of_transitions) { + map->SetNumberOfProtoTransitions(new_number_of_transitions); + } - // Fill slots that became free with undefined value. - for (int i = new_number_of_transitions * step; - i < number_of_transitions * step; - i++) { - prototype_transitions->set_undefined(heap_, header + i); - } + // Fill slots that became free with undefined value. + for (int i = new_number_of_transitions * step; + i < number_of_transitions * step; + i++) { + prototype_transitions->set_undefined(heap_, header + i); + } +} - // Follow the chain of back pointers to find the prototype. - Map* current = map; - while (current->IsMap()) { - current = reinterpret_cast(current->prototype()); - ASSERT(current->IsHeapObject()); - } - Object* real_prototype = current; - // Follow back pointers, setting them to prototype, - // clearing map transitions when necessary. - current = map; - bool on_dead_path = !map_mark.Get(); - Object* next; - while (current->IsMap()) { - next = current->prototype(); - // There should never be a dead map above a live map. - MarkBit current_mark = Marking::MarkBitFrom(current); - bool is_alive = current_mark.Get(); - ASSERT(on_dead_path || is_alive); +void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map, + MarkBit map_mark) { + // Follow the chain of back pointers to find the prototype. + Map* real_prototype = map; + while (real_prototype->IsMap()) { + real_prototype = reinterpret_cast(real_prototype->prototype()); + ASSERT(real_prototype->IsHeapObject()); + } - // A live map above a dead map indicates a dead transition. - // This test will always be false on the first iteration. - if (on_dead_path && is_alive) { - on_dead_path = false; - current->ClearNonLiveTransitions(heap(), real_prototype); - } - *HeapObject::RawField(current, Map::kPrototypeOffset) = - real_prototype; + // Follow back pointers, setting them to prototype, clearing map transitions + // when necessary. + Map* current = map; + bool current_is_alive = map_mark.Get(); + bool on_dead_path = !current_is_alive; + while (current->IsMap()) { + Object* next = current->prototype(); + // There should never be a dead map above a live map. + ASSERT(on_dead_path || current_is_alive); - if (is_alive) { - Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset); - RecordSlot(slot, slot, real_prototype); - } - current = reinterpret_cast(next); + // A live map above a dead map indicates a dead transition. This test will + // always be false on the first iteration. 
+ if (on_dead_path && current_is_alive) { + on_dead_path = false; + current->ClearNonLiveTransitions(heap(), real_prototype); } + + Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset); + *slot = real_prototype; + if (current_is_alive) RecordSlot(slot, slot, real_prototype); + + current = reinterpret_cast(next); + current_is_alive = Marking::MarkBitFrom(current).Get(); } } diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h index 85a4a3b7060cbf..a911b49d2ee25f 100644 --- a/deps/v8/src/mark-compact.h +++ b/deps/v8/src/mark-compact.h @@ -628,6 +628,9 @@ class MarkCompactCollector { // This is for non-incremental marking. INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit)); + INLINE(bool MarkObjectWithoutPush(HeapObject* object)); + INLINE(void MarkObjectAndPush(HeapObject* value)); + // Marks the object black. This is for non-incremental marking. INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit)); @@ -645,6 +648,7 @@ class MarkCompactCollector { // Mark a Map and its DescriptorArray together, skipping transitions. void MarkMapContents(Map* map); + void MarkAccessorPairSlot(HeapObject* accessors, int offset); void MarkDescriptorArray(DescriptorArray* descriptors); // Mark the heap roots and all objects reachable from them. @@ -692,6 +696,8 @@ class MarkCompactCollector { // Map transitions from a live map to a dead map must be killed. // We replace them with a null descriptor, with the same key. void ClearNonLiveTransitions(); + void ClearNonLivePrototypeTransitions(Map* map); + void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark); // Marking detaches initial maps from SharedFunctionInfo objects // to make this reference weak. We need to reattach initial maps diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h index cc215091e37b7c..f9e75face8a69c 100644 --- a/deps/v8/src/mips/assembler-mips-inl.h +++ b/deps/v8/src/mips/assembler-mips-inl.h @@ -37,6 +37,7 @@ #define V8_MIPS_ASSEMBLER_MIPS_INL_H_ #include "mips/assembler-mips.h" + #include "cpu.h" #include "debug.h" diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc index d974f08a44e37a..3489936657a573 100644 --- a/deps/v8/src/mips/ic-mips.cc +++ b/deps/v8/src/mips/ic-mips.cc @@ -1038,21 +1038,27 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Load the key (consisting of map and symbol) from the cache and // check for match. 
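The descriptor-array marking above relies on MarkObjectWithoutPush returning the previous mark bit, so an AccessorPair is visited at most once and only its getter/setter slots are (roughly speaking) pushed for later scanning. A toy model of the two new helpers, using plain pointers and STL containers instead of real heap objects:

#include <unordered_set>
#include <vector>

struct Marker {
  std::unordered_set<const void*> marked;
  std::vector<const void*> deque;

  // Returns the old mark; marks the object if it was unmarked.
  bool MarkWithoutPush(const void* obj) {
    bool old_mark = marked.count(obj) != 0;
    if (!old_mark) marked.insert(obj);
    return old_mark;
  }

  // Newly marked objects are queued so their contents get scanned later.
  void MarkAndPush(const void* obj) {
    if (!MarkWithoutPush(obj)) deque.push_back(obj);
  }
};

int main() {
  Marker m;
  int a, b;
  m.MarkAndPush(&a);
  m.MarkAndPush(&a);  // second call is a no-op: already marked
  bool was_marked = m.MarkWithoutPush(&b);
  return (m.deque.size() == 1 && !was_marked) ? 0 : 1;
}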
- Label try_second_entry, hit_on_first_entry, load_in_object_property; + Label load_in_object_property; + static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; + Label hit_on_nth_entry[kEntriesPerBucket]; ExternalReference cache_keys = ExternalReference::keyed_lookup_cache_keys(isolate); __ li(t0, Operand(cache_keys)); __ sll(at, a3, kPointerSizeLog2 + 1); __ addu(t0, t0, at); - __ lw(t1, MemOperand(t0)); - __ Branch(&try_second_entry, ne, a2, Operand(t1)); - __ lw(t1, MemOperand(t0, kPointerSize)); - __ Branch(&hit_on_first_entry, eq, a0, Operand(t1)); - __ bind(&try_second_entry); - __ lw(t1, MemOperand(t0, kPointerSize * 2)); + for (int i = 0; i < kEntriesPerBucket - 1; i++) { + Label try_next_entry; + __ lw(t1, MemOperand(t0, kPointerSize * i * 2)); + __ Branch(&try_next_entry, ne, a2, Operand(t1)); + __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1))); + __ Branch(&hit_on_nth_entry[i], eq, a0, Operand(t1)); + __ bind(&try_next_entry); + } + + __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2)); __ Branch(&slow, ne, a2, Operand(t1)); - __ lw(t1, MemOperand(t0, kPointerSize * 3)); + __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1))); __ Branch(&slow, ne, a0, Operand(t1)); // Get field offset. @@ -1063,25 +1069,20 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { ExternalReference cache_field_offsets = ExternalReference::keyed_lookup_cache_field_offsets(isolate); - // Hit on second entry. - __ li(t0, Operand(cache_field_offsets)); - __ sll(at, a3, kPointerSizeLog2); - __ addu(at, t0, at); - __ lw(t1, MemOperand(at, kPointerSize)); - __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset)); - __ Subu(t1, t1, t2); - __ Branch(&property_array_property, ge, t1, Operand(zero_reg)); - __ Branch(&load_in_object_property); - - // Hit on first entry. - __ bind(&hit_on_first_entry); - __ li(t0, Operand(cache_field_offsets)); - __ sll(at, a3, kPointerSizeLog2); - __ addu(at, t0, at); - __ lw(t1, MemOperand(at)); - __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset)); - __ Subu(t1, t1, t2); - __ Branch(&property_array_property, ge, t1, Operand(zero_reg)); + // Hit on nth entry. + for (int i = kEntriesPerBucket - 1; i >= 0; i--) { + __ bind(&hit_on_nth_entry[i]); + __ li(t0, Operand(cache_field_offsets)); + __ sll(at, a3, kPointerSizeLog2); + __ addu(at, t0, at); + __ lw(t1, MemOperand(at, kPointerSize * i)); + __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset)); + __ Subu(t1, t1, t2); + __ Branch(&property_array_property, ge, t1, Operand(zero_reg)); + if (i != 0) { + __ Branch(&load_in_object_property); + } + } // Load in-object property. 
__ bind(&load_in_object_property); diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index 7308fb2bc3ca47..9457b04c519f83 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -4042,8 +4042,7 @@ INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset) ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset) ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset) ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset) -ACCESSORS(Code, next_code_flushing_candidate, - Object, kNextCodeFlushingCandidateOffset) +ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset) byte* Code::instruction_start() { diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index 426327c9caa686..1686a4d5a1baae 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -4954,15 +4954,43 @@ class IntrusiveMapTransitionIterator { Map* Next() { ASSERT(IsIterating()); FixedArray* contents = ContentArray(); + // Attention, tricky index manipulation ahead: Every entry in the contents + // array consists of a value/details pair, so the index is typically even. + // An exception is made for CALLBACKS entries: An even index means we look + // at its getter, and an odd index means we look at its setter. int index = Smi::cast(*ContentHeader())->value(); while (index < contents->length()) { - int next_index = index + 2; - PropertyDetails details(Smi::cast(contents->get(index + 1))); - if (details.IsTransition()) { - *ContentHeader() = Smi::FromInt(next_index); - return static_cast(contents->get(index)); + PropertyDetails details(Smi::cast(contents->get(index | 1))); + switch (details.type()) { + case MAP_TRANSITION: + case CONSTANT_TRANSITION: + case ELEMENTS_TRANSITION: + // We definitely have a map transition. + *ContentHeader() = Smi::FromInt(index + 2); + return static_cast(contents->get(index)); + case CALLBACKS: { + // We might have a map transition in a getter or in a setter. + AccessorPair* accessors = + static_cast(contents->get(index & ~1)); + Object* accessor = + ((index & 1) == 0) ? accessors->getter() : accessors->setter(); + index++; + if (accessor->IsMap()) { + *ContentHeader() = Smi::FromInt(index); + return static_cast(accessor); + } + break; + } + case NORMAL: + case FIELD: + case CONSTANT_FUNCTION: + case HANDLER: + case INTERCEPTOR: + case NULL_DESCRIPTOR: + // We definitely have no map transition. 
+ index += 2; + break; } - index = next_index; } *ContentHeader() = descriptor_array_->GetHeap()->fixed_array_map(); return NULL; @@ -8113,8 +8141,11 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) { static_cast(iterator.Next()); ASSERT(Translation::BEGIN == opcode); int frame_count = iterator.Next(); - PrintF(out, " %s {count=%d}\n", Translation::StringFor(opcode), - frame_count); + int jsframe_count = iterator.Next(); + PrintF(out, " %s {frame count=%d, js frame count=%d}\n", + Translation::StringFor(opcode), + frame_count, + jsframe_count); while (iterator.HasNext() && Translation::BEGIN != @@ -8126,7 +8157,7 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) { UNREACHABLE(); break; - case Translation::FRAME: { + case Translation::JS_FRAME: { int ast_id = iterator.Next(); int function_id = iterator.Next(); JSFunction* function = @@ -8138,6 +8169,12 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) { break; } + case Translation::ARGUMENTS_ADAPTOR_FRAME: { + unsigned height = iterator.Next(); + PrintF(out, "{arguments adaptor, height=%d}", height); + break; + } + case Translation::DUPLICATE: break; diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index f2e35a6b7c422c..b774a63279eb0b 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -4050,11 +4050,10 @@ class Code: public HeapObject { // [deoptimization_data]: Array containing data for deopt. DECL_ACCESSORS(deoptimization_data, FixedArray) - // [code_flushing_candidate]: Field only used during garbage - // collection to hold code flushing candidates. The contents of this + // [gc_metadata]: Field used to hold GC related metadata. The contents of this // field does not have to be traced during garbage collection since // it is only used by the garbage collector itself. - DECL_ACCESSORS(next_code_flushing_candidate, Object) + DECL_ACCESSORS(gc_metadata, Object) // Unchecked accessors to be used during GC. 
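As the updated DeoptimizationInputDataPrint shows, a translation now opens with both a total frame count and a JS frame count, and each frame record is tagged JS_FRAME or ARGUMENTS_ADAPTOR_FRAME. A simplified reader that mirrors that layout; the enum and stream shape here are stand-ins, not the real Translation encoding:

#include <cstdio>
#include <vector>

enum Opcode { JS_FRAME, ARGUMENTS_ADAPTOR_FRAME };

struct Frame { Opcode op; int height; };

void PrintTranslation(const std::vector<Frame>& frames) {
  int frame_count = static_cast<int>(frames.size());
  int jsframe_count = 0;
  for (const Frame& f : frames) {
    if (f.op == JS_FRAME) ++jsframe_count;  // adaptor frames are not JS frames
  }
  // Mirrors the updated "BEGIN {frame count=..., js frame count=...}" line.
  std::printf("BEGIN {frame count=%d, js frame count=%d}\n",
              frame_count, jsframe_count);
  for (const Frame& f : frames) {
    if (f.op == JS_FRAME) {
      std::printf("  JS_FRAME {height=%d}\n", f.height);
    } else {
      std::printf("  ARGUMENTS_ADAPTOR_FRAME {height=%d}\n", f.height);
    }
  }
}

int main() {
  std::vector<Frame> frames = {
      {JS_FRAME, 4}, {ARGUMENTS_ADAPTOR_FRAME, 2}, {JS_FRAME, 3}};
  PrintTranslation(frames);
  return 0;
}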
inline ByteArray* unchecked_relocation_info(); @@ -4278,10 +4277,8 @@ class Code: public HeapObject { static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize; static const int kDeoptimizationDataOffset = kHandlerTableOffset + kPointerSize; - static const int kNextCodeFlushingCandidateOffset = - kDeoptimizationDataOffset + kPointerSize; - static const int kFlagsOffset = - kNextCodeFlushingCandidateOffset + kPointerSize; + static const int kGCMetadataOffset = kDeoptimizationDataOffset + kPointerSize; + static const int kFlagsOffset = kGCMetadataOffset + kPointerSize; static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize; static const int kKindSpecificFlagsSize = 2 * kIntSize; diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc index 6d04fb1aaa95ff..c6dedd4ad28721 100644 --- a/deps/v8/src/platform-freebsd.cc +++ b/deps/v8/src/platform-freebsd.cc @@ -464,15 +464,8 @@ class Thread::PlatformData : public Malloced { Thread::Thread(const Options& options) : data_(new PlatformData), - stack_size_(options.stack_size) { - set_name(options.name); -} - - -Thread::Thread(const char* name) - : data_(new PlatformData), - stack_size_(0) { - set_name(name); + stack_size_(options.stack_size()) { + set_name(options.name()); } @@ -717,8 +710,10 @@ class SignalSender : public Thread { FULL_INTERVAL }; + static const int kSignalSenderStackSize = 32 * KB; + explicit SignalSender(int interval) - : Thread("SignalSender"), + : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)), interval_(interval) {} static void AddActiveSampler(Sampler* sampler) { diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc index 30b60866165644..6f64f2d31c4873 100644 --- a/deps/v8/src/platform-linux.cc +++ b/deps/v8/src/platform-linux.cc @@ -720,15 +720,8 @@ class Thread::PlatformData : public Malloced { Thread::Thread(const Options& options) : data_(new PlatformData()), - stack_size_(options.stack_size) { - set_name(options.name); -} - - -Thread::Thread(const char* name) - : data_(new PlatformData()), - stack_size_(0) { - set_name(name); + stack_size_(options.stack_size()) { + set_name(options.name()); } @@ -1035,8 +1028,10 @@ class SignalSender : public Thread { FULL_INTERVAL }; + static const int kSignalSenderStackSize = 32 * KB; + explicit SignalSender(int interval) - : Thread("SignalSender"), + : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)), vm_tgid_(getpid()), interval_(interval) {} diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc index 9f8fe1209fffa9..f061f67a403eaa 100644 --- a/deps/v8/src/platform-macos.cc +++ b/deps/v8/src/platform-macos.cc @@ -473,17 +473,11 @@ class Thread::PlatformData : public Malloced { pthread_t thread_; // Thread handle for pthread. 
}; -Thread::Thread(const Options& options) - : data_(new PlatformData), - stack_size_(options.stack_size) { - set_name(options.name); -} - -Thread::Thread(const char* name) +Thread::Thread(const Options& options) : data_(new PlatformData), - stack_size_(0) { - set_name(name); + stack_size_(options.stack_size()) { + set_name(options.name()); } @@ -736,10 +730,13 @@ class Sampler::PlatformData : public Malloced { thread_act_t profiled_thread_; }; + class SamplerThread : public Thread { public: + static const int kSamplerThreadStackSize = 32 * KB; + explicit SamplerThread(int interval) - : Thread("SamplerThread"), + : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)), interval_(interval) {} static void AddActiveSampler(Sampler* sampler) { diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc index fda5fb45895be9..96dece014662f2 100644 --- a/deps/v8/src/platform-openbsd.cc +++ b/deps/v8/src/platform-openbsd.cc @@ -512,15 +512,8 @@ class Thread::PlatformData : public Malloced { Thread::Thread(const Options& options) : data_(new PlatformData()), - stack_size_(options.stack_size) { - set_name(options.name); -} - - -Thread::Thread(const char* name) - : data_(new PlatformData()), - stack_size_(0) { - set_name(name); + stack_size_(options.stack_size()) { + set_name(options.name()); } @@ -789,8 +782,10 @@ class SignalSender : public Thread { FULL_INTERVAL }; + static const int kSignalSenderStackSize = 32 * KB; + explicit SignalSender(int interval) - : Thread("SignalSender"), + : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)), vm_tgid_(getpid()), interval_(interval) {} diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc index d46da9652fb609..85a420350b11a4 100644 --- a/deps/v8/src/platform-solaris.cc +++ b/deps/v8/src/platform-solaris.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -53,8 +53,8 @@ #include "v8.h" #include "platform.h" -#include "vm-state-inl.h" #include "v8threads.h" +#include "vm-state-inl.h" // It seems there is a bug in some Solaris distributions (experienced in @@ -84,33 +84,6 @@ namespace internal { static const pthread_t kNoThread = (pthread_t) 0; -static void* GetRandomMmapAddr() { - Isolate* isolate = Isolate::UncheckedCurrent(); - // Note that the current isolate isn't set up in a call path via - // CpuFeatures::Probe. We don't care about randomization in this case because - // the code page is immediately freed. - if (isolate != NULL) { -#ifdef V8_TARGET_ARCH_X64 - uint64_t rnd1 = V8::RandomPrivate(isolate); - uint64_t rnd2 = V8::RandomPrivate(isolate); - uint64_t raw_addr = (rnd1 << 32) ^ rnd2; - // Currently available CPUs have 48 bits of virtual addressing. Truncate - // the hint address to 46 bits to give the kernel a fighting chance of - // fulfilling our placement request. - raw_addr &= V8_UINT64_C(0x3ffffffff000); -#else - uint32_t raw_addr = V8::RandomPrivate(isolate); - // The range 0x20000000 - 0x60000000 is relatively unpopulated across a - // variety of ASLR modes (PAE kernel, NX compat mode, etc). 
- raw_addr &= 0x3ffff000; - raw_addr += 0x20000000; -#endif - return reinterpret_cast(raw_addr); - } - return NULL; -} - - double ceiling(double x) { return ceil(x); } @@ -167,7 +140,7 @@ double OS::LocalTimeOffset() { // We keep the lowest and highest addresses mapped as a quick way of // determining that pointers are outside the heap (used mostly in assertions -// and verification). The estimate is conservative, ie, not all addresses in +// and verification). The estimate is conservative, i.e., not all addresses in // 'allocated' space are actually allocated to our heap. The range is // [lowest, highest), inclusive on the low and and exclusive on the high end. static void* lowest_ever_allocated = reinterpret_cast(-1); @@ -363,7 +336,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment) ASSERT(IsAligned(alignment, static_cast(OS::AllocateAlignment()))); size_t request_size = RoundUp(size + alignment, static_cast(OS::AllocateAlignment())); - void* reservation = mmap(GetRandomMmapAddr(), + void* reservation = mmap(OS::GetRandomMmapAddr(), request_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, @@ -429,7 +402,7 @@ bool VirtualMemory::Uncommit(void* address, size_t size) { void* VirtualMemory::ReserveRegion(size_t size) { - void* result = mmap(GetRandomMmapAddr(), + void* result = mmap(OS::GetRandomMmapAddr(), size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, @@ -480,17 +453,11 @@ class Thread::PlatformData : public Malloced { pthread_t thread_; // Thread handle for pthread. }; -Thread::Thread(const Options& options) - : data_(new PlatformData()), - stack_size_(options.stack_size) { - set_name(options.name); -} - -Thread::Thread(const char* name) +Thread::Thread(const Options& options) : data_(new PlatformData()), - stack_size_(0) { - set_name(name); + stack_size_(options.stack_size()) { + set_name(options.name()); } @@ -737,8 +704,10 @@ class SignalSender : public Thread { FULL_INTERVAL }; + static const int kSignalSenderStackSize = 32 * KB; + explicit SignalSender(int interval) - : Thread("SignalSender"), + : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)), interval_(interval) {} static void InstallSignalHandler() { @@ -870,6 +839,7 @@ class SignalSender : public Thread { static bool signal_handler_installed_; static struct sigaction old_signal_handler_; + private: DISALLOW_COPY_AND_ASSIGN(SignalSender); }; diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc index ffda6606a890f0..52bc3f569ccc55 100644 --- a/deps/v8/src/platform-win32.cc +++ b/deps/v8/src/platform-win32.cc @@ -1526,16 +1526,9 @@ class Thread::PlatformData : public Malloced { // handle until it is started. 
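The platform changes above retire the Thread(const char* name) constructor; every call site now builds a Thread::Options carrying a name and an explicit stack size, and the sampler/signal-sender threads request 32 KB. A minimal stand-alone copy of that Options shape with a usage example (illustrative, not the header itself):

#include <cstdio>

static const int KB = 1024;

// Simplified stand-in for v8::internal::Thread::Options after this patch.
class Options {
 public:
  Options() : name_("v8:"), stack_size_(0) {}
  Options(const char* name, int stack_size = 0)
      : name_(name), stack_size_(stack_size) {}

  const char* name() const { return name_; }
  int stack_size() const { return stack_size_; }

 private:
  const char* name_;
  int stack_size_;
};

int main() {
  // Call sites now state the stack size explicitly instead of relying on the
  // platform default; the profiling threads ask for 32 KB.
  Options sampler_options("SignalSender", 32 * KB);
  std::printf("%s stack=%d bytes\n",
              sampler_options.name(), sampler_options.stack_size());
  return 0;
}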
Thread::Thread(const Options& options) - : stack_size_(options.stack_size) { + : stack_size_(options.stack_size()) { data_ = new PlatformData(kNoThread); - set_name(options.name); -} - - -Thread::Thread(const char* name) - : stack_size_(0) { - data_ = new PlatformData(kNoThread); - set_name(name); + set_name(options.name()); } @@ -1901,8 +1894,10 @@ class Sampler::PlatformData : public Malloced { class SamplerThread : public Thread { public: + static const int kSamplerThreadStackSize = 32 * KB; + explicit SamplerThread(int interval) - : Thread("SamplerThread"), + : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)), interval_(interval) {} static void AddActiveSampler(Sampler* sampler) { diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h index fc12df2d7cf962..a0186d580fe32c 100644 --- a/deps/v8/src/platform.h +++ b/deps/v8/src/platform.h @@ -412,16 +412,22 @@ class Thread { LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt }; - struct Options { - Options() : name("v8:"), stack_size(0) {} + class Options { + public: + Options() : name_("v8:"), stack_size_(0) {} + Options(const char* name, int stack_size = 0) + : name_(name), stack_size_(stack_size) {} + + const char* name() const { return name_; } + int stack_size() const { return stack_size_; } - const char* name; - int stack_size; + private: + const char* name_; + int stack_size_; }; // Create new thread. explicit Thread(const Options& options); - explicit Thread(const char* name); virtual ~Thread(); // Start new thread by calling the Run() method in the new thread. diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc index 2bac30473beb07..f95ecdfc6f2f00 100644 --- a/deps/v8/src/runtime.cc +++ b/deps/v8/src/runtime.cc @@ -8126,13 +8126,15 @@ static SmartArrayPointer > GetCallerArguments( List functions(2); frame->GetFunctions(&functions); if (functions.length() > 1) { - int inlined_frame_index = functions.length() - 1; - JSFunction* inlined_function = functions[inlined_frame_index]; - int args_count = inlined_function->shared()->formal_parameter_count(); - ScopedVector args_slots(args_count); - SlotRef::ComputeSlotMappingForArguments(frame, - inlined_frame_index, - &args_slots); + int inlined_jsframe_index = functions.length() - 1; + JSFunction* inlined_function = functions[inlined_jsframe_index]; + Vector args_slots = + SlotRef::ComputeSlotMappingForArguments( + frame, + inlined_jsframe_index, + inlined_function->shared()->formal_parameter_count()); + + int args_count = args_slots.length(); *total_argc = prefix_argc + args_count; SmartArrayPointer > param_data( @@ -8141,6 +8143,9 @@ static SmartArrayPointer > GetCallerArguments( Handle val = args_slots[i].GetValue(); param_data[prefix_argc + i] = val; } + + args_slots.Dispose(); + return param_data; } else { it.AdvanceToArgumentsFrame(); @@ -8486,14 +8491,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) { static_cast(args.smi_at(0)); Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate); ASSERT(isolate->heap()->IsAllocationAllowed()); - int frames = deoptimizer->output_count(); + int jsframes = deoptimizer->jsframe_count(); deoptimizer->MaterializeHeapNumbers(); delete deoptimizer; JavaScriptFrameIterator it(isolate); JavaScriptFrame* frame = NULL; - for (int i = 0; i < frames - 1; i++) it.Advance(); + for (int i = 0; i < jsframes - 1; i++) it.Advance(); frame = it.frame(); RUNTIME_ASSERT(frame->function()->IsJSFunction()); @@ -10703,13 +10708,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameCount) { class FrameInspector { public: 
FrameInspector(JavaScriptFrame* frame, - int inlined_frame_index, + int inlined_jsframe_index, Isolate* isolate) : frame_(frame), deoptimized_frame_(NULL), isolate_(isolate) { // Calculate the deoptimized frame. if (frame->is_optimized()) { deoptimized_frame_ = Deoptimizer::DebuggerInspectableFrame( - frame, inlined_frame_index, isolate); + frame, inlined_jsframe_index, isolate); } has_adapted_arguments_ = frame_->has_adapted_arguments(); is_optimized_ = frame_->is_optimized(); @@ -10825,8 +10830,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) { return heap->undefined_value(); } - int inlined_frame_index = 0; // Inlined frame index in optimized frame. - int count = 0; JavaScriptFrameIterator it(isolate, id); for (; !it.done(); it.Advance()) { @@ -10835,11 +10838,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) { } if (it.done()) return heap->undefined_value(); - if (it.frame()->is_optimized()) { - inlined_frame_index = + bool is_optimized = it.frame()->is_optimized(); + + int inlined_jsframe_index = 0; // Inlined frame index in optimized frame. + if (is_optimized) { + inlined_jsframe_index = it.frame()->GetInlineCount() - (index - count) - 1; } - FrameInspector frame_inspector(it.frame(), inlined_frame_index, isolate); + FrameInspector frame_inspector(it.frame(), inlined_jsframe_index, isolate); // Traverse the saved contexts chain to find the active context for the // selected frame. @@ -10853,12 +10859,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) { it.frame()->LookupCode()->SourcePosition(it.frame()->pc()); // Check for constructor frame. Inlined frames cannot be construct calls. - bool inlined_frame = - it.frame()->is_optimized() && inlined_frame_index != 0; + bool inlined_frame = is_optimized && inlined_jsframe_index != 0; bool constructor = !inlined_frame && it.frame()->IsConstructor(); // Get scope info and read from it for local variable information. - Handle function(JSFunction::cast(it.frame()->function())); + Handle function(JSFunction::cast(frame_inspector.GetFunction())); Handle shared(function->shared()); Handle scope_info(shared->scope_info()); ASSERT(*scope_info != ScopeInfo::Empty()); @@ -10895,7 +10900,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) { // Check whether this frame is positioned at return. If not top // frame or if the frame is optimized it cannot be at a return. bool at_return = false; - if (!it.frame()->is_optimized() && index == 0) { + if (!is_optimized && index == 0) { at_return = isolate->debug()->IsBreakAtReturn(it.frame()); } @@ -10935,7 +10940,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) { // the provided parameters whereas the function frame always have the number // of arguments matching the functions parameters. The rest of the // information (except for what is collected above) is the same. - if (it.frame()->has_adapted_arguments()) { + if ((inlined_jsframe_index == 0) && it.frame()->has_adapted_arguments()) { it.AdvanceToArgumentsFrame(); frame_inspector.SetArgumentsFrame(it.frame()); } @@ -10946,11 +10951,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) { if (argument_count < frame_inspector.GetParametersCount()) { argument_count = frame_inspector.GetParametersCount(); } -#ifdef DEBUG - if (it.frame()->is_optimized()) { - ASSERT_EQ(argument_count, frame_inspector.GetParametersCount()); - } -#endif // Calculate the size of the result. 
int details_size = kFrameDetailsFirstDynamicIndex + @@ -10992,9 +10992,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) { if (*save->context() == *isolate->debug()->debug_context()) { flags |= 1 << 0; } - if (it.frame()->is_optimized()) { + if (is_optimized) { flags |= 1 << 1; - flags |= inlined_frame_index << 2; + flags |= inlined_jsframe_index << 2; } details->set(kFrameDetailsFlagsIndex, Smi::FromInt(flags)); @@ -11011,7 +11011,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) { } // Parameter value. - if (i < it.frame()->ComputeParametersCount()) { + if (i < frame_inspector.GetParametersCount()) { // Get the value from the stack. details->set(details_index++, frame_inspector.GetParameter(i)); } else { @@ -11084,14 +11084,13 @@ static bool CopyContextLocalsToScopeObject( // Create a plain JSObject which materializes the local scope for the specified // frame. -static Handle MaterializeLocalScope( +static Handle MaterializeLocalScopeWithFrameInspector( Isolate* isolate, JavaScriptFrame* frame, - int inlined_frame_index) { - Handle function(JSFunction::cast(frame->function())); + FrameInspector* frame_inspector) { + Handle function(JSFunction::cast(frame_inspector->GetFunction())); Handle shared(function->shared()); Handle scope_info(shared->scope_info()); - FrameInspector frame_inspector(frame, inlined_frame_index, isolate); // Allocate and initialize a JSObject with all the arguments, stack locals // heap locals and extension properties of the debugged function. @@ -11100,11 +11099,15 @@ static Handle MaterializeLocalScope( // First fill all parameters. for (int i = 0; i < scope_info->ParameterCount(); ++i) { + Handle value( + i < frame_inspector->GetParametersCount() ? + frame_inspector->GetParameter(i) : isolate->heap()->undefined_value()); + RETURN_IF_EMPTY_HANDLE_VALUE( isolate, SetProperty(local_scope, Handle(scope_info->ParameterName(i)), - Handle(frame_inspector.GetParameter(i)), + value, NONE, kNonStrictMode), Handle()); @@ -11116,7 +11119,7 @@ static Handle MaterializeLocalScope( isolate, SetProperty(local_scope, Handle(scope_info->StackLocalName(i)), - Handle(frame_inspector.GetExpression(i)), + Handle(frame_inspector->GetExpression(i)), NONE, kNonStrictMode), Handle()); @@ -11163,6 +11166,17 @@ static Handle MaterializeLocalScope( } +static Handle MaterializeLocalScope( + Isolate* isolate, + JavaScriptFrame* frame, + int inlined_jsframe_index) { + FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate); + return MaterializeLocalScopeWithFrameInspector(isolate, + frame, + &frame_inspector); +} + + // Create a plain JSObject which materializes the closure content for the // context. static Handle MaterializeClosure(Isolate* isolate, @@ -11268,10 +11282,10 @@ class ScopeIterator { ScopeIterator(Isolate* isolate, JavaScriptFrame* frame, - int inlined_frame_index) + int inlined_jsframe_index) : isolate_(isolate), frame_(frame), - inlined_frame_index_(inlined_frame_index), + inlined_jsframe_index_(inlined_jsframe_index), function_(JSFunction::cast(frame->function())), context_(Context::cast(frame->context())), nested_scope_chain_(4) { @@ -11428,7 +11442,7 @@ class ScopeIterator { case ScopeIterator::ScopeTypeLocal: // Materialize the content of the local scope into a JSObject. ASSERT(nested_scope_chain_.length() == 1); - return MaterializeLocalScope(isolate_, frame_, inlined_frame_index_); + return MaterializeLocalScope(isolate_, frame_, inlined_jsframe_index_); case ScopeIterator::ScopeTypeWith: // Return the with object. 
return Handle(JSObject::cast(CurrentContext()->extension())); @@ -11524,7 +11538,7 @@ class ScopeIterator { private: Isolate* isolate_; JavaScriptFrame* frame_; - int inlined_frame_index_; + int inlined_jsframe_index_; Handle function_; Handle context_; List > nested_scope_chain_; @@ -11586,7 +11600,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) { if (!maybe_check->ToObject(&check)) return maybe_check; } CONVERT_CHECKED(Smi, wrapped_id, args[1]); - CONVERT_NUMBER_CHECKED(int, inlined_frame_index, Int32, args[2]); + CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]); CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]); // Get the frame where the debugging is performed. @@ -11596,7 +11610,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) { // Find the requested scope. int n = 0; - ScopeIterator it(isolate, frame, inlined_frame_index); + ScopeIterator it(isolate, frame, inlined_jsframe_index); for (; !it.Done() && n < index; it.Next()) { n++; } @@ -11994,12 +12008,12 @@ static Handle CopyNestedScopeContextChain(Isolate* isolate, Handle function, Handle base, JavaScriptFrame* frame, - int inlined_frame_index) { + int inlined_jsframe_index) { HandleScope scope(isolate); List > scope_chain; List > context_chain; - ScopeIterator it(isolate, frame, inlined_frame_index); + ScopeIterator it(isolate, frame, inlined_jsframe_index); for (; it.Type() != ScopeIterator::ScopeTypeGlobal && it.Type() != ScopeIterator::ScopeTypeLocal ; it.Next()) { ASSERT(!it.Done()); @@ -12056,8 +12070,7 @@ static Handle CopyNestedScopeContextChain(Isolate* isolate, // Runtime_DebugEvaluate. static Handle GetArgumentsObject(Isolate* isolate, JavaScriptFrame* frame, - int inlined_frame_index, - Handle function, + FrameInspector* frame_inspector, Handle scope_info, Handle function_context) { // Try to find the value of 'arguments' to pass as parameter. 
If it is not @@ -12081,9 +12094,8 @@ static Handle GetArgumentsObject(Isolate* isolate, } } - FrameInspector frame_inspector(frame, inlined_frame_index, isolate); - - int length = frame_inspector.GetParametersCount(); + Handle function(JSFunction::cast(frame_inspector->GetFunction())); + int length = frame_inspector->GetParametersCount(); Handle arguments = isolate->factory()->NewArgumentsObject(function, length); Handle array = isolate->factory()->NewFixedArray(length); @@ -12091,7 +12103,7 @@ static Handle GetArgumentsObject(Isolate* isolate, AssertNoAllocation no_gc; WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc); for (int i = 0; i < length; i++) { - array->set(i, frame_inspector.GetParameter(i), mode); + array->set(i, frame_inspector->GetParameter(i), mode); } arguments->set_elements(*array); return arguments; @@ -12127,7 +12139,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) { } } CONVERT_CHECKED(Smi, wrapped_id, args[1]); - CONVERT_NUMBER_CHECKED(int, inlined_frame_index, Int32, args[2]); + CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]); CONVERT_ARG_CHECKED(String, source, 3); CONVERT_BOOLEAN_CHECKED(disable_break, args[4]); Handle additional_context(args[5]); @@ -12139,7 +12151,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) { StackFrame::Id id = UnwrapFrameId(wrapped_id); JavaScriptFrameIterator it(isolate, id); JavaScriptFrame* frame = it.frame(); - Handle function(JSFunction::cast(frame->function())); + FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate); + Handle function(JSFunction::cast(frame_inspector.GetFunction())); Handle scope_info(function->shared()->scope_info()); // Traverse the saved contexts chain to find the active context for the @@ -12166,8 +12179,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) { #endif // Materialize the content of the local scope into a JSObject. - Handle local_scope = MaterializeLocalScope( - isolate, frame, inlined_frame_index); + Handle local_scope = MaterializeLocalScopeWithFrameInspector( + isolate, frame, &frame_inspector); RETURN_IF_EMPTY_HANDLE(isolate, local_scope); // Allocate a new context for the debug evaluation and set the extension @@ -12187,7 +12200,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) { go_between, context, frame, - inlined_frame_index); + inlined_jsframe_index); if (additional_context->IsJSObject()) { Handle extension = Handle::cast(additional_context); @@ -12227,8 +12240,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) { Handle arguments = GetArgumentsObject(isolate, frame, - inlined_frame_index, - function, + &frame_inspector, scope_info, function_context); diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc index e9be2492d7aa43..d9fc2b7b7c7a33 100644 --- a/deps/v8/src/serialize.cc +++ b/deps/v8/src/serialize.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -1081,36 +1081,6 @@ void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) { PutSection(static_cast(integer & 0x7f), "IntLastPart"); } -#ifdef DEBUG - -void Deserializer::Synchronize(const char* tag) { - int data = source_->Get(); - // If this assert fails then that indicates that you have a mismatch between - // the number of GC roots when serializing and deserializing. 
- ASSERT_EQ(kSynchronize, data); - do { - int character = source_->Get(); - if (character == 0) break; - if (FLAG_debug_serialization) { - PrintF("%c", character); - } - } while (true); - if (FLAG_debug_serialization) { - PrintF("\n"); - } -} - - -void Serializer::Synchronize(const char* tag) { - sink_->Put(kSynchronize, tag); - int character; - do { - character = *tag++; - sink_->PutSection(character, "TagCharacter"); - } while (character != 0); -} - -#endif Serializer::Serializer(SnapshotByteSink* sink) : sink_(sink), diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h index 839bfc5618cc33..72eed5ad2f52eb 100644 --- a/deps/v8/src/serialize.h +++ b/deps/v8/src/serialize.h @@ -341,10 +341,6 @@ class Deserializer: public SerializerDeserializer { // Deserialize a single object and the objects reachable from it. void DeserializePartial(Object** root); -#ifdef DEBUG - virtual void Synchronize(const char* tag); -#endif - private: virtual void VisitPointers(Object** start, Object** end); @@ -485,9 +481,6 @@ class Serializer : public SerializerDeserializer { SerializationAddressMapper* address_mapper() { return &address_mapper_; } void PutRoot( int index, HeapObject* object, HowToCode how, WhereToPoint where); -#ifdef DEBUG - virtual void Synchronize(const char* tag); -#endif protected: static const int kInvalidRootIndex = -1; diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc index d5b4d81322655d..05c5876fdf34ca 100644 --- a/deps/v8/src/spaces.cc +++ b/deps/v8/src/spaces.cc @@ -922,14 +922,13 @@ bool NewSpace::SetUp(int reserved_semispace_capacity, 2 * heap()->ReservedSemiSpaceSize()); ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0)); - if (!to_space_.SetUp(chunk_base_, - initial_semispace_capacity, - maximum_semispace_capacity)) { - return false; - } - if (!from_space_.SetUp(chunk_base_ + reserved_semispace_capacity, - initial_semispace_capacity, - maximum_semispace_capacity)) { + to_space_.SetUp(chunk_base_, + initial_semispace_capacity, + maximum_semispace_capacity); + from_space_.SetUp(chunk_base_ + reserved_semispace_capacity, + initial_semispace_capacity, + maximum_semispace_capacity); + if (!to_space_.Commit()) { return false; } @@ -1162,7 +1161,7 @@ void NewSpace::Verify() { // ----------------------------------------------------------------------------- // SemiSpace implementation -bool SemiSpace::SetUp(Address start, +void SemiSpace::SetUp(Address start, int initial_capacity, int maximum_capacity) { // Creates a space in the young generation. The constructor does not @@ -1181,8 +1180,6 @@ bool SemiSpace::SetUp(Address start, object_mask_ = address_mask_ | kHeapObjectTagMask; object_expected_ = reinterpret_cast(start) | kHeapObjectTag; age_mark_ = start_; - - return Commit(); } @@ -1232,6 +1229,9 @@ bool SemiSpace::Uncommit() { bool SemiSpace::GrowTo(int new_capacity) { + if (!is_committed()) { + if (!Commit()) return false; + } ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); ASSERT(new_capacity <= maximum_capacity_); ASSERT(new_capacity > capacity_); diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h index f49873ac4ffb53..1a30078438ed74 100644 --- a/deps/v8/src/spaces.h +++ b/deps/v8/src/spaces.h @@ -1834,7 +1834,7 @@ class SemiSpace : public Space { current_page_(NULL) { } // Sets up the semispace using the given chunk. - bool SetUp(Address start, int initial_capacity, int maximum_capacity); + void SetUp(Address start, int initial_capacity, int maximum_capacity); // Tear down the space. 
Heap memory was not allocated by the space, so it // is not deallocated here. diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h index 9d3080abbff3ca..80ebf0c63a887f 100644 --- a/deps/v8/src/type-info.h +++ b/deps/v8/src/type-info.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -223,6 +223,7 @@ class CaseClause; class CompareOperation; class CompilationInfo; class CountOperation; +class Expression; class Property; class SmallMapList; class UnaryOperation; diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h index bdb960e8fdcd98..ff3ad8d7486163 100644 --- a/deps/v8/src/v8globals.h +++ b/deps/v8/src/v8globals.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -107,14 +107,12 @@ const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32); // ----------------------------------------------------------------------------- // Forward declarations for frequently used classes -// (sorted alphabetically) class AccessorInfo; class Allocation; class Arguments; class Assembler; class AssertNoAllocation; -class BreakableStatement; class Code; class CodeGenerator; class CodeStub; @@ -124,10 +122,8 @@ class Debugger; class DebugInfo; class Descriptor; class DescriptorArray; -class Expression; class ExternalReference; class FixedArray; -class FunctionLiteral; class FunctionTemplateInfo; class MemoryChunk; class SeededNumberDictionary; @@ -138,7 +134,6 @@ class Heap; class HeapObject; class IC; class InterceptorInfo; -class IterationStatement; class JSArray; class JSFunction; class JSObject; @@ -149,31 +144,19 @@ class Map; class MapSpace; class MarkCompactCollector; class NewSpace; -class NodeVisitor; class Object; class MaybeObject; class OldSpace; -class Property; class Foreign; -class RegExpNode; -struct RegExpCompileData; -class RegExpTree; -class RegExpCompiler; -class RegExpVisitor; class Scope; class ScopeInfo; class Script; -class Slot; class Smi; template class SplayTree; -class Statement; class String; class Struct; -class SwitchStatement; -class AstVisitor; class Variable; -class VariableProxy; class RelocInfo; class Deserializer; class MessageLocation; diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc index ba531abcb8b501..77d29f585d12bc 100644 --- a/deps/v8/src/version.cc +++ b/deps/v8/src/version.cc @@ -34,7 +34,7 @@ // cannot be changed without changing the SCons build script. #define MAJOR_VERSION 3 #define MINOR_VERSION 8 -#define BUILD_NUMBER 8 +#define BUILD_NUMBER 9 #define PATCH_LEVEL 0 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h index 141d092d8e132b..8e3caa444a6aa8 100644 --- a/deps/v8/src/x64/assembler-x64-inl.h +++ b/deps/v8/src/x64/assembler-x64-inl.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -28,6 +28,8 @@ #ifndef V8_X64_ASSEMBLER_X64_INL_H_ #define V8_X64_ASSEMBLER_X64_INL_H_ +#include "x64/assembler-x64.h" + #include "cpu.h" #include "debug.h" #include "v8memory.h" diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc index 4833e03c8e896a..5258f26263dc75 100644 --- a/deps/v8/src/x64/builtins-x64.cc +++ b/deps/v8/src/x64/builtins-x64.cc @@ -1547,6 +1547,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ bind(&invoke); __ call(rdx); + masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset()); // Leave frame and return. LeaveArgumentsAdaptorFrame(masm); __ ret(0); diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc index a5a171a2a8833f..efa988874edc35 100644 --- a/deps/v8/src/x64/deoptimizer-x64.cc +++ b/deps/v8/src/x64/deoptimizer-x64.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -206,12 +206,13 @@ void Deoptimizer::DoComputeOsrOutputFrame() { ASSERT(Translation::BEGIN == opcode); USE(opcode); int count = iterator.Next(); + iterator.Skip(1); // Drop JS frame count. ASSERT(count == 1); USE(count); opcode = static_cast(iterator.Next()); USE(opcode); - ASSERT(Translation::FRAME == opcode); + ASSERT(Translation::JS_FRAME == opcode); unsigned node_id = iterator.Next(); USE(node_id); ASSERT(node_id == ast_id); @@ -247,9 +248,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { output_ = new FrameDescription*[1]; output_[0] = new(output_frame_size) FrameDescription( output_frame_size, function_); -#ifdef DEBUG - output_[0]->SetKind(Code::OPTIMIZED_FUNCTION); -#endif + output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT); // Clear the incoming parameters in the optimized frame to avoid // confusing the garbage collector. @@ -338,13 +337,117 @@ void Deoptimizer::DoComputeOsrOutputFrame() { } -void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, - int frame_index) { - // Read the ast node id, function, and frame height for this output frame. - Translation::Opcode opcode = - static_cast(iterator->Next()); - USE(opcode); - ASSERT(Translation::FRAME == opcode); +void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator, + int frame_index) { + JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); + unsigned height = iterator->Next(); + unsigned height_in_bytes = height * kPointerSize; + if (FLAG_trace_deopt) { + PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes); + } + + unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize; + unsigned input_frame_size = input_->GetFrameSize(); + unsigned output_frame_size = height_in_bytes + fixed_frame_size; + + // Allocate and store the output frame description. + FrameDescription* output_frame = + new(output_frame_size) FrameDescription(output_frame_size, function); + output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR); + + // Arguments adaptor can not be topmost or bottommost. 
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1); + ASSERT(output_[frame_index] == NULL); + output_[frame_index] = output_frame; + + // The top address of the frame is computed from the previous + // frame's top and this frame's size. + intptr_t top_address; + top_address = output_[frame_index - 1]->GetTop() - output_frame_size; + output_frame->SetTop(top_address); + + // Compute the incoming parameter translation. + int parameter_count = height; + unsigned output_offset = output_frame_size; + unsigned input_offset = input_frame_size; + for (int i = 0; i < parameter_count; ++i) { + output_offset -= kPointerSize; + DoTranslateCommand(iterator, frame_index, output_offset); + } + input_offset -= (parameter_count * kPointerSize); + + // Read caller's PC from the previous frame. + output_offset -= kPointerSize; + input_offset -= kPointerSize; + intptr_t callers_pc = output_[frame_index - 1]->GetPc(); + output_frame->SetFrameSlot(output_offset, callers_pc); + if (FLAG_trace_deopt) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; caller's pc\n", + top_address + output_offset, output_offset, callers_pc); + } + + // Read caller's FP from the previous frame, and set this frame's FP. + output_offset -= kPointerSize; + input_offset -= kPointerSize; + intptr_t value = output_[frame_index - 1]->GetFp(); + output_frame->SetFrameSlot(output_offset, value); + intptr_t fp_value = top_address + output_offset; + output_frame->SetFp(fp_value); + if (FLAG_trace_deopt) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; caller's fp\n", + fp_value, output_offset, value); + } + + // A marker value is used in place of the context. + output_offset -= kPointerSize; + input_offset -= kPointerSize; + intptr_t context = reinterpret_cast( + Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); + output_frame->SetFrameSlot(output_offset, context); + if (FLAG_trace_deopt) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; context (adaptor sentinel)\n", + top_address + output_offset, output_offset, context); + } + + // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME. + output_offset -= kPointerSize; + input_offset -= kPointerSize; + value = reinterpret_cast(function); + output_frame->SetFrameSlot(output_offset, value); + if (FLAG_trace_deopt) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; function\n", + top_address + output_offset, output_offset, value); + } + + // Number of incoming arguments. 
+ output_offset -= kPointerSize; + input_offset -= kPointerSize; + value = reinterpret_cast(Smi::FromInt(height - 1)); + output_frame->SetFrameSlot(output_offset, value); + if (FLAG_trace_deopt) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; argc (%d)\n", + top_address + output_offset, output_offset, value, height - 1); + } + + ASSERT(0 == output_offset); + + Builtins* builtins = isolate_->builtins(); + Code* adaptor_trampoline = + builtins->builtin(Builtins::kArgumentsAdaptorTrampoline); + intptr_t pc_value = reinterpret_cast( + adaptor_trampoline->instruction_start() + + isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value()); + output_frame->SetPc(pc_value); +} + + +void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, + int frame_index) { int node_id = iterator->Next(); JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); unsigned height = iterator->Next(); @@ -364,9 +467,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, // Allocate and store the output frame description. FrameDescription* output_frame = new(output_frame_size) FrameDescription(output_frame_size, function); -#ifdef DEBUG - output_frame->SetKind(Code::FUNCTION); -#endif + output_frame->SetFrameType(StackFrame::JAVA_SCRIPT); bool is_bottommost = (0 == frame_index); bool is_topmost = (output_count_ - 1 == frame_index); diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h index 2626954ac6dce2..3e3d63d62b917f 100644 --- a/deps/v8/src/x64/frames-x64.h +++ b/deps/v8/src/x64/frames-x64.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -87,6 +87,9 @@ class ExitFrameConstants : public AllStatic { class StandardFrameConstants : public AllStatic { public: + // Fixed part of the frame consists of return address, caller fp, + // context and function. + static const int kFixedFrameSize = 4 * kPointerSize; static const int kExpressionsOffset = -3 * kPointerSize; static const int kMarkerOffset = -2 * kPointerSize; static const int kContextOffset = -1 * kPointerSize; @@ -112,6 +115,8 @@ class JavaScriptFrameConstants : public AllStatic { class ArgumentsAdaptorFrameConstants : public AllStatic { public: static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset; + static const int kFrameSize = + StandardFrameConstants::kFixedFrameSize + kPointerSize; }; diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc index 8dca1e1644af08..1fdffa2651d2c0 100644 --- a/deps/v8/src/x64/ic-x64.cc +++ b/deps/v8/src/x64/ic-x64.cc @@ -467,43 +467,50 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Load the key (consisting of map and symbol) from the cache and // check for match. 
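For the DoComputeArgumentsAdaptorFrame code above: the reconstructed adaptor frame is filled top-down with the translated parameters, the caller's pc and fp, the ARGUMENTS_ADAPTOR sentinel in the context slot, the function, and the argument count, so its size is height * kPointerSize plus ArgumentsAdaptorFrameConstants::kFrameSize (the four fixed slots plus one for the length). A small sketch that just recomputes those offsets; the constants assume a 64-bit target and the printed labels are illustrative:

#include <cstdio>

// Recomputes the slot layout DoComputeArgumentsAdaptorFrame writes, top
// down from the frame size. Constants here assume a 64-bit target.
const int kPointerSize = 8;
// Return address, caller fp, context and function.
const int kFixedFrameSize = 4 * kPointerSize;
// Adaptor frames additionally store the argument count.
const int kAdaptorFrameSize = kFixedFrameSize + kPointerSize;

int main() {
  const int height = 3;  // Example: receiver plus two arguments.
  const unsigned output_frame_size = height * kPointerSize + kAdaptorFrameSize;

  unsigned output_offset = output_frame_size;
  for (int i = 0; i < height; ++i) {
    output_offset -= kPointerSize;
    std::printf("[top + %3u] <- translated parameter %d\n", output_offset, i);
  }
  const char* fixed_slots[] = {
      "caller's pc", "caller's fp", "context (adaptor sentinel)",
      "function", "argc"};
  for (const char* slot : fixed_slots) {
    output_offset -= kPointerSize;
    std::printf("[top + %3u] <- %s\n", output_offset, slot);
  }
  // Every slot of the reconstructed frame has been accounted for.
  return (output_offset == 0) ? 0 : 1;
}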
- Label try_second_entry, hit_on_first_entry, load_in_object_property; + Label load_in_object_property; + static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; + Label hit_on_nth_entry[kEntriesPerBucket]; ExternalReference cache_keys = ExternalReference::keyed_lookup_cache_keys(masm->isolate()); - __ movq(rdi, rcx); - __ shl(rdi, Immediate(kPointerSizeLog2 + 1)); - __ LoadAddress(kScratchRegister, cache_keys); - __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0)); - __ j(not_equal, &try_second_entry); - __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, kPointerSize)); - __ j(equal, &hit_on_first_entry); - - __ bind(&try_second_entry); - __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, kPointerSize * 2)); + + for (int i = 0; i < kEntriesPerBucket - 1; i++) { + Label try_next_entry; + __ movq(rdi, rcx); + __ shl(rdi, Immediate(kPointerSizeLog2 + 1)); + __ LoadAddress(kScratchRegister, cache_keys); + int off = kPointerSize * i * 2; + __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off)); + __ j(not_equal, &try_next_entry); + __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize)); + __ j(equal, &hit_on_nth_entry[i]); + __ bind(&try_next_entry); + } + + int off = kPointerSize * (kEntriesPerBucket - 1) * 2; + __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off)); __ j(not_equal, &slow); - __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, kPointerSize * 3)); + __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize)); __ j(not_equal, &slow); // Get field offset, which is a 32-bit integer. ExternalReference cache_field_offsets = ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate()); - // Hit on second entry. - __ LoadAddress(kScratchRegister, cache_field_offsets); - __ addl(rcx, Immediate(1)); - __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0)); - __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset)); - __ subq(rdi, rcx); - __ j(above_equal, &property_array_property); - __ jmp(&load_in_object_property); - - // Hit on first entry. - __ bind(&hit_on_first_entry); - __ LoadAddress(kScratchRegister, cache_field_offsets); - __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0)); - __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset)); - __ subq(rdi, rcx); - __ j(above_equal, &property_array_property); + // Hit on nth entry. + for (int i = kEntriesPerBucket - 1; i >= 0; i--) { + __ bind(&hit_on_nth_entry[i]); + if (i != 0) { + __ addl(rcx, Immediate(i)); + } + __ LoadAddress(kScratchRegister, cache_field_offsets); + __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0)); + __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset)); + __ subq(rdi, rcx); + __ j(above_equal, &property_array_property); + if (i != 0) { + __ jmp(&load_in_object_property); + } + } // Load in-object property. 
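The KeyedLoadIC::GenerateGeneric change above replaces the hand-unrolled two-entry lookup-cache probe with a loop over KeyedLookupCache::kEntriesPerBucket entries. The same control flow in plain C++ rather than generated x64 code, with an invented bucket size and data layout standing in for the real cache (a miss here corresponds to the jump to the slow path):

#include <cstdio>

// Plain-C++ rendering of the generalized probe loop: the cache bucket holds
// kEntriesPerBucket (map, symbol) pairs, with the cached field offsets kept
// in a parallel array, as in the generated code above.
const int kEntriesPerBucket = 4;  // Illustrative; the real size is a V8 detail.

struct Key {
  const void* map;
  const void* symbol;
};

// Returns the cached field offset, or -1 for a miss (the "slow" path).
int ProbeBucket(const Key* keys, const int* field_offsets, int bucket_index,
                const void* map, const void* symbol) {
  int base = bucket_index * kEntriesPerBucket;
  for (int i = 0; i < kEntriesPerBucket; i++) {
    const Key& entry = keys[base + i];
    if (entry.map == map && entry.symbol == symbol) {
      // Hit on the i-th entry; the generated code adds i to the index
      // before loading from the field-offset table.
      return field_offsets[base + i];
    }
  }
  return -1;
}

int main() {
  int map_a, sym_x;  // Dummy objects standing in for Map and String pointers.
  Key keys[kEntriesPerBucket] = {{nullptr, nullptr}, {&map_a, &sym_x}};
  int offsets[kEntriesPerBucket] = {0, 24};
  std::printf("offset: %d\n", ProbeBucket(keys, offsets, 0, &map_a, &sym_x));
  std::printf("offset: %d\n", ProbeBucket(keys, offsets, 0, &map_a, &map_a));
  return 0;
}

The generated code differs only in that the last comparison falls through to the slow label instead of looping back.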
__ bind(&load_in_object_property); diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc index e0512147735ca5..b14fa8b34a8510 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.cc +++ b/deps/v8/src/x64/lithium-codegen-x64.cc @@ -368,7 +368,11 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, WriteTranslation(environment->outer(), translation); int closure_id = DefineDeoptimizationLiteral(environment->closure()); - translation->BeginFrame(environment->ast_id(), closure_id, height); + if (environment->is_arguments_adaptor()) { + translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); + } else { + translation->BeginJSFrame(environment->ast_id(), closure_id, height); + } for (int i = 0; i < translation_size; ++i) { LOperand* value = environment->values()->at(i); // spilled_registers_ and spilled_double_registers_ are either @@ -504,10 +508,14 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, // |>------------ translation_size ------------<| int frame_count = 0; + int jsframe_count = 0; for (LEnvironment* e = environment; e != NULL; e = e->outer()) { ++frame_count; + if (!e->is_arguments_adaptor()) { + ++jsframe_count; + } } - Translation translation(&translations_, frame_count); + Translation translation(&translations_, frame_count, jsframe_count); WriteTranslation(environment, &translation); int deoptimization_index = deoptimizations_.length(); int pc_offset = masm()->pc_offset(); diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc index fee2f4f2ab98bb..ac98a4c2dcd544 100644 --- a/deps/v8/src/x64/lithium-x64.cc +++ b/deps/v8/src/x64/lithium-x64.cc @@ -1000,14 +1000,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment( LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator); int ast_id = hydrogen_env->ast_id(); - ASSERT(ast_id != AstNode::kNoNumber); + ASSERT(ast_id != AstNode::kNoNumber || hydrogen_env->is_arguments_adaptor()); int value_count = hydrogen_env->length(); LEnvironment* result = new LEnvironment(hydrogen_env->closure(), + hydrogen_env->is_arguments_adaptor(), ast_id, hydrogen_env->parameter_count(), argument_count_, value_count, outer); + int argument_index = *argument_index_accumulator; for (int i = 0; i < value_count; ++i) { if (hydrogen_env->is_special_index(i)) continue; @@ -1016,13 +1018,17 @@ LEnvironment* LChunkBuilder::CreateEnvironment( if (value->IsArgumentsObject()) { op = NULL; } else if (value->IsPushArgument()) { - op = new LArgument((*argument_index_accumulator)++); + op = new LArgument(argument_index++); } else { op = UseAny(value); } result->AddValue(op, value->representation()); } + if (!hydrogen_env->is_arguments_adaptor()) { + *argument_index_accumulator = argument_index; + } + return result; } @@ -1912,12 +1918,11 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement( LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement( HLoadKeyedSpecializedArrayElement* instr) { ElementsKind elements_kind = instr->elements_kind(); - Representation representation(instr->representation()); ASSERT( - (representation.IsInteger32() && + (instr->representation().IsInteger32() && (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || - (representation.IsDouble() && + (instr->representation().IsDouble() && ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); ASSERT(instr->key()->representation().IsInteger32()); @@ -1976,13 +1981,12 
@@ LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement( LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement( HStoreKeyedSpecializedArrayElement* instr) { - Representation representation(instr->value()->representation()); ElementsKind elements_kind = instr->elements_kind(); ASSERT( - (representation.IsInteger32() && + (instr->value()->representation().IsInteger32() && (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || - (representation.IsDouble() && + (instr->value()->representation().IsDouble() && ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); ASSERT(instr->external_pointer()->representation().IsExternal()); @@ -2245,6 +2249,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { HEnvironment* outer = current_block_->last_environment(); HConstant* undefined = graph()->GetConstantUndefined(); HEnvironment* inner = outer->CopyForInlining(instr->closure(), + instr->arguments_count(), instr->function(), undefined, instr->call_kind()); @@ -2255,7 +2260,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { - HEnvironment* outer = current_block_->last_environment()->outer(); + HEnvironment* outer = current_block_->last_environment()-> DiscardInlined(false); current_block_->UpdateEnvironment(outer); return NULL; } diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc index 8525f3876b4648..59d6d19462f357 100644 --- a/deps/v8/test/cctest/test-api.cc +++ b/deps/v8/test/cctest/test-api.cc @@ -1189,7 +1189,6 @@ THREADED_TEST(GlobalPrototype) { templ->Set("x", v8_num(200)); templ->SetAccessor(v8_str("m"), GetM); LocalContext env(0, templ); - v8::Handle<v8::Object> obj(env->Global()); v8::Handle