deps: update ChakraCore to chakra-core/ChakraCore@54e986f8a4
[1.8>1.9] [MERGE #4682 @MikeHolman] Fix bad interaction with Spectre mitigation and VirtualArray OOB resume

Merge pull request #4682 from MikeHolman:virtualspectre

In the case of VirtualArrays, we may have eliminated the bounds check and instead rely on our AV handling (as long as the index is guaranteed to be within 4GB).
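
The trick relies on address-space reservation: commit only the live buffer, but reserve the entire 4GB index range, and let the page-fault machinery stand in for the bounds check. Here is a minimal sketch of that layout, assuming Windows VirtualAlloc semantics; the helper name and constants are illustrative, not ChakraCore's actual allocator code:

    #include <windows.h>
    #include <cstddef>
    #include <cstdint>

    uint8_t* ReserveVirtualArray(size_t committedByteLength)
    {
        // Reserve 4GB of address space: every possible uint32 byte offset
        // lands inside the reservation.
        const size_t kReservationBytes = size_t(1) << 32;
        void* base = VirtualAlloc(nullptr, kReservationBytes, MEM_RESERVE, PAGE_NOACCESS);
        if (base == nullptr)
        {
            return nullptr;
        }
        // Commit only the live buffer. A read past committedByteLength but
        // below 4GB hits a reserved, uncommitted page and raises an access
        // violation that the runtime's exception filter can recognize and
        // resume from, so the JIT'd code can elide the explicit bounds check.
        return static_cast<uint8_t*>(
            VirtualAlloc(base, committedByteLength, MEM_COMMIT, PAGE_READWRITE));
    }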

However, with Spectre mitigations we force OOB reads to nullptr. Our exception filter only handles AVs that try to read from the reserved region, so we end up crashing with a nullptr dereference instead of resuming.
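
The "force to nullptr" is the branchless poison mask that the lowered code below builds with InsertSub and InsertShift: subtract the bound from the index, then arithmetic-shift right by 63 so the sign bit smears into an all-ones or all-zeros mask. A standalone C++ sketch of the same idea (assuming a 64-bit target, matching the TARGET_64 path; names illustrative):

    #include <cstdint>

    // Returns addr unchanged when index < bound, nullptr otherwise,
    // without a conditional branch that speculation could bypass.
    const uint8_t* PoisonIfOutOfBounds(const uint8_t* addr, uint64_t index, uint64_t bound)
    {
        // index - bound is negative exactly when index < bound; the
        // arithmetic shift turns the sign bit into 0 or all ones.
        uintptr_t mask = static_cast<uintptr_t>(static_cast<int64_t>(index - bound) >> 63);
        return reinterpret_cast<const uint8_t*>(reinterpret_cast<uintptr_t>(addr) & mask);
    }

A masked-out load then architecturally targets address 0, and that is the crash described here: the AV filter watches the reserved region, not nullptr.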

This change makes it so that we only poison the load when the index exceeds our 4GB reservation.
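
Concretely, the bound fed into the poison mask becomes the scaled 4GB limit for virtual arrays, instead of the head-segment length; that is the lengthOpnd selection visible in the diff below. A sketch of just that selection (MAX_ASMJS_ARRAYBUFFER_LENGTH and indirScale are the names from the diff; the wrapper function is illustrative):

    #include <cstdint>

    // Bound against which the poison mask is computed.
    uint64_t PoisonBound(bool isOptimizedVirtualTypedArray,
                         uint64_t headSegmentLength,   // element count of the array
                         uint64_t maxArrayBufferBytes, // e.g. MAX_ASMJS_ARRAYBUFFER_LENGTH
                         unsigned indirScale)          // log2 of the element size
    {
        // Virtual array: poison only past the whole 4GB reservation
        // (converted from bytes to elements). An index that is out of
        // bounds but still inside the reservation faults into reserved
        // pages and resumes, as before.
        if (isOptimizedVirtualTypedArray)
        {
            return maxArrayBufferBytes >> indirScale;
        }
        // Regular arrays keep poisoning anything past the head segment.
        return headSegmentLength;
    }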

OS: 15897366

Reviewed-By: chakrabot <[email protected]>
MikeHolman authored and chakrabot committed Feb 16, 2018
1 parent 96f1f73 commit e3bfac6
Showing 1 changed file with 29 additions and 13 deletions.
deps/chakrashim/core/lib/Backend/Lower.cpp (42 changes: 29 additions & 13 deletions)
@@ -16202,7 +16202,11 @@ Lowerer::GenerateFastElemIIntIndexCommon(
     if (shouldPoisonLoad)
     {
         // Use a mask to prevent arbitrary speculative reads
-        if (!headSegmentLengthOpnd)
+        if (!headSegmentLengthOpnd
+#if ENABLE_FAST_ARRAYBUFFER
+            && !baseValueType.IsLikelyOptimizedVirtualTypedArray()
+#endif
+            )
         {
             if (baseValueType.IsLikelyTypedArray())
             {
@@ -16220,30 +16224,42 @@
         }
         IR::RegOpnd* localMaskOpnd = nullptr;
 #if TARGET_64
-        IR::RegOpnd* headSegmentLengthRegOpnd = IR::RegOpnd::New(headSegmentLengthOpnd->GetType(), m_func);
-        IR::Instr * instrMov = IR::Instr::New(Js::OpCode::MOV_TRUNC, headSegmentLengthRegOpnd, headSegmentLengthOpnd, m_func);
-        instr->InsertBefore(instrMov);
-        LowererMD::Legalize(instrMov);
-
-        if (headSegmentLengthRegOpnd->GetSize() != MachPtr)
-        {
-            headSegmentLengthRegOpnd = headSegmentLengthRegOpnd->UseWithNewType(TyMachPtr, this->m_func)->AsRegOpnd();
-        }
-
+        IR::Opnd* lengthOpnd = nullptr;
+#if ENABLE_FAST_ARRAYBUFFER
+        if (baseValueType.IsLikelyOptimizedVirtualTypedArray())
+        {
+            lengthOpnd = IR::IntConstOpnd::New(MAX_ASMJS_ARRAYBUFFER_LENGTH >> indirScale, TyMachReg, m_func);
+        }
+        else
+#endif
+        {
+            AnalysisAssert(headSegmentLengthOpnd != nullptr);
+            lengthOpnd = IR::RegOpnd::New(headSegmentLengthOpnd->GetType(), m_func);
+            IR::Instr * instrMov = IR::Instr::New(Js::OpCode::MOV_TRUNC, lengthOpnd, headSegmentLengthOpnd, m_func);
+            instr->InsertBefore(instrMov);
+            LowererMD::Legalize(instrMov);
+
+            if (lengthOpnd->GetSize() != MachPtr)
+            {
+                lengthOpnd = lengthOpnd->UseWithNewType(TyMachPtr, this->m_func)->AsRegOpnd();
+            }
+        }
+
+
         // MOV r1, [opnd + offset(type)]
         IR::RegOpnd* indexValueRegOpnd = IR::RegOpnd::New(indexValueOpnd->GetType(), m_func);
 
-        instrMov = IR::Instr::New(Js::OpCode::MOV_TRUNC, indexValueRegOpnd, indexValueOpnd, m_func);
+        IR::Instr * instrMov = IR::Instr::New(Js::OpCode::MOV_TRUNC, indexValueRegOpnd, indexValueOpnd, m_func);
         instr->InsertBefore(instrMov);
         LowererMD::Legalize(instrMov);
 
         if (indexValueRegOpnd->GetSize() != MachPtr)
         {
             indexValueRegOpnd = indexValueRegOpnd->UseWithNewType(TyMachPtr, this->m_func)->AsRegOpnd();
         }
 
         localMaskOpnd = IR::RegOpnd::New(TyMachPtr, m_func);
-        InsertSub(false, localMaskOpnd, indexValueRegOpnd, headSegmentLengthRegOpnd, instr);
+        InsertSub(false, localMaskOpnd, indexValueRegOpnd, lengthOpnd, instr);
         InsertShift(Js::OpCode::Shr_A, false, localMaskOpnd, localMaskOpnd, IR::IntConstOpnd::New(63, TyInt8, m_func), instr);
 #else
         localMaskOpnd = IR::RegOpnd::New(TyInt32, m_func);
