diff --git a/src/librustc/dep_graph/dep_node.rs b/src/librustc/dep_graph/dep_node.rs index b7281cf0445cd..90081d5b85ef9 100644 --- a/src/librustc/dep_graph/dep_node.rs +++ b/src/librustc/dep_graph/dep_node.rs @@ -632,7 +632,7 @@ define_dep_nodes!( <'tcx> // queries). Making them anonymous avoids hashing the result, which // may save a bit of time. [anon] EraseRegionsTy { ty: Ty<'tcx> }, - [anon] ConstValueToAllocation { val: &'tcx ty::Const<'tcx> }, + [anon] ConstToAllocation { val: &'tcx ty::Const<'tcx> }, [input] Freevars(DefId), [input] MaybeUnusedTraitImport(DefId), diff --git a/src/librustc/ich/impls_ty.rs b/src/librustc/ich/impls_ty.rs index f13e26fee3ee4..46f4ed4ec478b 100644 --- a/src/librustc/ich/impls_ty.rs +++ b/src/librustc/ich/impls_ty.rs @@ -397,12 +397,6 @@ impl_stable_hash_for!(enum mir::interpret::ScalarMaybeUndef { Undef }); -impl_stable_hash_for!(enum mir::interpret::Value { - Scalar(v), - ScalarPair(a, b), - ByRef(ptr, align) -}); - impl_stable_hash_for!(struct mir::interpret::Pointer { alloc_id, offset diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs index a0980b06230c8..ca664c6e18b4d 100644 --- a/src/librustc/mir/interpret/mod.rs +++ b/src/librustc/mir/interpret/mod.rs @@ -13,7 +13,7 @@ pub use self::error::{ FrameInfo, ConstEvalResult, }; -pub use self::value::{Scalar, Value, ConstValue, ScalarMaybeUndef}; +pub use self::value::{Scalar, ConstValue, ScalarMaybeUndef}; use std::fmt; use mir; @@ -135,7 +135,7 @@ impl<'tcx> Pointer { Pointer { alloc_id, offset } } - pub(crate) fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self { + pub fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self { Pointer::new( self.alloc_id, Size::from_bytes(cx.data_layout().wrapping_signed_offset(self.offset.bytes(), i)), @@ -147,7 +147,7 @@ impl<'tcx> Pointer { (Pointer::new(self.alloc_id, Size::from_bytes(res)), over) } - pub(crate) fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + pub fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { Ok(Pointer::new( self.alloc_id, Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?), @@ -567,18 +567,6 @@ pub fn write_target_uint( } } -pub fn write_target_int( - endianness: layout::Endian, - mut target: &mut [u8], - data: i128, -) -> Result<(), io::Error> { - let len = target.len(); - match endianness { - layout::Endian::Little => target.write_int128::<LittleEndian>(data, len), - layout::Endian::Big => target.write_int128::<BigEndian>(data, len), - } -} - pub fn read_target_uint(endianness: layout::Endian, mut source: &[u8]) -> Result<u128, io::Error> { match endianness { layout::Endian::Little => source.read_uint128::<LittleEndian>(source.len()), @@ -586,6 +574,26 @@ pub fn read_target_uint(endianness: layout::Endian, mut source: &[u8]) -> Result<u128, io::Error> } } +//////////////////////////////////////////////////////////////////////////////// +// Methods to facilitate working with signed integers stored in a u128 +//////////////////////////////////////////////////////////////////////////////// + +pub fn sign_extend(value: u128, size: Size) -> u128 { + let size = size.bits(); + // sign extend + let shift = 128 - size; + // shift the unsigned value to the left + // and back to the right as signed (essentially fills with FF on the left) + (((value << shift) as i128) >> shift) as u128 +} + +pub fn truncate(value: u128, size: Size) -> u128 { + let size = size.bits(); + let shift = 128 - size; + // truncate (shift left to drop out leftover values, shift right to fill with zeroes) + (value << shift) >> shift +} 
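The two helpers added above are pure bit manipulation, and the comments in them describe the trick: an arithmetic shift on i128 copies the sign bit downwards, while a logical shift on u128 refills with zeroes. Here is a standalone sketch of their intended behavior, using a plain bit count in place of rustc's `Size` type (an assumption made only for this illustration):

fn sign_extend(value: u128, size_bits: u32) -> u128 {
    let shift = 128 - size_bits;
    // left-shift so the value's sign bit becomes bit 127, then shift back
    // right as i128 so the sign bit is copied across the upper bits
    (((value << shift) as i128) >> shift) as u128
}

fn truncate(value: u128, size_bits: u32) -> u128 {
    let shift = 128 - size_bits;
    // logical shifts on u128: the upper bits are dropped and refilled with zeroes
    (value << shift) >> shift
}

fn main() {
    // -1i8 is stored as 0xFF; widening it to 128 bits fills the top with ones
    assert_eq!(sign_extend(0xFF, 8), u128::MAX);
    // truncating 0x1FF to 8 bits drops the ninth bit
    assert_eq!(truncate(0x1FF, 8), 0xFF);
    // round-trip: truncate then sign_extend restores a small negative value
    assert_eq!(sign_extend(truncate((-5i128) as u128, 8), 8) as i128, -5);
}

This round-trip property is exactly what the float-to-int cast code later in this diff asserts after calling truncate.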
//////////////////////////////////////////////////////////////////////////////// // Undefined byte tracking //////////////////////////////////////////////////////////////////////////////// diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs index 3e8b44b87fe74..b142de81c1e91 100644 --- a/src/librustc/mir/interpret/value.rs +++ b/src/librustc/mir/interpret/value.rs @@ -1,14 +1,13 @@ #![allow(unknown_lints)] -use ty::layout::{Align, HasDataLayout, Size}; -use ty; +use ty::layout::{HasDataLayout, Size}; use ty::subst::Substs; use hir::def_id::DefId; use super::{EvalResult, Pointer, PointerArithmetic, Allocation}; /// Represents a constant value in Rust. Scalar and ScalarPair are optimizations which -/// matches Value's optimizations for easy conversions between these two types +/// match the LocalValue optimizations for easy conversions between Value and ConstValue. #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, RustcEncodable, RustcDecodable, Hash)] pub enum ConstValue<'tcx> { /// Never returned from the `const_eval` query, but the HIR contains these frequently in order @@ -16,6 +15,8 @@ /// evaluation Unevaluated(DefId, &'tcx Substs<'tcx>), /// Used only for types with layout::abi::Scalar ABI and ZSTs + /// + /// Not using the enum `Value` to encode that this must not be `Undef` Scalar(Scalar), /// Used only for types with layout::abi::ScalarPair /// @@ -26,25 +27,6 @@ } impl<'tcx> ConstValue<'tcx> { - #[inline] - pub fn from_byval_value(val: Value) -> EvalResult<'static, Self> { - Ok(match val { - Value::ByRef(..) => bug!(), - Value::ScalarPair(a, b) => ConstValue::ScalarPair(a.unwrap_or_err()?, b), - Value::Scalar(val) => ConstValue::Scalar(val.unwrap_or_err()?), - }) - } - - #[inline] - pub fn to_byval_value(&self) -> Option<Value> { - match *self { - ConstValue::Unevaluated(..) | - ConstValue::ByRef(..) => None, - ConstValue::ScalarPair(a, b) => Some(Value::ScalarPair(a.into(), b)), - ConstValue::Scalar(val) => Some(Value::Scalar(val.into())), - } - } - #[inline] pub fn try_to_scalar(&self) -> Option<Scalar> { match *self { @@ -56,58 +38,44 @@ } #[inline] - pub fn to_bits(&self, size: Size) -> Option<u128> { + pub fn try_to_bits(&self, size: Size) -> Option<u128> { self.try_to_scalar()?.to_bits(size).ok() } #[inline] - pub fn to_ptr(&self) -> Option<Pointer> { + pub fn try_to_ptr(&self) -> Option<Pointer> { self.try_to_scalar()?.to_ptr().ok() } -} -/// A `Value` represents a single self-contained Rust value. -/// -/// A `Value` can either refer to a block of memory inside an allocation (`ByRef`) or to a primitive -/// value held directly, outside of any allocation (`Scalar`). For `ByRef`-values, we remember -/// whether the pointer is supposed to be aligned or not (also see Place). -/// -/// For optimization of a few very common cases, there is also a representation for a pair of -/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary -/// operations and fat pointers. This idea was taken from rustc's codegen. 
-#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] -pub enum Value { - ByRef(Scalar, Align), - Scalar(ScalarMaybeUndef), - ScalarPair(ScalarMaybeUndef, ScalarMaybeUndef), -} - -impl<'tcx> ty::TypeFoldable<'tcx> for Value { - fn super_fold_with<'gcx: 'tcx, F: ty::fold::TypeFolder<'gcx, 'tcx>>(&self, _: &mut F) -> Self { - *self + pub fn new_slice( + val: Scalar, + len: u64, + cx: impl HasDataLayout + ) -> Self { + ConstValue::ScalarPair(val, Scalar::Bits { + bits: len as u128, + size: cx.data_layout().pointer_size.bytes() as u8, + }.into()) } - fn super_visit_with<V: ty::fold::TypeVisitor<'tcx>>(&self, _: &mut V) -> bool { - false + + pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self { + ConstValue::ScalarPair(val, Scalar::Ptr(vtable).into()) } } impl<'tcx> Scalar { - pub fn ptr_null<C: HasDataLayout>(cx: C) -> Self { + pub fn ptr_null(cx: impl HasDataLayout) -> Self { Scalar::Bits { bits: 0, size: cx.data_layout().pointer_size.bytes() as u8, } } - pub fn to_value_with_len<C: HasDataLayout>(self, len: u64, cx: C) -> Value { - ScalarMaybeUndef::Scalar(self).to_value_with_len(len, cx) - } - - pub fn to_value_with_vtable(self, vtable: Pointer) -> Value { - ScalarMaybeUndef::Scalar(self).to_value_with_vtable(vtable) + pub fn zst() -> Self { + Scalar::Bits { bits: 0, size: 0 } } - pub fn ptr_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + pub fn ptr_signed_offset(self, i: i64, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> { let layout = cx.data_layout(); match self { Scalar::Bits { bits, size } => { @@ -121,7 +89,7 @@ impl<'tcx> Scalar { } } - pub fn ptr_offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> { + pub fn ptr_offset(self, i: Size, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> { let layout = cx.data_layout(); match self { Scalar::Bits { bits, size } => { @@ -135,7 +103,7 @@ impl<'tcx> Scalar { } } - pub fn ptr_wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self { + pub fn ptr_wrapping_signed_offset(self, i: i64, cx: impl HasDataLayout) -> Self { let layout = cx.data_layout(); match self { Scalar::Bits { bits, size } => { @@ -149,7 +117,7 @@ impl<'tcx> Scalar { } } - pub fn is_null_ptr<C: HasDataLayout>(self, cx: C) -> bool { + pub fn is_null_ptr(self, cx: impl HasDataLayout) -> bool { match self { Scalar::Bits { bits, size } => { assert_eq!(size as u64, cx.data_layout().pointer_size.bytes()); @@ -159,12 +127,58 @@ } } - pub fn to_value(self) -> Value { - Value::Scalar(ScalarMaybeUndef::Scalar(self)) + pub fn from_bool(b: bool) -> Self { + Scalar::Bits { bits: b as u128, size: 1 } + } + + pub fn from_char(c: char) -> Self { + Scalar::Bits { bits: c as u128, size: 4 } + } + + pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> { + match self { + Scalar::Bits { bits, size } => { + assert_eq!(target_size.bytes(), size as u64); + assert_ne!(size, 0, "to_bits cannot be used with zsts"); + Ok(bits) + } + Scalar::Ptr(_) => err!(ReadPointerAsBytes), + } + } + + pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { + match self { + Scalar::Bits { bits: 0, .. } => err!(InvalidNullPointerUsage), + Scalar::Bits { .. } => err!(ReadBytesAsPointer), + Scalar::Ptr(p) => Ok(p), + } + } + + pub fn is_bits(self) -> bool { + match self { + Scalar::Bits { ..
} => true, + _ => false, + } + } + + pub fn is_ptr(self) -> bool { + match self { + Scalar::Ptr(_) => true, + _ => false, + } + } + + pub fn to_bool(self) -> EvalResult<'tcx, bool> { + match self { + Scalar::Bits { bits: 0, size: 1 } => Ok(false), + Scalar::Bits { bits: 1, size: 1 } => Ok(true), + _ => err!(InvalidBool), + } } } impl From<Pointer> for Scalar { + #[inline(always)] fn from(ptr: Pointer) -> Self { Scalar::Ptr(ptr) } @@ -179,7 +193,7 @@ pub enum Scalar { /// The raw bytes of a simple value. Bits { /// The first `size` bytes are the value. - /// Do not try to read less or more bytes that that + /// Do not try to read less or more bytes than that. The remaining bytes must be 0. size: u8, bits: u128, }, @@ -197,86 +211,29 @@ pub enum ScalarMaybeUndef { } impl From<Scalar> for ScalarMaybeUndef { + #[inline(always)] fn from(s: Scalar) -> Self { ScalarMaybeUndef::Scalar(s) } } -impl ScalarMaybeUndef { - pub fn unwrap_or_err(self) -> EvalResult<'static, Scalar> { +impl<'tcx> ScalarMaybeUndef { + pub fn not_undef(self) -> EvalResult<'static, Scalar> { match self { ScalarMaybeUndef::Scalar(scalar) => Ok(scalar), ScalarMaybeUndef::Undef => err!(ReadUndefBytes), } } - pub fn to_value_with_len<C: HasDataLayout>(self, len: u64, cx: C) -> Value { - Value::ScalarPair(self, Scalar::Bits { - bits: len as u128, - size: cx.data_layout().pointer_size.bytes() as u8, - }.into()) - } - - pub fn to_value_with_vtable(self, vtable: Pointer) -> Value { - Value::ScalarPair(self, Scalar::Ptr(vtable).into()) - } - - pub fn ptr_offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> { - match self { - ScalarMaybeUndef::Scalar(scalar) => { - scalar.ptr_offset(i, cx).map(ScalarMaybeUndef::Scalar) - }, - ScalarMaybeUndef::Undef => Ok(ScalarMaybeUndef::Undef) - } - } -} - -impl<'tcx> Scalar { - pub fn from_bool(b: bool) -> Self { - Scalar::Bits { bits: b as u128, size: 1 } - } - - pub fn from_char(c: char) -> Self { - Scalar::Bits { bits: c as u128, size: 4 } - } - - pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> { - match self { - Scalar::Bits { bits, size } => { - assert_eq!(target_size.bytes(), size as u64); - assert_ne!(size, 0, "to_bits cannot be used with zsts"); - Ok(bits) - } - Scalar::Ptr(_) => err!(ReadPointerAsBytes), - } - } - pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { - match self { - Scalar::Bits {..} => err!(ReadBytesAsPointer), - Scalar::Ptr(p) => Ok(p), - } + self.not_undef()?.to_ptr() } - pub fn is_bits(self) -> bool { - match self { - Scalar::Bits { ..
} => true, - _ => false, - } - } - - pub fn is_ptr(self) -> bool { - match self { - Scalar::Ptr(_) => true, - _ => false, - } + pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> { + self.not_undef()?.to_bits(target_size) } pub fn to_bool(self) -> EvalResult<'tcx, bool> { - match self { - Scalar::Bits { bits: 0, size: 1 } => Ok(false), - Scalar::Bits { bits: 1, size: 1 } => Ok(true), - _ => err!(InvalidBool), - } + self.not_undef()?.to_bool() } } diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index bafeb5dd12867..e958ca9b9bb02 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -17,7 +17,7 @@ use hir::def::CtorKind; use hir::def_id::DefId; use hir::{self, HirId, InlineAsm}; use middle::region; -use mir::interpret::{EvalErrorKind, Scalar, Value, ScalarMaybeUndef}; +use mir::interpret::{EvalErrorKind, Scalar, ScalarMaybeUndef, ConstValue}; use mir::visit::MirVisitable; use rustc_apfloat::ieee::{Double, Single}; use rustc_apfloat::Float; @@ -1469,14 +1469,14 @@ impl<'tcx> TerminatorKind<'tcx> { .iter() .map(|&u| { let mut s = String::new(); - print_miri_value( - Scalar::Bits { - bits: u, - size: size.bytes() as u8, - }.to_value(), - switch_ty, - &mut s, - ).unwrap(); + let c = ty::Const { + val: ConstValue::Scalar(Scalar::Bits { + bits: u, + size: size.bytes() as u8, + }.into()), + ty: switch_ty, + }; + fmt_const_val(&mut s, &c).unwrap(); s.into() }) .chain(iter::once(String::from("otherwise").into())) @@ -2220,18 +2220,12 @@ impl<'tcx> Debug for Constant<'tcx> { } /// Write a `ConstValue` in a way closer to the original source code than the `Debug` output. -pub fn fmt_const_val(fmt: &mut W, const_val: &ty::Const) -> fmt::Result { - if let Some(value) = const_val.to_byval_value() { - print_miri_value(value, const_val.ty, fmt) - } else { - write!(fmt, "{:?}:{}", const_val.val, const_val.ty) - } -} - -pub fn print_miri_value<'tcx, W: Write>(value: Value, ty: Ty<'tcx>, f: &mut W) -> fmt::Result { +pub fn fmt_const_val(f: &mut impl Write, const_val: &ty::Const) -> fmt::Result { use ty::TypeVariants::*; + let value = const_val.val; + let ty = const_val.ty; // print some primitives - if let Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits, .. })) = value { + if let ConstValue::Scalar(Scalar::Bits { bits, .. }) = value { match ty.sty { TyBool if bits == 0 => return write!(f, "false"), TyBool if bits == 1 => return write!(f, "true"), @@ -2258,8 +2252,8 @@ pub fn print_miri_value<'tcx, W: Write>(value: Value, ty: Ty<'tcx>, f: &mut W) - return write!(f, "{}", item_path_str(did)); } // print string literals - if let Value::ScalarPair(ptr, len) = value { - if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = ptr { + if let ConstValue::ScalarPair(ptr, len) = value { + if let Scalar::Ptr(ptr) = ptr { if let ScalarMaybeUndef::Scalar(Scalar::Bits { bits: len, .. }) = len { if let TyRef(_, &ty::TyS { sty: TyStr, .. 
}, _) = ty.sty { return ty::tls::with(|tcx| { diff --git a/src/librustc/ty/query/config.rs b/src/librustc/ty/query/config.rs index ecf35c1b0da39..b5093d0a1fc95 100644 --- a/src/librustc/ty/query/config.rs +++ b/src/librustc/ty/query/config.rs @@ -198,9 +198,9 @@ impl<'tcx> QueryDescription<'tcx> for queries::super_predicates_of<'tcx> { } } -impl<'tcx> QueryDescription<'tcx> for queries::const_value_to_allocation<'tcx> { +impl<'tcx> QueryDescription<'tcx> for queries::const_to_allocation<'tcx> { fn describe(_tcx: TyCtxt, val: &'tcx ty::Const<'tcx>) -> String { - format!("converting value `{:?}` to an allocation", val) + format!("converting constant `{:?}` to an allocation", val) } } diff --git a/src/librustc/ty/query/mod.rs b/src/librustc/ty/query/mod.rs index ef22ebef9d7d4..c1372293a1b68 100644 --- a/src/librustc/ty/query/mod.rs +++ b/src/librustc/ty/query/mod.rs @@ -287,8 +287,8 @@ define_queries! { <'tcx> [] fn const_eval: const_eval_dep_node(ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>) -> ConstEvalResult<'tcx>, - /// Converts a constant value to an constant allocation - [] fn const_value_to_allocation: const_value_to_allocation( + /// Converts a constant value to a constant allocation + [] fn const_to_allocation: const_to_allocation( &'tcx ty::Const<'tcx> ) -> &'tcx Allocation, }, @@ -706,10 +706,10 @@ fn erase_regions_ty<'tcx>(ty: Ty<'tcx>) -> DepConstructor<'tcx> { DepConstructor::EraseRegionsTy { ty } } -fn const_value_to_allocation<'tcx>( +fn const_to_allocation<'tcx>( val: &'tcx ty::Const<'tcx>, ) -> DepConstructor<'tcx> { - DepConstructor::ConstValueToAllocation { val } + DepConstructor::ConstToAllocation { val } } fn type_param_predicates<'tcx>((item_id, param_id): (DefId, DefId)) -> DepConstructor<'tcx> { diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index bf721ddd13fc6..8473e4af40e3b 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -1062,7 +1062,7 @@ pub fn force_from_dep_node<'a, 'gcx, 'lcx>(tcx: TyCtxt<'a, 'gcx, 'lcx>, DepKind::FulfillObligation | DepKind::VtableMethods | DepKind::EraseRegionsTy | - DepKind::ConstValueToAllocation | + DepKind::ConstToAllocation | DepKind::NormalizeProjectionTy | DepKind::NormalizeTyAfterErasingRegions | DepKind::ImpliedOutlivesBounds | diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs index 226388c9efa48..7c0857cd2f998 100644 --- a/src/librustc/ty/sty.rs +++ b/src/librustc/ty/sty.rs @@ -20,7 +20,7 @@ use ty::subst::{Substs, Subst, Kind, UnpackedKind}; use ty::{self, AdtDef, TypeFlags, Ty, TyCtxt, TypeFoldable}; use ty::{Slice, TyS, ParamEnvAnd, ParamEnv}; use util::captures::Captures; -use mir::interpret::{Scalar, Pointer, Value}; +use mir::interpret::{Scalar, Pointer}; use std::iter; use std::cmp::Ordering; @@ -1973,17 +1973,12 @@ impl<'tcx> Const<'tcx> { } let ty = tcx.lift_to_global(&ty).unwrap(); let size = tcx.layout_of(ty).ok()?.size; - self.val.to_bits(size) + self.val.try_to_bits(size) } #[inline] pub fn to_ptr(&self) -> Option<Pointer> { - self.val.to_ptr() - } - - #[inline] - pub fn to_byval_value(&self) -> Option<Value> { - self.val.to_byval_value() + self.val.try_to_ptr() } #[inline] @@ -1995,7 +1990,7 @@ impl<'tcx> Const<'tcx> { assert_eq!(self.ty, ty.value); let ty = tcx.lift_to_global(&ty).unwrap(); let size = tcx.layout_of(ty).ok()?.size; - self.val.to_bits(size) + self.val.try_to_bits(size) } #[inline] diff --git a/src/librustc_codegen_llvm/debuginfo/metadata.rs b/src/librustc_codegen_llvm/debuginfo/metadata.rs index cac2ae0302e3e..c79a1a4bd04c6 100 --- 
a/src/librustc_codegen_llvm/debuginfo/metadata.rs +++ b/src/librustc_codegen_llvm/debuginfo/metadata.rs @@ -1359,6 +1359,7 @@ fn describe_enum_variant( // If this is not a univariant enum, there is also the discriminant field. let (discr_offset, discr_arg) = match discriminant_info { RegularDiscriminant(_) => { + // We have the layout of an enum variant, we need the layout of the outer enum let enum_layout = cx.layout_of(layout.ty); (Some(enum_layout.fields.offset(0)), Some(("RUST$ENUM$DISR".to_string(), enum_layout.field(cx, 0).ty))) diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index 47fd92682fdc4..6774ce818c1f6 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -9,8 +9,8 @@ // except according to those terms. use llvm; -use rustc::mir::interpret::ConstEvalErr; -use rustc_mir::interpret::{read_target_uint, const_val_field}; +use rustc::mir::interpret::{ConstEvalErr, read_target_uint}; +use rustc_mir::interpret::{const_field}; use rustc::hir::def_id::DefId; use rustc::mir; use rustc_data_structures::indexed_vec::Idx; @@ -186,7 +186,7 @@ impl FunctionCx<'a, 'll, 'tcx> { ref other => bug!("invalid simd shuffle type: {}", other), }; let values: Result<Vec<_>, Lrc<_>> = (0..fields).map(|field| { - let field = const_val_field( + let field = const_field( bx.tcx(), ty::ParamEnv::reveal_all(), self.instance, diff --git a/src/librustc_lint/builtin.rs b/src/librustc_lint/builtin.rs index 038d53a35478d..681e919580567 100644 --- a/src/librustc_lint/builtin.rs +++ b/src/librustc_lint/builtin.rs @@ -1615,21 +1615,20 @@ fn validate_const<'a, 'tcx>( ) { let mut ecx = ::rustc_mir::interpret::mk_eval_cx(tcx, gid.instance, param_env).unwrap(); let result = (|| { - let val = ecx.const_to_value(constant.val)?; use rustc_target::abi::LayoutOf; + use rustc_mir::interpret::OpTy; + + let op = ecx.const_value_to_op(constant.val)?; let layout = ecx.layout_of(constant.ty)?; - let place = ecx.allocate_place_for_value(val, layout, None)?; - let ptr = place.to_ptr()?; - let mut todo = vec![(ptr, layout.ty, String::new())]; + let place = ecx.allocate_op(OpTy { op, layout })?.into(); + + let mut todo = vec![(place, Vec::new())]; let mut seen = FxHashSet(); - seen.insert((ptr, layout.ty)); - while let Some((ptr, ty, path)) = todo.pop() { - let layout = ecx.layout_of(ty)?; - ecx.validate_ptr_target( - ptr, - layout.align, - layout, - path, + seen.insert(place); + while let Some((place, mut path)) = todo.pop() { + ecx.validate_mplace( + place, + &mut path, &mut seen, &mut todo, )?; diff --git a/src/librustc_mir/hair/cx/mod.rs b/src/librustc_mir/hair/cx/mod.rs index 70148fc917604..79483e454ecea 100644 --- a/src/librustc_mir/hair/cx/mod.rs +++ b/src/librustc_mir/hair/cx/mod.rs @@ -167,8 +167,7 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { LitKind::Str(ref s, _) => { let s = s.as_str(); let id = self.tcx.allocate_bytes(s.as_bytes()); - let value = Scalar::Ptr(id.into()).to_value_with_len(s.len() as u64, self.tcx); - ConstValue::from_byval_value(value).unwrap() + ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, self.tcx) }, LitKind::ByteStr(ref data) => { let id = self.tcx.allocate_bytes(data); diff --git a/src/librustc_mir/hair/pattern/mod.rs b/src/librustc_mir/hair/pattern/mod.rs index 7ec9373130419..16d6a08981abc 100644 --- a/src/librustc_mir/hair/pattern/mod.rs +++ b/src/librustc_mir/hair/pattern/mod.rs @@ -16,10 +16,10 @@ mod check_match; pub use self::check_match::check_crate; pub(crate) use 
self::check_match::check_match; -use interpret::{const_val_field, const_variant_index, self}; +use interpret::{const_field, const_variant_index}; use rustc::mir::{fmt_const_val, Field, BorrowKind, Mutability}; -use rustc::mir::interpret::{Scalar, GlobalId, ConstValue}; +use rustc::mir::interpret::{Scalar, GlobalId, ConstValue, sign_extend}; use rustc::ty::{self, TyCtxt, AdtDef, Ty, Region}; use rustc::ty::subst::{Substs, Kind}; use rustc::hir::{self, PatKind, RangeEnd}; @@ -795,7 +795,7 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { debug!("const_to_pat: cv={:#?}", cv); let adt_subpattern = |i, variant_opt| { let field = Field::new(i); - let val = const_val_field( + let val = const_field( self.tcx, self.param_env, instance, variant_opt, field, cv, ).expect("field access failed"); @@ -1085,8 +1085,9 @@ pub fn compare_const_vals<'a, 'tcx>( }, ty::TyInt(_) => { let layout = tcx.layout_of(ty).ok()?; - let a = interpret::sign_extend(a, layout); - let b = interpret::sign_extend(b, layout); + assert!(layout.abi.is_signed()); + let a = sign_extend(a, layout.size); + let b = sign_extend(b, layout.size); Some((a as i128).cmp(&(b as i128))) }, _ => Some(a.cmp(&b)), @@ -1106,8 +1107,8 @@ pub fn compare_const_vals<'a, 'tcx>( len_b, ), ) if ptr_a.offset.bytes() == 0 && ptr_b.offset.bytes() == 0 => { - let len_a = len_a.unwrap_or_err().ok(); - let len_b = len_b.unwrap_or_err().ok(); + let len_a = len_a.not_undef().ok(); + let len_b = len_b.not_undef().ok(); if len_a.is_none() || len_b.is_none() { tcx.sess.struct_err("str slice len is undef").delay_as_bug(); } @@ -1153,8 +1154,7 @@ fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind, LitKind::Str(ref s, _) => { let s = s.as_str(); let id = tcx.allocate_bytes(s.as_bytes()); - let value = Scalar::Ptr(id.into()).to_value_with_len(s.len() as u64, tcx); - ConstValue::from_byval_value(value).unwrap() + ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, tcx) }, LitKind::ByteStr(ref data) => { let id = tcx.allocate_bytes(data); diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs index 4e705254331a2..b4d36afa0f80d 100644 --- a/src/librustc_mir/interpret/cast.rs +++ b/src/librustc_mir/interpret/cast.rs @@ -1,87 +1,85 @@ -use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, LayoutOf, TyLayout}; +use rustc::ty::{self, Ty, TypeAndMut}; +use rustc::ty::layout::{self, TyLayout, Size}; use syntax::ast::{FloatTy, IntTy, UintTy}; use rustc_apfloat::ieee::{Single, Double}; -use super::{EvalContext, Machine}; -use rustc::mir::interpret::{Scalar, EvalResult, Pointer, PointerArithmetic, Value, EvalErrorKind}; +use rustc::mir::interpret::{ + Scalar, EvalResult, Pointer, PointerArithmetic, EvalErrorKind, + truncate, sign_extend +}; use rustc::mir::CastKind; use rustc_apfloat::Float; -use interpret::eval_context::ValTy; -use interpret::Place; + +use super::{EvalContext, Machine, PlaceTy, OpTy, Value}; impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { + fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool { + match ty.sty { + ty::TyRawPtr(ty::TypeAndMut { ty, .. 
}) | + ty::TyRef(_, ty, _) => !self.type_is_sized(ty), + ty::TyAdt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()), + _ => false, + } + } + crate fn cast( &mut self, - src: ValTy<'tcx>, + src: OpTy<'tcx>, kind: CastKind, - dest_ty: Ty<'tcx>, - dest: Place, + dest: PlaceTy<'tcx>, ) -> EvalResult<'tcx> { - let src_layout = self.layout_of(src.ty)?; - let dst_layout = self.layout_of(dest_ty)?; + let src_layout = src.layout; + let dst_layout = dest.layout; use rustc::mir::CastKind::*; match kind { Unsize => { - self.unsize_into(src.value, src_layout, dest, dst_layout)?; + self.unsize_into(src, dest)?; } Misc => { - if self.type_is_fat_ptr(src.ty) { - match (src.value, self.type_is_fat_ptr(dest_ty)) { - (Value::ByRef { .. }, _) | + let src = self.read_value(src)?; + if self.type_is_fat_ptr(src_layout.ty) { + match (src.value, self.type_is_fat_ptr(dest.layout.ty)) { // pointers to extern types (Value::Scalar(_),_) | // slices and trait objects to other slices/trait objects (Value::ScalarPair(..), true) => { - let valty = ValTy { - value: src.value, - ty: dest_ty, - }; - self.write_value(valty, dest)?; + // No change to value + self.write_value(src.value, dest)?; } // slices and trait objects to thin pointers (dropping the metadata) (Value::ScalarPair(data, _), false) => { - let valty = ValTy { - value: Value::Scalar(data), - ty: dest_ty, - }; - self.write_value(valty, dest)?; + self.write_scalar(data, dest)?; } } } else { - let src_layout = self.layout_of(src.ty)?; match src_layout.variants { layout::Variants::Single { index } => { - if let Some(def) = src.ty.ty_adt_def() { + if let Some(def) = src_layout.ty.ty_adt_def() { let discr_val = def .discriminant_for_variant(*self.tcx, index) .val; return self.write_scalar( - dest, Scalar::Bits { bits: discr_val, size: dst_layout.size.bytes() as u8, }, - dest_ty); + dest); } } layout::Variants::Tagged { .. } | layout::Variants::NicheFilling { .. 
} => {}, } - let src_val = self.value_to_scalar(src)?; - let dest_val = self.cast_scalar(src_val, src_layout, dst_layout)?; - let valty = ValTy { - value: Value::Scalar(dest_val.into()), - ty: dest_ty, - }; - self.write_value(valty, dest)?; + let src = src.to_scalar()?; + let dest_val = self.cast_scalar(src, src_layout, dest.layout)?; + self.write_scalar(dest_val, dest)?; } } ReifyFnPointer => { - match src.ty.sty { + // The src operand does not matter, just its type + match src_layout.ty.sty { ty::TyFnDef(def_id, substs) => { if self.tcx.has_attr(def_id, "rustc_args_required_const") { bug!("reifying a fn ptr that requires \ @@ -94,29 +92,26 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { substs, ).ok_or_else(|| EvalErrorKind::TooGeneric.into()); let fn_ptr = self.memory.create_fn_alloc(instance?); - let valty = ValTy { - value: Value::Scalar(Scalar::Ptr(fn_ptr.into()).into()), - ty: dest_ty, - }; - self.write_value(valty, dest)?; + self.write_scalar(Scalar::Ptr(fn_ptr.into()), dest)?; } ref other => bug!("reify fn pointer on {:?}", other), } } UnsafeFnPointer => { - match dest_ty.sty { + let src = self.read_value(src)?; + match dest.layout.ty.sty { ty::TyFnPtr(_) => { - let mut src = src; - src.ty = dest_ty; - self.write_value(src, dest)?; + // No change to value + self.write_value(*src, dest)?; } ref other => bug!("fn to unsafe fn cast on {:?}", other), } } ClosureFnPointer => { - match src.ty.sty { + // The src operand does not matter, just its type + match src_layout.ty.sty { ty::TyClosure(def_id, substs) => { let substs = self.tcx.subst_and_normalize_erasing_regions( self.substs(), @@ -130,11 +125,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { ty::ClosureKind::FnOnce, ); let fn_ptr = self.memory.create_fn_alloc(instance); - let valty = ValTy { - value: Value::Scalar(Scalar::Ptr(fn_ptr.into()).into()), - ty: dest_ty, - }; - self.write_value(valty, dest)?; + let val = Value::Scalar(Scalar::Ptr(fn_ptr.into()).into()); + self.write_value(val, dest)?; } ref other => bug!("closure fn pointer on {:?}", other), } @@ -155,11 +147,26 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { match val { Scalar::Ptr(ptr) => self.cast_from_ptr(ptr, dest_layout.ty), Scalar::Bits { bits, size } => { - assert_eq!(size as u64, src_layout.size.bytes()); - match src_layout.ty.sty { - TyFloat(fty) => self.cast_from_float(bits, fty, dest_layout.ty), - _ => self.cast_from_int(bits, src_layout, dest_layout), + debug_assert_eq!(size as u64, src_layout.size.bytes()); + debug_assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits, + "Unexpected value of size {} before casting", size); + + let res = match src_layout.ty.sty { + TyFloat(fty) => self.cast_from_float(bits, fty, dest_layout.ty)?, + _ => self.cast_from_int(bits, src_layout, dest_layout)?, + }; + + // Sanity check + match res { + Scalar::Ptr(_) => bug!("Fabricated a ptr value from an int...?"), + Scalar::Bits { bits, size } => { + debug_assert_eq!(size as u64, dest_layout.size.bytes()); + debug_assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits, + "Unexpected value of size {} after casting", size); + } } + // Done + Ok(res) } } } @@ -229,30 +236,31 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // float -> uint TyUint(t) => { let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize); - match fty { - FloatTy::F32 => Ok(Scalar::Bits { - bits: Single::from_bits(bits).to_u128(width).value, 
- size: (width / 8) as u8, - }), - FloatTy::F64 => Ok(Scalar::Bits { - bits: Double::from_bits(bits).to_u128(width).value, - size: (width / 8) as u8, - }), - } + let v = match fty { + FloatTy::F32 => Single::from_bits(bits).to_u128(width).value, + FloatTy::F64 => Double::from_bits(bits).to_u128(width).value, + }; + // This should already fit the bit width + Ok(Scalar::Bits { + bits: v, + size: (width / 8) as u8, + }) }, // float -> int TyInt(t) => { let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize); - match fty { - FloatTy::F32 => Ok(Scalar::Bits { - bits: Single::from_bits(bits).to_i128(width).value as u128, - size: (width / 8) as u8, - }), - FloatTy::F64 => Ok(Scalar::Bits { - bits: Double::from_bits(bits).to_i128(width).value as u128, - size: (width / 8) as u8, - }), - } + let v = match fty { + FloatTy::F32 => Single::from_bits(bits).to_i128(width).value, + FloatTy::F64 => Double::from_bits(bits).to_i128(width).value, + }; + // We got an i128, but we may need something smaller. We have to truncate ourselves. + let truncated = truncate(v as u128, Size::from_bits(width as u64)); + assert_eq!(sign_extend(truncated, Size::from_bits(width as u64)) as i128, v, + "truncating and extending changed the value?!?"); + Ok(Scalar::Bits { + bits: truncated, + size: (width / 8) as u8, + }) }, // f64 -> f32 TyFloat(FloatTy::F32) if fty == FloatTy::F64 => { @@ -292,4 +300,111 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { _ => err!(Unimplemented(format!("ptr to {:?} cast", ty))), } } + + fn unsize_into_ptr( + &mut self, + src: OpTy<'tcx>, + dest: PlaceTy<'tcx>, + // The pointee types + sty: Ty<'tcx>, + dty: Ty<'tcx>, + ) -> EvalResult<'tcx> { + // A<Box<T>> -> A<Box<Trait>> conversion + let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty); + + match (&src_pointee_ty.sty, &dest_pointee_ty.sty) { + (&ty::TyArray(_, length), &ty::TySlice(_)) => { + let ptr = self.read_value(src)?.to_scalar_ptr()?; + // u64 cast is from usize to u64, which is always good + let val = Value::new_slice(ptr, length.unwrap_usize(self.tcx.tcx), self.tcx.tcx); + self.write_value(val, dest) + } + (&ty::TyDynamic(..), &ty::TyDynamic(..)) => { + // For now, upcasts are limited to changes in marker + // traits, and hence never actually require an actual + // change to the vtable. + self.copy_op(src, dest) + } + (_, &ty::TyDynamic(ref data, _)) => { + // Initial cast from sized to dyn trait + let trait_ref = data.principal().unwrap().with_self_ty( + *self.tcx, + src_pointee_ty, + ); + let trait_ref = self.tcx.erase_regions(&trait_ref); + let vtable = self.get_vtable(src_pointee_ty, trait_ref)?; + let ptr = self.read_value(src)?.to_scalar_ptr()?; + let val = Value::new_dyn_trait(ptr, vtable); + self.write_value(val, dest) + } + + _ => bug!("invalid unsizing {:?} -> {:?}", src.layout.ty, dest.layout.ty), + } + } + + fn unsize_into( + &mut self, + src: OpTy<'tcx>, + dest: PlaceTy<'tcx>, + ) -> EvalResult<'tcx> { + match (&src.layout.ty.sty, &dest.layout.ty.sty) { + (&ty::TyRef(_, s, _), &ty::TyRef(_, d, _)) | + (&ty::TyRef(_, s, _), &ty::TyRawPtr(TypeAndMut { ty: d, .. })) | + (&ty::TyRawPtr(TypeAndMut { ty: s, .. }), + &ty::TyRawPtr(TypeAndMut { ty: d, ..
})) => { + self.unsize_into_ptr(src, dest, s, d) + } + (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => { + assert_eq!(def_a, def_b); + if def_a.is_box() || def_b.is_box() { + if !def_a.is_box() || !def_b.is_box() { + bug!("invalid unsizing between {:?} -> {:?}", src.layout, dest.layout); + } + return self.unsize_into_ptr( + src, + dest, + src.layout.ty.boxed_ty(), + dest.layout.ty.boxed_ty(), + ); + } + + // unsizing of generic struct with pointer fields + // Example: `Arc<T>` -> `Arc<Trait>` + // here we need to increase the size of every &T thin ptr field to a fat ptr + for i in 0..src.layout.fields.count() { + let dst_field = self.place_field(dest, i as u64)?; + if dst_field.layout.is_zst() { + continue; + } + let src_field = match src.try_as_mplace() { + Ok(mplace) => { + let src_field = self.mplace_field(mplace, i as u64)?; + src_field.into() + } + Err(..) => { + let src_field_layout = src.layout.field(&self, i)?; + // this must be a field covering the entire thing + assert_eq!(src.layout.fields.offset(i).bytes(), 0); + assert_eq!(src_field_layout.size, src.layout.size); + // just swap out the layout + OpTy { op: src.op, layout: src_field_layout } + } + }; + if src_field.layout.ty == dst_field.layout.ty { + self.copy_op(src_field, dst_field)?; + } else { + self.unsize_into(src_field, dst_field)?; + } + } + Ok(()) + } + _ => { + bug!( + "unsize_into: invalid conversion: {:?} -> {:?}", + src.layout, + dest.layout + ) + } + } + } } diff --git a/src/librustc_mir/interpret/const_eval.rs b/src/librustc_mir/interpret/const_eval.rs index dd298d9becbed..8aba49531239e 100644 --- a/src/librustc_mir/interpret/const_eval.rs +++ b/src/librustc_mir/interpret/const_eval.rs @@ -2,12 +2,12 @@ use std::fmt; use std::error::Error; use rustc::hir; -use rustc::mir::interpret::{ConstEvalErr, ScalarMaybeUndef}; +use rustc::mir::interpret::ConstEvalErr; use rustc::mir; -use rustc::ty::{self, TyCtxt, Ty, Instance}; -use rustc::ty::layout::{self, LayoutOf, Primitive, TyLayout}; +use rustc::ty::{self, TyCtxt, Instance}; +use rustc::ty::layout::{LayoutOf, Primitive, TyLayout}; use rustc::ty::subst::Subst; -use rustc_data_structures::indexed_vec::IndexVec; +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use syntax::ast::Mutability; use syntax::source_map::Span; @@ -15,9 +15,12 @@ use syntax::source_map::DUMMY_SP; use rustc::mir::interpret::{ EvalResult, EvalError, EvalErrorKind, GlobalId, - Value, Scalar, AllocId, Allocation, ConstValue, + Scalar, AllocId, Allocation, ConstValue, +}; +use super::{ + Place, PlaceExtra, PlaceTy, MemPlace, OpTy, Operand, Value, + EvalContext, StackPopCleanup, Memory, MemoryKind }; -use super::{Place, EvalContext, StackPopCleanup, ValTy, Memory, MemoryKind}; pub fn mk_borrowck_eval_cx<'a, 'mir, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -35,7 +38,7 @@ pub fn mk_borrowck_eval_cx<'a, 'mir, 'tcx>( instance, span, mir, - return_place: Place::undef(), + return_place: Place::null(tcx), return_to_block: StackPopCleanup::None, stmt: 0, }); @@ -56,7 +59,7 @@ pub fn mk_eval_cx<'a, 'tcx>( instance, mir.span, mir, - Place::undef(), + Place::null(tcx), StackPopCleanup::None, )?; Ok(ecx) @@ -67,39 +70,51 @@ pub fn eval_promoted<'a, 'mir, 'tcx>( cid: GlobalId<'tcx>, mir: &'mir mir::Mir<'tcx>, param_env: ty::ParamEnv<'tcx>, -) -> EvalResult<'tcx, (Value, Scalar, TyLayout<'tcx>)> { +) -> EvalResult<'tcx, OpTy<'tcx>> { ecx.with_fresh_body(|ecx| { eval_body_using_ecx(ecx, cid, Some(mir), param_env) }) } -pub fn value_to_const_value<'tcx>( +pub fn op_to_const<'tcx>( ecx: &EvalContext<'_, '_, 'tcx, 
CompileTimeEvaluator>, - val: Value, - layout: TyLayout<'tcx>, + op: OpTy<'tcx>, + normalize: bool, ) -> EvalResult<'tcx, &'tcx ty::Const<'tcx>> { - match (val, &layout.abi) { - (Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { size: 0, ..})), _) if layout.is_zst() => {}, - (Value::ByRef(..), _) | - (Value::Scalar(_), &layout::Abi::Scalar(_)) | - (Value::ScalarPair(..), &layout::Abi::ScalarPair(..)) => {}, - _ => bug!("bad value/layout combo: {:#?}, {:#?}", val, layout), - } - let val = match val { - Value::Scalar(val) => ConstValue::Scalar(val.unwrap_or_err()?), - Value::ScalarPair(a, b) => ConstValue::ScalarPair(a.unwrap_or_err()?, b), - Value::ByRef(ptr, align) => { - let ptr = ptr.to_ptr().unwrap(); + let normalized_op = if normalize { + ecx.try_read_value(op)? + } else { + match op.op { + Operand::Indirect(mplace) => Err(mplace), + Operand::Immediate(val) => Ok(val) + } + }; + let val = match normalized_op { + Err(MemPlace { ptr, align, extra }) => { + // extract alloc-offset pair + assert_eq!(extra, PlaceExtra::None); + let ptr = ptr.to_ptr()?; let alloc = ecx.memory.get(ptr.alloc_id)?; assert!(alloc.align.abi() >= align.abi()); - assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= layout.size.bytes()); + assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= op.layout.size.bytes()); let mut alloc = alloc.clone(); alloc.align = align; let alloc = ecx.tcx.intern_const_alloc(alloc); ConstValue::ByRef(alloc, ptr.offset) - } + }, + Ok(Value::Scalar(x)) => + ConstValue::Scalar(x.not_undef()?), + Ok(Value::ScalarPair(a, b)) => + ConstValue::ScalarPair(a.not_undef()?, b), }; - Ok(ty::Const::from_const_value(ecx.tcx.tcx, val, layout.ty)) + Ok(ty::Const::from_const_value(ecx.tcx.tcx, val, op.layout.ty)) +} +pub fn const_to_op<'tcx>( + ecx: &mut EvalContext<'_, '_, 'tcx, CompileTimeEvaluator>, + cnst: &'tcx ty::Const<'tcx>, +) -> EvalResult<'tcx, OpTy<'tcx>> { + let op = ecx.const_value_to_op(cnst.val)?; + Ok(OpTy { op, layout: ecx.layout_of(cnst.ty)? 
}) } fn eval_body_and_ecx<'a, 'mir, 'tcx>( @@ -107,7 +122,7 @@ fn eval_body_and_ecx<'a, 'mir, 'tcx>( cid: GlobalId<'tcx>, mir: Option<&'mir mir::Mir<'tcx>>, param_env: ty::ParamEnv<'tcx>, -) -> (EvalResult<'tcx, (Value, Scalar, TyLayout<'tcx>)>, EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>) { +) -> (EvalResult<'tcx, OpTy<'tcx>>, EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>) { debug!("eval_body_and_ecx: {:?}, {:?}", cid, param_env); // we start out with the best span we have // and try improving it down the road when more information is available @@ -118,12 +133,13 @@ fn eval_body_and_ecx<'a, 'mir, 'tcx>( (r, ecx) } +// Returns a pointer to where the result lives fn eval_body_using_ecx<'a, 'mir, 'tcx>( ecx: &mut EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>, cid: GlobalId<'tcx>, mir: Option<&'mir mir::Mir<'tcx>>, param_env: ty::ParamEnv<'tcx>, -) -> EvalResult<'tcx, (Value, Scalar, TyLayout<'tcx>)> { +) -> EvalResult<'tcx, OpTy<'tcx>> { debug!("eval_body: {:?}, {:?}", cid, param_env); let tcx = ecx.tcx.tcx; let mut mir = match mir { @@ -135,11 +151,7 @@ fn eval_body_using_ecx<'a, 'mir, 'tcx>( } let layout = ecx.layout_of(mir.return_ty().subst(tcx, cid.instance.substs))?; assert!(!layout.is_unsized()); - let ptr = ecx.memory.allocate( - layout.size, - layout.align, - MemoryKind::Stack, - )?; + let ret = ecx.allocate(layout, MemoryKind::Stack)?; let internally_mutable = !layout.ty.is_freeze(tcx, param_env, mir.span); let is_static = tcx.is_static(cid.instance.def_id()); let mutability = if is_static == Some(hir::Mutability::MutMutable) || internally_mutable { @@ -156,19 +168,14 @@ fn eval_body_using_ecx<'a, 'mir, 'tcx>( cid.instance, mir.span, mir, - Place::from_ptr(ptr, layout.align), + Place::Ptr(*ret), cleanup, )?; + // The main interpreter loop. while ecx.step()? {} - let ptr = ptr.into(); - // always try to read the value and report errors - let value = match ecx.try_read_value(ptr, layout.align, layout.ty)? 
{ - Some(val) if is_static.is_none() && cid.promoted.is_none() => val, - // point at the allocation - _ => Value::ByRef(ptr, layout.align), - }; - Ok((value, ptr, layout)) + + Ok(ret.into()) } #[derive(Debug, Clone, Eq, PartialEq, Hash)] @@ -222,14 +229,14 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { fn eval_fn_call<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, - destination: Option<(Place, mir::BasicBlock)>, - args: &[ValTy<'tcx>], + destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>, + args: &[OpTy<'tcx>], span: Span, - sig: ty::FnSig<'tcx>, ) -> EvalResult<'tcx, bool> { debug!("eval_fn_call: {:?}", instance); if !ecx.tcx.is_const_fn(instance.def_id()) { let def_id = instance.def_id(); + // Some fn calls are actually BinOp intrinsics let (op, oflo) = if let Some(op) = ecx.tcx.is_binop_lang_item(def_id) { op } else { @@ -238,11 +245,12 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { ); }; let (dest, bb) = destination.expect("128 lowerings can't diverge"); - let dest_ty = sig.output(); + let l = ecx.read_value(args[0])?; + let r = ecx.read_value(args[1])?; if oflo { - ecx.intrinsic_with_overflow(op, args[0], args[1], dest, dest_ty)?; + ecx.binop_with_overflow(op, l, r, dest)?; } else { - ecx.intrinsic_overflowing(op, args[0], args[1], dest, dest_ty)?; + ecx.binop_ignore_overflow(op, l, r, dest)?; } ecx.goto_block(bb); return Ok(true); @@ -260,8 +268,8 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { } }; let (return_place, return_to_block) = match destination { - Some((place, block)) => (place, StackPopCleanup::Goto(block)), - None => (Place::undef(), StackPopCleanup::None), + Some((place, block)) => (*place, StackPopCleanup::Goto(block)), + None => (Place::null(&ecx), StackPopCleanup::None), }; ecx.push_stack_frame( @@ -279,9 +287,8 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { fn call_intrinsic<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, - args: &[ValTy<'tcx>], - dest: Place, - dest_layout: layout::TyLayout<'tcx>, + args: &[OpTy<'tcx>], + dest: PlaceTy<'tcx>, target: mir::BasicBlock, ) -> EvalResult<'tcx> { let substs = instance.substs; @@ -293,9 +300,9 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { let elem_align = ecx.layout_of(elem_ty)?.align.abi(); let align_val = Scalar::Bits { bits: elem_align as u128, - size: dest_layout.size.bytes() as u8, + size: dest.layout.size.bytes() as u8, }; - ecx.write_scalar(dest, align_val, dest_layout.ty)?; + ecx.write_scalar(align_val, dest)?; } "size_of" => { @@ -303,9 +310,9 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { let size = ecx.layout_of(ty)?.size.bytes() as u128; let size_val = Scalar::Bits { bits: size, - size: dest_layout.size.bytes() as u8, + size: dest.layout.size.bytes() as u8, }; - ecx.write_scalar(dest, size_val, dest_layout.ty)?; + ecx.write_scalar(size_val, dest)?; } "type_id" => { @@ -313,14 +320,14 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { let type_id = ecx.tcx.type_id_hash(ty) as u128; let id_val = Scalar::Bits { bits: type_id, - size: dest_layout.size.bytes() as u8, + size: dest.layout.size.bytes() as u8, }; - ecx.write_scalar(dest, id_val, dest_layout.ty)?; + ecx.write_scalar(id_val, dest)?; } "ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => { let ty = substs.type_at(0); let layout_of = ecx.layout_of(ty)?; - let bits = 
ecx.value_to_scalar(args[0])?.to_bits(layout_of.size)?; + let bits = ecx.read_scalar(args[0])?.to_bits(layout_of.size)?; let kind = match layout_of.abi { ty::layout::Abi::Scalar(ref scalar) => scalar.value, _ => Err(::rustc::mir::interpret::EvalErrorKind::TypeNotPrimitive(ty))?, @@ -333,7 +340,7 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { } else { numeric_intrinsic(intrinsic_name, bits, kind)? }; - ecx.write_scalar(dest, out_val, ty)?; + ecx.write_scalar(out_val, dest)?; } name => return Err( @@ -353,9 +360,9 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { _ecx: &EvalContext<'a, 'mir, 'tcx, Self>, _bin_op: mir::BinOp, left: Scalar, - _left_ty: Ty<'tcx>, + _left_layout: TyLayout<'tcx>, right: Scalar, - _right_ty: Ty<'tcx>, + _right_layout: TyLayout<'tcx>, ) -> EvalResult<'tcx, Option<(Scalar, bool)>> { if left.is_bits() && right.is_bits() { Ok(None) @@ -387,8 +394,7 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { fn box_alloc<'a>( _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, - _ty: Ty<'tcx>, - _dest: Place, + _dest: PlaceTy<'tcx>, ) -> EvalResult<'tcx> { Err( ConstEvalError::NeedsRfc("heap allocations via `box` keyword".to_string()).into(), @@ -406,7 +412,8 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { } } -pub fn const_val_field<'a, 'tcx>( +/// Project to a field of a (variant of a) const +pub fn const_field<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, instance: ty::Instance<'tcx>, @@ -414,30 +421,21 @@ pub fn const_val_field<'a, 'tcx>( field: mir::Field, value: &'tcx ty::Const<'tcx>, ) -> ::rustc::mir::interpret::ConstEvalResult<'tcx> { - trace!("const_val_field: {:?}, {:?}, {:?}", instance, field, value); + trace!("const_field: {:?}, {:?}, {:?}", instance, field, value); let mut ecx = mk_eval_cx(tcx, instance, param_env).unwrap(); let result = (|| { - let ty = value.ty; - let value = ecx.const_to_value(value.val)?; - let layout = ecx.layout_of(ty)?; - let place = ecx.allocate_place_for_value(value, layout, variant)?; - let (place, layout) = ecx.place_field(place, field, layout)?; - let (ptr, align) = place.to_ptr_align(); - let mut new_value = Value::ByRef(ptr.unwrap_or_err()?, align); - new_value = ecx.try_read_by_ref(new_value, layout.ty)?; - use rustc_data_structures::indexed_vec::Idx; - match (value, new_value) { - (Value::Scalar(_), Value::ByRef(..)) | - (Value::ScalarPair(..), Value::ByRef(..)) | - (Value::Scalar(_), Value::ScalarPair(..)) => bug!( - "field {} of {:?} yielded {:?}", - field.index(), - value, - new_value, - ), - _ => {}, - } - value_to_const_value(&ecx, new_value, layout) + // get the operand again + let op = const_to_op(&mut ecx, value)?; + // downcast + let down = match variant { + None => op, + Some(variant) => ecx.operand_downcast(op, variant)? + }; + // then project + let field = ecx.operand_field(down, field.index() as u64)?; + // and finally move back to the const world, always normalizing because + // this is not called for statics. + op_to_const(&ecx, field, true) })(); result.map_err(|err| { let (trace, span) = ecx.generate_stacktrace(None); @@ -457,21 +455,11 @@ pub fn const_variant_index<'a, 'tcx>( ) -> EvalResult<'tcx, usize> { trace!("const_variant_index: {:?}, {:?}", instance, val); let mut ecx = mk_eval_cx(tcx, instance, param_env).unwrap(); - let value = ecx.const_to_value(val.val)?; - let layout = ecx.layout_of(val.ty)?; - let (ptr, align) = match value { - Value::ScalarPair(..) 
| Value::Scalar(_) => { - let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?.into(); - ecx.write_value_to_ptr(value, ptr, layout.align, val.ty)?; - (ptr, layout.align) - }, - Value::ByRef(ptr, align) => (ptr, align), - }; - let place = Place::from_scalar_ptr(ptr.into(), align); - ecx.read_discriminant_as_variant_index(place, layout) + let op = const_to_op(&mut ecx, val)?; + ecx.read_discriminant_as_variant_index(op) } -pub fn const_value_to_allocation_provider<'a, 'tcx>( +pub fn const_to_allocation_provider<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, val: &'tcx ty::Const<'tcx>, ) -> &'tcx Allocation { @@ -488,11 +476,11 @@ pub fn const_value_to_allocation_provider<'a, 'tcx>( ty::ParamEnv::reveal_all(), CompileTimeEvaluator, ()); - let value = ecx.const_to_value(val.val)?; - let layout = ecx.layout_of(val.ty)?; - let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?; - ecx.write_value_to_ptr(value, ptr.into(), layout.align, val.ty)?; - let alloc = ecx.memory.get(ptr.alloc_id)?; + let op = const_to_op(&mut ecx, val)?; + // Make a new allocation, copy things there + let ptr = ecx.allocate(op.layout, MemoryKind::Stack)?; + ecx.copy_op(op, ptr.into())?; + let alloc = ecx.memory.get(ptr.to_ptr()?.alloc_id)?; Ok(tcx.intern_const_alloc(alloc.clone())) }; result().expect("unable to convert ConstValue to Allocation") @@ -534,11 +522,16 @@ pub fn const_eval_provider<'a, 'tcx>( }; let (res, ecx) = eval_body_and_ecx(tcx, cid, None, key.param_env); - res.and_then(|(mut val, _, layout)| { - if tcx.is_static(def_id).is_none() && cid.promoted.is_none() { - val = ecx.try_read_by_ref(val, layout.ty)?; + res.and_then(|op| { + let normalize = tcx.is_static(def_id).is_none() && cid.promoted.is_none(); + if !normalize { + // Sanity check: These must always be a MemPlace + match op.op { + Operand::Indirect(_) => { /* all is good */ }, + Operand::Immediate(_) => bug!("const eval gave us an Immediate"), + } } - value_to_const_value(&ecx, val, layout) + op_to_const(&ecx, op, normalize) }).map_err(|err| { let (trace, span) = ecx.generate_stacktrace(None); let err = ConstEvalErr { diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index 5517ed145546d..33420b6150bb7 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -6,49 +6,28 @@ use rustc::hir::def_id::DefId; use rustc::hir::def::Def; use rustc::hir::map::definitions::DefPathData; use rustc::mir; -use rustc::ty::layout::{self, Size, Align, HasDataLayout, IntegerExt, LayoutOf, TyLayout, Primitive}; +use rustc::ty::layout::{ + self, Size, Align, HasDataLayout, LayoutOf, TyLayout +}; use rustc::ty::subst::{Subst, Substs}; -use rustc::ty::{self, Ty, TyCtxt, TypeAndMut}; +use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; use rustc::ty::query::TyCtxtAt; use rustc_data_structures::fx::{FxHashSet, FxHasher}; -use rustc_data_structures::indexed_vec::{IndexVec, Idx}; +use rustc_data_structures::indexed_vec::IndexVec; use rustc::mir::interpret::{ - GlobalId, Value, Scalar, FrameInfo, AllocType, - EvalResult, EvalErrorKind, Pointer, ConstValue, + GlobalId, Scalar, FrameInfo, + EvalResult, EvalErrorKind, ScalarMaybeUndef, + truncate, sign_extend, }; use syntax::source_map::{self, Span}; use syntax::ast::Mutability; -use super::{Place, PlaceExtra, Memory, - HasMemory, MemoryKind, - Machine}; - -macro_rules! 
validation_failure{ - ($what:expr, $where:expr, $details:expr) => {{ - let where_ = if $where.is_empty() { - String::new() - } else { - format!(" at {}", $where) - }; - err!(ValidationFailure(format!( - "encountered {}{}, but expected {}", - $what, where_, $details, - ))) - }}; - ($what:expr, $where:expr) => {{ - let where_ = if $where.is_empty() { - String::new() - } else { - format!(" at {}", $where) - }; - err!(ValidationFailure(format!( - "encountered {}{}", - $what, where_, - ))) - }}; -} +use super::{ + Value, Operand, MemPlace, MPlaceTy, Place, PlaceExtra, + Memory, Machine +}; pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { /// Stores the `Machine` instance. @@ -119,21 +98,6 @@ pub struct Frame<'mir, 'tcx: 'mir> { pub stmt: usize, } -#[derive(Copy, Clone, PartialEq, Eq, Hash)] -pub enum LocalValue { - Dead, - Live(Value), -} - -impl LocalValue { - pub fn access(self) -> EvalResult<'static, Value> { - match self { - LocalValue::Dead => err!(DeadLocal), - LocalValue::Live(val) => Ok(val), - } - } -} - impl<'mir, 'tcx: 'mir> Eq for Frame<'mir, 'tcx> {} impl<'mir, 'tcx: 'mir> PartialEq for Frame<'mir, 'tcx> { @@ -182,6 +146,33 @@ impl<'mir, 'tcx: 'mir> Hash for Frame<'mir, 'tcx> { } } +// State of a local variable +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +pub enum LocalValue { + Dead, + // Mostly for convenience, we re-use the `Operand` type here. + // This is an optimization over just always having a pointer here; + // we can thus avoid doing an allocation when the local just stores + // immediate values *and* never has its address taken. + Live(Operand), +} + +impl<'tcx> LocalValue { + pub fn access(&self) -> EvalResult<'tcx, &Operand> { + match self { + LocalValue::Dead => err!(DeadLocal), + LocalValue::Live(ref val) => Ok(val), + } + } + + pub fn access_mut(&mut self) -> EvalResult<'tcx, &mut Operand> { + match self { + LocalValue::Dead => err!(DeadLocal), + LocalValue::Live(ref mut val) => Ok(val), + } + } +} + /// The virtual machine state during const-evaluation at a given point in time. 
type EvalSnapshot<'a, 'mir, 'tcx, M> = (M, Vec<Frame<'mir, 'tcx>>, Memory<'a, 'mir, 'tcx, M>); @@ -263,25 +254,6 @@ pub enum StackPopCleanup { None, } -#[derive(Copy, Clone, Debug)] -pub struct TyAndPacked<'tcx> { - pub ty: Ty<'tcx>, - pub packed: bool, -} - -#[derive(Copy, Clone, Debug)] -pub struct ValTy<'tcx> { - pub value: Value, - pub ty: Ty<'tcx>, -} - -impl<'tcx> ::std::ops::Deref for ValTy<'tcx> { - type Target = Value; - fn deref(&self) -> &Value { - &self.value - } -} - impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for &'a EvalContext<'a, 'mir, 'tcx, M> { #[inline] fn data_layout(&self) -> &layout::TargetDataLayout { @@ -316,6 +288,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> LayoutOf for &'a EvalContext<'a, 'm type Ty = Ty<'tcx>; type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>; + #[inline] fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { self.tcx.layout_of(self.param_env.and(ty)) .map_err(|layout| EvalErrorKind::Layout(layout).into()) @@ -363,12 +336,6 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M r } - pub fn alloc_ptr(&mut self, layout: TyLayout<'tcx>) -> EvalResult<'tcx, Pointer> { - assert!(!layout.is_unsized(), "cannot alloc memory for unsized type"); - - self.memory.allocate(layout.size, layout.align, MemoryKind::Stack) - } - pub fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> { &self.memory } @@ -387,31 +354,28 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M self.stack.len() - 1 } - pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> { - let ptr = self.memory.allocate_bytes(s.as_bytes()); - Ok(Scalar::Ptr(ptr).to_value_with_len(s.len() as u64, self.tcx.tcx)) + /// Mark a storage as live, killing the previous content and returning it. + /// Remember to deallocate that! + pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, LocalValue> { + trace!("{:?} is now live", local); + + let layout = self.layout_of_local(self.cur_frame(), local)?; + let init = LocalValue::Live(self.uninit_operand(layout)?); + // StorageLive *always* kills the value that's currently stored + Ok(mem::replace(&mut self.frame_mut().locals[local], init)) } - pub fn const_to_value( - &mut self, - val: ConstValue<'tcx>, - ) -> EvalResult<'tcx, Value> { - match val { - ConstValue::Unevaluated(def_id, substs) => { - let instance = self.resolve(def_id, substs)?; - self.read_global_as_value(GlobalId { - instance, - promoted: None, - }) - } - ConstValue::ByRef(alloc, offset) => { - // FIXME: Allocate new AllocId for all constants inside - let id = self.memory.allocate_value(alloc.clone(), MemoryKind::Stack)?; - Ok(Value::ByRef(Pointer::new(id, offset).into(), alloc.align)) - }, - ConstValue::ScalarPair(a, b) => Ok(Value::ScalarPair(a.into(), b.into())), - ConstValue::Scalar(val) => Ok(Value::Scalar(val.into())), - } + /// Returns the old value of the local. + /// Remember to deallocate that! 
+ pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue { + trace!("{:?} is now dead", local); + + mem::replace(&mut self.frame_mut().locals[local], LocalValue::Dead) + } + + pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> { + let ptr = self.memory.allocate_bytes(s.as_bytes()); + Ok(Value::new_slice(Scalar::Ptr(ptr), s.len() as u64, self.tcx.tcx)) } pub(super) fn resolve(&self, def_id: DefId, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, ty::Instance<'tcx>> { @@ -455,48 +419,65 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } } - pub fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { + pub fn monomorphize + Subst<'tcx>>( + &self, + t: T, + substs: &'tcx Substs<'tcx> + ) -> T { // miri doesn't care about lifetimes, and will choke on some crazy ones // let's simply get rid of them - let substituted = ty.subst(*self.tcx, substs); + let substituted = t.subst(*self.tcx, substs); self.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), substituted) } - /// Return the size and alignment of the value at the given type. + pub fn layout_of_local( + &self, + frame: usize, + local: mir::Local + ) -> EvalResult<'tcx, TyLayout<'tcx>> { + let local_ty = self.stack[frame].mir.local_decls[local].ty; + let local_ty = self.monomorphize( + local_ty, + self.stack[frame].instance.substs + ); + self.layout_of(local_ty) + } + + /// Return the actual dynamic size and alignment of the place at the given type. /// Note that the value does not matter if the type is sized. For unsized types, /// the value has to be a fat pointer, and we only care about the "extra" data in it. - pub fn size_and_align_of_dst( + pub fn size_and_align_of_mplace( &self, - ty: Ty<'tcx>, - value: Value, + mplace: MPlaceTy<'tcx>, ) -> EvalResult<'tcx, (Size, Align)> { - let layout = self.layout_of(ty)?; - if !layout.is_unsized() { - Ok(layout.size_and_align()) + if let PlaceExtra::None = mplace.extra { + assert!(!mplace.layout.is_unsized()); + Ok(mplace.layout.size_and_align()) } else { - match ty.sty { + let layout = mplace.layout; + assert!(layout.is_unsized()); + match layout.ty.sty { ty::TyAdt(..) | ty::TyTuple(..) => { // First get the size of all statically known fields. // Don't use type_of::sizing_type_of because that expects t to be sized, // and it also rounds up to alignment, which we want to avoid, // as the unsized field's alignment could be smaller. - assert!(!ty.is_simd()); - debug!("DST {} layout: {:?}", ty, layout); + assert!(!layout.ty.is_simd()); + debug!("DST layout: {:?}", layout); let sized_size = layout.fields.offset(layout.fields.count() - 1); let sized_align = layout.align; debug!( "DST {} statically sized prefix size: {:?} align: {:?}", - ty, + layout.ty, sized_size, sized_align ); // Recurse to get the size of the dynamically sized field (must be // the last field). - let field_ty = layout.field(self, layout.fields.count() - 1)?.ty; - let (unsized_size, unsized_align) = - self.size_and_align_of_dst(field_ty, value)?; + let field = self.mplace_field(mplace, layout.fields.count() as u64 - 1)?; + let (unsized_size, unsized_align) = self.size_and_align_of_mplace(field)?; // FIXME (#26403, #27023): We should be adding padding // to `sized_size` (to accommodate the `unsized_align` @@ -526,18 +507,24 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M Ok((size.abi_align(align), align)) } ty::TyDynamic(..) 
=> { - let (_, vtable) = self.into_ptr_vtable_pair(value)?; + let vtable = match mplace.extra { + PlaceExtra::Vtable(vtable) => vtable, + _ => bug!("Expected vtable"), + }; // the second entry in the vtable is the dynamic size of the object. self.read_size_and_align_from_vtable(vtable) } ty::TySlice(_) | ty::TyStr => { + let len = match mplace.extra { + PlaceExtra::Length(len) => len, + _ => bug!("Expected length"), + }; let (elem_size, align) = layout.field(self, 0)?.size_and_align(); - let (_, len) = self.into_slice(value)?; Ok((elem_size * len, align)) } - _ => bug!("size_of_val::<{:?}>", ty), + _ => bug!("size_of_val::<{:?}> not supported", layout.ty), } } } @@ -568,10 +555,13 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M // don't allocate at all for trivial constants if mir.local_decls.len() > 1 { - let mut locals = IndexVec::from_elem(LocalValue::Dead, &mir.local_decls); - for (local, decl) in locals.iter_mut().zip(mir.local_decls.iter()) { - *local = LocalValue::Live(self.init_value(decl.ty)?); - } + // We put some marker value into the locals that we later want to initialize. + // This can be anything except for LocalValue::Dead -- because *that* is the + // value we use for things that we know are initially dead. + let dummy = + LocalValue::Live(Operand::Immediate(Value::Scalar(ScalarMaybeUndef::Undef))); + let mut locals = IndexVec::from_elem(dummy, &mir.local_decls); + // Now mark those locals as dead that we do not want to initialize match self.tcx.describe_def(instance.def_id()) { // statics and constants don't have `Storage*` statements, no need to look for them Some(Def::Static(..)) | Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) => {}, @@ -582,18 +572,32 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M use rustc::mir::StatementKind::{StorageDead, StorageLive}; match stmt.kind { StorageLive(local) | - StorageDead(local) => locals[local] = LocalValue::Dead, + StorageDead(local) => { + locals[local] = LocalValue::Dead; + } _ => {} } } } }, } + // Finally, properly initialize all those that still have the dummy value + for (local, decl) in locals.iter_mut().zip(mir.local_decls.iter()) { + match *local { + LocalValue::Live(_) => { + // This needs to be peoperly initialized. + let layout = self.layout_of(self.monomorphize(decl.ty, instance.substs))?; + *local = LocalValue::Live(self.uninit_operand(layout)?); + } + LocalValue::Dead => { + // Nothing to do + } + } + } + // done self.frame_mut().locals = locals; } - self.memory.cur_frame = self.cur_frame(); - if self.stack.len() > self.stack_limit { err!(StackFrameLimitReached) } else { @@ -607,16 +611,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let frame = self.stack.pop().expect( "tried to pop a stack frame, but there were none", ); - if !self.stack.is_empty() { - // TODO: Is this the correct time to start considering these accesses as originating from the returned-to stack frame? - self.memory.cur_frame = self.cur_frame(); - } match frame.return_to_block { StackPopCleanup::MarkStatic(mutable) => { - if let Place::Ptr { ptr, .. } = frame.return_place { + if let Place::Ptr(MemPlace { ptr, .. }) = frame.return_place { // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions self.memory.mark_static_initialized( - ptr.unwrap_or_err()?.to_ptr()?.alloc_id, + ptr.to_ptr()?.alloc_id, mutable, )? 
} else { @@ -634,9 +634,9 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M Ok(()) } - pub fn deallocate_local(&mut self, local: LocalValue) -> EvalResult<'tcx> { + crate fn deallocate_local(&mut self, local: LocalValue) -> EvalResult<'tcx> { // FIXME: should we tell the user that there was a local which was never written to? - if let LocalValue::Live(Value::ByRef(ptr, _align)) = local { + if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local { trace!("deallocating local"); let ptr = ptr.to_ptr()?; self.memory.dump_alloc(ptr.alloc_id); @@ -645,419 +645,6 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M Ok(()) } - /// Evaluate an assignment statement. - /// - /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue - /// type writes its results directly into the memory specified by the place. - pub(super) fn eval_rvalue_into_place( - &mut self, - rvalue: &mir::Rvalue<'tcx>, - place: &mir::Place<'tcx>, - ) -> EvalResult<'tcx> { - let dest = self.eval_place(place)?; - let dest_ty = self.place_ty(place); - let dest_layout = self.layout_of(dest_ty)?; - - use rustc::mir::Rvalue::*; - match *rvalue { - Use(ref operand) => { - let value = self.eval_operand(operand)?.value; - let valty = ValTy { - value, - ty: dest_ty, - }; - self.write_value(valty, dest)?; - } - - BinaryOp(bin_op, ref left, ref right) => { - let left = self.eval_operand(left)?; - let right = self.eval_operand(right)?; - self.intrinsic_overflowing( - bin_op, - left, - right, - dest, - dest_ty, - )?; - } - - CheckedBinaryOp(bin_op, ref left, ref right) => { - let left = self.eval_operand(left)?; - let right = self.eval_operand(right)?; - self.intrinsic_with_overflow( - bin_op, - left, - right, - dest, - dest_ty, - )?; - } - - UnaryOp(un_op, ref operand) => { - let val = self.eval_operand_to_scalar(operand)?; - let val = self.unary_op(un_op, val, dest_layout)?; - self.write_scalar( - dest, - val, - dest_ty, - )?; - } - - Aggregate(ref kind, ref operands) => { - let (dest, active_field_index) = match **kind { - mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { - self.write_discriminant_value(dest_ty, dest, variant_index)?; - if adt_def.is_enum() { - (self.place_downcast(dest, variant_index)?, active_field_index) - } else { - (dest, active_field_index) - } - } - _ => (dest, None) - }; - - let layout = self.layout_of(dest_ty)?; - for (i, operand) in operands.iter().enumerate() { - let value = self.eval_operand(operand)?; - // Ignore zero-sized fields. 
- if !self.layout_of(value.ty)?.is_zst() { - let field_index = active_field_index.unwrap_or(i); - let (field_dest, _) = self.place_field(dest, mir::Field::new(field_index), layout)?; - self.write_value(value, field_dest)?; - } - } - } - - Repeat(ref operand, _) => { - let (elem_ty, length) = match dest_ty.sty { - ty::TyArray(elem_ty, n) => (elem_ty, n.unwrap_usize(self.tcx.tcx)), - _ => { - bug!( - "tried to assign array-repeat to non-array type {:?}", - dest_ty - ) - } - }; - let elem_size = self.layout_of(elem_ty)?.size; - let value = self.eval_operand(operand)?.value; - - let (dest, dest_align) = self.force_allocation(dest)?.to_ptr_align(); - - if length > 0 { - let dest = dest.unwrap_or_err()?; - //write the first value - self.write_value_to_ptr(value, dest, dest_align, elem_ty)?; - - if length > 1 { - let rest = dest.ptr_offset(elem_size * 1 as u64, &self)?; - self.memory.copy_repeatedly(dest, dest_align, rest, dest_align, elem_size, length - 1, false)?; - } - } - } - - Len(ref place) => { - // FIXME(CTFE): don't allow computing the length of arrays in const eval - let src = self.eval_place(place)?; - let ty = self.place_ty(place); - let (_, len) = src.elem_ty_and_len(ty, self.tcx.tcx); - let size = self.memory.pointer_size().bytes() as u8; - self.write_scalar( - dest, - Scalar::Bits { - bits: len as u128, - size, - }, - dest_ty, - )?; - } - - Ref(_, _, ref place) => { - let src = self.eval_place(place)?; - // We ignore the alignment of the place here -- special handling for packed structs ends - // at the `&` operator. - let (ptr, _align, extra) = self.force_allocation(src)?.to_ptr_align_extra(); - - let val = match extra { - PlaceExtra::None => Value::Scalar(ptr), - PlaceExtra::Length(len) => ptr.to_value_with_len(len, self.tcx.tcx), - PlaceExtra::Vtable(vtable) => ptr.to_value_with_vtable(vtable), - PlaceExtra::DowncastVariant(..) => { - bug!("attempted to take a reference to an enum downcast place") - } - }; - let valty = ValTy { - value: val, - ty: dest_ty, - }; - self.write_value(valty, dest)?; - } - - NullaryOp(mir::NullOp::Box, ty) => { - let ty = self.monomorphize(ty, self.substs()); - M::box_alloc(self, ty, dest)?; - } - - NullaryOp(mir::NullOp::SizeOf, ty) => { - let ty = self.monomorphize(ty, self.substs()); - let layout = self.layout_of(ty)?; - assert!(!layout.is_unsized(), - "SizeOf nullary MIR operator called for unsized type"); - let size = self.memory.pointer_size().bytes() as u8; - self.write_scalar( - dest, - Scalar::Bits { - bits: layout.size.bytes() as u128, - size, - }, - dest_ty, - )?; - } - - Cast(kind, ref operand, cast_ty) => { - debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest_ty); - let src = self.eval_operand(operand)?; - self.cast(src, kind, dest_ty, dest)?; - } - - Discriminant(ref place) => { - let ty = self.place_ty(place); - let layout = self.layout_of(ty)?; - let place = self.eval_place(place)?; - let discr_val = self.read_discriminant_value(place, layout)?; - let size = self.layout_of(dest_ty).unwrap().size.bytes() as u8; - self.write_scalar(dest, Scalar::Bits { - bits: discr_val, - size, - }, dest_ty)?; - } - } - - self.dump_local(dest); - - Ok(()) - } - - pub(super) fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool { - match ty.sty { - ty::TyRawPtr(ty::TypeAndMut { ty, .. 
}) | - ty::TyRef(_, ty, _) => !self.type_is_sized(ty), - ty::TyAdt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()), - _ => false, - } - } - - pub(super) fn eval_operand_to_scalar( - &mut self, - op: &mir::Operand<'tcx>, - ) -> EvalResult<'tcx, Scalar> { - let valty = self.eval_operand(op)?; - self.value_to_scalar(valty) - } - - pub(crate) fn operands_to_args( - &mut self, - ops: &[mir::Operand<'tcx>], - ) -> EvalResult<'tcx, Vec<ValTy<'tcx>>> { - ops.into_iter() - .map(|op| self.eval_operand(op)) - .collect() - } - - pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> { - use rustc::mir::Operand::*; - let ty = self.monomorphize(op.ty(self.mir(), *self.tcx), self.substs()); - match *op { - // FIXME: do some more logic on `move` to invalidate the old location - Copy(ref place) | - Move(ref place) => { - Ok(ValTy { - value: self.eval_and_read_place(place)?, - ty - }) - }, - - Constant(ref constant) => { - let value = self.const_to_value(constant.literal.val)?; - - Ok(ValTy { - value, - ty, - }) - } - } - } - - /// reads a tag and produces the corresponding variant index - pub fn read_discriminant_as_variant_index( - &self, - place: Place, - layout: TyLayout<'tcx>, - ) -> EvalResult<'tcx, usize> { - match layout.variants { - ty::layout::Variants::Single { index } => Ok(index), - ty::layout::Variants::Tagged { .. } => { - let discr_val = self.read_discriminant_value(place, layout)?; - layout - .ty - .ty_adt_def() - .expect("tagged layout for non adt") - .discriminants(self.tcx.tcx) - .position(|var| var.val == discr_val) - .ok_or_else(|| EvalErrorKind::InvalidDiscriminant.into()) - } - ty::layout::Variants::NicheFilling { .. } => { - let discr_val = self.read_discriminant_value(place, layout)?; - assert_eq!(discr_val as usize as u128, discr_val); - Ok(discr_val as usize) - }, - } - } - - pub fn read_discriminant_value( - &self, - place: Place, - layout: TyLayout<'tcx>, - ) -> EvalResult<'tcx, u128> { - trace!("read_discriminant_value {:#?}", layout); - if layout.abi == layout::Abi::Uninhabited { - return Ok(0); - } - - match layout.variants { - layout::Variants::Single { index } => { - let discr_val = layout.ty.ty_adt_def().map_or( - index as u128, - |def| def.discriminant_for_variant(*self.tcx, index).val); - return Ok(discr_val); - } - layout::Variants::Tagged { .. } | - layout::Variants::NicheFilling { .. } => {}, - } - let discr_place_val = self.read_place(place)?; - let (discr_val, discr) = self.read_field(discr_place_val, None, mir::Field::new(0), layout)?; - trace!("discr value: {:?}, {:?}", discr_val, discr); - let raw_discr = self.value_to_scalar(ValTy { - value: discr_val, - ty: discr.ty - })?; - let discr_val = match layout.variants { - layout::Variants::Single { .. } => bug!(), - // FIXME: should we catch invalid discriminants here? - layout::Variants::Tagged { .. } => { - if discr.ty.is_signed() { - let i = raw_discr.to_bits(discr.size)? as i128; - // going from layout tag type to typeck discriminant type - // requires first sign extending with the layout discriminant - let shift = 128 - discr.size.bits(); - let sexted = (i << shift) >> shift; - // and then zeroing with the typeck discriminant type - let discr_ty = layout - .ty - .ty_adt_def().expect("tagged layout corresponds to adt") - .repr - .discr_type(); - let discr_ty = layout::Integer::from_attr(self.tcx.tcx, discr_ty); - let shift = 128 - discr_ty.size().bits(); - let truncatee = sexted as u128; - (truncatee << shift) >> shift - } else { - raw_discr.to_bits(discr.size)?
- } - }, - layout::Variants::NicheFilling { - dataful_variant, - ref niche_variants, - niche_start, - .. - } => { - let variants_start = *niche_variants.start() as u128; - let variants_end = *niche_variants.end() as u128; - match raw_discr { - Scalar::Ptr(_) => { - assert!(niche_start == 0); - assert!(variants_start == variants_end); - dataful_variant as u128 - }, - Scalar::Bits { bits: raw_discr, size } => { - assert_eq!(size as u64, discr.size.bytes()); - let discr = raw_discr.wrapping_sub(niche_start) - .wrapping_add(variants_start); - if variants_start <= discr && discr <= variants_end { - discr - } else { - dataful_variant as u128 - } - }, - } - } - }; - - Ok(discr_val) - } - - - pub fn write_discriminant_value( - &mut self, - dest_ty: Ty<'tcx>, - dest: Place, - variant_index: usize, - ) -> EvalResult<'tcx> { - let layout = self.layout_of(dest_ty)?; - - match layout.variants { - layout::Variants::Single { index } => { - if index != variant_index { - // If the layout of an enum is `Single`, all - // other variants are necessarily uninhabited. - assert_eq!(layout.for_variant(&self, variant_index).abi, - layout::Abi::Uninhabited); - } - } - layout::Variants::Tagged { ref tag, .. } => { - let discr_val = dest_ty.ty_adt_def().unwrap() - .discriminant_for_variant(*self.tcx, variant_index) - .val; - - // raw discriminants for enums are isize or bigger during - // their computation, but the in-memory tag is the smallest possible - // representation - let size = tag.value.size(self.tcx.tcx); - let shift = 128 - size.bits(); - let discr_val = (discr_val << shift) >> shift; - - let (discr_dest, tag) = self.place_field(dest, mir::Field::new(0), layout)?; - self.write_scalar(discr_dest, Scalar::Bits { - bits: discr_val, - size: size.bytes() as u8, - }, tag.ty)?; - } - layout::Variants::NicheFilling { - dataful_variant, - ref niche_variants, - niche_start, - .. - } => { - if variant_index != dataful_variant { - let (niche_dest, niche) = - self.place_field(dest, mir::Field::new(0), layout)?; - let niche_value = ((variant_index - niche_variants.start()) as u128) - .wrapping_add(niche_start); - self.write_scalar(niche_dest, Scalar::Bits { - bits: niche_value, - size: niche.size.bytes() as u8, - }, niche.ty)?; - } - } - } - - Ok(()) - } - - pub fn read_global_as_value(&mut self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, Value> { - let cv = self.const_eval(gid)?; - self.const_to_value(cv.val) - } - pub fn const_eval(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, &'tcx ty::Const<'tcx>> { let param_env = if self.tcx.is_static(gid.instance.def_id()).is_some() { ty::ParamEnv::reveal_all() @@ -1067,492 +654,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M self.tcx.const_eval(param_env.and(gid)).map_err(|err| EvalErrorKind::ReferencedConstant(err).into()) } - pub fn allocate_place_for_value( - &mut self, - value: Value, - layout: TyLayout<'tcx>, - variant: Option<usize>, - ) -> EvalResult<'tcx, Place> { - let (ptr, align) = match value { - Value::ByRef(ptr, align) => (ptr, align), - Value::ScalarPair(..)
| Value::Scalar(_) => { - let ptr = self.alloc_ptr(layout)?.into(); - self.write_value_to_ptr(value, ptr, layout.align, layout.ty)?; - (ptr, layout.align) - }, - }; - Ok(Place::Ptr { - ptr: ptr.into(), - align, - extra: variant.map_or(PlaceExtra::None, PlaceExtra::DowncastVariant), - }) - } - - pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> { - let new_place = match place { - Place::Local { frame, local } => { - match self.stack[frame].locals[local].access()? { - Value::ByRef(ptr, align) => { - Place::Ptr { - ptr: ptr.into(), - align, - extra: PlaceExtra::None, - } - } - val => { - let ty = self.stack[frame].mir.local_decls[local].ty; - let ty = self.monomorphize(ty, self.stack[frame].instance.substs); - let layout = self.layout_of(ty)?; - let ptr = self.alloc_ptr(layout)?; - self.stack[frame].locals[local] = - LocalValue::Live(Value::ByRef(ptr.into(), layout.align)); // it stays live - - let place = Place::from_ptr(ptr, layout.align); - self.write_value(ValTy { value: val, ty }, place)?; - place - } - } - } - Place::Ptr { .. } => place, - }; - Ok(new_place) - } - - /// ensures this Value is not a ByRef - pub fn follow_by_ref_value( - &self, - value: Value, - ty: Ty<'tcx>, - ) -> EvalResult<'tcx, Value> { - match value { - Value::ByRef(ptr, align) => { - self.read_value(ptr, align, ty) - } - other => Ok(other), - } - } - - pub fn value_to_scalar( - &self, - ValTy { value, ty } : ValTy<'tcx>, - ) -> EvalResult<'tcx, Scalar> { - match self.follow_by_ref_value(value, ty)? { - Value::ByRef { .. } => bug!("follow_by_ref_value can't result in `ByRef`"), - - Value::Scalar(scalar) => scalar.unwrap_or_err(), - - Value::ScalarPair(..) => bug!("value_to_scalar can't work with fat pointers"), - } - } - - pub fn write_ptr(&mut self, dest: Place, val: Scalar, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> { - let valty = ValTy { - value: val.to_value(), - ty: dest_ty, - }; - self.write_value(valty, dest) - } - - pub fn write_scalar( - &mut self, - dest: Place, - val: impl Into<ScalarMaybeUndef>, - dest_ty: Ty<'tcx>, - ) -> EvalResult<'tcx> { - let valty = ValTy { - value: Value::Scalar(val.into()), - ty: dest_ty, - }; - self.write_value(valty, dest) - } - - pub fn write_value( - &mut self, - ValTy { value: src_val, ty: dest_ty } : ValTy<'tcx>, - dest: Place, - ) -> EvalResult<'tcx> { - //trace!("Writing {:?} to {:?} at type {:?}", src_val, dest, dest_ty); - // Note that it is really important that the type here is the right one, and matches the type things are read at. - // In case `src_val` is a `ScalarPair`, we don't do any magic here to handle padding properly, which is only - // correct if we never look at this data with the wrong type. - - match dest { - Place::Ptr { ptr, align, extra } => { - assert_eq!(extra, PlaceExtra::None); - self.write_value_to_ptr(src_val, ptr.unwrap_or_err()?, align, dest_ty) - } - - Place::Local { frame, local } => { - let old_val = self.stack[frame].locals[local].access()?; - self.write_value_possibly_by_val( - src_val, - |this, val| this.stack[frame].set_local(local, val), - old_val, - dest_ty, - ) - } - } - } - - // The cases here can be a bit subtle. Read carefully!
- fn write_value_possibly_by_val<F: FnOnce(&mut Self, Value) -> EvalResult<'tcx>>( - &mut self, - src_val: Value, - write_dest: F, - old_dest_val: Value, - dest_ty: Ty<'tcx>, - ) -> EvalResult<'tcx> { - // FIXME: this should be a layout check, not underlying value - if let Value::ByRef(dest_ptr, align) = old_dest_val { - // If the value is already `ByRef` (that is, backed by an `Allocation`), - // then we must write the new value into this allocation, because there may be - // other pointers into the allocation. These other pointers are logically - // pointers into the local variable, and must be able to observe the change. - // - // Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we - // knew for certain that there were no outstanding pointers to this allocation. - self.write_value_to_ptr(src_val, dest_ptr, align, dest_ty)?; - } else if let Value::ByRef(src_ptr, align) = src_val { - // If the value is not `ByRef`, then we know there are no pointers to it - // and we can simply overwrite the `Value` in the locals array directly. - // - // In this specific case, where the source value is `ByRef`, we must duplicate - // the allocation, because this is a by-value operation. It would be incorrect - // if they referred to the same allocation, since then a change to one would - // implicitly change the other. - // - // It is a valid optimization to attempt reading a primitive value out of the - // source and write that into the destination without making an allocation, so - // we do so here. - if let Ok(Some(src_val)) = self.try_read_value(src_ptr, align, dest_ty) { - write_dest(self, src_val)?; - } else { - let layout = self.layout_of(dest_ty)?; - let dest_ptr = self.alloc_ptr(layout)?.into(); - self.memory.copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size, false)?; - write_dest(self, Value::ByRef(dest_ptr, layout.align))?; - } - } else { - // Finally, we have the simple case where neither source nor destination are - // `ByRef`. We may simply copy the source value over the the destintion. - write_dest(self, src_val)?; - } - Ok(()) - } - - pub fn write_value_to_ptr( - &mut self, - value: Value, - dest: Scalar, - dest_align: Align, - dest_ty: Ty<'tcx>, - ) -> EvalResult<'tcx> { - let layout = self.layout_of(dest_ty)?; - trace!("write_value_to_ptr: {:#?}, {}, {:#?}", value, dest_ty, layout); - match value { - Value::ByRef(ptr, align) => { - self.memory.copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size, false) - } - Value::Scalar(scalar) => { - let signed = match layout.abi { - layout::Abi::Scalar(ref scal) => match scal.value { - layout::Primitive::Int(_, signed) => signed, - _ => false, - }, - _ => false, - }; - self.memory.write_scalar(dest, dest_align, scalar, layout.size, layout.align, signed) - } - Value::ScalarPair(a_val, b_val) => { - trace!("write_value_to_ptr valpair: {:#?}", layout); - let (a, b) = match layout.abi { - layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value), - _ => bug!("write_value_to_ptr: invalid ScalarPair layout: {:#?}", layout) - }; - let (a_size, b_size) = (a.size(&self), b.size(&self)); - let (a_align, b_align) = (a.align(&self), b.align(&self)); - let a_ptr = dest; - let b_offset = a_size.abi_align(b_align); - let b_ptr = dest.ptr_offset(b_offset, &self)?.into(); - // TODO: What about signedess?
- self.memory.write_scalar(a_ptr, dest_align, a_val, a_size, a_align, false)?; - self.memory.write_scalar(b_ptr, dest_align, b_val, b_size, b_align, false) - } - } - } - - pub fn read_value(&self, ptr: Scalar, align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> { - if let Some(val) = self.try_read_value(ptr, align, ty)? { - Ok(val) - } else { - bug!("primitive read failed for type: {:?}", ty); - } - } - - fn validate_scalar( - &self, - value: ScalarMaybeUndef, - size: Size, - scalar: &layout::Scalar, - path: &str, - ty: Ty, - ) -> EvalResult<'tcx> { - trace!("validate scalar: {:#?}, {:#?}, {:#?}, {}", value, size, scalar, ty); - let (lo, hi) = scalar.valid_range.clone().into_inner(); - - let value = match value { - ScalarMaybeUndef::Scalar(scalar) => scalar, - ScalarMaybeUndef::Undef => return validation_failure!("undefined bytes", path), - }; - - let bits = match value { - Scalar::Bits { bits, size: value_size } => { - assert_eq!(value_size as u64, size.bytes()); - bits - }, - Scalar::Ptr(_) => { - let ptr_size = self.memory.pointer_size(); - let ptr_max = u128::max_value() >> (128 - ptr_size.bits()); - return if lo > hi { - if lo - hi == 1 { - // no gap, all values are ok - Ok(()) - } else if hi < ptr_max || lo > 1 { - let max = u128::max_value() >> (128 - size.bits()); - validation_failure!( - "pointer", - path, - format!("something in the range {:?} or {:?}", 0..=lo, hi..=max) - ) - } else { - Ok(()) - } - } else if hi < ptr_max || lo > 1 { - validation_failure!( - "pointer", - path, - format!("something in the range {:?}", scalar.valid_range) - ) - } else { - Ok(()) - }; - }, - }; - - // char gets a special treatment, because its number space is not contiguous so `TyLayout` - // has no special checks for chars - match ty.sty { - ty::TyChar => { - debug_assert_eq!(size.bytes(), 4); - if ::std::char::from_u32(bits as u32).is_none() { - return err!(InvalidChar(bits)); - } - } - _ => {}, - } - - use std::ops::RangeInclusive; - let in_range = |bound: RangeInclusive<u128>| bound.contains(&bits); - if lo > hi { - if in_range(0..=hi) || in_range(lo..=u128::max_value()) { - Ok(()) - } else { - validation_failure!( - bits, - path, - format!("something in the range {:?} or {:?}", ..=hi, lo..) - ) - } - } else { - if in_range(scalar.valid_range.clone()) { - Ok(()) - } else { - validation_failure!( - bits, - path, - format!("something in the range {:?}", scalar.valid_range) - ) - } - } - } - - /// This function checks the memory where `ptr` points to. - /// It will error if the bits at the destination do not match the ones described by the layout. - pub fn validate_ptr_target( - &self, - ptr: Pointer, - ptr_align: Align, - mut layout: TyLayout<'tcx>, - path: String, - seen: &mut FxHashSet<(Pointer, Ty<'tcx>)>, - todo: &mut Vec<(Pointer, Ty<'tcx>, String)>, - ) -> EvalResult<'tcx> { - self.memory.dump_alloc(ptr.alloc_id); - trace!("validate_ptr_target: {:?}, {:#?}", ptr, layout); - - let variant; - match layout.variants { - layout::Variants::NicheFilling { niche: ref tag, .. } | - layout::Variants::Tagged { ref tag, .. } => { - let size = tag.value.size(self); - let (tag_value, tag_layout) = self.read_field( - Value::ByRef(ptr.into(), ptr_align), - None, - mir::Field::new(0), - layout, - )?; - let tag_value = match self.follow_by_ref_value(tag_value, tag_layout.ty)?
{ - Value::Scalar(val) => val, - _ => bug!("tag must be scalar"), - }; - let path = format!("{}.TAG", path); - self.validate_scalar(tag_value, size, tag, &path, tag_layout.ty)?; - let variant_index = self.read_discriminant_as_variant_index( - Place::from_ptr(ptr, ptr_align), - layout, - )?; - variant = variant_index; - layout = layout.for_variant(self, variant_index); - trace!("variant layout: {:#?}", layout); - }, - layout::Variants::Single { index } => variant = index, - } - match layout.fields { - // primitives are unions with zero fields - layout::FieldPlacement::Union(0) => { - match layout.abi { - // nothing to do, whatever the pointer points to, it is never going to be read - layout::Abi::Uninhabited => validation_failure!("a value of an uninhabited type", path), - // check that the scalar is a valid pointer or that its bit range matches the - // expectation. - layout::Abi::Scalar(ref scalar) => { - let size = scalar.value.size(self); - let value = self.memory.read_scalar(ptr, ptr_align, size)?; - self.validate_scalar(value, size, scalar, &path, layout.ty)?; - if scalar.value == Primitive::Pointer { - // ignore integer pointers, we can't reason about the final hardware - if let Scalar::Ptr(ptr) = value.unwrap_or_err()? { - let alloc_kind = self.tcx.alloc_map.lock().get(ptr.alloc_id); - if let Some(AllocType::Static(did)) = alloc_kind { - // statics from other crates are already checked - // extern statics should not be validated as they have no body - if !did.is_local() || self.tcx.is_foreign_item(did) { - return Ok(()); - } - } - if let Some(tam) = layout.ty.builtin_deref(false) { - // we have not encountered this pointer+layout combination before - if seen.insert((ptr, tam.ty)) { - todo.push((ptr, tam.ty, format!("(*{})", path))) - } - } - } - } - Ok(()) - }, - _ => bug!("bad abi for FieldPlacement::Union(0): {:#?}", layout.abi), - } - } - layout::FieldPlacement::Union(_) => { - // We can't check unions, their bits are allowed to be anything. - // The fields don't need to correspond to any bit pattern of the union's fields. - // See https://github.com/rust-lang/rust/issues/32836#issuecomment-406875389 - Ok(()) - }, - layout::FieldPlacement::Array { stride, count } => { - let elem_layout = layout.field(self, 0)?; - for i in 0..count { - let mut path = path.clone(); - self.write_field_name(&mut path, layout.ty, i as usize, variant).unwrap(); - self.validate_ptr_target(ptr.offset(stride * i, self)?, ptr_align, elem_layout, path, seen, todo)?; - } - Ok(()) - }, - layout::FieldPlacement::Arbitrary { ref offsets, .. 
} => { - - // check length field and vtable field - match layout.ty.builtin_deref(false).map(|tam| &tam.ty.sty) { - | Some(ty::TyStr) - | Some(ty::TySlice(_)) => { - let (len, len_layout) = self.read_field( - Value::ByRef(ptr.into(), ptr_align), - None, - mir::Field::new(1), - layout, - )?; - let len = self.value_to_scalar(ValTy { value: len, ty: len_layout.ty })?; - if len.to_bits(len_layout.size).is_err() { - return validation_failure!("length is not a valid integer", path); - } - }, - Some(ty::TyDynamic(..)) => { - let (vtable, vtable_layout) = self.read_field( - Value::ByRef(ptr.into(), ptr_align), - None, - mir::Field::new(1), - layout, - )?; - let vtable = self.value_to_scalar(ValTy { value: vtable, ty: vtable_layout.ty })?; - if vtable.to_ptr().is_err() { - return validation_failure!("vtable address is not a pointer", path); - } - } - _ => {}, - } - for (i, &offset) in offsets.iter().enumerate() { - let field_layout = layout.field(self, i)?; - let mut path = path.clone(); - self.write_field_name(&mut path, layout.ty, i, variant).unwrap(); - self.validate_ptr_target(ptr.offset(offset, self)?, ptr_align, field_layout, path, seen, todo)?; - } - Ok(()) - } - } - } - - pub fn try_read_by_ref(&self, mut val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> { - // Convert to ByVal or ScalarPair if possible - if let Value::ByRef(ptr, align) = val { - if let Some(read_val) = self.try_read_value(ptr, align, ty)? { - val = read_val; - } - } - Ok(val) - } - - pub fn try_read_value(&self, ptr: Scalar, ptr_align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> { - let layout = self.layout_of(ty)?; - self.memory.check_align(ptr, ptr_align)?; - - if layout.size.bytes() == 0 { - return Ok(Some(Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits: 0, size: 0 })))); - } - - let ptr = ptr.to_ptr()?; - - match layout.abi { - layout::Abi::Scalar(..)
=> { - let scalar = self.memory.read_scalar(ptr, ptr_align, layout.size)?; - Ok(Some(Value::Scalar(scalar))) - } - layout::Abi::ScalarPair(ref a, ref b) => { - let (a, b) = (&a.value, &b.value); - let (a_size, b_size) = (a.size(self), b.size(self)); - let a_ptr = ptr; - let b_offset = a_size.abi_align(b.align(self)); - let b_ptr = ptr.offset(b_offset, self)?.into(); - let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?; - let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?; - Ok(Some(Value::ScalarPair(a_val, b_val))) - } - _ => Ok(None), - } - } - + #[inline(always)] pub fn frame(&self) -> &Frame<'mir, 'tcx> { self.stack.last().expect("no call frames exist") } + #[inline(always)] pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx> { self.stack.last_mut().expect("no call frames exist") } @@ -1569,132 +676,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } } - fn unsize_into_ptr( - &mut self, - src: Value, - src_ty: Ty<'tcx>, - dest: Place, - dest_ty: Ty<'tcx>, - sty: Ty<'tcx>, - dty: Ty<'tcx>, - ) -> EvalResult<'tcx> { - // A -> A conversion - let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty); - - match (&src_pointee_ty.sty, &dest_pointee_ty.sty) { - (&ty::TyArray(_, length), &ty::TySlice(_)) => { - let ptr = self.into_ptr(src)?; - // u64 cast is from usize to u64, which is always good - let valty = ValTy { - value: ptr.to_value_with_len(length.unwrap_usize(self.tcx.tcx), self.tcx.tcx), - ty: dest_ty, - }; - self.write_value(valty, dest) - } - (&ty::TyDynamic(..), &ty::TyDynamic(..)) => { - // For now, upcasts are limited to changes in marker - // traits, and hence never actually require an actual - // change to the vtable. - let valty = ValTy { - value: src, - ty: dest_ty, - }; - self.write_value(valty, dest) - } - (_, &ty::TyDynamic(ref data, _)) => { - let trait_ref = data.principal().unwrap().with_self_ty( - *self.tcx, - src_pointee_ty, - ); - let trait_ref = self.tcx.erase_regions(&trait_ref); - let vtable = self.get_vtable(src_pointee_ty, trait_ref)?; - let ptr = self.into_ptr(src)?; - let valty = ValTy { - value: ptr.to_value_with_vtable(vtable), - ty: dest_ty, - }; - self.write_value(valty, dest) - } - - _ => bug!("invalid unsizing {:?} -> {:?}", src_ty, dest_ty), - } - } - - crate fn unsize_into( - &mut self, - src: Value, - src_layout: TyLayout<'tcx>, - dst: Place, - dst_layout: TyLayout<'tcx>, - ) -> EvalResult<'tcx> { - match (&src_layout.ty.sty, &dst_layout.ty.sty) { - (&ty::TyRef(_, s, _), &ty::TyRef(_, d, _)) | - (&ty::TyRef(_, s, _), &ty::TyRawPtr(TypeAndMut { ty: d, .. })) | - (&ty::TyRawPtr(TypeAndMut { ty: s, .. }), - &ty::TyRawPtr(TypeAndMut { ty: d, .. 
})) => { - self.unsize_into_ptr(src, src_layout.ty, dst, dst_layout.ty, s, d) - } - (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => { - assert_eq!(def_a, def_b); - if def_a.is_box() || def_b.is_box() { - if !def_a.is_box() || !def_b.is_box() { - bug!("invalid unsizing between {:?} -> {:?}", src_layout, dst_layout); - } - return self.unsize_into_ptr( - src, - src_layout.ty, - dst, - dst_layout.ty, - src_layout.ty.boxed_ty(), - dst_layout.ty.boxed_ty(), - ); - } - - // unsizing of generic struct with pointer fields - // Example: `Arc<T>` -> `Arc<Trait>` - // here we need to increase the size of every &T thin ptr field to a fat ptr - for i in 0..src_layout.fields.count() { - let (dst_f_place, dst_field) = - self.place_field(dst, mir::Field::new(i), dst_layout)?; - if dst_field.is_zst() { - continue; - } - let (src_f_value, src_field) = match src { - Value::ByRef(ptr, align) => { - let src_place = Place::from_scalar_ptr(ptr.into(), align); - let (src_f_place, src_field) = - self.place_field(src_place, mir::Field::new(i), src_layout)?; - (self.read_place(src_f_place)?, src_field) - } - Value::Scalar(_) | Value::ScalarPair(..) => { - let src_field = src_layout.field(&self, i)?; - assert_eq!(src_layout.fields.offset(i).bytes(), 0); - assert_eq!(src_field.size, src_layout.size); - (src, src_field) - } - }; - if src_field.ty == dst_field.ty { - self.write_value(ValTy { - value: src_f_value, - ty: src_field.ty, - }, dst_f_place)?; - } else { - self.unsize_into(src_f_value, src_field, dst_f_place, dst_field)?; - } - } - Ok(()) - } - _ => { - bug!( - "unsize_into: invalid conversion: {:?} -> {:?}", - src_layout, - dst_layout - ) - } - } - } - - pub fn dump_local(&self, place: Place) { + pub fn dump_place(&self, place: Place) { // Debug output if !log_enabled!(::log::Level::Trace) { return; @@ -1716,22 +698,23 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M panic!("Failed to access local: {:?}", err); } } - Ok(Value::ByRef(ptr, align)) => { + Ok(Operand::Indirect(mplace)) => { + let (ptr, align) = mplace.to_scalar_ptr_align(); match ptr { Scalar::Ptr(ptr) => { write!(msg, " by align({}) ref:", align.abi()).unwrap(); allocs.push(ptr.alloc_id); } - ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(), + ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(), } } - Ok(Value::Scalar(val)) => { + Ok(Operand::Immediate(Value::Scalar(val))) => { write!(msg, " {:?}", val).unwrap(); if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val { allocs.push(ptr.alloc_id); } } - Ok(Value::ScalarPair(val1, val2)) => { + Ok(Operand::Immediate(Value::ScalarPair(val1, val2))) => { write!(msg, " ({:?}, {:?})", val1, val2).unwrap(); if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val1 { allocs.push(ptr.alloc_id); @@ -1745,10 +728,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M trace!("{}", msg); self.memory.dump_allocs(allocs); } - Place::Ptr { ptr, align, ..
} => { - match ptr { - ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) => { - trace!("by align({}) ref:", align.abi()); + Place::Ptr(mplace) => { + match mplace.ptr { + Scalar::Ptr(ptr) => { + trace!("by align({}) ref:", mplace.align.abi()); self.memory.dump_alloc(ptr.alloc_id); } ptr => trace!(" integral by ref: {:?}", ptr), @@ -1795,119 +778,15 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M (frames, self.tcx.span) } + #[inline(always)] pub fn sign_extend(&self, value: u128, ty: TyLayout<'_>) -> u128 { - super::sign_extend(value, ty) + assert!(ty.abi.is_signed()); + sign_extend(value, ty.size) } + #[inline(always)] pub fn truncate(&self, value: u128, ty: TyLayout<'_>) -> u128 { - super::truncate(value, ty) - } - - fn write_field_name(&self, s: &mut String, ty: Ty<'tcx>, i: usize, variant: usize) -> ::std::fmt::Result { - match ty.sty { - ty::TyBool | - ty::TyChar | - ty::TyInt(_) | - ty::TyUint(_) | - ty::TyFloat(_) | - ty::TyFnPtr(_) | - ty::TyNever | - ty::TyFnDef(..) | - ty::TyGeneratorWitness(..) | - ty::TyForeign(..) | - ty::TyDynamic(..) => { - bug!("field_name({:?}): not applicable", ty) - } - - // Potentially-fat pointers. - ty::TyRef(_, pointee, _) | - ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => { - assert!(i < 2); - - // Reuse the fat *T type as its own thin pointer data field. - // This provides information about e.g. DST struct pointees - // (which may have no non-DST form), and will work as long - // as the `Abi` or `FieldPlacement` is checked by users. - if i == 0 { - return write!(s, ".data_ptr"); - } - - match self.tcx.struct_tail(pointee).sty { - ty::TySlice(_) | - ty::TyStr => write!(s, ".len"), - ty::TyDynamic(..) => write!(s, ".vtable_ptr"), - _ => bug!("field_name({:?}): not applicable", ty) - } - } - - // Arrays and slices. - ty::TyArray(_, _) | - ty::TySlice(_) | - ty::TyStr => write!(s, "[{}]", i), - - // generators and closures. - ty::TyClosure(def_id, _) | ty::TyGenerator(def_id, _, _) => { - let node_id = self.tcx.hir.as_local_node_id(def_id).unwrap(); - let freevar = self.tcx.with_freevars(node_id, |fv| fv[i]); - write!(s, ".upvar({})", self.tcx.hir.name(freevar.var_id())) - } - - ty::TyTuple(_) => write!(s, ".{}", i), - - // enums - ty::TyAdt(def, ..) if def.is_enum() => { - let variant = &def.variants[variant]; - write!(s, ".{}::{}", variant.name, variant.fields[i].ident) - } - - // other ADTs. - ty::TyAdt(def, _) => write!(s, ".{}", def.non_enum_variant().fields[i].ident), - - ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) | - ty::TyInfer(_) | ty::TyError => { - bug!("write_field_name: unexpected type `{}`", ty) - } - } - } - - pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, LocalValue> { - trace!("{:?} is now live", local); - - let ty = self.frame().mir.local_decls[local].ty; - let init = self.init_value(ty)?; - // StorageLive *always* kills the value that's currently stored - Ok(mem::replace(&mut self.frame_mut().locals[local], LocalValue::Live(init))) - } - - fn init_value(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> { - let ty = self.monomorphize(ty, self.substs()); - let layout = self.layout_of(ty)?; - Ok(match layout.abi { - layout::Abi::Scalar(..) => Value::Scalar(ScalarMaybeUndef::Undef), - layout::Abi::ScalarPair(..) 
=> Value::ScalarPair( - ScalarMaybeUndef::Undef, - ScalarMaybeUndef::Undef, - ), - _ => Value::ByRef(self.alloc_ptr(layout)?.into(), layout.align), - }) + truncate(value, ty.size) } } -impl<'mir, 'tcx> Frame<'mir, 'tcx> { - fn set_local(&mut self, local: mir::Local, value: Value) -> EvalResult<'tcx> { - match self.locals[local] { - LocalValue::Dead => err!(DeadLocal), - LocalValue::Live(ref mut local) => { - *local = value; - Ok(()) - } - } - } - - /// Returns the old value of the local - pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue { - trace!("{:?} is now dead", local); - - mem::replace(&mut self.locals[local], LocalValue::Dead) - } -} diff --git a/src/librustc_mir/interpret/machine.rs b/src/librustc_mir/interpret/machine.rs index 112d8759c6832..84556c7249dbd 100644 --- a/src/librustc_mir/interpret/machine.rs +++ b/src/librustc_mir/interpret/machine.rs @@ -5,10 +5,10 @@ use std::hash::Hash; use rustc::mir::interpret::{AllocId, EvalResult, Scalar, Pointer, AccessKind, GlobalId}; -use super::{EvalContext, Place, ValTy, Memory}; +use super::{EvalContext, PlaceTy, OpTy, Memory}; use rustc::mir; -use rustc::ty::{self, Ty}; +use rustc::ty::{self, layout::TyLayout}; use rustc::ty::layout::Size; use syntax::source_map::Span; use syntax::ast::Mutability; @@ -31,19 +31,17 @@ pub trait Machine<'mir, 'tcx>: Clone + Eq + Hash { fn eval_fn_call<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, - destination: Option<(Place, mir::BasicBlock)>, - args: &[ValTy<'tcx>], + destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>, + args: &[OpTy<'tcx>], span: Span, - sig: ty::FnSig<'tcx>, ) -> EvalResult<'tcx, bool>; /// directly process an intrinsic without pushing a stack frame. fn call_intrinsic<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, - args: &[ValTy<'tcx>], - dest: Place, - dest_layout: ty::layout::TyLayout<'tcx>, + args: &[OpTy<'tcx>], + dest: PlaceTy<'tcx>, target: mir::BasicBlock, ) -> EvalResult<'tcx>; @@ -57,9 +55,9 @@ pub trait Machine<'mir, 'tcx>: Clone + Eq + Hash { ecx: &EvalContext<'a, 'mir, 'tcx, Self>, bin_op: mir::BinOp, left: Scalar, - left_ty: Ty<'tcx>, + left_layout: TyLayout<'tcx>, right: Scalar, - right_ty: Ty<'tcx>, + right_layout: TyLayout<'tcx>, ) -> EvalResult<'tcx, Option<(Scalar, bool)>>; /// Called when trying to mark machine defined `MemoryKinds` as static @@ -81,8 +79,7 @@ pub trait Machine<'mir, 'tcx>: Clone + Eq + Hash { /// Returns a pointer to the allocated memory fn box_alloc<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, - ty: Ty<'tcx>, - dest: Place, + dest: PlaceTy<'tcx>, ) -> EvalResult<'tcx>; /// Called when trying to access a global declared with a `linkage` attribute diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 636b04a8d16da..89c308d87ef28 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -1,3 +1,11 @@ +//! The memory subsystem. +//! +//! Generally, we use `Pointer` to denote memory addresses. However, some operations +//! have a "size"-like parameter, and they take `Scalar` for the address because +//! if the size is 0, then the pointer can also be a (properly aligned, non-NULL) +//! integer. It is crucial that these operations call `check_align` *before* +//! short-circuiting the empty case! 
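The zero-size rule in the module doc above is easy to get wrong, so here is a minimal sketch of it under stand-in types (bare integer addresses and `String` errors instead of the real `Pointer`/`EvalResult`); the real `Memory` methods follow the same order of checks:

fn check_align(addr: u64, required_align: u64) -> Result<(), String> {
    if addr == 0 {
        return Err("NULL pointer".to_string());
    }
    if addr % required_align != 0 {
        return Err(format!("misaligned: address {} is not a multiple of {}", addr, required_align));
    }
    Ok(())
}

// Even a zero-sized access must use an aligned, non-NULL address:
// the alignment check runs *before* the empty case short-circuits.
fn read_bytes(addr: u64, size: u64, align: u64) -> Result<Vec<u8>, String> {
    check_align(addr, align)?;
    if size == 0 {
        return Ok(Vec::new());
    }
    // A real implementation would look up the allocation here.
    Err("no allocations in this sketch".to_string())
}

fn main() {
    assert!(read_bytes(8, 0, 8).is_ok());  // aligned integer address, size 0: allowed
    assert!(read_bytes(0, 0, 1).is_err()); // NULL is rejected even for size 0
    assert!(read_bytes(6, 0, 4).is_err()); // misalignment is rejected even for size 0
}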
+ use std::collections::VecDeque; use std::hash::{Hash, Hasher}; use std::ptr; @@ -7,15 +15,16 @@ use rustc::ty::Instance; use rustc::ty::ParamEnv; use rustc::ty::query::TyCtxtAt; use rustc::ty::layout::{self, Align, TargetDataLayout, Size}; -use rustc::mir::interpret::{Pointer, AllocId, Allocation, AccessKind, Value, ScalarMaybeUndef, - EvalResult, Scalar, EvalErrorKind, GlobalId, AllocType}; -pub use rustc::mir::interpret::{write_target_uint, write_target_int, read_target_uint}; +use rustc::mir::interpret::{Pointer, AllocId, Allocation, AccessKind, ScalarMaybeUndef, + EvalResult, Scalar, EvalErrorKind, GlobalId, AllocType, truncate}; +pub use rustc::mir::interpret::{write_target_uint, read_target_uint}; use rustc_data_structures::fx::{FxHashSet, FxHashMap, FxHasher}; use syntax::ast::Mutability; use super::{EvalContext, Machine}; + //////////////////////////////////////////////////////////////////////////////// // Allocations and pointers //////////////////////////////////////////////////////////////////////////////// @@ -43,9 +52,6 @@ pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { /// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations). alloc_map: FxHashMap<AllocId, Allocation>, - /// The current stack frame. Used to check accesses against locks. - pub cur_frame: usize, - pub tcx: TyCtxtAt<'a, 'tcx, 'tcx>, } @@ -63,14 +69,12 @@ impl<'a, 'mir, 'tcx, M> PartialEq for Memory<'a, 'mir, 'tcx, M> data, alloc_kind, alloc_map, - cur_frame, tcx: _, } = self; *data == other.data && *alloc_kind == other.alloc_kind && *alloc_map == other.alloc_map - && *cur_frame == other.cur_frame } } @@ -83,12 +87,10 @@ impl<'a, 'mir, 'tcx, M> Hash for Memory<'a, 'mir, 'tcx, M> data, alloc_kind: _, alloc_map: _, - cur_frame, tcx: _, } = self; data.hash(state); - cur_frame.hash(state); // We ignore some fields which don't change between evaluation steps. @@ -114,7 +116,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { alloc_kind: FxHashMap::default(), alloc_map: FxHashMap::default(), tcx, - cur_frame: usize::max_value(), } } @@ -264,7 +265,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { self.tcx.data_layout.endian } - /// Check that the pointer is aligned AND non-NULL. + /// Check that the pointer is aligned AND non-NULL. This supports scalars + /// for the benefit of other parts of miri that need to check alignment even for ZST. pub fn check_align(&self, ptr: Scalar, required_align: Align) -> EvalResult<'tcx> { // Check non-NULL/Undef, extract offset let (offset, alloc_align) = match ptr { @@ -301,6 +303,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } } + /// Check if the pointer is "in-bounds". Notice that a pointer pointing at the end + /// of an allocation (i.e., at the first *inaccessible* location) *is* considered
 + /// in-bounds! This follows C's/LLVM's rules.
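Before the function itself, a hedged sketch that makes the one-past-the-end rule concrete (a simplified signature with plain integers instead of `Pointer` and an allocation lookup):

// offset: byte offset of the pointer inside its allocation;
// allocation_size: size of that allocation in bytes.
fn bounds_ok(offset: u64, allocation_size: u64) -> Result<(), &'static str> {
    if offset > allocation_size {
        return Err("pointer outside bounds of allocation");
    }
    // offset == allocation_size is the one-past-the-end pointer: in-bounds.
    Ok(())
}

fn main() {
    assert!(bounds_ok(16, 16).is_ok());  // one past the end: still in-bounds
    assert!(bounds_ok(17, 16).is_err()); // anything beyond that is out of bounds
}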
pub fn check_bounds(&self, ptr: Pointer, access: bool) -> EvalResult<'tcx> { let alloc = self.get(ptr.alloc_id)?; let allocation_size = alloc.bytes.len() as u64; @@ -331,7 +336,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { assert!(self.tcx.is_static(def_id).is_some()); EvalErrorKind::ReferencedConstant(err).into() }).map(|val| { - self.tcx.const_value_to_allocation(val) + self.tcx.const_to_allocation(val) }) } @@ -499,6 +504,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { /// Byte accessors impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { + /// This checks alignment! fn get_bytes_unchecked( &self, ptr: Pointer, @@ -519,6 +525,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(&alloc.bytes[offset..offset + size.bytes() as usize]) } + /// This checks alignment! fn get_bytes_unchecked_mut( &mut self, ptr: Pointer, @@ -556,7 +563,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { ) -> EvalResult<'tcx, &mut [u8]> { assert_ne!(size.bytes(), 0); self.clear_relocations(ptr, size)?; - self.mark_definedness(ptr.into(), size, true)?; + self.mark_definedness(ptr, size, true)?; self.get_bytes_unchecked_mut(ptr, size, align) } } @@ -635,10 +642,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { length: u64, nonoverlapping: bool, ) -> EvalResult<'tcx> { - // Empty accesses don't need to be valid pointers, but they should still be aligned - self.check_align(src, src_align)?; - self.check_align(dest, dest_align)?; if size.bytes() == 0 { + // Nothing to do for ZST, other than checking alignment and non-NULLness. + self.check_align(src, src_align)?; + self.check_align(dest, dest_align)?; return Ok(()); } let src = src.to_ptr()?; @@ -664,6 +671,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { new_relocations }; + // This also checks alignment. 
let src_bytes = self.get_bytes_unchecked(src, size, src_align)?.as_ptr(); let dest_bytes = self.get_bytes_mut(dest, size * length, dest_align)?.as_mut_ptr(); @@ -721,8 +729,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub fn read_bytes(&self, ptr: Scalar, size: Size) -> EvalResult<'tcx, &[u8]> { // Empty accesses don't need to be valid pointers, but they should still be non-NULL let align = Align::from_bytes(1, 1).unwrap(); - self.check_align(ptr, align)?; if size.bytes() == 0 { + self.check_align(ptr, align)?; return Ok(&[]); } self.get_bytes(ptr.to_ptr()?, size, align) @@ -731,8 +739,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub fn write_bytes(&mut self, ptr: Scalar, src: &[u8]) -> EvalResult<'tcx> { // Empty accesses don't need to be valid pointers, but they should still be non-NULL let align = Align::from_bytes(1, 1).unwrap(); - self.check_align(ptr, align)?; if src.is_empty() { + self.check_align(ptr, align)?; return Ok(()); } let bytes = self.get_bytes_mut(ptr.to_ptr()?, Size::from_bytes(src.len() as u64), align)?; @@ -743,8 +751,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub fn write_repeat(&mut self, ptr: Scalar, val: u8, count: Size) -> EvalResult<'tcx> { // Empty accesses don't need to be valid pointers, but they should still be non-NULL let align = Align::from_bytes(1, 1).unwrap(); - self.check_align(ptr, align)?; if count.bytes() == 0 { + self.check_align(ptr, align)?; return Ok(()); } let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, align)?; @@ -754,9 +762,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(()) } + /// Read a *non-ZST* scalar pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size) -> EvalResult<'tcx, ScalarMaybeUndef> { self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer let endianness = self.endianness(); + // get_bytes_unchecked tests alignment let bytes = self.get_bytes_unchecked(ptr, size, ptr_align.min(self.int_align(size)))?; // Undef check happens *after* we established that the alignment is correct. // We must not return Ok() for unaligned pointers! @@ -789,17 +799,15 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { self.read_scalar(ptr, ptr_align, self.pointer_size()) } + /// Write a *non-ZST* scalar pub fn write_scalar( &mut self, - ptr: Scalar, + ptr: Pointer, ptr_align: Align, val: ScalarMaybeUndef, type_size: Size, - type_align: Align, - signed: bool, ) -> EvalResult<'tcx> { let endianness = self.endianness(); - self.check_align(ptr, ptr_align)?; let val = match val { ScalarMaybeUndef::Scalar(scalar) => scalar, @@ -812,27 +820,18 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { val.offset.bytes() as u128 } - Scalar::Bits { size: 0, .. 
} => { - // nothing to do for ZSTs - assert_eq!(type_size.bytes(), 0); - return Ok(()); - } - Scalar::Bits { bits, size } => { assert_eq!(size as u64, type_size.bytes()); + assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits, + "Unexpected value of size {} when writing to memory", size); bits }, }; - let ptr = ptr.to_ptr()?; - { - let dst = self.get_bytes_mut(ptr, type_size, ptr_align.min(type_align))?; - if signed { - write_target_int(endianness, dst, bytes as i128).unwrap(); - } else { - write_target_uint(endianness, dst, bytes).unwrap(); - } + // get_bytes_mut checks alignment + let dst = self.get_bytes_mut(ptr, type_size, ptr_align)?; + write_target_uint(endianness, dst, bytes).unwrap(); } // See if we have to also write a relocation @@ -849,9 +848,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(()) } - pub fn write_ptr_sized_unsigned(&mut self, ptr: Pointer, ptr_align: Align, val: ScalarMaybeUndef) -> EvalResult<'tcx> { + pub fn write_ptr_sized(&mut self, ptr: Pointer, ptr_align: Align, val: ScalarMaybeUndef) -> EvalResult<'tcx> { let ptr_size = self.pointer_size(); - self.write_scalar(ptr.into(), ptr_align, val, ptr_size, ptr_align, false) + self.write_scalar(ptr.into(), ptr_align, val, ptr_size) } fn int_align(&self, size: Size) -> Align { @@ -967,14 +966,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub fn mark_definedness( &mut self, - ptr: Scalar, + ptr: Pointer, size: Size, new_state: bool, ) -> EvalResult<'tcx> { if size.bytes() == 0 { return Ok(()); } - let ptr = ptr.to_ptr()?; let alloc = self.get_mut(ptr.alloc_id)?; alloc.undef_mask.set_range( ptr.offset, @@ -992,63 +990,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub trait HasMemory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M>; fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M>; - - /// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef, - /// this may have to perform a load. - fn into_ptr( - &self, - value: Value, - ) -> EvalResult<'tcx, ScalarMaybeUndef> { - Ok(match value { - Value::ByRef(ptr, align) => { - self.memory().read_ptr_sized(ptr.to_ptr()?, align)? - } - Value::Scalar(ptr) | - Value::ScalarPair(ptr, _) => ptr, - }.into()) - } - - fn into_ptr_vtable_pair( - &self, - value: Value, - ) -> EvalResult<'tcx, (ScalarMaybeUndef, Pointer)> { - match value { - Value::ByRef(ref_ptr, align) => { - let mem = self.memory(); - let ptr = mem.read_ptr_sized(ref_ptr.to_ptr()?, align)?.into(); - let vtable = mem.read_ptr_sized( - ref_ptr.ptr_offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?, - align - )?.unwrap_or_err()?.to_ptr()?; - Ok((ptr, vtable)) - } - - Value::ScalarPair(ptr, vtable) => Ok((ptr, vtable.unwrap_or_err()?.to_ptr()?)), - _ => bug!("expected ptr and vtable, got {:?}", value), - } - } - - fn into_slice( - &self, - value: Value, - ) -> EvalResult<'tcx, (ScalarMaybeUndef, u64)> { - match value { - Value::ByRef(ref_ptr, align) => { - let mem = self.memory(); - let ptr = mem.read_ptr_sized(ref_ptr.to_ptr()?, align)?.into(); - let len = mem.read_ptr_sized( - ref_ptr.ptr_offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?, - align - )?.unwrap_or_err()?.to_bits(mem.pointer_size())? 
as u64; - Ok((ptr, len)) - } - Value::ScalarPair(ptr, val) => { - let len = val.unwrap_or_err()?.to_bits(self.memory().pointer_size())?; - Ok((ptr, len as u64)) - } - Value::Scalar(_) => bug!("expected ptr and length, got {:?}", value), - } - } } impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for Memory<'a, 'mir, 'tcx, M> { diff --git a/src/librustc_mir/interpret/mod.rs b/src/librustc_mir/interpret/mod.rs index bc77f6e29d271..b769dae0a7abc 100644 --- a/src/librustc_mir/interpret/mod.rs +++ b/src/librustc_mir/interpret/mod.rs @@ -1,22 +1,23 @@ //! An interpreter for MIR used in CTFE and by miri mod cast; -mod const_eval; mod eval_context; mod place; +mod operand; mod machine; mod memory; mod operator; mod step; mod terminator; mod traits; +mod const_eval; +mod validity; pub use self::eval_context::{ - EvalContext, Frame, StackPopCleanup, - TyAndPacked, ValTy, + EvalContext, Frame, StackPopCleanup, LocalValue, }; -pub use self::place::{Place, PlaceExtra}; +pub use self::place::{Place, PlaceExtra, PlaceTy, MemPlace, MPlaceTy}; pub use self::memory::{Memory, MemoryKind, HasMemory}; @@ -25,32 +26,13 @@ pub use self::const_eval::{ mk_borrowck_eval_cx, mk_eval_cx, CompileTimeEvaluator, - const_value_to_allocation_provider, + const_to_allocation_provider, const_eval_provider, - const_val_field, + const_field, const_variant_index, - value_to_const_value, + op_to_const, }; pub use self::machine::Machine; -pub use self::memory::{write_target_uint, write_target_int, read_target_uint}; - -use rustc::ty::layout::TyLayout; - -pub fn sign_extend(value: u128, layout: TyLayout<'_>) -> u128 { - let size = layout.size.bits(); - assert!(layout.abi.is_signed()); - // sign extend - let shift = 128 - size; - // shift the unsigned value to the left - // and back to the right as signed (essentially fills with FF on the left) - (((value << shift) as i128) >> shift) as u128 -} - -pub fn truncate(value: u128, layout: TyLayout<'_>) -> u128 { - let size = layout.size.bits(); - let shift = 128 - size; - // truncate (shift left to drop out leftover values, shift right to fill with zeroes) - (value << shift) >> shift -} +pub use self::operand::{Value, ValTy, Operand, OpTy}; diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs new file mode 100644 index 0000000000000..df3dc44b22984 --- /dev/null +++ b/src/librustc_mir/interpret/operand.rs @@ -0,0 +1,614 @@ +//! Functions concerning immediate values and operands, and reading from operands. +//! All high-level functions to read from memory work on operands as sources. + +use std::convert::TryInto; + +use rustc::mir; +use rustc::ty::layout::{self, Align, LayoutOf, TyLayout, HasDataLayout, IntegerExt}; +use rustc_data_structures::indexed_vec::Idx; + +use rustc::mir::interpret::{ + GlobalId, ConstValue, Scalar, EvalResult, Pointer, ScalarMaybeUndef, EvalErrorKind +}; +use super::{EvalContext, Machine, MemPlace, MPlaceTy, PlaceExtra, MemoryKind}; + +/// A `Value` represents a single immediate self-contained Rust value. +/// +/// For optimization of a few very common cases, there is also a representation for a pair of +/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary +/// operations and fat pointers. This idea was taken from rustc's codegen. +/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely +/// defined on `Value`, and do not have to work with a `Place`. 
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +pub enum Value { + Scalar(ScalarMaybeUndef), + ScalarPair(ScalarMaybeUndef, ScalarMaybeUndef), +} + +impl<'tcx> Value { + pub fn new_slice( + val: Scalar, + len: u64, + cx: impl HasDataLayout + ) -> Self { + Value::ScalarPair(val.into(), Scalar::Bits { + bits: len as u128, + size: cx.data_layout().pointer_size.bytes() as u8, + }.into()) + } + + pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self { + Value::ScalarPair(val.into(), Scalar::Ptr(vtable).into()) + } + + #[inline] + pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef { + match self { + Value::Scalar(val) => val, + Value::ScalarPair(..) => bug!("Got a fat pointer where a scalar was expected"), + } + } + + #[inline] + pub fn to_scalar(self) -> EvalResult<'tcx, Scalar> { + self.to_scalar_or_undef().not_undef() + } + + /// Convert the value into a pointer (or a pointer-sized integer). + /// Throws away the second half of a ScalarPair! + #[inline] + pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar> { + match self { + Value::Scalar(ptr) | + Value::ScalarPair(ptr, _) => ptr.not_undef(), + } + } + + pub fn to_scalar_dyn_trait(self) -> EvalResult<'tcx, (Scalar, Pointer)> { + match self { + Value::ScalarPair(ptr, vtable) => + Ok((ptr.not_undef()?, vtable.to_ptr()?)), + _ => bug!("expected ptr and vtable, got {:?}", self), + } + } + + pub fn to_scalar_slice(self, cx: impl HasDataLayout) -> EvalResult<'tcx, (Scalar, u64)> { + match self { + Value::ScalarPair(ptr, val) => { + let len = val.to_bits(cx.data_layout().pointer_size)?; + Ok((ptr.not_undef()?, len as u64)) + } + _ => bug!("expected ptr and length, got {:?}", self), + } + } +} + +// ScalarPair needs a type to interpret, so we often have a value and a type together +// as input for binary and cast operations. +#[derive(Copy, Clone, Debug)] +pub struct ValTy<'tcx> { + pub value: Value, + pub layout: TyLayout<'tcx>, +} + +impl<'tcx> ::std::ops::Deref for ValTy<'tcx> { + type Target = Value; + #[inline(always)] + fn deref(&self) -> &Value { + &self.value + } +} + +/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate, +/// or still in memory. The latter is an optimization, to delay reading that chunk of +/// memory and to avoid having to store arbitrary-sized data here. 
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +pub enum Operand { + Immediate(Value), + Indirect(MemPlace), +} + +impl Operand { + #[inline] + pub fn from_ptr(ptr: Pointer, align: Align) -> Self { + Operand::Indirect(MemPlace::from_ptr(ptr, align)) + } + + #[inline] + pub fn from_scalar_value(val: Scalar) -> Self { + Operand::Immediate(Value::Scalar(val.into())) + } + + #[inline] + pub fn to_mem_place(self) -> MemPlace { + match self { + Operand::Indirect(mplace) => mplace, + _ => bug!("to_mem_place: expected Operand::Indirect, got {:?}", self), + + } + } + + #[inline] + pub fn to_immediate(self) -> Value { + match self { + Operand::Immediate(val) => val, + _ => bug!("to_immediate: expected Operand::Immediate, got {:?}", self), + + } + } +} + +#[derive(Copy, Clone, Debug)] +pub struct OpTy<'tcx> { + pub op: Operand, + pub layout: TyLayout<'tcx>, +} + +impl<'tcx> ::std::ops::Deref for OpTy<'tcx> { + type Target = Operand; + #[inline(always)] + fn deref(&self) -> &Operand { + &self.op + } +} + +impl<'tcx> From<MPlaceTy<'tcx>> for OpTy<'tcx> { + #[inline(always)] + fn from(mplace: MPlaceTy<'tcx>) -> Self { + OpTy { + op: Operand::Indirect(*mplace), + layout: mplace.layout + } + } +} + +impl<'tcx> From<ValTy<'tcx>> for OpTy<'tcx> { + #[inline(always)] + fn from(val: ValTy<'tcx>) -> Self { + OpTy { + op: Operand::Immediate(val.value), + layout: val.layout + } + } +} + +impl<'tcx> OpTy<'tcx> { + #[inline] + pub fn from_ptr(ptr: Pointer, align: Align, layout: TyLayout<'tcx>) -> Self { + OpTy { op: Operand::from_ptr(ptr, align), layout } + } + + #[inline] + pub fn from_aligned_ptr(ptr: Pointer, layout: TyLayout<'tcx>) -> Self { + OpTy { op: Operand::from_ptr(ptr, layout.align), layout } + } + + #[inline] + pub fn from_scalar_value(val: Scalar, layout: TyLayout<'tcx>) -> Self { + OpTy { op: Operand::Immediate(Value::Scalar(val.into())), layout } + } +} + +// Use the existing layout if given (but sanity check in debug mode), +// or compute the layout. +#[inline(always)] +fn from_known_layout<'tcx>( + layout: Option<TyLayout<'tcx>>, + compute: impl FnOnce() -> EvalResult<'tcx, TyLayout<'tcx>> +) -> EvalResult<'tcx, TyLayout<'tcx>> { + match layout { + None => compute(), + Some(layout) => { + if cfg!(debug_assertions) { + let layout2 = compute()?; + assert_eq!(layout.details, layout2.details, + "Mismatch in layout of supposedly equal-layout types {:?} and {:?}", + layout.ty, layout2.ty); + } + Ok(layout) + } + } +} + +impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { + /// Try reading a value in memory; this is interesting particularly for ScalarPair. + /// Return None if the layout does not permit loading this as a value. + fn try_read_value_from_mplace( + &self, + mplace: MPlaceTy<'tcx>, + ) -> EvalResult<'tcx, Option<Value>> { + if mplace.extra != PlaceExtra::None { + return Ok(None); + } + let (ptr, ptr_align) = mplace.to_scalar_ptr_align(); + + if mplace.layout.size.bytes() == 0 { + // Not all ZSTs have a layout we would handle below, so just short-circuit them + // all here. + self.memory.check_align(ptr, ptr_align)?; + return Ok(Some(Value::Scalar(Scalar::zst().into()))); + } + + let ptr = ptr.to_ptr()?; + match mplace.layout.abi { + layout::Abi::Scalar(..)
=> { + let scalar = self.memory.read_scalar(ptr, ptr_align, mplace.layout.size)?; + Ok(Some(Value::Scalar(scalar))) + } + layout::Abi::ScalarPair(ref a, ref b) => { + let (a, b) = (&a.value, &b.value); + let (a_size, b_size) = (a.size(self), b.size(self)); + let a_ptr = ptr; + let b_offset = a_size.abi_align(b.align(self)); + assert!(b_offset.bytes() > 0); // we later use the offset to test which field to use + let b_ptr = ptr.offset(b_offset, self)?.into(); + let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?; + let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?; + Ok(Some(Value::ScalarPair(a_val, b_val))) + } + _ => Ok(None), + } + } + + /// Try returning an immediate value for the operand. + /// If the layout does not permit loading this as a value, return where in memory + /// we can find the data. + /// Note that for a given layout, this operation will either always fail or always + /// succeed! Whether it succeeds depends on whether the layout can be represented + /// in a `Value`, not on which data is stored there currently. + pub(super) fn try_read_value( + &self, + src: OpTy<'tcx>, + ) -> EvalResult<'tcx, Result> { + Ok(match src.try_as_mplace() { + Ok(mplace) => { + if let Some(val) = self.try_read_value_from_mplace(mplace)? { + Ok(val) + } else { + Err(*mplace) + } + }, + Err(val) => Ok(val), + }) + } + + /// Read a value from a place, asserting that that is possible with the given layout. + #[inline(always)] + pub fn read_value(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> { + if let Ok(value) = self.try_read_value(op)? { + Ok(ValTy { value, layout: op.layout }) + } else { + bug!("primitive read failed for type: {:?}", op.layout.ty); + } + } + + /// Read a scalar from a place + pub fn read_scalar(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ScalarMaybeUndef> { + match *self.read_value(op)? { + Value::ScalarPair(..) => bug!("got ScalarPair for type: {:?}", op.layout.ty), + Value::Scalar(val) => Ok(val), + } + } + + pub fn uninit_operand(&mut self, layout: TyLayout<'tcx>) -> EvalResult<'tcx, Operand> { + // This decides which types we will use the Immediate optimization for, and hence should + // match what `try_read_value` and `eval_place_to_op` support. + if layout.is_zst() { + return Ok(Operand::Immediate(Value::Scalar(Scalar::zst().into()))); + } + + Ok(match layout.abi { + layout::Abi::Scalar(..) => + Operand::Immediate(Value::Scalar(ScalarMaybeUndef::Undef)), + layout::Abi::ScalarPair(..) => + Operand::Immediate(Value::ScalarPair( + ScalarMaybeUndef::Undef, + ScalarMaybeUndef::Undef, + )), + _ => { + trace!("Forcing allocation for local of type {:?}", layout.ty); + Operand::Indirect( + *self.allocate(layout, MemoryKind::Stack)? 
+ ) + } + }) + } + + /// Projection functions + pub fn operand_field( + &self, + op: OpTy<'tcx>, + field: u64, + ) -> EvalResult<'tcx, OpTy<'tcx>> { + let base = match op.try_as_mplace() { + Ok(mplace) => { + // The easy case + let field = self.mplace_field(mplace, field)?; + return Ok(field.into()); + }, + Err(value) => value + }; + + let field = field.try_into().unwrap(); + let field_layout = op.layout.field(self, field)?; + if field_layout.size.bytes() == 0 { + let val = Value::Scalar(Scalar::zst().into()); + return Ok(OpTy { op: Operand::Immediate(val), layout: field_layout }); + } + let offset = op.layout.fields.offset(field); + let value = match base { + // the field covers the entire type + _ if offset.bytes() == 0 && field_layout.size == op.layout.size => base, + // extract fields from types with `ScalarPair` ABI + Value::ScalarPair(a, b) => { + let val = if offset.bytes() == 0 { a } else { b }; + Value::Scalar(val) + }, + Value::Scalar(val) => + bug!("field access on non aggregate {:#?}, {:#?}", val, op.layout), + }; + Ok(OpTy { op: Operand::Immediate(value), layout: field_layout }) + } + + pub(super) fn operand_downcast( + &self, + op: OpTy<'tcx>, + variant: usize, + ) -> EvalResult<'tcx, OpTy<'tcx>> { + // Downcasts only change the layout + Ok(match op.try_as_mplace() { + Ok(mplace) => { + self.mplace_downcast(mplace, variant)?.into() + }, + Err(..) => { + let layout = op.layout.for_variant(self, variant); + OpTy { layout, ..op } + } + }) + } + + // Take an operand, representing a pointer, and dereference it -- that + // will always be a MemPlace. + pub(super) fn deref_operand( + &self, + src: OpTy<'tcx>, + ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + let val = self.read_value(src)?; + trace!("deref to {} on {:?}", val.layout.ty, val); + Ok(self.ref_to_mplace(val)?) + } + + pub fn operand_projection( + &self, + base: OpTy<'tcx>, + proj_elem: &mir::PlaceElem<'tcx>, + ) -> EvalResult<'tcx, OpTy<'tcx>> { + use rustc::mir::ProjectionElem::*; + Ok(match *proj_elem { + Field(field, _) => self.operand_field(base, field.index() as u64)?, + Downcast(_, variant) => self.operand_downcast(base, variant)?, + Deref => self.deref_operand(base)?.into(), + // The rest should only occur as mplace, we do not use Immediates for types + // allowing such operations. This matches place_projection forcing an allocation. + Subslice { .. } | ConstantIndex { .. } | Index(_) => { + let mplace = base.to_mem_place(); + self.mplace_projection(mplace, proj_elem)?.into() + } + }) + } + + // Evaluate a place with the goal of reading from it. This lets us sometimes + // avoid allocations. If you already know the layout, you can pass it in + // to avoid looking it up again. + fn eval_place_to_op( + &mut self, + mir_place: &mir::Place<'tcx>, + layout: Option<TyLayout<'tcx>>, + ) -> EvalResult<'tcx, OpTy<'tcx>> { + use rustc::mir::Place::*; + Ok(match *mir_place { + Local(mir::RETURN_PLACE) => return err!(ReadFromReturnPointer), + Local(local) => { + let op = *self.frame().locals[local].access()?; + let layout = from_known_layout(layout, + || self.layout_of_local(self.cur_frame(), local))?; + OpTy { op, layout } + }, + + Projection(ref proj) => { + let op = self.eval_place_to_op(&proj.base, None)?; + self.operand_projection(op, &proj.elem)? + } + + // Everything else is an mplace, so we just call `eval_place`. + // Note that getting an mplace for a static always requires `&mut`, + // so this does not "cost" us anything in terms of mutability.
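A reduced sketch of the from_known_layout pattern used above, over plain generics with hypothetical names, before the match continues with the Promoted/Static arms: accept an optional precomputed value, and in debug builds recompute and cross-check it so a stale caller-supplied layout is caught early.

fn from_known<T: PartialEq + std::fmt::Debug>(
    known: Option<T>,
    compute: impl FnOnce() -> T,
) -> T {
    match known {
        None => compute(),
        Some(v) => {
            if cfg!(debug_assertions) {
                // recompute and make sure the cached value agrees
                let v2 = compute();
                assert_eq!(v, v2, "supposedly known value out of sync");
            }
            v
        }
    }
}

fn main() {
    assert_eq!(from_known(None, || 42), 42);
    assert_eq!(from_known(Some(42), || 42), 42); // Some(7) would panic in debug builds
}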
+ Promoted(_) | Static(_) => { + let place = self.eval_place(mir_place)?; + place.to_mem_place().into() + } + }) + } + + /// Evaluate the operand, returning a place where you can then find the data. + /// If you already know the layout, you can save two table lookups + /// by passing it in here. + pub fn eval_operand( + &mut self, + mir_op: &mir::Operand<'tcx>, + layout: Option<TyLayout<'tcx>>, + ) -> EvalResult<'tcx, OpTy<'tcx>> { + use rustc::mir::Operand::*; + let op = match *mir_op { + // FIXME: do some more logic on `move` to invalidate the old location + Copy(ref place) | + Move(ref place) => + self.eval_place_to_op(place, layout)?, + + Constant(ref constant) => { + let layout = from_known_layout(layout, || { + let ty = self.monomorphize(mir_op.ty(self.mir(), *self.tcx), self.substs()); + self.layout_of(ty) + })?; + let op = self.const_value_to_op(constant.literal.val)?; + OpTy { op, layout } + } + }; + trace!("{:?}: {:?}", mir_op, *op); + Ok(op) + } + + /// Evaluate a bunch of operands at once + pub(crate) fn eval_operands( + &mut self, + ops: &[mir::Operand<'tcx>], + ) -> EvalResult<'tcx, Vec<OpTy<'tcx>>> { + ops.into_iter() + .map(|op| self.eval_operand(op, None)) + .collect() + } + + // Also used e.g. when miri runs into a constant. + // Unfortunately, this needs an `&mut` to be able to allocate a copy of a `ByRef` + // constant. This bleeds up to `eval_operand` needing `&mut`. + pub fn const_value_to_op( + &mut self, + val: ConstValue<'tcx>, + ) -> EvalResult<'tcx, Operand> { + match val { + ConstValue::Unevaluated(def_id, substs) => { + let instance = self.resolve(def_id, substs)?; + self.global_to_op(GlobalId { + instance, + promoted: None, + }) + } + ConstValue::ByRef(alloc, offset) => { + // FIXME: Allocate new AllocId for all constants inside + let id = self.memory.allocate_value(alloc.clone(), MemoryKind::Stack)?; + Ok(Operand::from_ptr(Pointer::new(id, offset), alloc.align)) + }, + ConstValue::ScalarPair(a, b) => + Ok(Operand::Immediate(Value::ScalarPair(a.into(), b))), + ConstValue::Scalar(x) => + Ok(Operand::Immediate(Value::Scalar(x.into()))), + } + } + + pub(super) fn global_to_op(&mut self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, Operand> { + let cv = self.const_eval(gid)?; + self.const_value_to_op(cv.val) + } + + /// We cannot do self.read_value(self.eval_operand) due to eval_operand taking &mut self, + /// so this helps avoid unnecessary let. + #[inline] + pub fn eval_operand_and_read_value( + &mut self, + op: &mir::Operand<'tcx>, + layout: Option<TyLayout<'tcx>>, + ) -> EvalResult<'tcx, ValTy<'tcx>> { + let op = self.eval_operand(op, layout)?; + self.read_value(op) + } + + /// reads a tag and produces the corresponding variant index + pub fn read_discriminant_as_variant_index( + &self, + rval: OpTy<'tcx>, + ) -> EvalResult<'tcx, usize> { + match rval.layout.variants { + layout::Variants::Single { index } => Ok(index), + layout::Variants::Tagged { .. } => { + let discr_val = self.read_discriminant_value(rval)?; + rval.layout.ty + .ty_adt_def() + .expect("tagged layout for non adt") + .discriminants(self.tcx.tcx) + .position(|var| var.val == discr_val) + .ok_or_else(|| EvalErrorKind::InvalidDiscriminant.into()) + } + layout::Variants::NicheFilling { ..
} => { + let discr_val = self.read_discriminant_value(rval)?; + assert_eq!(discr_val as usize as u128, discr_val); + Ok(discr_val as usize) + }, + } + } + + pub fn read_discriminant_value( + &self, + rval: OpTy<'tcx>, + ) -> EvalResult<'tcx, u128> { + trace!("read_discriminant_value {:#?}", rval.layout); + if rval.layout.abi == layout::Abi::Uninhabited { + return err!(Unreachable); + } + + match rval.layout.variants { + layout::Variants::Single { index } => { + let discr_val = rval.layout.ty.ty_adt_def().map_or( + index as u128, + |def| def.discriminant_for_variant(*self.tcx, index).val); + return Ok(discr_val); + } + layout::Variants::Tagged { .. } | + layout::Variants::NicheFilling { .. } => {}, + } + let discr_op = self.operand_field(rval, 0)?; + let discr_val = self.read_value(discr_op)?; + trace!("discr value: {:?}", discr_val); + let raw_discr = discr_val.to_scalar()?; + Ok(match rval.layout.variants { + layout::Variants::Single { .. } => bug!(), + // FIXME: We should catch invalid discriminants here! + layout::Variants::Tagged { .. } => { + if discr_val.layout.ty.is_signed() { + let i = raw_discr.to_bits(discr_val.layout.size)? as i128; + // going from layout tag type to typeck discriminant type + // requires first sign extending with the layout discriminant + let shift = 128 - discr_val.layout.size.bits(); + let sexted = (i << shift) >> shift; + // and then zeroing with the typeck discriminant type + let discr_ty = rval.layout.ty + .ty_adt_def().expect("tagged layout corresponds to adt") + .repr + .discr_type(); + let discr_ty = layout::Integer::from_attr(self.tcx.tcx, discr_ty); + let shift = 128 - discr_ty.size().bits(); + let truncatee = sexted as u128; + (truncatee << shift) >> shift + } else { + raw_discr.to_bits(discr_val.layout.size)? + } + }, + layout::Variants::NicheFilling { + dataful_variant, + ref niche_variants, + niche_start, + .. 
+ } => { + let variants_start = *niche_variants.start() as u128; + let variants_end = *niche_variants.end() as u128; + match raw_discr { + Scalar::Ptr(_) => { + assert!(niche_start == 0); + assert!(variants_start == variants_end); + dataful_variant as u128 + }, + Scalar::Bits { bits: raw_discr, size } => { + assert_eq!(size as u64, discr_val.layout.size.bytes()); + let discr = raw_discr.wrapping_sub(niche_start) + .wrapping_add(variants_start); + if variants_start <= discr && discr <= variants_end { + discr + } else { + dataful_variant as u128 + } + }, + } + } + }) + } + +} diff --git a/src/librustc_mir/interpret/operator.rs b/src/librustc_mir/interpret/operator.rs index 732c85bd0147a..c5475f9a4c064 100644 --- a/src/librustc_mir/interpret/operator.rs +++ b/src/librustc_mir/interpret/operator.rs @@ -1,58 +1,39 @@ use rustc::mir; -use rustc::ty::{self, Ty, layout}; +use rustc::ty::{self, layout::{self, TyLayout}}; use syntax::ast::FloatTy; -use rustc::ty::layout::{LayoutOf, TyLayout}; use rustc_apfloat::ieee::{Double, Single}; use rustc_apfloat::Float; +use rustc::mir::interpret::{EvalResult, Scalar}; -use super::{EvalContext, Place, Machine, ValTy}; +use super::{EvalContext, PlaceTy, Value, Machine, ValTy}; -use rustc::mir::interpret::{EvalResult, Scalar, Value}; impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { - fn binop_with_overflow( - &self, - op: mir::BinOp, - left: ValTy<'tcx>, - right: ValTy<'tcx>, - ) -> EvalResult<'tcx, (Scalar, bool)> { - let left_val = self.value_to_scalar(left)?; - let right_val = self.value_to_scalar(right)?; - self.binary_op(op, left_val, left.ty, right_val, right.ty) - } - /// Applies the binary operation `op` to the two operands and writes a tuple of the result /// and a boolean signifying the potential overflow to the destination. - pub fn intrinsic_with_overflow( + pub fn binop_with_overflow( &mut self, op: mir::BinOp, left: ValTy<'tcx>, right: ValTy<'tcx>, - dest: Place, - dest_ty: Ty<'tcx>, + dest: PlaceTy<'tcx>, ) -> EvalResult<'tcx> { - let (val, overflowed) = self.binop_with_overflow(op, left, right)?; + let (val, overflowed) = self.binary_op(op, left, right)?; let val = Value::ScalarPair(val.into(), Scalar::from_bool(overflowed).into()); - let valty = ValTy { - value: val, - ty: dest_ty, - }; - self.write_value(valty, dest) + self.write_value(val, dest) } /// Applies the binary operation `op` to the arguments and writes the result to the - /// destination. Returns `true` if the operation overflowed. - pub fn intrinsic_overflowing( + /// destination. 
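The convention encoded above: a checked binary operation produces the wrapped result plus an overflow flag, which binop_with_overflow then stores as a ScalarPair. The same contract, shown with ordinary 8-bit integers:

fn main() {
    // overflowing_add returns (wrapped_result, overflow_flag), exactly the
    // pair the interpreter writes for a CheckedBinaryOp destination
    assert_eq!(200u8.overflowing_add(100), (44, true)); // 300 wraps to 44
    assert_eq!(1u8.overflowing_add(2), (3, false));
}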
+ pub fn binop_ignore_overflow( &mut self, op: mir::BinOp, left: ValTy<'tcx>, right: ValTy<'tcx>, - dest: Place, - dest_ty: Ty<'tcx>, - ) -> EvalResult<'tcx, bool> { - let (val, overflowed) = self.binop_with_overflow(op, left, right)?; - self.write_scalar(dest, val, dest_ty)?; - Ok(overflowed) + dest: PlaceTy<'tcx>, + ) -> EvalResult<'tcx> { + let (val, _overflowed) = self.binary_op(op, left, right)?; + self.write_scalar(val, dest) } } @@ -61,29 +42,29 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { pub fn binary_op( &self, bin_op: mir::BinOp, - left: Scalar, - left_ty: Ty<'tcx>, - right: Scalar, - right_ty: Ty<'tcx>, + ValTy { value: left, layout: left_layout }: ValTy<'tcx>, + ValTy { value: right, layout: right_layout }: ValTy<'tcx>, ) -> EvalResult<'tcx, (Scalar, bool)> { use rustc::mir::BinOp::*; - let left_layout = self.layout_of(left_ty)?; - let right_layout = self.layout_of(right_ty)?; + let left = left.to_scalar()?; + let right = right.to_scalar()?; let left_kind = match left_layout.abi { layout::Abi::Scalar(ref scalar) => scalar.value, - _ => return err!(TypeNotPrimitive(left_ty)), + _ => return err!(TypeNotPrimitive(left_layout.ty)), }; let right_kind = match right_layout.abi { layout::Abi::Scalar(ref scalar) => scalar.value, - _ => return err!(TypeNotPrimitive(right_ty)), + _ => return err!(TypeNotPrimitive(right_layout.ty)), }; trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind); // I: Handle operations that support pointers if !left_kind.is_float() && !right_kind.is_float() { - if let Some(handled) = M::try_ptr_op(self, bin_op, left, left_ty, right, right_ty)? { + if let Some(handled) = + M::try_ptr_op(self, bin_op, left, left_layout, right, right_layout)? + { return Ok(handled); } } @@ -188,7 +169,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } } - if let ty::TyFloat(fty) = left_ty.sty { + if let ty::TyFloat(fty) = left_layout.ty.sty { macro_rules! float_math { ($ty:path, $size:expr) => {{ let l = <$ty>::from_bits(l); @@ -220,7 +201,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } } - let size = self.layout_of(left_ty).unwrap().size.bytes() as u8; + let size = left_layout.size.bytes() as u8; // only ints left let val = match bin_op { @@ -260,9 +241,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { "unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, - left_ty, + left_layout.ty, right, - right_ty, + right_layout.ty, ); return err!(Unimplemented(msg)); } diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index 91c2519230695..91182edc2f5b7 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -1,472 +1,794 @@ +//! Computations on places -- field projections, going from mir::Place, and writing +//! into a place. +//! All high-level functions to write to memory work on places as destinations. 
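A toy model, with usize standing in for real pointers and layouts omitted, of the two-level place design this file introduces: a place is either backed by interpreter memory or is a still-allocation-free local, and only force_allocation upgrades the latter into the former.

#[derive(Copy, Clone, Debug, PartialEq)]
struct ToyMemPlace {
    ptr: usize,
    align: usize,
}

#[derive(Copy, Clone, Debug, PartialEq)]
enum ToyPlace {
    Ptr(ToyMemPlace),
    Local { frame: usize, local: usize },
}

impl ToyPlace {
    // Mirrors Place::to_mem_place below: only valid once the local
    // has been forced into memory.
    fn to_mem_place(self) -> ToyMemPlace {
        match self {
            ToyPlace::Ptr(mplace) => mplace,
            other => panic!("to_mem_place: expected Ptr, got {:?}", other),
        }
    }
}

fn main() {
    let place = ToyPlace::Ptr(ToyMemPlace { ptr: 0x1000, align: 8 });
    assert_eq!(place.to_mem_place(), ToyMemPlace { ptr: 0x1000, align: 8 });
}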
+ +use std::hash::{Hash, Hasher}; +use std::convert::TryFrom; + use rustc::mir; -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{self, Align, LayoutOf, TyLayout}; +use rustc::ty::{self, Ty}; +use rustc::ty::layout::{self, Size, Align, LayoutOf, TyLayout, HasDataLayout}; use rustc_data_structures::indexed_vec::Idx; -use rustc::mir::interpret::{GlobalId, Value, Scalar, EvalResult, Pointer, ScalarMaybeUndef}; -use super::{EvalContext, Machine, ValTy}; -use interpret::memory::HasMemory; +use rustc::mir::interpret::{ + GlobalId, Scalar, EvalResult, Pointer, ScalarMaybeUndef +}; +use super::{EvalContext, Machine, Value, ValTy, Operand, OpTy, MemoryKind}; + +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +pub struct MemPlace { + /// A place may have an integral pointer for ZSTs, and since it might + /// be turned back into a reference before ever being dereferenced. + /// However, it may never be undef. + pub ptr: Scalar, + pub align: Align, + pub extra: PlaceExtra, +} #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub enum Place { /// A place referring to a value allocated in the `Memory` system. - Ptr { - /// A place may have an invalid (integral or undef) pointer, - /// since it might be turned back into a reference - /// before ever being dereferenced. - ptr: ScalarMaybeUndef, - align: Align, - extra: PlaceExtra, - }, + Ptr(MemPlace), - /// A place referring to a value on the stack. Represented by a stack frame index paired with - /// a Mir local index. - Local { frame: usize, local: mir::Local }, + /// To support alloc-free locals, we are able to write directly to a local. + /// (Without that optimization, we'd just always be a `MemPlace`.) + Local { + frame: usize, + local: mir::Local, + }, } +// Extra information for fat pointers / places #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub enum PlaceExtra { None, Length(u64), Vtable(Pointer), - DowncastVariant(usize), } -impl<'tcx> Place { - /// Produces a Place that will error if attempted to be read from - pub fn undef() -> Self { - Self::from_scalar_ptr(ScalarMaybeUndef::Undef, Align::from_bytes(1, 1).unwrap()) +#[derive(Copy, Clone, Debug)] +pub struct PlaceTy<'tcx> { + place: Place, + pub layout: TyLayout<'tcx>, +} + +impl<'tcx> ::std::ops::Deref for PlaceTy<'tcx> { + type Target = Place; + #[inline(always)] + fn deref(&self) -> &Place { + &self.place + } +} + +/// A MemPlace with its layout. Constructing it is only possible in this module. 
+#[derive(Copy, Clone, Debug)] +pub struct MPlaceTy<'tcx> { + mplace: MemPlace, + pub layout: TyLayout<'tcx>, +} + +impl<'tcx> ::std::ops::Deref for MPlaceTy<'tcx> { + type Target = MemPlace; + #[inline(always)] + fn deref(&self) -> &MemPlace { + &self.mplace } +} + +impl<'tcx> From> for PlaceTy<'tcx> { + #[inline(always)] + fn from(mplace: MPlaceTy<'tcx>) -> Self { + PlaceTy { + place: Place::Ptr(mplace.mplace), + layout: mplace.layout + } + } +} - pub fn from_scalar_ptr(ptr: ScalarMaybeUndef, align: Align) -> Self { - Place::Ptr { +impl MemPlace { + #[inline(always)] + pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self { + MemPlace { ptr, align, extra: PlaceExtra::None, } } + #[inline(always)] + pub fn from_ptr(ptr: Pointer, align: Align) -> Self { + Self::from_scalar_ptr(ptr.into(), align) + } + + #[inline(always)] + pub fn to_scalar_ptr_align(self) -> (Scalar, Align) { + assert_eq!(self.extra, PlaceExtra::None); + (self.ptr, self.align) + } + + /// Extract the ptr part of the mplace + #[inline(always)] + pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { + // At this point, we forget about the alignment information -- the place has been turned into a reference, + // and no matter where it came from, it now must be aligned. + self.to_scalar_ptr_align().0.to_ptr() + } + + /// Turn a mplace into a (thin or fat) pointer, as a reference, pointing to the same space. + /// This is the inverse of `ref_to_mplace`. + pub fn to_ref(self, cx: impl HasDataLayout) -> Value { + // We ignore the alignment of the place here -- special handling for packed structs ends + // at the `&` operator. + match self.extra { + PlaceExtra::None => Value::Scalar(self.ptr.into()), + PlaceExtra::Length(len) => Value::new_slice(self.ptr.into(), len, cx), + PlaceExtra::Vtable(vtable) => Value::new_dyn_trait(self.ptr.into(), vtable), + } + } +} + +impl<'tcx> MPlaceTy<'tcx> { + #[inline] + fn from_aligned_ptr(ptr: Pointer, layout: TyLayout<'tcx>) -> Self { + MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align), layout } + } + + #[inline] + pub(super) fn len(self) -> u64 { + // Sanity check + let ty_len = match self.layout.fields { + layout::FieldPlacement::Array { count, .. 
} => count, + _ => bug!("Length for non-array layout {:?} requested", self.layout), + }; + if let PlaceExtra::Length(len) = self.extra { + len + } else { + ty_len + } + } +} + +// Validation needs to hash MPlaceTy, but we cannot hash Layout -- so we just hash the type +impl<'tcx> Hash for MPlaceTy<'tcx> { + fn hash(&self, state: &mut H) { + self.mplace.hash(state); + self.layout.ty.hash(state); + } +} +impl<'tcx> PartialEq for MPlaceTy<'tcx> { + fn eq(&self, other: &Self) -> bool { + self.mplace == other.mplace && self.layout.ty == other.layout.ty + } +} +impl<'tcx> Eq for MPlaceTy<'tcx> {} + +impl<'tcx> OpTy<'tcx> { + #[inline(always)] + pub fn try_as_mplace(self) -> Result, Value> { + match *self { + Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }), + Operand::Immediate(value) => Err(value), + } + } + + #[inline(always)] + pub fn to_mem_place(self) -> MPlaceTy<'tcx> { + self.try_as_mplace().unwrap() + } +} + +impl<'tcx> Place { + /// Produces a Place that will error if attempted to be read from or written to + #[inline] + pub fn null(cx: impl HasDataLayout) -> Self { + Self::from_scalar_ptr(Scalar::ptr_null(cx), Align::from_bytes(1, 1).unwrap()) + } + + #[inline] + pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self { + Place::Ptr(MemPlace::from_scalar_ptr(ptr, align)) + } + + #[inline] pub fn from_ptr(ptr: Pointer, align: Align) -> Self { - Self::from_scalar_ptr(ScalarMaybeUndef::Scalar(ptr.into()), align) + Place::Ptr(MemPlace::from_ptr(ptr, align)) } - pub fn to_ptr_align_extra(self) -> (ScalarMaybeUndef, Align, PlaceExtra) { + #[inline] + pub fn to_mem_place(self) -> MemPlace { match self { - Place::Ptr { ptr, align, extra } => (ptr, align, extra), - _ => bug!("to_ptr_and_extra: expected Place::Ptr, got {:?}", self), + Place::Ptr(mplace) => mplace, + _ => bug!("to_mem_place: expected Place::Ptr, got {:?}", self), } } - pub fn to_ptr_align(self) -> (ScalarMaybeUndef, Align) { - let (ptr, align, _extra) = self.to_ptr_align_extra(); - (ptr, align) + #[inline] + pub fn to_scalar_ptr_align(self) -> (Scalar, Align) { + self.to_mem_place().to_scalar_ptr_align() } + #[inline] pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { - // At this point, we forget about the alignment information -- the place has been turned into a reference, - // and no matter where it came from, it now must be aligned. - self.to_ptr_align().0.unwrap_or_err()?.to_ptr() - } - - pub(super) fn elem_ty_and_len( - self, - ty: Ty<'tcx>, - tcx: TyCtxt<'_, 'tcx, '_> - ) -> (Ty<'tcx>, u64) { - match ty.sty { - ty::TyArray(elem, n) => (elem, n.unwrap_usize(tcx)), - - ty::TySlice(elem) => { - match self { - Place::Ptr { extra: PlaceExtra::Length(len), .. 
} => (elem, len), - _ => { - bug!( - "elem_ty_and_len of a TySlice given non-slice place: {:?}", - self - ) - } - } - } + self.to_mem_place().to_ptr() + } +} - _ => bug!("elem_ty_and_len expected array or slice, got {:?}", ty), - } +impl<'tcx> PlaceTy<'tcx> { + /// Produces a Place that will error if attempted to be read from or written to + #[inline] + pub fn null(cx: impl HasDataLayout, layout: TyLayout<'tcx>) -> Self { + PlaceTy { place: Place::from_scalar_ptr(Scalar::ptr_null(cx), layout.align), layout } + } + + #[inline] + pub fn to_mem_place(self) -> MPlaceTy<'tcx> { + MPlaceTy { mplace: self.place.to_mem_place(), layout: self.layout } } } impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { - /// Reads a value from the place without going through the intermediate step of obtaining - /// a `miri::Place` - pub fn try_read_place( + /// Take a value, which represents a (thin or fat) reference, and make it a place. + /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref`. + pub fn ref_to_mplace( + &self, val: ValTy<'tcx> + ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + let pointee_type = val.layout.ty.builtin_deref(true).unwrap().ty; + let layout = self.layout_of(pointee_type)?; + let mplace = match self.tcx.struct_tail(pointee_type).sty { + ty::TyDynamic(..) => { + let (ptr, vtable) = val.to_scalar_dyn_trait()?; + MemPlace { + ptr, + align: layout.align, + extra: PlaceExtra::Vtable(vtable), + } + } + ty::TyStr | ty::TySlice(_) => { + let (ptr, len) = val.to_scalar_slice(self)?; + MemPlace { + ptr, + align: layout.align, + extra: PlaceExtra::Length(len), + } + } + _ => MemPlace { + ptr: val.to_scalar()?, + align: layout.align, + extra: PlaceExtra::None, + }, + }; + Ok(MPlaceTy { mplace, layout }) + } + + /// Offset a pointer to project to a field. Unlike place_field, this is always + /// possible without allocating, so it can take &self. Also return the field's layout. + /// This supports both struct and array fields. + #[inline(always)] + pub fn mplace_field( + &self, - place: &mir::Place<'tcx>, - ) -> EvalResult<'tcx, Option<Value>> { - use rustc::mir::Place::*; - match *place { - // Might allow this in the future, right now there's no way to do this from Rust code anyway - Local(mir::RETURN_PLACE) => err!(ReadFromReturnPointer), - // Directly reading a local will always succeed - Local(local) => self.frame().locals[local].access().map(Some), - // No fast path for statics. Reading from statics is rare and would require another - // Machine function to handle differently in miri. - Promoted(_) | - Static(_) => Ok(None), - Projection(ref proj) => self.try_read_place_projection(proj), - } + base: MPlaceTy<'tcx>, + field: u64, + ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + // Not using the layout method because we want to compute on u64 + let offset = match base.layout.fields { + layout::FieldPlacement::Arbitrary { ref offsets, .. } => + offsets[usize::try_from(field).unwrap()], + layout::FieldPlacement::Array { stride, .. } => { + let len = base.len(); + assert!(field < len, "Tried to access element {} of array/slice with length {}", field, len); + stride * field + } + layout::FieldPlacement::Union(count) => { + assert!(field < count as u64, "Tried to access field {} of union with {} fields", field, count); + // Offset is always 0 + Size::from_bytes(0) + } + }; + // the only way conversion can fail is if this is an array (otherwise we already panicked + // above). In that case, all fields are equal.
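A reduced sketch of the offset computation mplace_field is doing here, with plain byte offsets and a boiled-down field-placement enum (real layouts carry much more information, such as padding and niches):

enum ToyFieldPlacement {
    Arbitrary { offsets: Vec<u64> },
    Array { stride: u64, count: u64 },
    Union,
}

fn field_offset(fields: &ToyFieldPlacement, field: u64) -> u64 {
    match fields {
        ToyFieldPlacement::Arbitrary { offsets } => offsets[field as usize],
        ToyFieldPlacement::Array { stride, count } => {
            assert!(field < *count, "element {} out of bounds for length {}", field, count);
            stride * field
        }
        // All union fields start at the beginning of the union.
        ToyFieldPlacement::Union => 0,
    }
}

fn main() {
    let array = ToyFieldPlacement::Array { stride: 4, count: 10 }; // e.g. [u32; 10]
    assert_eq!(field_offset(&array, 3), 12);
    let pair = ToyFieldPlacement::Arbitrary { offsets: vec![0, 8] }; // e.g. (u32, u64)
    assert_eq!(field_offset(&pair, 1), 8);
}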
+ let field_layout = base.layout.field(self, usize::try_from(field).unwrap_or(0))?; + + // Adjust offset + let offset = match base.extra { + PlaceExtra::Vtable(vtable) => { + let (_, align) = self.read_size_and_align_from_vtable(vtable)?; + // FIXME: Is this right? Should we always do this, or only when actually + // accessing the field to which the vtable applies? + offset.abi_align(align) + } + _ => { + // No adjustment needed + offset + } + }; + + let ptr = base.ptr.ptr_offset(offset, self)?; + let align = base.align.min(field_layout.align); + let extra = if !field_layout.is_unsized() { + PlaceExtra::None + } else { + assert!(base.extra != PlaceExtra::None, "Expected fat ptr"); + base.extra + }; + + Ok(MPlaceTy { mplace: MemPlace { ptr, align, extra }, layout: field_layout }) } - pub fn read_field( + // Iterates over all fields of an array. Much more efficient than doing the + // same by repeatedly calling `mplace_array`. + pub fn mplace_array_fields( &self, - base: Value, - variant: Option, - field: mir::Field, - mut base_layout: TyLayout<'tcx>, - ) -> EvalResult<'tcx, (Value, TyLayout<'tcx>)> { - if let Some(variant_index) = variant { - base_layout = base_layout.for_variant(self, variant_index); - } - let field_index = field.index(); - let field = base_layout.field(self, field_index)?; - if field.size.bytes() == 0 { - return Ok(( - Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits: 0, size: 0 })), - field, - )); - } - let offset = base_layout.fields.offset(field_index); - let value = match base { - // the field covers the entire type - Value::ScalarPair(..) | - Value::Scalar(_) if offset.bytes() == 0 && field.size == base_layout.size => base, - // extract fields from types with `ScalarPair` ABI - Value::ScalarPair(a, b) => { - let val = if offset.bytes() == 0 { a } else { b }; - Value::Scalar(val) - }, - Value::ByRef(base_ptr, align) => { - let offset = base_layout.fields.offset(field_index); - let ptr = base_ptr.ptr_offset(offset, self)?; - let align = align.min(base_layout.align).min(field.align); - assert!(!field.is_unsized()); - Value::ByRef(ptr, align) - }, - Value::Scalar(val) => bug!("field access on non aggregate {:#?}, {:#?}", val, base_layout), + base: MPlaceTy<'tcx>, + ) -> EvalResult<'tcx, impl Iterator>> + 'a> { + let len = base.len(); + let stride = match base.layout.fields { + layout::FieldPlacement::Array { stride, .. } => stride, + _ => bug!("mplace_array_fields: expected an array layout"), }; - Ok((value, field)) + let layout = base.layout.field(self, 0)?; + let dl = &self.tcx.data_layout; + Ok((0..len).map(move |i| { + let ptr = base.ptr.ptr_offset(i * stride, dl)?; + Ok(MPlaceTy { + mplace: MemPlace { ptr, align: base.align, extra: PlaceExtra::None }, + layout + }) + })) } - fn try_read_place_projection( + pub fn mplace_subslice( &self, - proj: &mir::PlaceProjection<'tcx>, - ) -> EvalResult<'tcx, Option> { - use rustc::mir::ProjectionElem::*; - let base = match self.try_read_place(&proj.base)? { - Some(base) => base, - None => return Ok(None), + base: MPlaceTy<'tcx>, + from: u64, + to: u64, + ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + let len = base.len(); + assert!(from <= len - to); + + // Not using layout method because that works with usize, and does not work with slices + // (that have count 0 in their layout). + let from_offset = match base.layout.fields { + layout::FieldPlacement::Array { stride, .. 
} => + stride * from, + _ => bug!("Unexpected layout of index access: {:#?}", base.layout), }; - let base_ty = self.place_ty(&proj.base); - let base_layout = self.layout_of(base_ty)?; - match proj.elem { - Field(field, _) => Ok(Some(self.read_field(base, None, field, base_layout)?.0)), - // The NullablePointer cases should work fine, need to take care for normal enums - Downcast(..) | - Subslice { .. } | - // reading index 0 or index 1 from a ByVal or ByVal pair could be optimized - ConstantIndex { .. } | Index(_) | - // No way to optimize this projection any better than the normal place path - Deref => Ok(None), - } + let ptr = base.ptr.ptr_offset(from_offset, self)?; + + // Compute extra and new layout + let inner_len = len - to - from; + let (extra, ty) = match base.layout.ty.sty { + ty::TyArray(inner, _) => + (PlaceExtra::None, self.tcx.mk_array(inner, inner_len)), + ty::TySlice(..) => + (PlaceExtra::Length(inner_len), base.layout.ty), + _ => + bug!("cannot subslice non-array type: `{:?}`", base.layout.ty), + }; + let layout = self.layout_of(ty)?; + + Ok(MPlaceTy { + mplace: MemPlace { ptr, align: base.align, extra }, + layout + }) + } + + pub fn mplace_downcast( + &self, + base: MPlaceTy<'tcx>, + variant: usize, + ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + // Downcasts only change the layout + assert_eq!(base.extra, PlaceExtra::None); + Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..base }) + } + + /// Project into an mplace + pub fn mplace_projection( + &self, + base: MPlaceTy<'tcx>, + proj_elem: &mir::PlaceElem<'tcx>, + ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + use rustc::mir::ProjectionElem::*; + Ok(match *proj_elem { + Field(field, _) => self.mplace_field(base, field.index() as u64)?, + Downcast(_, variant) => self.mplace_downcast(base, variant)?, + Deref => self.deref_operand(base.into())?, + + Index(local) => { + let n = *self.frame().locals[local].access()?; + let n_layout = self.layout_of(self.tcx.types.usize)?; + let n = self.read_scalar(OpTy { op: n, layout: n_layout })?; + let n = n.to_bits(self.tcx.data_layout.pointer_size)?; + self.mplace_field(base, u64::try_from(n).unwrap())? + } + + ConstantIndex { + offset, + min_length, + from_end, + } => { + let n = base.len(); + assert!(n >= min_length as u64); + + let index = if from_end { + n - u64::from(offset) + } else { + u64::from(offset) + }; + + self.mplace_field(base, index)? + } + + Subslice { from, to } => + self.mplace_subslice(base, u64::from(from), u64::from(to))?, + }) } - /// Returns a value and (in case of a ByRef) if we are supposed to use aligned accesses. - pub(super) fn eval_and_read_place( + /// Get the place of a field inside the place, and also the field's type. + /// Just a convenience function, but used quite a bit. + pub fn place_field( &mut self, - place: &mir::Place<'tcx>, - ) -> EvalResult<'tcx, Value> { - // Shortcut for things like accessing a fat pointer's field, - // which would otherwise (in the `eval_place` path) require moving a `ScalarPair` to memory - // and returning an `Place::Ptr` to it - if let Some(val) = self.try_read_place(place)? { - return Ok(val); - } - let place = self.eval_place(place)?; - self.read_place(place) + base: PlaceTy<'tcx>, + field: u64, + ) -> EvalResult<'tcx, PlaceTy<'tcx>> { + // FIXME: We could try to be smarter and avoid allocation for fields that span the + // entire place. 
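The subslice arithmetic above, extracted into a standalone sketch: from and to count elements dropped from the front and the back respectively, so the result keeps len - from - to elements starting at byte offset stride * from.

fn subslice_bounds(len: u64, stride: u64, from: u64, to: u64) -> (u64, u64) {
    assert!(from <= len - to, "subslice out of bounds");
    let from_offset = stride * from; // byte offset of the new start
    let inner_len = len - to - from; // elements remaining
    (from_offset, inner_len)
}

fn main() {
    // For a [u32; 5] (stride 4), taking [1..4] (from = 1, to = 1)
    // starts 4 bytes in and keeps 3 elements.
    assert_eq!(subslice_bounds(5, 4, 1, 1), (4, 3));
}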
+ let mplace = self.force_allocation(base)?; + Ok(self.mplace_field(mplace, field)?.into()) } - pub fn read_place(&self, place: Place) -> EvalResult<'tcx, Value> { - match place { - Place::Ptr { ptr, align, extra } => { - assert_eq!(extra, PlaceExtra::None); - Ok(Value::ByRef(ptr.unwrap_or_err()?, align)) + pub fn place_downcast( + &mut self, + base: PlaceTy<'tcx>, + variant: usize, + ) -> EvalResult<'tcx, PlaceTy<'tcx>> { + // Downcast just changes the layout + Ok(match base.place { + Place::Ptr(mplace) => + self.mplace_downcast(MPlaceTy { mplace, layout: base.layout }, variant)?.into(), + Place::Local { .. } => { + let layout = base.layout.for_variant(&self, variant); + PlaceTy { layout, ..base } } - Place::Local { frame, local } => self.stack[frame].locals[local].access(), - } + }) } - pub fn eval_place(&mut self, mir_place: &mir::Place<'tcx>) -> EvalResult<'tcx, Place> { + /// Project into a place + pub fn place_projection( + &mut self, + base: PlaceTy<'tcx>, + proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>, + ) -> EvalResult<'tcx, PlaceTy<'tcx>> { + use rustc::mir::ProjectionElem::*; + Ok(match *proj_elem { + Field(field, _) => self.place_field(base, field.index() as u64)?, + Downcast(_, variant) => self.place_downcast(base, variant)?, + Deref => self.deref_operand(self.place_to_op(base)?)?.into(), + // For the other variants, we have to force an allocation. + // This matches `operand_projection`. + Subslice { .. } | ConstantIndex { .. } | Index(_) => { + let mplace = self.force_allocation(base)?; + self.mplace_projection(mplace, proj_elem)?.into() + } + }) + } + + /// Compute a place. You should only use this if you intend to write into this + /// place; for reading, a more efficient alternative is `eval_place_for_read`. + pub fn eval_place(&mut self, mir_place: &mir::Place<'tcx>) -> EvalResult<'tcx, PlaceTy<'tcx>> { use rustc::mir::Place::*; let place = match *mir_place { - Local(mir::RETURN_PLACE) => self.frame().return_place, - Local(local) => Place::Local { - frame: self.cur_frame(), - local, + Local(mir::RETURN_PLACE) => PlaceTy { + place: self.frame().return_place, + layout: self.layout_of_local(self.cur_frame(), mir::RETURN_PLACE)?, + }, + Local(local) => PlaceTy { + place: Place::Local { + frame: self.cur_frame(), + local, + }, + layout: self.layout_of_local(self.cur_frame(), local)?, }, Promoted(ref promoted) => { let instance = self.frame().instance; - let val = self.read_global_as_value(GlobalId { + let op = self.global_to_op(GlobalId { instance, promoted: Some(promoted.0), })?; - if let Value::ByRef(ptr, align) = val { - Place::Ptr { - ptr: ptr.into(), - align, - extra: PlaceExtra::None, - } - } else { - bug!("evaluated promoted and got {:#?}", val); + let mplace = op.to_mem_place(); + let ty = self.monomorphize(promoted.1, self.substs()); + PlaceTy { + place: Place::Ptr(mplace), + layout: self.layout_of(ty)?, } } Static(ref static_) => { - let layout = self.layout_of(self.place_ty(mir_place))?; + let ty = self.monomorphize(static_.ty, self.substs()); + let layout = self.layout_of(ty)?; let instance = ty::Instance::mono(*self.tcx, static_.def_id); let cid = GlobalId { instance, promoted: None }; let alloc = Machine::init_static(self, cid)?; - Place::Ptr { - ptr: ScalarMaybeUndef::Scalar(Scalar::Ptr(alloc.into())), - align: layout.align, - extra: PlaceExtra::None, - } + MPlaceTy::from_aligned_ptr(alloc.into(), layout).into() } Projection(ref proj) => { - let ty = self.place_ty(&proj.base); let place = self.eval_place(&proj.base)?; - return 
self.eval_place_projection(place, ty, &proj.elem); + self.place_projection(place, &proj.elem)? } }; - self.dump_local(place); + self.dump_place(place.place); Ok(place) } - pub fn place_field( + /// Write a scalar to a place + pub fn write_scalar( &mut self, - base: Place, - field: mir::Field, - mut base_layout: TyLayout<'tcx>, - ) -> EvalResult<'tcx, (Place, TyLayout<'tcx>)> { - match base { - Place::Ptr { extra: PlaceExtra::DowncastVariant(variant_index), .. } => { - base_layout = base_layout.for_variant(&self, variant_index); - } - _ => {} - } - let field_index = field.index(); - let field = base_layout.field(&self, field_index)?; - let offset = base_layout.fields.offset(field_index); + val: impl Into, + dest: PlaceTy<'tcx>, + ) -> EvalResult<'tcx> { + self.write_value(Value::Scalar(val.into()), dest) + } - // Do not allocate in trivial cases - let (base_ptr, base_align, base_extra) = match base { - Place::Ptr { ptr, align, extra } => (ptr, align, extra), + /// Write a value to a place + pub fn write_value( + &mut self, + src_val: Value, + dest: PlaceTy<'tcx>, + ) -> EvalResult<'tcx> { + trace!("write_value: {:?} <- {:?}", *dest, src_val); + // See if we can avoid an allocation. This is the counterpart to `try_read_value`, + // but not factored as a separate function. + let mplace = match dest.place { Place::Local { frame, local } => { - match (self.stack[frame].locals[local].access()?, &base_layout.abi) { - // in case the field covers the entire type, just return the value - (Value::Scalar(_), &layout::Abi::Scalar(_)) | - (Value::ScalarPair(..), &layout::Abi::ScalarPair(..)) - if offset.bytes() == 0 && field.size == base_layout.size => { - return Ok((base, field)) + match *self.stack[frame].locals[local].access_mut()? { + Operand::Immediate(ref mut dest_val) => { + // Yay, we can just change the local directly. + *dest_val = src_val; + return Ok(()); }, - _ => self.force_allocation(base)?.to_ptr_align_extra(), + Operand::Indirect(mplace) => mplace, // already in memory } - } + }, + Place::Ptr(mplace) => mplace, // already in memory }; - let offset = match base_extra { - PlaceExtra::Vtable(tab) => { - let (_, align) = self.size_and_align_of_dst( - base_layout.ty, - base_ptr.to_value_with_vtable(tab), - )?; - offset.abi_align(align) + // This is already in memory, write there. + let dest = MPlaceTy { mplace, layout: dest.layout }; + self.write_value_to_mplace(src_val, dest) + } + + /// Write a value to memory + fn write_value_to_mplace( + &mut self, + value: Value, + dest: MPlaceTy<'tcx>, + ) -> EvalResult<'tcx> { + let (ptr, ptr_align) = dest.to_scalar_ptr_align(); + // Note that it is really important that the type here is the right one, and matches the type things are read at. + // In case `src_val` is a `ScalarPair`, we don't do any magic here to handle padding properly, which is only + // correct if we never look at this data with the wrong type. 
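For the ScalarPair case in the write below, the second component is placed at the first component's size rounded up to the second's alignment, just like a two-field struct without repr attributes. A sketch of that computation, assuming power-of-two alignments in bytes:

fn abi_align(offset: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two());
    // round offset up to the next multiple of align
    (offset + align - 1) & !(align - 1)
}

fn main() {
    // a pair like (u8, u32): a_size = 1, b_align = 4, so b sits at offset 4
    assert_eq!(abi_align(1, 4), 4);
    // a pair like (u32, u32): b directly follows a
    assert_eq!(abi_align(4, 4), 4);
}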
+ + // Nothing to do for ZSTs, other than checking alignment + if dest.layout.size.bytes() == 0 { + self.memory.check_align(ptr, ptr_align)?; + return Ok(()); + } + + let ptr = ptr.to_ptr()?; + match value { + Value::Scalar(scalar) => { + self.memory.write_scalar( + ptr, ptr_align.min(dest.layout.align), scalar, dest.layout.size + ) } - _ => offset, - }; + Value::ScalarPair(a_val, b_val) => { + let (a, b) = match dest.layout.abi { + layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value), + _ => bug!("write_value_to_mplace: invalid ScalarPair layout: {:#?}", dest.layout) + }; + let (a_size, b_size) = (a.size(&self), b.size(&self)); + let (a_align, b_align) = (a.align(&self), b.align(&self)); + let b_offset = a_size.abi_align(b_align); + let b_ptr = ptr.offset(b_offset, &self)?.into(); - let ptr = base_ptr.ptr_offset(offset, &self)?; - let align = base_align.min(base_layout.align).min(field.align); - let extra = if !field.is_unsized() { - PlaceExtra::None - } else { - match base_extra { - PlaceExtra::None => bug!("expected fat pointer"), - PlaceExtra::DowncastVariant(..) => { - bug!("Rust doesn't support unsized fields in enum variants") - } - PlaceExtra::Vtable(_) | - PlaceExtra::Length(_) => {} + self.memory.write_scalar(ptr, ptr_align.min(a_align), a_val, a_size)?; + self.memory.write_scalar(b_ptr, ptr_align.min(b_align), b_val, b_size) } - base_extra - }; + } + } - Ok((Place::Ptr { ptr, align, extra }, field)) + /// Copy the data from an operand to a place + pub fn copy_op( + &mut self, + src: OpTy<'tcx>, + dest: PlaceTy<'tcx>, + ) -> EvalResult<'tcx> { + assert_eq!(src.layout.size, dest.layout.size, + "Size mismatch when copying!\nsrc: {:#?}\ndest: {:#?}", src, dest); + + // Let us see if the layout is simple so we take a shortcut, avoid force_allocation. + let (src_ptr, src_align) = match self.try_read_value(src)? { + Ok(src_val) => + // Yay, we got a value that we can write directly. We write with the + // *source layout*, because that was used to load, and if they do not match + // this is a transmute we want to support. + return self.write_value(src_val, PlaceTy { place: *dest, layout: src.layout }), + Err(mplace) => mplace.to_scalar_ptr_align(), + }; + // Slow path, this does not fit into an immediate. Just memcpy. + trace!("copy_op: {:?} <- {:?}", *dest, *src); + let (dest_ptr, dest_align) = self.force_allocation(dest)?.to_scalar_ptr_align(); + self.memory.copy( + src_ptr, src_align, + dest_ptr, dest_align, + src.layout.size, false + ) } - pub fn val_to_place(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Place> { - let layout = self.layout_of(ty)?; - Ok(match self.tcx.struct_tail(ty).sty { - ty::TyDynamic(..) => { - let (ptr, vtable) = self.into_ptr_vtable_pair(val)?; - Place::Ptr { - ptr, - align: layout.align, - extra: PlaceExtra::Vtable(vtable), - } - } - ty::TyStr | ty::TySlice(_) => { - let (ptr, len) = self.into_slice(val)?; - Place::Ptr { - ptr, - align: layout.align, - extra: PlaceExtra::Length(len), - } + /// Make sure that a place is in memory, and return where it is. + /// This is essentially `force_to_memplace`. + pub fn force_allocation( + &mut self, + place: PlaceTy<'tcx>, + ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + let mplace = match place.place { + Place::Local { frame, local } => { + // FIXME: Consider not doing anything for a ZST, and just returning + // a fake pointer? + + // We need the layout of the local. We can NOT use the layout we got, + // that might e.g. be a downcast variant! 
+ let local_layout = self.layout_of_local(frame, local)?; + // Make sure it has a place + let rval = *self.stack[frame].locals[local].access()?; + let mplace = self.allocate_op(OpTy { op: rval, layout: local_layout })?.mplace; + // This might have allocated the flag + *self.stack[frame].locals[local].access_mut()? = + Operand::Indirect(mplace); + // done + mplace } - _ => Place::from_scalar_ptr(self.into_ptr(val)?, layout.align), - }) + Place::Ptr(mplace) => mplace + }; + // Return with the original layout, so that the caller can go on + Ok(MPlaceTy { mplace, layout: place.layout }) } - pub fn place_index( + pub fn allocate( &mut self, - base: Place, - outer_ty: Ty<'tcx>, - n: u64, - ) -> EvalResult<'tcx, Place> { - // Taking the outer type here may seem odd; it's needed because for array types, the outer type gives away the length. - let base = self.force_allocation(base)?; - let (base_ptr, align) = base.to_ptr_align(); - - let (elem_ty, len) = base.elem_ty_and_len(outer_ty, self.tcx.tcx); - let elem_size = self.layout_of(elem_ty)?.size; - assert!( - n < len, - "Tried to access element {} of array/slice with length {}", - n, - len - ); - let ptr = base_ptr.ptr_offset(elem_size * n, &*self)?; - Ok(Place::Ptr { - ptr, - align, - extra: PlaceExtra::None, - }) + layout: TyLayout<'tcx>, + kind: MemoryKind, + ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + assert!(!layout.is_unsized(), "cannot alloc memory for unsized type"); + let ptr = self.memory.allocate(layout.size, layout.align, kind)?; + Ok(MPlaceTy::from_aligned_ptr(ptr, layout)) } - pub(super) fn place_downcast( + /// Make a place for an operand, allocating if needed + pub fn allocate_op( &mut self, - base: Place, - variant: usize, - ) -> EvalResult<'tcx, Place> { - // FIXME(solson) - let base = self.force_allocation(base)?; - let (ptr, align) = base.to_ptr_align(); - let extra = PlaceExtra::DowncastVariant(variant); - Ok(Place::Ptr { ptr, align, extra }) + OpTy { op, layout }: OpTy<'tcx>, + ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + Ok(match op { + Operand::Indirect(mplace) => MPlaceTy { mplace, layout }, + Operand::Immediate(value) => { + // FIXME: Is stack always right here? + let ptr = self.allocate(layout, MemoryKind::Stack)?; + self.write_value_to_mplace(value, ptr)?; + ptr + }, + }) } - pub fn eval_place_projection( + pub fn write_discriminant_value( &mut self, - base: Place, - base_ty: Ty<'tcx>, - proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>, - ) -> EvalResult<'tcx, Place> { - use rustc::mir::ProjectionElem::*; - match *proj_elem { - Field(field, _) => { - let layout = self.layout_of(base_ty)?; - Ok(self.place_field(base, field, layout)?.0) - } - - Downcast(_, variant) => { - self.place_downcast(base, variant) - } - - Deref => { - let val = self.read_place(base)?; - - let pointee_type = match base_ty.sty { - ty::TyRawPtr(ref tam) => tam.ty, - ty::TyRef(_, ty, _) => ty, - ty::TyAdt(def, _) if def.is_box() => base_ty.boxed_ty(), - _ => bug!("can only deref pointer types"), - }; - - trace!("deref to {} on {:?}", pointee_type, val); - - self.val_to_place(val, pointee_type) + variant_index: usize, + dest: PlaceTy<'tcx>, + ) -> EvalResult<'tcx> { + match dest.layout.variants { + layout::Variants::Single { index } => { + if index != variant_index { + // If the layout of an enum is `Single`, all + // other variants are necessarily uninhabited. 
+ assert_eq!(dest.layout.for_variant(&self, variant_index).abi, + layout::Abi::Uninhabited); + } + } - - Index(local) => { - let value = self.frame().locals[local].access()?; - let ty = self.tcx.types.usize; - let n = self - .value_to_scalar(ValTy { value, ty })? - .to_bits(self.tcx.data_layout.pointer_size)?; - self.place_index(base, base_ty, n as u64) + layout::Variants::Tagged { ref tag, .. } => { + let discr_val = dest.layout.ty.ty_adt_def().unwrap() + .discriminant_for_variant(*self.tcx, variant_index) + .val; + + // raw discriminants for enums are isize or bigger during + // their computation, but the in-memory tag is the smallest possible + // representation + let size = tag.value.size(self.tcx.tcx); + let shift = 128 - size.bits(); + let discr_val = (discr_val << shift) >> shift; + + let discr_dest = self.place_field(dest, 0)?; + self.write_scalar(Scalar::Bits { + bits: discr_val, + size: size.bytes() as u8, + }, discr_dest)?; } - - ConstantIndex { - offset, - min_length, - from_end, + layout::Variants::NicheFilling { + dataful_variant, + ref niche_variants, + niche_start, + .. } => { - // FIXME(solson) - let base = self.force_allocation(base)?; - let (base_ptr, align) = base.to_ptr_align(); - - let (elem_ty, n) = base.elem_ty_and_len(base_ty, self.tcx.tcx); - let elem_size = self.layout_of(elem_ty)?.size; - assert!(n >= min_length as u64); - - let index = if from_end { - n - u64::from(offset) - } else { - u64::from(offset) - }; - - let ptr = base_ptr.ptr_offset(elem_size * index, &self)?; - Ok(Place::Ptr { ptr, align, extra: PlaceExtra::None }) + if variant_index != dataful_variant { + let niche_dest = + self.place_field(dest, 0)?; + let niche_value = ((variant_index - niche_variants.start()) as u128) + .wrapping_add(niche_start); + self.write_scalar(Scalar::Bits { + bits: niche_value, + size: niche_dest.layout.size.bytes() as u8, + }, niche_dest)?; + } } + } - Subslice { from, to } => { - // FIXME(solson) - let base = self.force_allocation(base)?; - let (base_ptr, align) = base.to_ptr_align(); - - let (elem_ty, n) = base.elem_ty_and_len(base_ty, self.tcx.tcx); - let elem_size = self.layout_of(elem_ty)?.size; - assert!(u64::from(from) <= n - u64::from(to)); - let ptr = base_ptr.ptr_offset(elem_size * u64::from(from), &self)?; - // sublicing arrays produces arrays - let extra = if self.type_is_sized(base_ty) { - PlaceExtra::None - } else { - PlaceExtra::Length(n - u64::from(to) - u64::from(from)) - }; - Ok(Place::Ptr { ptr, align, extra }) + Ok(()) + } + + /// Every place can be read from, so we can turn it into an operand + #[inline(always)] + pub fn place_to_op(&self, place: PlaceTy<'tcx>) -> EvalResult<'tcx, OpTy<'tcx>> { + let op = match place.place { + Place::Ptr(mplace) => { + Operand::Indirect(mplace) + } + Place::Local { frame, local } => + *self.stack[frame].locals[local].access()? + }; + Ok(OpTy { op, layout: place.layout }) + } - pub fn place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> { - self.monomorphize( - place.ty(self.mir(), *self.tcx).to_ty(*self.tcx), - self.substs(), - ) + /// Turn a place that is a dyn trait (i.e., PlaceExtra::Vtable and the appropriate layout) + /// or a slice into the specific fixed-size place and layout that is given by the vtable/len. + /// This "unpacks" the existential quantifier, so to speak.
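The niche encoding written above and decoded back in operand.rs round-trips as follows. A toy model with u128 tags and illustrative parameter names; the read side's Scalar::Ptr case is omitted here:

fn encode(variant: u128, variants_start: u128, niche_start: u128) -> u128 {
    // write side: (variant_index - niche_variants.start()) + niche_start
    variant.wrapping_sub(variants_start).wrapping_add(niche_start)
}

fn decode(raw: u128, variants_start: u128, variants_end: u128,
          niche_start: u128, dataful_variant: u128) -> u128 {
    let discr = raw.wrapping_sub(niche_start).wrapping_add(variants_start);
    if variants_start <= discr && discr <= variants_end {
        discr
    } else {
        // anything outside the niche range must be a valid value of the
        // dataful variant (think Some(&T) vs the all-zero None niche)
        dataful_variant
    }
}

fn main() {
    // Option<&T>-like layout: variant 0 (None) lives in the pointer's niche,
    // variant 1 (Some) is the dataful variant.
    assert_eq!(decode(encode(0, 0, 0), 0, 0, 0, 1), 0);
    assert_eq!(decode(0x1000, 0, 0, 0, 1), 1); // non-null bits mean Some
}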
+ pub fn unpack_unsized_mplace(&self, mplace: MPlaceTy<'tcx>) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + trace!("Unpacking {:?} ({:?})", *mplace, mplace.layout.ty); + let layout = match mplace.extra { + PlaceExtra::Vtable(vtable) => { + // the drop function signature + let drop_instance = self.read_drop_type_from_vtable(vtable)?; + trace!("Found drop fn: {:?}", drop_instance); + let fn_sig = drop_instance.ty(*self.tcx).fn_sig(*self.tcx); + let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig); + // the drop function takes *mut T where T is the type being dropped, so get that + let ty = fn_sig.inputs()[0].builtin_deref(true).unwrap().ty; + let layout = self.layout_of(ty)?; + // Sanity checks + let (size, align) = self.read_size_and_align_from_vtable(vtable)?; + assert_eq!(size, layout.size); + assert_eq!(align.abi(), layout.align.abi()); // only ABI alignment is preserved + // FIXME: More checks for the vtable? We could make sure it is exactly + // the one one would expect for this type. + // Done! + layout + }, + PlaceExtra::Length(len) => { + let ty = self.tcx.mk_array(mplace.layout.field(self, 0)?.ty, len); + self.layout_of(ty)? + } + PlaceExtra::None => bug!("Expected a fat pointer"), + }; + trace!("Unpacked type: {:?}", layout.ty); + Ok(MPlaceTy { + mplace: MemPlace { extra: PlaceExtra::None, ..*mplace }, + layout + }) } } diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index 57b56db14bb4b..f39a5ee3e4ef2 100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -3,10 +3,38 @@ //! The main entry point is the `step` method. use rustc::mir; +use rustc::ty::layout::LayoutOf; +use rustc::mir::interpret::{EvalResult, Scalar}; -use rustc::mir::interpret::EvalResult; use super::{EvalContext, Machine}; +/// Classify whether an operator is "left-homogeneous", i.e. the LHS has the +/// same type as the result. +#[inline] +fn binop_left_homogeneous(op: mir::BinOp) -> bool { + use rustc::mir::BinOp::*; + match op { + Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | + Offset | Shl | Shr => + true, + Eq | Ne | Lt | Le | Gt | Ge => + false, + } +} +/// Classify whether an operator is "right-homogeneous", i.e. the RHS has the +/// same type as the LHS. +#[inline] +fn binop_right_homogeneous(op: mir::BinOp) -> bool { + use rustc::mir::BinOp::*; + match op { + Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | + Eq | Ne | Lt | Le | Gt | Ge => + true, + Offset | Shl | Shr => + false, + } +} + impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { pub fn inc_step_counter_and_detect_loops(&mut self) -> EvalResult<'tcx, ()> { /// The number of steps between loop detector snapshots. 
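(Reviewer note, not part of the patch.) The `write_discriminant_value` rewrite in place.rs above packs two encodings: tagged enums truncate the raw discriminant down to the in-memory tag size, and niche-filling layouts offset the variant index into the niche's value range. A standalone sketch of that arithmetic follows; the helper names and example values are hypothetical, chosen only to mirror the two arms above:

```rust
// Hedged sketch of the discriminant encodings in `write_discriminant_value`;
// `truncate_to_tag` and `encode_niche` are made-up names for illustration.

/// Mirror of the Tagged arm: raw discriminants are computed in 128 bits,
/// so shifting left and back right drops everything above the tag size.
fn truncate_to_tag(discr_val: u128, tag_size_bits: u32) -> u128 {
    let shift = 128 - tag_size_bits;
    (discr_val << shift) >> shift
}

/// Mirror of the NicheFilling arm: map a non-dataful variant index into
/// the niche's value range, wrapping on overflow.
fn encode_niche(variant_index: u128, niche_variants_start: u128, niche_start: u128) -> u128 {
    (variant_index - niche_variants_start).wrapping_add(niche_start)
}

fn main() {
    // A discriminant of -1 stored in a one-byte tag keeps only 0xff.
    assert_eq!(truncate_to_tag(-1i128 as u128, 8), 0xff);
    // An `Option<&T>`-style layout: variant 0 (`None`) lands on the
    // all-zeroes niche when `niche_start` is 0.
    assert_eq!(encode_niche(0, 0, 0), 0);
}
```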
@@ -66,7 +94,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> { - trace!("{:?}", stmt); + debug!("{:?}", stmt); use rustc::mir::StatementKind::*; @@ -84,8 +112,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { variant_index, } => { let dest = self.eval_place(place)?; - let dest_ty = self.place_ty(place); - self.write_discriminant_value(dest_ty, dest, variant_index)?; + self.write_discriminant_value(variant_index, dest)?; } // Mark locals as alive @@ -96,7 +123,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // Mark locals as dead StorageDead(local) => { - let old_val = self.frame_mut().storage_dead(local); + let old_val = self.storage_dead(local); self.deallocate_local(old_val)?; } @@ -127,13 +154,172 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { Ok(()) } + /// Evaluate an assignment statement. + /// + /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue + /// type writes its results directly into the memory specified by the place. + fn eval_rvalue_into_place( + &mut self, + rvalue: &mir::Rvalue<'tcx>, + place: &mir::Place<'tcx>, + ) -> EvalResult<'tcx> { + let dest = self.eval_place(place)?; + + use rustc::mir::Rvalue::*; + match *rvalue { + Use(ref operand) => { + // Avoid recomputing the layout + let op = self.eval_operand(operand, Some(dest.layout))?; + self.copy_op(op, dest)?; + } + + BinaryOp(bin_op, ref left, ref right) => { + let layout = if binop_left_homogeneous(bin_op) { Some(dest.layout) } else { None }; + let left = self.eval_operand_and_read_value(left, layout)?; + let layout = if binop_right_homogeneous(bin_op) { Some(left.layout) } else { None }; + let right = self.eval_operand_and_read_value(right, layout)?; + self.binop_ignore_overflow( + bin_op, + left, + right, + dest, + )?; + } + + CheckedBinaryOp(bin_op, ref left, ref right) => { + // Due to the extra boolean in the result, we can never reuse the `dest.layout`. + let left = self.eval_operand_and_read_value(left, None)?; + let layout = if binop_right_homogeneous(bin_op) { Some(left.layout) } else { None }; + let right = self.eval_operand_and_read_value(right, layout)?; + self.binop_with_overflow( + bin_op, + left, + right, + dest, + )?; + } + + UnaryOp(un_op, ref operand) => { + // The operand always has the same type as the result. + let val = self.eval_operand_and_read_value(operand, Some(dest.layout))?; + let val = self.unary_op(un_op, val.to_scalar()?, dest.layout)?; + self.write_scalar(val, dest)?; + } + + Aggregate(ref kind, ref operands) => { + let (dest, active_field_index) = match **kind { + mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { + self.write_discriminant_value(variant_index, dest)?; + if adt_def.is_enum() { + (self.place_downcast(dest, variant_index)?, active_field_index) + } else { + (dest, active_field_index) + } + } + _ => (dest, None) + }; + + for (i, operand) in operands.iter().enumerate() { + let op = self.eval_operand(operand, None)?; + // Ignore zero-sized fields. 
+ if !op.layout.is_zst() { + let field_index = active_field_index.unwrap_or(i); + let field_dest = self.place_field(dest, field_index as u64)?; + self.copy_op(op, field_dest)?; + } + } + } + + Repeat(ref operand, _) => { + let op = self.eval_operand(operand, None)?; + let dest = self.force_allocation(dest)?; + let length = dest.len(); + + if length > 0 { + // write the first + let first = self.mplace_field(dest, 0)?; + self.copy_op(op, first.into())?; + + if length > 1 { + // copy the rest + let (dest, dest_align) = first.to_scalar_ptr_align(); + let rest = dest.ptr_offset(first.layout.size, &self)?; + self.memory.copy_repeatedly( + dest, dest_align, rest, dest_align, first.layout.size, length - 1, true + )?; + } + } + } + + Len(ref place) => { + // FIXME(CTFE): don't allow computing the length of arrays in const eval + let src = self.eval_place(place)?; + let mplace = self.force_allocation(src)?; + let len = mplace.len(); + let size = self.memory.pointer_size().bytes() as u8; + self.write_scalar( + Scalar::Bits { + bits: len as u128, + size, + }, + dest, + )?; + } + + Ref(_, _, ref place) => { + let src = self.eval_place(place)?; + let val = self.force_allocation(src)?.to_ref(&self); + self.write_value(val, dest)?; + } + + NullaryOp(mir::NullOp::Box, _) => { + M::box_alloc(self, dest)?; + } + + NullaryOp(mir::NullOp::SizeOf, ty) => { + let ty = self.monomorphize(ty, self.substs()); + let layout = self.layout_of(ty)?; + assert!(!layout.is_unsized(), + "SizeOf nullary MIR operator called for unsized type"); + let size = self.memory.pointer_size().bytes() as u8; + self.write_scalar( + Scalar::Bits { + bits: layout.size.bytes() as u128, + size, + }, + dest, + )?; + } + + Cast(kind, ref operand, cast_ty) => { + debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest.layout.ty); + let src = self.eval_operand(operand, None)?; + self.cast(src, kind, dest)?; + } + + Discriminant(ref place) => { + let place = self.eval_place(place)?; + let discr_val = self.read_discriminant_value(self.place_to_op(place)?)?; + let size = dest.layout.size.bytes() as u8; + self.write_scalar(Scalar::Bits { + bits: discr_val, + size, + }, dest)?; + } + } + + self.dump_place(*dest); + + Ok(()) + } + fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> { - trace!("{:?}", terminator.kind); + debug!("{:?}", terminator.kind); self.tcx.span = terminator.source_info.span; self.memory.tcx.span = terminator.source_info.span; self.eval_terminator(terminator)?; if !self.stack.is_empty() { - trace!("// {:?}", self.frame().block); + debug!("// {:?}", self.frame().block); } Ok(()) } diff --git a/src/librustc_mir/interpret/terminator/drop.rs b/src/librustc_mir/interpret/terminator/drop.rs index f86c0e89954d2..e044df2d1c0f9 100644 --- a/src/librustc_mir/interpret/terminator/drop.rs +++ b/src/librustc_mir/interpret/terminator/drop.rs @@ -1,77 +1,54 @@ use rustc::mir::BasicBlock; -use rustc::ty::{self, Ty}; +use rustc::ty::{self, layout::LayoutOf}; use syntax::source_map::Span; -use rustc::mir::interpret::{EvalResult, Value}; -use interpret::{Machine, ValTy, EvalContext, Place, PlaceExtra}; +use rustc::mir::interpret::EvalResult; +use interpret::{Machine, EvalContext, PlaceTy, PlaceExtra, OpTy, Operand}; impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { - pub(crate) fn drop_place( + pub(crate) fn drop_in_place( &mut self, - place: Place, + place: PlaceTy<'tcx>, instance: ty::Instance<'tcx>, - ty: Ty<'tcx>, span: Span, target: BasicBlock, ) -> EvalResult<'tcx> { - 
trace!("drop_place: {:#?}", place); + trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance); // We take the address of the object. This may well be unaligned, which is fine for us here. // However, unaligned accesses will probably make the actual drop implementation fail -- a problem shared // by rustc. - let val = match self.force_allocation(place)? { - Place::Ptr { - ptr, - align: _, - extra: PlaceExtra::Vtable(vtable), - } => ptr.to_value_with_vtable(vtable), - Place::Ptr { - ptr, - align: _, - extra: PlaceExtra::Length(len), - } => ptr.to_value_with_len(len, self.tcx.tcx), - Place::Ptr { - ptr, - align: _, - extra: PlaceExtra::None, - } => Value::Scalar(ptr), - _ => bug!("force_allocation broken"), - }; - self.drop(val, instance, ty, span, target) - } + let place = self.force_allocation(place)?; - fn drop( - &mut self, - arg: Value, - instance: ty::Instance<'tcx>, - ty: Ty<'tcx>, - span: Span, - target: BasicBlock, - ) -> EvalResult<'tcx> { - trace!("drop: {:#?}, {:?}, {:?}", arg, ty.sty, instance.def); - - let instance = match ty.sty { + let (instance, place) = match place.layout.ty.sty { ty::TyDynamic(..) => { - if let Value::ScalarPair(_, vtable) = arg { - self.read_drop_type_from_vtable(vtable.unwrap_or_err()?.to_ptr()?)? - } else { - bug!("expected fat ptr, got {:?}", arg); - } + // Dropping a trait object. + let vtable = match place.extra { + PlaceExtra::Vtable(vtable) => vtable, + _ => bug!("Expected vtable when dropping {:#?}", place), + }; + let place = self.unpack_unsized_mplace(place)?; + let instance = self.read_drop_type_from_vtable(vtable)?; + (instance, place) } - _ => instance, + _ => (instance, place), }; - // the drop function expects a reference to the value - let valty = ValTy { - value: arg, - ty: self.tcx.mk_mut_ptr(ty), + let fn_sig = instance.ty(*self.tcx).fn_sig(*self.tcx); + let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig); + + let arg = OpTy { + op: Operand::Immediate(place.to_ref(&self)), + layout: self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?, }; - let fn_sig = self.tcx.fn_sig(instance.def_id()).skip_binder().clone(); + // This should always be (), but getting it from the sig seems + // easier than creating a layout of (). + let dest = PlaceTy::null(&self, self.layout_of(fn_sig.output())?); self.eval_fn_call( instance, - Some((Place::undef(), target)), - &[valty], + Some((dest, target)), + &[arg], span, fn_sig, ) diff --git a/src/librustc_mir/interpret/terminator/mod.rs b/src/librustc_mir/interpret/terminator/mod.rs index 3a772559d6de4..82455cacac2d4 100644 --- a/src/librustc_mir/interpret/terminator/mod.rs +++ b/src/librustc_mir/interpret/terminator/mod.rs @@ -1,14 +1,13 @@ use rustc::mir; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{LayoutOf, Size}; +use rustc::ty::layout::LayoutOf; use syntax::source_map::Span; use rustc_target::spec::abi::Abi; -use rustc::mir::interpret::{EvalResult, Scalar, Value}; -use super::{EvalContext, Place, Machine, ValTy}; +use rustc::mir::interpret::{EvalResult, Scalar}; +use super::{EvalContext, Machine, Value, OpTy, PlaceTy, ValTy, Operand}; use rustc_data_structures::indexed_vec::Idx; -use interpret::memory::HasMemory; mod drop; @@ -25,7 +24,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { use rustc::mir::TerminatorKind::*; match terminator.kind { Return => { - self.dump_local(self.frame().return_place); + self.dump_place(self.frame().return_place); self.pop_stack_frame()? 
} @@ -37,22 +36,21 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { ref targets, .. } => { - let discr_val = self.eval_operand(discr)?; - let discr_prim = self.value_to_scalar(discr_val)?; - let discr_layout = self.layout_of(discr_val.ty).unwrap(); - trace!("SwitchInt({:?}, {:#?})", discr_prim, discr_layout); + let discr_val = self.eval_operand(discr, None)?; + let discr = self.read_value(discr_val)?; + trace!("SwitchInt({:?})", *discr); // Branch to the `otherwise` case by default, if no match is found. let mut target_block = targets[targets.len() - 1]; for (index, &const_int) in values.iter().enumerate() { // Compare using binary_op - let const_int = Scalar::Bits { bits: const_int, size: discr_layout.size.bytes() as u8 }; - let res = self.binary_op(mir::BinOp::Eq, - discr_prim, discr_val.ty, - const_int, discr_val.ty + let const_int = Scalar::Bits { bits: const_int, size: discr.layout.size.bytes() as u8 }; + let (res, _) = self.binary_op(mir::BinOp::Eq, + discr, + ValTy { value: Value::Scalar(const_int.into()), layout: discr.layout } )?; - if res.0.to_bits(Size::from_bytes(1))? != 0 { + if res.to_bool()? { target_block = targets[index]; break; } @@ -72,10 +70,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { None => None, }; - let func = self.eval_operand(func)?; - let (fn_def, sig) = match func.ty.sty { + let func = self.eval_operand(func, None)?; + let (fn_def, sig) = match func.layout.ty.sty { ty::TyFnPtr(sig) => { - let fn_ptr = self.value_to_scalar(func)?.to_ptr()?; + let fn_ptr = self.read_scalar(func)?.to_ptr()?; let instance = self.memory.get_fn(fn_ptr)?; let instance_ty = instance.ty(*self.tcx); match instance_ty.sty { @@ -99,14 +97,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } ty::TyFnDef(def_id, substs) => ( self.resolve(def_id, substs)?, - func.ty.fn_sig(*self.tcx), + func.layout.ty.fn_sig(*self.tcx), ), _ => { - let msg = format!("can't handle callee of type {:?}", func.ty); + let msg = format!("can't handle callee of type {:?}", func.layout.ty); return err!(Unimplemented(msg)); } }; - let args = self.operands_to_args(args)?; + let args = self.eval_operands(args)?; let sig = self.tcx.normalize_erasing_late_bound_regions( ty::ParamEnv::reveal_all(), &sig, @@ -114,7 +112,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { self.eval_fn_call( fn_def, destination, - &args, + &args[..], terminator.source_info.span, sig, )?; @@ -127,19 +125,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } => { // FIXME(CTFE): forbid drop in const eval let place = self.eval_place(location)?; - let ty = self.place_ty(location); - let ty = self.tcx.subst_and_normalize_erasing_regions( - self.substs(), - ty::ParamEnv::reveal_all(), - &ty, - ); + let ty = place.layout.ty; trace!("TerminatorKind::drop: {:?}, type {}", location, ty); let instance = ::monomorphize::resolve_drop_in_place(*self.tcx, ty); - self.drop_place( + self.drop_in_place( place, instance, - ty, terminator.source_info.span, target, )?; @@ -152,18 +144,18 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { target, .. 
} => { - let cond_val = self.eval_operand_to_scalar(cond)?.to_bool()?; + let cond_val = self.eval_operand_and_read_value(cond, None)?.to_scalar()?.to_bool()?; if expected == cond_val { self.goto_block(target); } else { use rustc::mir::interpret::EvalErrorKind::*; return match *msg { BoundsCheck { ref len, ref index } => { - let len = self.eval_operand_to_scalar(len) - .expect("can't eval len") + let len = self.eval_operand_and_read_value(len, None) + .expect("can't eval len").to_scalar()? .to_bits(self.memory().pointer_size())? as u64; - let index = self.eval_operand_to_scalar(index) - .expect("can't eval index") + let index = self.eval_operand_and_read_value(index, None) + .expect("can't eval index").to_scalar()? .to_bits(self.memory().pointer_size())? as u64; err!(BoundsCheck { len, index }) } @@ -259,36 +251,37 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { fn eval_fn_call( &mut self, instance: ty::Instance<'tcx>, - destination: Option<(Place, mir::BasicBlock)>, - args: &[ValTy<'tcx>], + destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>, + args: &[OpTy<'tcx>], span: Span, sig: ty::FnSig<'tcx>, ) -> EvalResult<'tcx> { trace!("eval_fn_call: {:#?}", instance); + if let Some((place, _)) = destination { + assert_eq!(place.layout.ty, sig.output()); + } match instance.def { ty::InstanceDef::Intrinsic(..) => { let (ret, target) = match destination { Some(dest) => dest, _ => return err!(Unreachable), }; - let ty = sig.output(); - let layout = self.layout_of(ty)?; - M::call_intrinsic(self, instance, args, ret, layout, target)?; - self.dump_local(ret); + M::call_intrinsic(self, instance, args, ret, target)?; + self.dump_place(*ret); Ok(()) } // FIXME: figure out why we can't just go through the shim ty::InstanceDef::ClosureOnceShim { .. } => { - if M::eval_fn_call(self, instance, destination, args, span, sig)? { + if M::eval_fn_call(self, instance, destination, args, span)? { return Ok(()); } let mut arg_locals = self.frame().mir.args_iter(); match sig.abi { // closure as closure once Abi::RustCall => { - for (arg_local, &valty) in arg_locals.zip(args) { + for (arg_local, &op) in arg_locals.zip(args) { let dest = self.eval_place(&mir::Place::Local(arg_local))?; - self.write_value(valty, dest)?; + self.copy_op(op, dest)?; } } // non capture closure as fn ptr @@ -296,17 +289,17 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // and need to pack arguments Abi::Rust => { trace!( - "arg_locals: {:#?}", - self.frame().mir.args_iter().collect::<Vec<_>>() + "args: {:#?}", + self.frame().mir.args_iter().zip(args.iter()) + .map(|(local, arg)| (local, **arg, arg.layout.ty)).collect::<Vec<_>>() ); - trace!("args: {:#?}", args); let local = arg_locals.nth(1).unwrap(); - for (i, &valty) in args.into_iter().enumerate() { + for (i, &op) in args.into_iter().enumerate() { let dest = self.eval_place(&mir::Place::Local(local).field( mir::Field::new(i), - valty.ty, + op.layout.ty, ))?; - self.write_value(valty, dest)?; + self.copy_op(op, dest)?; } } _ => bug!("bad ABI for ClosureOnceShim: {:?}", sig.abi), @@ -318,7 +311,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { ty::InstanceDef::CloneShim(..) | ty::InstanceDef::Item(_) => { // Push the stack frame, and potentially be entirely done if the call got hooked - if M::eval_fn_call(self, instance, destination, args, span, sig)? { + if M::eval_fn_call(self, instance, destination, args, span)?
{ + // TODO: Can we make it return the frame to push, instead + // of the hook doing half of the work and us doing the argument + // initialization? return Ok(()); } @@ -326,10 +322,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let mut arg_locals = self.frame().mir.args_iter(); trace!("ABI: {:?}", sig.abi); trace!( - "arg_locals: {:#?}", - self.frame().mir.args_iter().collect::<Vec<_>>() + "args: {:#?}", + self.frame().mir.args_iter().zip(args.iter()) + .map(|(local, arg)| (local, **arg, arg.layout.ty)).collect::<Vec<_>>() ); - trace!("args: {:#?}", args); match sig.abi { Abi::RustCall => { assert_eq!(args.len(), 2); @@ -338,26 +334,21 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // write first argument let first_local = arg_locals.next().unwrap(); let dest = self.eval_place(&mir::Place::Local(first_local))?; - self.write_value(args[0], dest)?; + self.copy_op(args[0], dest)?; } // unpack and write all other args - let layout = self.layout_of(args[1].ty)?; - if let ty::TyTuple(_) = args[1].ty.sty { + let layout = args[1].layout; + if let ty::TyTuple(_) = layout.ty.sty { if layout.is_zst() { // Nothing to do, no need to unpack zsts return Ok(()); } if self.frame().mir.args_iter().count() == layout.fields.count() + 1 { for (i, arg_local) in arg_locals.enumerate() { - let field = mir::Field::new(i); - let (value, layout) = self.read_field(args[1].value, None, field, layout)?; + let arg = self.operand_field(args[1], i as u64)?; let dest = self.eval_place(&mir::Place::Local(arg_local))?; - let valty = ValTy { - value, - ty: layout.ty, - }; - self.write_value(valty, dest)?; + self.copy_op(arg, dest)?; } } else { trace!("manual impl of rust-call ABI"); @@ -365,20 +356,19 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let dest = self.eval_place( &mir::Place::Local(arg_locals.next().unwrap()), )?; - self.write_value(args[1], dest)?; + self.copy_op(args[1], dest)?; } } else { bug!( - "rust-call ABI tuple argument was {:#?}, {:#?}", - args[1].ty, + "rust-call ABI tuple argument was {:#?}", layout ); } } _ => { - for (arg_local, &valty) in arg_locals.zip(args) { + for (arg_local, &op) in arg_locals.zip(args) { let dest = self.eval_place(&mir::Place::Local(arg_local))?; - self.write_value(valty, dest)?; + self.copy_op(op, dest)?; } } } @@ -388,16 +378,22 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { ty::InstanceDef::Virtual(_, idx) => { let ptr_size = self.memory.pointer_size(); let ptr_align = self.tcx.data_layout.pointer_align; - let (ptr, vtable) = self.into_ptr_vtable_pair(args[0].value)?; + let (ptr, vtable) = self.read_value(args[0])?.to_scalar_dyn_trait()?; let fn_ptr = self.memory.read_ptr_sized( vtable.offset(ptr_size * (idx as u64 + 3), &self)?, ptr_align - )?.unwrap_or_err()?.to_ptr()?; + )?.to_ptr()?; let instance = self.memory.get_fn(fn_ptr)?; + + // We have to patch the self argument, in particular get the layout + // expected by the actual function. Cannot just use "field 0" due to + // Box.
let mut args = args.to_vec(); - let ty = self.layout_of(args[0].ty)?.field(&self, 0)?.ty; - args[0].ty = ty; - args[0].value = Value::Scalar(ptr); + let pointee = args[0].layout.ty.builtin_deref(true).unwrap().ty; + let fake_fat_ptr_ty = self.tcx.mk_mut_ptr(pointee); + args[0].layout = self.layout_of(fake_fat_ptr_ty)?.field(&self, 0)?; + args[0].op = Operand::Immediate(Value::Scalar(ptr.into())); // strip vtable + trace!("Patched self operand to {:#?}", args[0]); // recurse with concrete function self.eval_fn_call(instance, destination, &args, span, sig) } diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs index 84583680988b7..18718cc3dcd6e 100644 --- a/src/librustc_mir/interpret/traits.rs +++ b/src/librustc_mir/interpret/traits.rs @@ -36,15 +36,15 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty); let drop = self.memory.create_fn_alloc(drop); - self.memory.write_ptr_sized_unsigned(vtable, ptr_align, Scalar::Ptr(drop).into())?; + self.memory.write_ptr_sized(vtable, ptr_align, Scalar::Ptr(drop).into())?; let size_ptr = vtable.offset(ptr_size, &self)?; - self.memory.write_ptr_sized_unsigned(size_ptr, ptr_align, Scalar::Bits { + self.memory.write_ptr_sized(size_ptr, ptr_align, Scalar::Bits { bits: size as u128, size: ptr_size.bytes() as u8, }.into())?; let align_ptr = vtable.offset(ptr_size * 2, &self)?; - self.memory.write_ptr_sized_unsigned(align_ptr, ptr_align, Scalar::Bits { + self.memory.write_ptr_sized(align_ptr, ptr_align, Scalar::Bits { bits: align as u128, size: ptr_size.bytes() as u8, }.into())?; @@ -54,7 +54,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let instance = self.resolve(def_id, substs)?; let fn_ptr = self.memory.create_fn_alloc(instance); let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?; - self.memory.write_ptr_sized_unsigned(method_ptr, ptr_align, Scalar::Ptr(fn_ptr).into())?; + self.memory.write_ptr_sized(method_ptr, ptr_align, Scalar::Ptr(fn_ptr).into())?; } } @@ -72,7 +72,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { ) -> EvalResult<'tcx, ty::Instance<'tcx>> { // we don't care about the pointee type, we just want a pointer let pointer_align = self.tcx.data_layout.pointer_align; - let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.unwrap_or_err()?.to_ptr()?; + let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.to_ptr()?; self.memory.get_fn(drop_fn) } @@ -82,11 +82,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { ) -> EvalResult<'tcx, (Size, Align)> { let pointer_size = self.memory.pointer_size(); let pointer_align = self.tcx.data_layout.pointer_align; - let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.unwrap_or_err()?.to_bits(pointer_size)? as u64; + let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64; let align = self.memory.read_ptr_sized( vtable.offset(pointer_size * 2, self)?, pointer_align - )?.unwrap_or_err()?.to_bits(pointer_size)? as u64; + )?.to_bits(pointer_size)? 
as u64; Ok((Size::from_bytes(size), Align::from_bytes(align, align).unwrap())) } } diff --git a/src/librustc_mir/interpret/validity.rs b/src/librustc_mir/interpret/validity.rs new file mode 100644 index 0000000000000..8f0e819660541 --- /dev/null +++ b/src/librustc_mir/interpret/validity.rs @@ -0,0 +1,348 @@ +use std::fmt::Write; + +use syntax_pos::symbol::Symbol; +use rustc::ty::layout::{self, Size, Primitive}; +use rustc::ty::{self, Ty}; +use rustc_data_structures::fx::FxHashSet; +use rustc::mir::interpret::{ + Scalar, AllocType, EvalResult, ScalarMaybeUndef, EvalErrorKind +}; + +use super::{ + MPlaceTy, Machine, EvalContext +}; + +macro_rules! validation_failure{ + ($what:expr, $where:expr, $details:expr) => {{ + let where_ = path_format($where); + let where_ = if where_.is_empty() { + String::new() + } else { + format!(" at {}", where_) + }; + err!(ValidationFailure(format!( + "encountered {}{}, but expected {}", + $what, where_, $details, + ))) + }}; + ($what:expr, $where:expr) => {{ + let where_ = path_format($where); + let where_ = if where_.is_empty() { + String::new() + } else { + format!(" at {}", where_) + }; + err!(ValidationFailure(format!( + "encountered {}{}", + $what, where_, + ))) + }}; +} + +/// We want to show a nice path to the invalid field for diagnostics, +/// but avoid string operations in the happy case where no error happens. +/// So we track a `Vec<PathElem>` where `PathElem` contains all the data we +/// need to later print something for the user. +#[derive(Copy, Clone, Debug)] +pub enum PathElem { + Field(Symbol), + ClosureVar(Symbol), + ArrayElem(usize), + TupleElem(usize), + Deref, + Tag, +} + +// Adding a Deref and making a copy of the path to be put into the queue +// always go together. This one does it with only one new allocation. +fn path_clone_and_deref(path: &Vec<PathElem>) -> Vec<PathElem> { + let mut new_path = Vec::with_capacity(path.len()+1); + new_path.clone_from(path); + new_path.push(PathElem::Deref); + new_path +} + +/// Format a path +fn path_format(path: &Vec<PathElem>) -> String { + use self::PathElem::*; + + let mut out = String::new(); + for elem in path.iter() { + match elem { + Field(name) => write!(out, ".{}", name).unwrap(), + ClosureVar(name) => write!(out, ".<closure-var({})>", name).unwrap(), + TupleElem(idx) => write!(out, ".{}", idx).unwrap(), + ArrayElem(idx) => write!(out, "[{}]", idx).unwrap(), + Deref => + // This does not match Rust syntax, but it is more readable for long paths -- and + // some of the other items here also are not Rust syntax. Actually we can't + // even use the usual syntax because we are just showing the projections, + // not the root.
+ write!(out, ".").unwrap(), + Tag => write!(out, ".").unwrap(), + } + } + out +} + +impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { + fn validate_scalar( + &self, + value: ScalarMaybeUndef, + size: Size, + scalar: &layout::Scalar, + path: &Vec, + ty: Ty, + ) -> EvalResult<'tcx> { + trace!("validate scalar: {:#?}, {:#?}, {:#?}, {}", value, size, scalar, ty); + let (lo, hi) = scalar.valid_range.clone().into_inner(); + + let value = match value { + ScalarMaybeUndef::Scalar(scalar) => scalar, + ScalarMaybeUndef::Undef => return validation_failure!("undefined bytes", path), + }; + + let bits = match value { + Scalar::Bits { bits, size: value_size } => { + assert_eq!(value_size as u64, size.bytes()); + bits + }, + Scalar::Ptr(_) => { + let ptr_size = self.memory.pointer_size(); + let ptr_max = u128::max_value() >> (128 - ptr_size.bits()); + return if lo > hi { + if lo - hi == 1 { + // no gap, all values are ok + Ok(()) + } else if hi < ptr_max || lo > 1 { + let max = u128::max_value() >> (128 - size.bits()); + validation_failure!( + "pointer", + path, + format!("something in the range {:?} or {:?}", 0..=lo, hi..=max) + ) + } else { + Ok(()) + } + } else if hi < ptr_max || lo > 1 { + validation_failure!( + "pointer", + path, + format!("something in the range {:?}", scalar.valid_range) + ) + } else { + Ok(()) + }; + }, + }; + + // char gets a special treatment, because its number space is not contiguous so `TyLayout` + // has no special checks for chars + match ty.sty { + ty::TyChar => { + debug_assert_eq!(size.bytes(), 4); + if ::std::char::from_u32(bits as u32).is_none() { + return validation_failure!( + "character", + path, + "a valid unicode codepoint" + ); + } + } + _ => {}, + } + + use std::ops::RangeInclusive; + let in_range = |bound: RangeInclusive| bound.contains(&bits); + if lo > hi { + if in_range(0..=hi) || in_range(lo..=u128::max_value()) { + Ok(()) + } else { + validation_failure!( + bits, + path, + format!("something in the range {:?} or {:?}", ..=hi, lo..) + ) + } + } else { + if in_range(scalar.valid_range.clone()) { + Ok(()) + } else { + validation_failure!( + bits, + path, + format!("something in the range {:?}", scalar.valid_range) + ) + } + } + } + + /// This function checks the memory where `dest` points to. The place must be sized + /// (i.e., dest.extra == PlaceExtra::None). + /// It will error if the bits at the destination do not match the ones described by the layout. + /// The `path` may be pushed to, but the part that is present when the function + /// starts must not be changed! + pub fn validate_mplace( + &self, + dest: MPlaceTy<'tcx>, + path: &mut Vec, + seen: &mut FxHashSet<(MPlaceTy<'tcx>)>, + todo: &mut Vec<(MPlaceTy<'tcx>, Vec)>, + ) -> EvalResult<'tcx> { + self.memory.dump_alloc(dest.to_ptr()?.alloc_id); + trace!("validate_mplace: {:?}, {:#?}", *dest, dest.layout); + + // Find the right variant. We have to handle this as a prelude, not via + // proper recursion with the new inner layout, to be able to later nicely + // print the field names of the enum field that is being accessed. + let (variant, dest) = match dest.layout.variants { + layout::Variants::NicheFilling { niche: ref tag, .. } | + layout::Variants::Tagged { ref tag, .. 
} => { + let size = tag.value.size(self); + // we first read the tag value as scalar, to be able to validate it + let tag_mplace = self.mplace_field(dest, 0)?; + let tag_value = self.read_scalar(tag_mplace.into())?; + path.push(PathElem::Tag); + self.validate_scalar( + tag_value, size, tag, &path, tag_mplace.layout.ty + )?; + path.pop(); // remove the element again + // then we read it again to get the index, to continue + let variant = self.read_discriminant_as_variant_index(dest.into())?; + let inner_dest = self.mplace_downcast(dest, variant)?; + // Put the variant projection onto the path, as a field + path.push(PathElem::Field(dest.layout.ty.ty_adt_def().unwrap().variants[variant].name)); + trace!("variant layout: {:#?}", dest.layout); + (variant, inner_dest) + }, + layout::Variants::Single { index } => { + (index, dest) + } + }; + + // Remember the length, in case we need to truncate + let path_len = path.len(); + + // Validate all fields + match dest.layout.fields { + // primitives are unions with zero fields + // We still check `layout.fields`, not `layout.abi`, because `layout.abi` + // is `Scalar` for newtypes around scalars, but we want to descend through the + // fields to get a proper `path`. + layout::FieldPlacement::Union(0) => { + match dest.layout.abi { + // nothing to do, whatever the pointer points to, it is never going to be read + layout::Abi::Uninhabited => + return validation_failure!("a value of an uninhabited type", path), + // check that the scalar is a valid pointer or that its bit range matches the + // expectation. + layout::Abi::Scalar(ref scalar_layout) => { + let size = scalar_layout.value.size(self); + let value = self.read_value(dest.into())?; + let scalar = value.to_scalar_or_undef(); + self.validate_scalar(scalar, size, scalar_layout, &path, dest.layout.ty)?; + if scalar_layout.value == Primitive::Pointer { + // ignore integer pointers, we can't reason about the final hardware + if let Scalar::Ptr(ptr) = scalar.not_undef()? { + let alloc_kind = self.tcx.alloc_map.lock().get(ptr.alloc_id); + if let Some(AllocType::Static(did)) = alloc_kind { + // statics from other crates are already checked. + // extern statics should not be validated as they have no body. + if !did.is_local() || self.tcx.is_foreign_item(did) { + return Ok(()); + } + } + if value.layout.ty.builtin_deref(false).is_some() { + trace!("Recursing below ptr {:#?}", value); + let ptr_place = self.ref_to_mplace(value)?; + // we have not encountered this pointer+layout combination before + if seen.insert(ptr_place) { + todo.push((ptr_place, path_clone_and_deref(path))); + } + } + } + } + }, + _ => bug!("bad abi for FieldPlacement::Union(0): {:#?}", dest.layout.abi), + } + } + layout::FieldPlacement::Union(_) => { + // We can't check unions, their bits are allowed to be anything. + // The fields don't need to correspond to any bit pattern of the union's fields. + // See https://github.com/rust-lang/rust/issues/32836#issuecomment-406875389 + }, + layout::FieldPlacement::Array { .. } => { + for (i, field) in self.mplace_array_fields(dest)?.enumerate() { + let field = field?; + path.push(PathElem::ArrayElem(i)); + self.validate_mplace(field, path, seen, todo)?; + path.truncate(path_len); + } + }, + layout::FieldPlacement::Arbitrary { ref offsets, .. } => { + // Fat pointers need special treatment. + if dest.layout.ty.builtin_deref(true).is_some() { + // This is a fat pointer. + let ptr = match self.ref_to_mplace(self.read_value(dest.into())?) 
{ + Ok(ptr) => ptr, + Err(err) => match err.kind { + EvalErrorKind::ReadPointerAsBytes => + return validation_failure!( + "fat pointer length is not a valid integer", path + ), + EvalErrorKind::ReadBytesAsPointer => + return validation_failure!( + "fat pointer vtable is not a valid pointer", path + ), + _ => return Err(err), + } + }; + let unpacked_ptr = self.unpack_unsized_mplace(ptr)?; + // for safe ptrs, recursively check it + if !dest.layout.ty.is_unsafe_ptr() { + trace!("Recursing below fat ptr {:?} (unpacked: {:?})", ptr, unpacked_ptr); + if seen.insert(unpacked_ptr) { + todo.push((unpacked_ptr, path_clone_and_deref(path))); + } + } + } else { + // Not a pointer, perform regular aggregate handling below + for i in 0..offsets.len() { + let field = self.mplace_field(dest, i as u64)?; + path.push(self.aggregate_field_path_elem(dest.layout.ty, variant, i)); + self.validate_mplace(field, path, seen, todo)?; + path.truncate(path_len); + } + // FIXME: For a TyStr, check that this is valid UTF-8. + } + } + } + Ok(()) + } + + fn aggregate_field_path_elem(&self, ty: Ty<'tcx>, variant: usize, field: usize) -> PathElem { + match ty.sty { + // generators and closures. + ty::TyClosure(def_id, _) | ty::TyGenerator(def_id, _, _) => { + let node_id = self.tcx.hir.as_local_node_id(def_id).unwrap(); + let freevar = self.tcx.with_freevars(node_id, |fv| fv[field]); + PathElem::ClosureVar(self.tcx.hir.name(freevar.var_id())) + } + + // tuples + ty::TyTuple(_) => PathElem::TupleElem(field), + + // enums + ty::TyAdt(def, ..) if def.is_enum() => { + let variant = &def.variants[variant]; + PathElem::Field(variant.fields[field].ident.name) + } + + // other ADTs + ty::TyAdt(def, _) => PathElem::Field(def.non_enum_variant().fields[field].ident.name), + + // nothing else has an aggregate layout + _ => bug!("aggregate_field_path_elem: got non-aggregate type {:?}", ty), + } + } +} diff --git a/src/librustc_mir/lib.rs b/src/librustc_mir/lib.rs index 05494131f32e4..35b8f63c664fb 100644 --- a/src/librustc_mir/lib.rs +++ b/src/librustc_mir/lib.rs @@ -37,6 +37,7 @@ Rust MIR: a lowered representation of Rust. Also: an experiment! 
#![feature(step_trait)] #![feature(slice_concat_ext)] #![feature(if_while_or_patterns)] +#![feature(try_from)] #![recursion_limit="256"] @@ -82,7 +83,7 @@ pub fn provide(providers: &mut Providers) { shim::provide(providers); transform::provide(providers); providers.const_eval = interpret::const_eval_provider; - providers.const_value_to_allocation = interpret::const_value_to_allocation_provider; + providers.const_to_allocation = interpret::const_to_allocation_provider; providers.check_match = hair::pattern::check_match; } diff --git a/src/librustc_mir/transform/const_prop.rs b/src/librustc_mir/transform/const_prop.rs index 47c45adb85f5a..3f77e69b7dcbe 100644 --- a/src/librustc_mir/transform/const_prop.rs +++ b/src/librustc_mir/transform/const_prop.rs @@ -17,16 +17,16 @@ use rustc::mir::{Constant, Location, Place, Mir, Operand, Rvalue, Local}; use rustc::mir::{NullOp, StatementKind, Statement, BasicBlock, LocalKind}; use rustc::mir::{TerminatorKind, ClearCrossCrate, SourceInfo, BinOp, ProjectionElem}; use rustc::mir::visit::{Visitor, PlaceContext}; -use rustc::mir::interpret::{ConstEvalErr, EvalErrorKind, ScalarMaybeUndef}; +use rustc::mir::interpret::{ + ConstEvalErr, EvalErrorKind, ScalarMaybeUndef, Scalar, GlobalId, EvalResult +}; use rustc::ty::{TyCtxt, self, Instance}; -use rustc::mir::interpret::{Value, Scalar, GlobalId, EvalResult}; -use interpret::EvalContext; -use interpret::CompileTimeEvaluator; -use interpret::{eval_promoted, mk_borrowck_eval_cx, ValTy}; +use interpret::{EvalContext, CompileTimeEvaluator, eval_promoted, mk_borrowck_eval_cx}; +use interpret::{Value, OpTy, MemoryKind}; use transform::{MirPass, MirSource}; use syntax::source_map::{Span, DUMMY_SP}; use rustc::ty::subst::Substs; -use rustc_data_structures::indexed_vec::IndexVec; +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use rustc::ty::ParamEnv; use rustc::ty::layout::{ LayoutOf, TyLayout, LayoutError, @@ -65,7 +65,7 @@ impl MirPass for ConstProp { } } -type Const<'tcx> = (Value, TyLayout<'tcx>, Span); +type Const<'tcx> = (OpTy<'tcx>, Span); /// Finds optimization opportunities on the MIR. struct ConstPropagator<'b, 'a, 'tcx:'a+'b> { @@ -257,10 +257,10 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { source_info: SourceInfo, ) -> Option> { self.ecx.tcx.span = source_info.span; - match self.ecx.const_to_value(c.literal.val) { - Ok(val) => { + match self.ecx.const_value_to_op(c.literal.val) { + Ok(op) => { let layout = self.tcx.layout_of(self.param_env.and(c.literal.ty)).ok()?; - Some((val, layout, c.span)) + Some((OpTy { op, layout }, c.span)) }, Err(error) => { let (stacktrace, span) = self.ecx.generate_stacktrace(None); @@ -284,12 +284,15 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { Place::Projection(ref proj) => match proj.elem { ProjectionElem::Field(field, _) => { trace!("field proj on {:?}", proj.base); - let (base, layout, span) = self.eval_place(&proj.base, source_info)?; - let valty = self.use_ecx(source_info, |this| { - this.ecx.read_field(base, None, field, layout) + let (base, span) = self.eval_place(&proj.base, source_info)?; + let res = self.use_ecx(source_info, |this| { + this.ecx.operand_field(base, field.index() as u64) })?; - Some((valty.0, valty.1, span)) + Some((res, span)) }, + // We could get more projections by using e.g. `operand_projection`, + // but we do not even have the stack frame set up properly so + // an `Index` projection would throw us off-track. 
_ => None, }, Place::Promoted(ref promoted) => { @@ -306,12 +309,11 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { }; // cannot use `const_eval` here, because that would require having the MIR // for the current function available, but we're producing said MIR right now - let (value, _, ty) = self.use_ecx(source_info, |this| { + let res = self.use_ecx(source_info, |this| { eval_promoted(&mut this.ecx, cid, this.mir, this.param_env) })?; - let val = (value, ty, source_info.span); - trace!("evaluated promoted {:?} to {:?}", promoted, val); - Some(val) + trace!("evaluated promoted {:?} to {:?}", promoted, res); + Some((res, source_info.span)) }, _ => None, } @@ -343,17 +345,11 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { Rvalue::Discriminant(..) => None, Rvalue::Cast(kind, ref operand, _) => { - let (value, layout, span) = self.eval_operand(operand, source_info)?; + let (op, span) = self.eval_operand(operand, source_info)?; self.use_ecx(source_info, |this| { - let dest_ptr = this.ecx.alloc_ptr(place_layout)?; - let place_align = place_layout.align; - let dest = ::interpret::Place::from_ptr(dest_ptr, place_align); - this.ecx.cast(ValTy { value, ty: layout.ty }, kind, place_layout.ty, dest)?; - Ok(( - Value::ByRef(dest_ptr.into(), place_align), - place_layout, - span, - )) + let dest = this.ecx.allocate(place_layout, MemoryKind::Stack)?; + this.ecx.cast(op, kind, dest.into())?; + Ok((dest.into(), span)) }) } @@ -361,11 +357,13 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { Rvalue::Len(_) => None, Rvalue::NullaryOp(NullOp::SizeOf, ty) => { type_size_of(self.tcx, self.param_env, ty).and_then(|n| Some(( - Value::Scalar(Scalar::Bits { - bits: n as u128, - size: self.tcx.data_layout.pointer_size.bytes() as u8, - }.into()), - self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).ok()?, + OpTy::from_scalar_value( + Scalar::Bits { + bits: n as u128, + size: self.tcx.data_layout.pointer_size.bytes() as u8, + }, + self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).ok()?, + ), span, ))) } @@ -381,12 +379,12 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { return None; } - let val = self.eval_operand(arg, source_info)?; - let prim = self.use_ecx(source_info, |this| { - this.ecx.value_to_scalar(ValTy { value: val.0, ty: val.1.ty }) + let (arg, _) = self.eval_operand(arg, source_info)?; + let val = self.use_ecx(source_info, |this| { + let prim = this.ecx.read_scalar(arg)?.not_undef()?; + this.ecx.unary_op(op, prim, arg.layout) })?; - let val = self.use_ecx(source_info, |this| this.ecx.unary_op(op, prim, val.1))?; - Some((Value::Scalar(val.into()), place_layout, span)) + Some((OpTy::from_scalar_value(val, place_layout), span)) } Rvalue::CheckedBinaryOp(op, ref left, ref right) | Rvalue::BinaryOp(op, ref left, ref right) => { @@ -404,7 +402,7 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { } let r = self.use_ecx(source_info, |this| { - this.ecx.value_to_scalar(ValTy { value: right.0, ty: right.1.ty }) + this.ecx.read_value(right.0) })?; if op == BinOp::Shr || op == BinOp::Shl { let left_ty = left.ty(self.mir, self.tcx); @@ -414,8 +412,9 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { .unwrap() .size .bits(); - let right_size = right.1.size; - if r.to_bits(right_size).ok().map_or(false, |b| b >= left_bits as u128) { + let right_size = right.0.layout.size; + let r_bits = r.to_scalar().and_then(|r| r.to_bits(right_size)); + if r_bits.ok().map_or(false, |b| b >= left_bits as u128) { let source_scope_local_data = match 
self.mir.source_scope_local_data { ClearCrossCrate::Set(ref data) => data, ClearCrossCrate::Clear => return None, @@ -436,11 +435,11 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { } let left = self.eval_operand(left, source_info)?; let l = self.use_ecx(source_info, |this| { - this.ecx.value_to_scalar(ValTy { value: left.0, ty: left.1.ty }) + this.ecx.read_value(left.0) })?; trace!("const evaluating {:?} for {:?} and {:?}", op, left, right); let (val, overflow) = self.use_ecx(source_info, |this| { - this.ecx.binary_op(op, l, left.1.ty, r, right.1.ty) + this.ecx.binary_op(op, l, r) })?; let val = if let Rvalue::CheckedBinaryOp(..) = *rvalue { Value::ScalarPair( @@ -455,7 +454,11 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { } Value::Scalar(val.into()) }; - Some((val, place_layout, span)) + let res = OpTy { + op: ::interpret::Operand::Immediate(val), + layout: place_layout, + }; + Some((res, span)) }, } } @@ -571,7 +574,8 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> { if let TerminatorKind::Assert { expected, msg, cond, .. } = kind { if let Some(value) = self.eval_operand(cond, source_info) { trace!("assertion on {:?} should be {:?}", value, expected); - if Value::Scalar(Scalar::from_bool(*expected).into()) != value.0 { + let expected = Value::Scalar(Scalar::from_bool(*expected).into()); + if expected != value.0.to_immediate() { // poison all places this operand references so that further code // doesn't use the invalid value match cond { @@ -607,7 +611,7 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> { let len = self .eval_operand(len, source_info) .expect("len must be const"); - let len = match len.0 { + let len = match len.0.to_immediate() { Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits, .. })) => bits, @@ -616,7 +620,7 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> { let index = self .eval_operand(index, source_info) .expect("index must be const"); - let index = match index.0 { + let index = match index.0.to_immediate() { Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits, .. 
})) => bits, diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index c9f717e6059fb..010ca1f7ab475 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -1375,7 +1375,7 @@ fn maybe_check_static_with_link_section(tcx: TyCtxt, id: DefId, span: Span) { }; let param_env = ty::ParamEnv::reveal_all(); if let Ok(static_) = tcx.const_eval(param_env.and(cid)) { - let alloc = tcx.const_value_to_allocation(static_); + let alloc = tcx.const_to_allocation(static_); if alloc.relocations.len() != 0 { let msg = "statics with a custom `#[link_section]` must be a \ simple list of bytes on the wasm target with no \ diff --git a/src/test/ui/consts/const-eval/const_raw_ptr_ops.stderr b/src/test/ui/consts/const-eval/const_raw_ptr_ops.stderr index a9442be081d94..df1e6f8e4c4f0 100644 --- a/src/test/ui/consts/const-eval/const_raw_ptr_ops.stderr +++ b/src/test/ui/consts/const-eval/const_raw_ptr_ops.stderr @@ -22,7 +22,7 @@ error: this constant cannot be used LL | const Z2: i32 = unsafe { *(42 as *const i32) }; //~ ERROR cannot be used | ^^^^^^^^^^^^^^^^^^^^^^^^^-------------------^^^ | | - | tried to access memory with alignment 2, but alignment 4 is required + | a memory access tried to interpret some bytes as a pointer error: this constant cannot be used --> $DIR/const_raw_ptr_ops.rs:27:1 diff --git a/src/test/ui/consts/const-eval/double_check2.stderr b/src/test/ui/consts/const-eval/double_check2.stderr index 2a0a674e237fe..739af12d09c69 100644 --- a/src/test/ui/consts/const-eval/double_check2.stderr +++ b/src/test/ui/consts/const-eval/double_check2.stderr @@ -5,7 +5,7 @@ LL | / static FOO: (&Foo, &Bar) = unsafe {( //~ undefined behavior LL | | Union { usize: &BAR }.foo, LL | | Union { usize: &BAR }.bar, LL | | )}; - | |___^ type validation failed: encountered 5 at (*.1).TAG, but expected something in the range 42..=99 + | |___^ type validation failed: encountered 5 at .1.<deref>.<enum-tag>, but expected something in the range 42..=99 | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior diff --git a/src/test/ui/consts/const-eval/ub-enum-ptr.rs b/src/test/ui/consts/const-eval/ub-enum-ptr.rs deleted file mode 100644 index 8538dd14afed1..0000000000000 --- a/src/test/ui/consts/const-eval/ub-enum-ptr.rs +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms.
- -#[repr(usize)] -#[derive(Copy, Clone)] -enum Enum { - A = 0, -} - -union Foo { - a: &'static u8, - b: Enum, -} - -// A pointer is guaranteed non-null -const BAD_ENUM: Enum = unsafe { Foo { a: &1 }.b}; -//~^ ERROR this constant likely exhibits undefined behavior - -fn main() { -} diff --git a/src/test/ui/consts/const-eval/ub-enum-ptr.stderr b/src/test/ui/consts/const-eval/ub-enum-ptr.stderr deleted file mode 100644 index 4b7ccc25c6c01..0000000000000 --- a/src/test/ui/consts/const-eval/ub-enum-ptr.stderr +++ /dev/null @@ -1,11 +0,0 @@ -error[E0080]: this constant likely exhibits undefined behavior - --> $DIR/ub-enum-ptr.rs:23:1 - | -LL | const BAD_ENUM: Enum = unsafe { Foo { a: &1 }.b}; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered pointer at .TAG, but expected something in the range 0..=0 - | - = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior - -error: aborting due to previous error - -For more information about this error, try `rustc --explain E0080`. diff --git a/src/test/ui/consts/const-eval/ub-enum.rs b/src/test/ui/consts/const-eval/ub-enum.rs new file mode 100644 index 0000000000000..bcb71af54afdb --- /dev/null +++ b/src/test/ui/consts/const-eval/ub-enum.rs @@ -0,0 +1,49 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[repr(usize)] +#[derive(Copy, Clone)] +enum Enum { + A = 0, +} +union TransmuteEnum { + a: &'static u8, + b: Enum, +} + +// A pointer is guaranteed non-null +const BAD_ENUM: Enum = unsafe { TransmuteEnum { a: &1 }.b }; +//~^ ERROR this constant likely exhibits undefined behavior + +// Invalid enum discriminant +#[repr(usize)] +#[derive(Copy, Clone)] +enum Enum2 { + A = 2, +} +union TransmuteEnum2 { + a: usize, + b: Enum2, +} +const BAD_ENUM2 : Enum2 = unsafe { TransmuteEnum2 { a: 0 }.b }; +//~^ ERROR this constant likely exhibits undefined behavior + +// Invalid enum field content (mostly to test printing of paths for enum tuple +// variants and tuples). +union TransmuteChar { + a: u32, + b: char, +} +// Need to create something which does not clash with enum layout optimizations. +const BAD_ENUM_CHAR : Option<(char, char)> = Some(('x', unsafe { TransmuteChar { a: !0 }.b })); +//~^ ERROR this constant likely exhibits undefined behavior + +fn main() { +} diff --git a/src/test/ui/consts/const-eval/ub-enum.stderr b/src/test/ui/consts/const-eval/ub-enum.stderr new file mode 100644 index 0000000000000..98e9b598b543f --- /dev/null +++ b/src/test/ui/consts/const-eval/ub-enum.stderr @@ -0,0 +1,27 @@ +error[E0080]: this constant likely exhibits undefined behavior + --> $DIR/ub-enum.rs:22:1 + | +LL | const BAD_ENUM: Enum = unsafe { TransmuteEnum { a: &1 }.b }; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered pointer at .<enum-tag>, but expected something in the range 0..=0 + | + = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous.
Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior + +error[E0080]: this constant likely exhibits undefined behavior + --> $DIR/ub-enum.rs:35:1 + | +LL | const BAD_ENUM2 : Enum2 = unsafe { TransmuteEnum2 { a: 0 }.b }; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered 0 at .<enum-tag>, but expected something in the range 2..=2 + | + = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior + +error[E0080]: this constant likely exhibits undefined behavior + --> $DIR/ub-enum.rs:45:1 + | +LL | const BAD_ENUM_CHAR : Option<(char, char)> = Some(('x', unsafe { TransmuteChar { a: !0 }.b })); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered character at .Some.0.1, but expected a valid unicode codepoint + | + = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior + +error: aborting due to 3 previous errors + +For more information about this error, try `rustc --explain E0080`. diff --git a/src/test/ui/union-ub-fat-ptr.rs b/src/test/ui/union-ub-fat-ptr.rs index cfce92ece7a92..ffa824fc6af47 100644 --- a/src/test/ui/union-ub-fat-ptr.rs +++ b/src/test/ui/union-ub-fat-ptr.rs @@ -13,6 +13,11 @@ // normalize-stderr-test "allocation \d+" -> "allocation N" // normalize-stderr-test "size \d+" -> "size N" +union BoolTransmute { + val: u8, + bl: bool, +} + #[repr(C)] #[derive(Copy, Clone)] struct SliceRepr { @@ -32,6 +37,7 @@ union SliceTransmute { bad: BadSliceRepr, slice: &'static [u8], str: &'static str, + my_str: &'static Str, } #[repr(C)] @@ -63,32 +69,48 @@ union DynTransmute { } trait Trait {} +impl Trait for bool {} + +struct Str(str); // OK const A: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 1 } }.str}; -// should lint +// bad str const B: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.str}; -// bad +//~^ ERROR this constant likely exhibits undefined behavior +// bad str const C: &str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.str}; //~^ ERROR this constant likely exhibits undefined behavior +// bad str in Str +const C2: &Str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.my_str}; +//~^ ERROR this constant likely exhibits undefined behavior // OK const A2: &[u8] = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 1 } }.slice}; -// should lint +// bad slice const B2: &[u8] = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.slice}; -// bad -const C2: &[u8] = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.slice}; +//~^ ERROR this constant likely exhibits undefined behavior +// bad slice +const C3: &[u8] = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.slice}; //~^ ERROR this constant likely exhibits undefined behavior -// bad +// bad trait object const D: &Trait = unsafe { DynTransmute { repr: DynRepr { ptr: &92, vtable: &3 } }.rust}; //~^ ERROR this constant likely exhibits undefined behavior -// bad +// bad trait object const E: &Trait = unsafe { DynTransmute { repr2: DynRepr2 { ptr: &92, vtable: &3 } }.rust}; //~^ ERROR
this constant likely exhibits undefined behavior -// bad +// bad trait object const F: &Trait = unsafe { DynTransmute { bad: BadDynRepr { ptr: &92, vtable: 3 } }.rust}; //~^ ERROR this constant likely exhibits undefined behavior +// bad data *inside* the trait object +const G: &Trait = &unsafe { BoolTransmute { val: 3 }.bl }; +//~^ ERROR this constant likely exhibits undefined behavior + +// bad data *inside* the slice +const H: &[bool] = &[unsafe { BoolTransmute { val: 3 }.bl }]; +//~^ ERROR this constant likely exhibits undefined behavior + fn main() { } diff --git a/src/test/ui/union-ub-fat-ptr.stderr b/src/test/ui/union-ub-fat-ptr.stderr index f0298d9013c93..cc22422304d68 100644 --- a/src/test/ui/union-ub-fat-ptr.stderr +++ b/src/test/ui/union-ub-fat-ptr.stderr @@ -1,21 +1,45 @@ error[E0080]: this constant likely exhibits undefined behavior - --> $DIR/union-ub-fat-ptr.rs:72:1 + --> $DIR/union-ub-fat-ptr.rs:79:1 + | +LL | const B: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.str}; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ memory access at offset N, outside bounds of allocation N which has size N + | + = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior + +error[E0080]: this constant likely exhibits undefined behavior + --> $DIR/union-ub-fat-ptr.rs:82:1 | LL | const C: &str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.str}; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered length is not a valid integer + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered fat pointer length is not a valid integer | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior error[E0080]: this constant likely exhibits undefined behavior - --> $DIR/union-ub-fat-ptr.rs:80:1 + --> $DIR/union-ub-fat-ptr.rs:85:1 | -LL | const C2: &[u8] = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.slice}; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered length is not a valid integer +LL | const C2: &Str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.my_str}; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered fat pointer length is not a valid integer | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. 
Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior error[E0080]: this constant likely exhibits undefined behavior - --> $DIR/union-ub-fat-ptr.rs:84:1 + --> $DIR/union-ub-fat-ptr.rs:91:1 + | +LL | const B2: &[u8] = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.slice}; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ memory access at offset N, outside bounds of allocation N which has size N + | + = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior + +error[E0080]: this constant likely exhibits undefined behavior + --> $DIR/union-ub-fat-ptr.rs:94:1 + | +LL | const C3: &[u8] = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.slice}; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered fat pointer length is not a valid integer + | + = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior + +error[E0080]: this constant likely exhibits undefined behavior + --> $DIR/union-ub-fat-ptr.rs:98:1 | LL | const D: &Trait = unsafe { DynTransmute { repr: DynRepr { ptr: &92, vtable: &3 } }.rust}; | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ tried to access memory with alignment N, but alignment N is required @@ -23,21 +47,37 @@ LL | const D: &Trait = unsafe { DynTransmute { repr: DynRepr { ptr: &92, vtable: = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior error[E0080]: this constant likely exhibits undefined behavior - --> $DIR/union-ub-fat-ptr.rs:87:1 + --> $DIR/union-ub-fat-ptr.rs:101:1 | LL | const E: &Trait = unsafe { DynTransmute { repr2: DynRepr2 { ptr: &92, vtable: &3 } }.rust}; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ memory access at offset N, outside bounds of allocation N which has size N + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ a memory access tried to interpret some bytes as a pointer | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior error[E0080]: this constant likely exhibits undefined behavior - --> $DIR/union-ub-fat-ptr.rs:90:1 + --> $DIR/union-ub-fat-ptr.rs:104:1 | LL | const F: &Trait = unsafe { DynTransmute { bad: BadDynRepr { ptr: &92, vtable: 3 } }.rust}; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered vtable address is not a pointer + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered fat pointer vtable is not a valid pointer + | + = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. 
Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior + +error[E0080]: this constant likely exhibits undefined behavior + --> $DIR/union-ub-fat-ptr.rs:108:1 + | +LL | const G: &Trait = &unsafe { BoolTransmute { val: 3 }.bl }; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered 3 at ., but expected something in the range 0..=1 + | + = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior + +error[E0080]: this constant likely exhibits undefined behavior + --> $DIR/union-ub-fat-ptr.rs:112:1 + | +LL | const H: &[bool] = &[unsafe { BoolTransmute { val: 3 }.bl }]; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered 3 at .[0], but expected something in the range 0..=1 | = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior -error: aborting due to 5 previous errors +error: aborting due to 10 previous errors For more information about this error, try `rustc --explain E0080`.
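
As a standalone illustration of the range checks that the `BAD_ENUM2`, `G`, and `H` cases above exercise: `bool` admits only the bit patterns 0 and 1, and const validation rejects any constant that materializes something outside that range, even behind a reference. The following sketch is hypothetical and not part of this patch; on a compiler carrying this change it should be rejected with E0080 rather than compiled:

```rust
// Hypothetical reproduction in the spirit of the new `G`/`H` tests;
// not part of this patch.
union BoolTransmute {
    val: u8,
    bl: bool,
}

// The byte 3 is not a valid `bool`, so const validation should report:
// "encountered 3 at ., but expected something in the range 0..=1"
const BAD_BOOL: &bool = &unsafe { BoolTransmute { val: 3 }.bl };

fn main() {
    println!("{}", *BAD_BOOL);
}
```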
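The `len: 999` cases (`B`, `B2`) fail for a different reason: a slice or `str` fat pointer pairs a data pointer with an element count, and validation checks that the claimed count actually lies within the pointed-to allocation. A small well-defined counterpart, again only an illustration and not part of the patch:

```rust
use std::slice;

fn main() {
    let x: u8 = 42;
    // Well-defined: the claimed length (1) fits within `x`'s one-byte allocation.
    let good: &[u8] = unsafe { slice::from_raw_parts(&x, 1) };
    assert_eq!(good, &[42]);
    // Claiming a length of 999 over the same one-byte allocation is exactly
    // what the `B`/`B2` constants do, which is why the validator reports
    // "memory access at offset N, outside bounds of allocation N which has size N".
}
```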