diff --git a/.gitattributes b/.gitattributes index 2c5c37007d5..ac0a71232ef 100644 --- a/.gitattributes +++ b/.gitattributes @@ -5,6 +5,7 @@ *.h rust *.rs rust diff=rust *.fixed linguist-language=Rust +*.mir linguist-language=Rust src/etc/installer/gfx/* binary *.woff binary src/vendor/** -text diff --git a/.mailmap b/.mailmap index f4769268321..d3e400e5f90 100644 --- a/.mailmap +++ b/.mailmap @@ -148,6 +148,7 @@ Jorge Aparicio Joseph Martin Joseph T. Lyons Joseph T. Lyons +Joshua Nelson jumbatm <30644300+jumbatm@users.noreply.github.com> Junyoung Cho Jyun-Yan You diff --git a/Cargo.lock b/Cargo.lock index f74acf043ea..56faee8711b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,9 +2,9 @@ # It is not intended for manual editing. [[package]] name = "addr2line" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072" +checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" dependencies = [ "compiler_builtins", "gimli", @@ -132,13 +132,13 @@ checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" [[package]] name = "backtrace" -version = "0.3.53" +version = "0.3.55" dependencies = [ "addr2line", "cfg-if 1.0.0", "libc", "miniz_oxide", - "object 0.21.1", + "object", "rustc-demangle", ] @@ -470,9 +470,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chalk-derive" -version = "0.32.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d072b2ba723f0bada7c515d8b3725224bc4f5052d2a92dcbeb0b118ff37084a" +checksum = "9f88ce4deae1dace71e49b7611cfae2d5489de3530d6daba5758043c47ac3a10" dependencies = [ "proc-macro2", "quote", @@ -482,9 +482,9 @@ dependencies = [ [[package]] name = "chalk-engine" -version = "0.32.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb5475f6083d6d6c509e1c335c4f69ad04144ac090faa1afb134a53c3695841" +checksum = "0e34c9b1b10616782143d7f49490f91ae94afaf2202de3ab0b2835e78b4f0ccc" dependencies = [ "chalk-derive", "chalk-ir", @@ -495,9 +495,9 @@ dependencies = [ [[package]] name = "chalk-ir" -version = "0.32.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f60cdb0e18c5455cb6a85e8464aad3622b70476018edfa8845691df66f7e9a05" +checksum = "63362c629c2014ab639b04029070763fb8224df136d1363d30e9ece4c8877da3" dependencies = [ "chalk-derive", "lazy_static", @@ -505,9 +505,9 @@ dependencies = [ [[package]] name = "chalk-solve" -version = "0.32.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "981534d499a8476ecc0b520be4d3864757f96211826a75360fbf2cb6fae362ab" +checksum = "cac338a67af52a7f50bb2f8232e730a3518ce432dbe303246acfe525ddd838c7" dependencies = [ "chalk-derive", "chalk-ir", @@ -731,6 +731,13 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a21fa21941700a3cd8fcb4091f361a6a712fac632f85d9f487cc892045d55c6" +[[package]] +name = "coverage_test_macros" +version = "0.0.0" +dependencies = [ + "proc-macro2", +] + [[package]] name = "cpuid-bool" version = "0.1.2" @@ -990,9 +997,9 @@ dependencies = [ [[package]] name = "dlmalloc" -version = "0.1.4" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35055b1021724f4eb5262eb49130eebff23fc59fc5a14160e05faad8eeb36673" +checksum = 
"332570860c2edf2d57914987bf9e24835425f75825086b6ba7d1e6a3e4f1f254" dependencies = [ "compiler_builtins", "libc", @@ -1301,9 +1308,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" +checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" dependencies = [ "compiler_builtins", "rustc-std-workspace-alloc", @@ -2190,21 +2197,15 @@ dependencies = [ [[package]] name = "object" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" +checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" dependencies = [ "compiler_builtins", "rustc-std-workspace-alloc", "rustc-std-workspace-core", ] -[[package]] -name = "object" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37fd5004feb2ce328a52b0b3d01dbf4ffff72583493900ed15f22d4111c51693" - [[package]] name = "once_cell" version = "1.4.1" @@ -3032,6 +3033,7 @@ name = "rustbook" version = "0.1.0" dependencies = [ "clap", + "env_logger 0.7.1", "mdbook", ] @@ -3813,7 +3815,7 @@ dependencies = [ "rustc_target", "rustc_trait_selection", "rustc_traits", - "rustc_ty", + "rustc_ty_utils", "rustc_typeck", "smallvec 1.4.2", "tempfile", @@ -3842,6 +3844,7 @@ dependencies = [ "rustc_hir", "rustc_index", "rustc_middle", + "rustc_parse_format", "rustc_session", "rustc_span", "rustc_target", @@ -3941,6 +3944,7 @@ dependencies = [ name = "rustc_mir" version = "0.0.0" dependencies = [ + "coverage_test_macros", "either", "itertools 0.9.0", "polonius-engine", @@ -4250,7 +4254,7 @@ dependencies = [ ] [[package]] -name = "rustc_ty" +name = "rustc_ty_utils" version = "0.0.0" dependencies = [ "rustc_data_structures", @@ -4348,7 +4352,7 @@ dependencies = [ [[package]] name = "rustfmt-nightly" -version = "1.4.24" +version = "1.4.27" dependencies = [ "annotate-snippets 0.6.1", "anyhow", @@ -4677,7 +4681,7 @@ dependencies = [ "hermit-abi", "libc", "miniz_oxide", - "object 0.20.0", + "object", "panic_abort", "panic_unwind", "profiler_builtins", diff --git a/RELEASES.md b/RELEASES.md index c9ff4928763..9fd796fd775 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,3 +1,143 @@ +Version 1.48.0 (2020-11-19) +========================== + +Language +-------- + +- [The `unsafe` keyword is now syntactically permitted on modules.][75857] This + is still rejected *semantically*, but can now be parsed by procedural macros. + +Compiler +-------- +- [Stabilised the `-C link-self-contained=` compiler flag.][76158] This tells + `rustc` whether to link its own C runtime and libraries or to rely on a external + linker to find them. (Supported only on `windows-gnu`, `linux-musl`, and `wasi` platforms.) +- [You can now use `-C target-feature=+crt-static` on `linux-gnu` targets.][77386] + Note: If you're using cargo you must explicitly pass the `--target` flag. +- [Added tier 2\* support for `aarch64-unknown-linux-musl`.][76420] + +\* Refer to Rust's [platform support page][forge-platform-support] for more +information on Rust's tiered platform support. 
+ +Libraries +--------- +- [`io::Write` is now implemented for `&ChildStdin`, `&Sink`, `&Stdout`, + and `&Stderr`.][76275] +- [All arrays of any length now implement `TryFrom<Vec<T>>`.][76310] +- [The `matches!` macro now supports having a trailing comma.][74880] +- [`Vec<A>` now implements `PartialEq<[B]>` where `A: PartialEq<B>`.][74194] +- [The `RefCell::{replace, replace_with, clone}` methods now all use `#[track_caller]`.][77055] + +Stabilized APIs +--------------- +- [`slice::as_ptr_range`] +- [`slice::as_mut_ptr_range`] +- [`VecDeque::make_contiguous`] +- [`future::pending`] +- [`future::ready`] + +The following previously stable methods are now `const fn`'s: + +- [`Option::is_some`] +- [`Option::is_none`] +- [`Option::as_ref`] +- [`Result::is_ok`] +- [`Result::is_err`] +- [`Result::as_ref`] +- [`Ordering::reverse`] +- [`Ordering::then`] + +Cargo +----- + +Rustdoc +------- +- [You can now link to items in `rustdoc` using the intra-doc link + syntax.][74430] E.g. ``/// Uses [`std::future`]`` will automatically generate + a link to `std::future`'s documentation. See ["Linking to items by + name"][intradoc-links] for more information. +- [You can now specify `#[doc(alias = "<alias>")]` on items to add search aliases + when searching through `rustdoc`'s UI.][75740] + +Compatibility Notes +------------------- +- [Promotion of references to `'static` lifetime inside `const fn` now follows the + same rules as inside a `fn` body.][75502] In particular, `&foo()` will not be + promoted to `'static` lifetime any more inside `const fn`s. +- [Associated type bindings on trait objects are now verified to meet the bounds + declared on the trait when checking that they implement the trait.][27675] +- [When trait bounds on associated types or opaque types are ambiguous, the + compiler no longer makes an arbitrary choice on which bound to use.][54121] +- [Fixed recursive nonterminals not being expanded in macros during + pretty-print/reparse check.][77153] This may cause errors if your macro wasn't + correctly handling recursive nonterminal tokens. +- [`&mut` references to non zero-sized types are no longer promoted.][75585] +- [`rustc` will now warn if you use attributes like `#[link_name]` or `#[cold]` + in places where they have no effect.][73461] +- [Updated `_mm256_extract_epi8` and `_mm256_extract_epi16` signatures in + `arch::{x86, x86_64}` to return `i32` to match the vendor signatures.][73166] +- [`mem::uninitialized` will now panic if any inner types inside a struct or enum + disallow zero-initialization.][71274] +- [`#[target_feature]` will now error if used in a place where it has no effect.][78143] +- [Foreign exceptions are now caught by `catch_unwind` and will cause an abort.][70212] + Note: This behaviour is not guaranteed and is still considered undefined behaviour, + see the [`catch_unwind`] documentation for further information. + + + +Internal Only +------------- +These changes provide no direct user facing benefits, but represent significant +improvements to the internals and overall performance of rustc and +related tools. + +- [Building `rustc` from source now uses `ninja` by default over `make`.][74922] + You can continue building with `make` by setting `ninja=false` in + your `config.toml`.
+- [cg_llvm: `fewer_names` in `uncached_llvm_type`][76030] +- [Made `ensure_sufficient_stack()` non-generic][76680] + +[78143]: https://github.com/rust-lang/rust/issues/78143 +[76680]: https://github.com/rust-lang/rust/pull/76680/ +[76030]: https://github.com/rust-lang/rust/pull/76030/ +[70212]: https://github.com/rust-lang/rust/pull/70212/ +[27675]: https://github.com/rust-lang/rust/issues/27675/ +[54121]: https://github.com/rust-lang/rust/issues/54121/ +[71274]: https://github.com/rust-lang/rust/pull/71274/ +[77386]: https://github.com/rust-lang/rust/pull/77386/ +[77153]: https://github.com/rust-lang/rust/pull/77153/ +[77055]: https://github.com/rust-lang/rust/pull/77055/ +[76275]: https://github.com/rust-lang/rust/pull/76275/ +[76310]: https://github.com/rust-lang/rust/pull/76310/ +[76420]: https://github.com/rust-lang/rust/pull/76420/ +[76158]: https://github.com/rust-lang/rust/pull/76158/ +[75857]: https://github.com/rust-lang/rust/pull/75857/ +[75585]: https://github.com/rust-lang/rust/pull/75585/ +[75740]: https://github.com/rust-lang/rust/pull/75740/ +[75502]: https://github.com/rust-lang/rust/pull/75502/ +[74880]: https://github.com/rust-lang/rust/pull/74880/ +[74922]: https://github.com/rust-lang/rust/pull/74922/ +[74430]: https://github.com/rust-lang/rust/pull/74430/ +[74194]: https://github.com/rust-lang/rust/pull/74194/ +[73461]: https://github.com/rust-lang/rust/pull/73461/ +[73166]: https://github.com/rust-lang/rust/pull/73166/ +[intradoc-links]: https://doc.rust-lang.org/rustdoc/linking-to-items-by-name.html +[`catch_unwind`]: https://doc.rust-lang.org/std/panic/fn.catch_unwind.html +[`Option::is_some`]: https://doc.rust-lang.org/std/option/enum.Option.html#method.is_some +[`Option::is_none`]: https://doc.rust-lang.org/std/option/enum.Option.html#method.is_none +[`Option::as_ref`]: https://doc.rust-lang.org/std/option/enum.Option.html#method.as_ref +[`Result::is_ok`]: https://doc.rust-lang.org/std/result/enum.Result.html#method.is_ok +[`Result::is_err`]: https://doc.rust-lang.org/std/result/enum.Result.html#method.is_err +[`Result::as_ref`]: https://doc.rust-lang.org/std/result/enum.Result.html#method.as_ref +[`Ordering::reverse`]: https://doc.rust-lang.org/std/cmp/enum.Ordering.html#method.reverse +[`Ordering::then`]: https://doc.rust-lang.org/std/cmp/enum.Ordering.html#method.then +[`slice::as_ptr_range`]: https://doc.rust-lang.org/std/primitive.slice.html#method.as_ptr_range +[`slice::as_mut_ptr_range`]: https://doc.rust-lang.org/std/primitive.slice.html#method.as_mut_ptr_range +[`VecDeque::make_contiguous`]: https://doc.rust-lang.org/std/collections/struct.VecDeque.html#method.make_contiguous +[`future::pending`]: https://doc.rust-lang.org/std/future/fn.pending.html +[`future::ready`]: https://doc.rust-lang.org/std/future/fn.ready.html + + Version 1.47.0 (2020-10-08) ========================== @@ -90,6 +230,7 @@ Compatibility Notes Internal Only -------- + - [Improved default settings for bootstrapping in `x.py`.][73964] You can read details about this change in the ["Changes to `x.py` defaults"](https://blog.rust-lang.org/inside-rust/2020/08/30/changes-to-x-py-defaults.html) post on the Inside Rust blog. 
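To make the 1.48.0 library items listed above concrete, a small self-contained sketch follows; it is not part of the changelog, assumes a 1.48-or-later toolchain, and uses illustrative values only.

```rust
use std::collections::VecDeque;
use std::convert::TryFrom;

fn main() {
    // slice::as_ptr_range: the slice's bounds as a half-open pointer range.
    let xs = [10i32, 20, 30];
    let range = xs.as_ptr_range();
    assert_eq!(range.end as usize - range.start as usize, 3 * std::mem::size_of::<i32>());

    // VecDeque::make_contiguous: rearrange the ring buffer into a single slice.
    let mut dq: VecDeque<i32> = (1..=4).collect();
    dq.rotate_left(2);
    assert_eq!(dq.make_contiguous(), &[3, 4, 1, 2][..]);

    // Arrays of any length now implement TryFrom<Vec<T>>.
    let arr = <[i32; 3]>::try_from(vec![1, 2, 3]).unwrap();

    // matches! now accepts a trailing comma.
    assert!(matches!(Some(arr[0]), Some(1),));

    // Option::is_some is now usable as a `const fn`.
    const IS_SOME: bool = Some(0u8).is_some();
    assert!(IS_SOME);

    // future::ready builds an immediately-ready future (created, not awaited, here).
    let _fut = std::future::ready(42);
}
```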
[1.47.0-cfg]: https://docs.microsoft.com/en-us/windows/win32/secbp/control-flow-guard diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs index b76e1e7ce65..f468bad635a 100644 --- a/compiler/rustc_arena/src/lib.rs +++ b/compiler/rustc_arena/src/lib.rs @@ -11,9 +11,13 @@ html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/", test(no_crate_inject, attr(deny(warnings))) )] +#![feature(array_value_iter_slice)] #![feature(dropck_eyepatch)] #![feature(new_uninit)] #![feature(maybe_uninit_slice)] +#![feature(array_value_iter)] +#![feature(min_const_generics)] +#![feature(min_specialization)] #![cfg_attr(test, feature(test))] use smallvec::SmallVec; @@ -114,6 +118,72 @@ impl Default for TypedArena { } } +trait IterExt { + fn alloc_from_iter(self, arena: &TypedArena) -> &mut [T]; +} + +impl IterExt for I +where + I: IntoIterator, +{ + #[inline] + default fn alloc_from_iter(self, arena: &TypedArena) -> &mut [T] { + let vec: SmallVec<[_; 8]> = self.into_iter().collect(); + vec.alloc_from_iter(arena) + } +} + +impl IterExt for std::array::IntoIter { + #[inline] + fn alloc_from_iter(self, arena: &TypedArena) -> &mut [T] { + let len = self.len(); + if len == 0 { + return &mut []; + } + // Move the content to the arena by copying and then forgetting it + unsafe { + let start_ptr = arena.alloc_raw_slice(len); + self.as_slice().as_ptr().copy_to_nonoverlapping(start_ptr, len); + mem::forget(self); + slice::from_raw_parts_mut(start_ptr, len) + } + } +} + +impl IterExt for Vec { + #[inline] + fn alloc_from_iter(mut self, arena: &TypedArena) -> &mut [T] { + let len = self.len(); + if len == 0 { + return &mut []; + } + // Move the content to the arena by copying and then forgetting it + unsafe { + let start_ptr = arena.alloc_raw_slice(len); + self.as_ptr().copy_to_nonoverlapping(start_ptr, len); + self.set_len(0); + slice::from_raw_parts_mut(start_ptr, len) + } + } +} + +impl IterExt for SmallVec { + #[inline] + fn alloc_from_iter(mut self, arena: &TypedArena) -> &mut [A::Item] { + let len = self.len(); + if len == 0 { + return &mut []; + } + // Move the content to the arena by copying and then forgetting it + unsafe { + let start_ptr = arena.alloc_raw_slice(len); + self.as_ptr().copy_to_nonoverlapping(start_ptr, len); + self.set_len(0); + slice::from_raw_parts_mut(start_ptr, len) + } + } +} + impl TypedArena { /// Allocates an object in the `TypedArena`, returning a reference to it. #[inline] @@ -191,19 +261,7 @@ impl TypedArena { #[inline] pub fn alloc_from_iter>(&self, iter: I) -> &mut [T] { assert!(mem::size_of::() != 0); - let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect(); - if vec.is_empty() { - return &mut []; - } - // Move the content to the arena by copying it and then forgetting - // the content of the SmallVec - unsafe { - let len = vec.len(); - let start_ptr = self.alloc_raw_slice(len); - vec.as_ptr().copy_to_nonoverlapping(start_ptr, len); - vec.set_len(0); - slice::from_raw_parts_mut(start_ptr, len) - } + iter.alloc_from_iter(self) } /// Grows the arena. diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs index f13d67b9c15..328086af183 100644 --- a/compiler/rustc_ast/src/ast.rs +++ b/compiler/rustc_ast/src/ast.rs @@ -1061,7 +1061,7 @@ pub struct Expr { // `Expr` is used a lot. Make sure it doesn't unintentionally get bigger. 
#[cfg(target_arch = "x86_64")] -rustc_data_structures::static_assert_size!(Expr, 112); +rustc_data_structures::static_assert_size!(Expr, 120); impl Expr { /// Returns `true` if this expression would be valid somewhere that expects a value; @@ -1192,6 +1192,7 @@ impl Expr { ExprKind::Field(..) => ExprPrecedence::Field, ExprKind::Index(..) => ExprPrecedence::Index, ExprKind::Range(..) => ExprPrecedence::Range, + ExprKind::Underscore => ExprPrecedence::Path, ExprKind::Path(..) => ExprPrecedence::Path, ExprKind::AddrOf(..) => ExprPrecedence::AddrOf, ExprKind::Break(..) => ExprPrecedence::Break, @@ -1218,6 +1219,16 @@ pub enum RangeLimits { Closed, } +#[derive(Clone, Encodable, Decodable, Debug)] +pub enum StructRest { + /// `..x`. + Base(P), + /// `..`. + Rest(Span), + /// No trailing `..` or expression. + None, +} + #[derive(Clone, Encodable, Decodable, Debug)] pub enum ExprKind { /// A `box x` expression. @@ -1312,8 +1323,10 @@ pub enum ExprKind { Field(P, Ident), /// An indexing operation (e.g., `foo[2]`). Index(P, P), - /// A range (e.g., `1..2`, `1..`, `..2`, `1..=2`, `..=2`). + /// A range (e.g., `1..2`, `1..`, `..2`, `1..=2`, `..=2`; and `..` in destructuring assingment). Range(Option>, Option>, RangeLimits), + /// An underscore, used in destructuring assignment to ignore a value. + Underscore, /// Variable reference, possibly containing `::` and/or type /// parameters (e.g., `foo::bar::`). @@ -1340,9 +1353,8 @@ pub enum ExprKind { /// A struct literal expression. /// - /// E.g., `Foo {x: 1, y: 2}`, or `Foo {x: 1, .. base}`, - /// where `base` is the `Option`. - Struct(Path, Vec, Option>), + /// E.g., `Foo {x: 1, y: 2}`, or `Foo {x: 1, .. rest}`. + Struct(Path, Vec, StructRest), /// An array literal constructed from one repeated element. /// @@ -2439,13 +2451,12 @@ pub struct Attribute { /// or the construct this attribute is contained within (inner). pub style: AttrStyle, pub span: Span, - pub tokens: Option, } #[derive(Clone, Encodable, Decodable, Debug)] pub enum AttrKind { /// A normal attribute. - Normal(AttrItem), + Normal(AttrItem, Option), /// A doc comment (e.g. `/// ...`, `//! ...`, `/** ... */`, `/*! ... */`). /// Doc attributes (e.g. `#[doc="..."]`) are represented with the `Normal` diff --git a/compiler/rustc_ast/src/attr/mod.rs b/compiler/rustc_ast/src/attr/mod.rs index ec87a88f4ab..2ff65737444 100644 --- a/compiler/rustc_ast/src/attr/mod.rs +++ b/compiler/rustc_ast/src/attr/mod.rs @@ -8,7 +8,7 @@ use crate::ast::{Path, PathSegment}; use crate::mut_visit::visit_clobber; use crate::ptr::P; use crate::token::{self, CommentKind, Token}; -use crate::tokenstream::{DelimSpan, TokenStream, TokenTree, TreeAndSpacing}; +use crate::tokenstream::{DelimSpan, LazyTokenStream, TokenStream, TokenTree, TreeAndSpacing}; use rustc_index::bit_set::GrowableBitSet; use rustc_span::source_map::{BytePos, Spanned}; @@ -66,7 +66,7 @@ impl NestedMetaItem { self.meta_item().and_then(|meta_item| meta_item.ident()) } pub fn name_or_empty(&self) -> Symbol { - self.ident().unwrap_or(Ident::invalid()).name + self.ident().unwrap_or_else(Ident::invalid).name } /// Gets the string value if `self` is a `MetaItem` and the `MetaItem` is a @@ -120,7 +120,7 @@ impl NestedMetaItem { impl Attribute { pub fn has_name(&self, name: Symbol) -> bool { match self.kind { - AttrKind::Normal(ref item) => item.path == name, + AttrKind::Normal(ref item, _) => item.path == name, AttrKind::DocComment(..) 
=> false, } } @@ -128,7 +128,7 @@ impl Attribute { /// For a single-segment attribute, returns its name; otherwise, returns `None`. pub fn ident(&self) -> Option { match self.kind { - AttrKind::Normal(ref item) => { + AttrKind::Normal(ref item, _) => { if item.path.segments.len() == 1 { Some(item.path.segments[0].ident) } else { @@ -139,19 +139,19 @@ impl Attribute { } } pub fn name_or_empty(&self) -> Symbol { - self.ident().unwrap_or(Ident::invalid()).name + self.ident().unwrap_or_else(Ident::invalid).name } pub fn value_str(&self) -> Option { match self.kind { - AttrKind::Normal(ref item) => item.meta(self.span).and_then(|meta| meta.value_str()), + AttrKind::Normal(ref item, _) => item.meta(self.span).and_then(|meta| meta.value_str()), AttrKind::DocComment(..) => None, } } pub fn meta_item_list(&self) -> Option> { match self.kind { - AttrKind::Normal(ref item) => match item.meta(self.span) { + AttrKind::Normal(ref item, _) => match item.meta(self.span) { Some(MetaItem { kind: MetaItemKind::List(list), .. }) => Some(list), _ => None, }, @@ -160,7 +160,7 @@ impl Attribute { } pub fn is_word(&self) -> bool { - if let AttrKind::Normal(item) = &self.kind { + if let AttrKind::Normal(item, _) = &self.kind { matches!(item.args, MacArgs::Empty) } else { false @@ -183,7 +183,7 @@ impl MetaItem { if self.path.segments.len() == 1 { Some(self.path.segments[0].ident) } else { None } } pub fn name_or_empty(&self) -> Symbol { - self.ident().unwrap_or(Ident::invalid()).name + self.ident().unwrap_or_else(Ident::invalid).name } // Example: @@ -246,7 +246,7 @@ impl AttrItem { impl Attribute { pub fn is_doc_comment(&self) -> bool { match self.kind { - AttrKind::Normal(_) => false, + AttrKind::Normal(..) => false, AttrKind::DocComment(..) => true, } } @@ -254,7 +254,7 @@ impl Attribute { pub fn doc_str(&self) -> Option { match self.kind { AttrKind::DocComment(.., data) => Some(data), - AttrKind::Normal(ref item) if item.path == sym::doc => { + AttrKind::Normal(ref item, _) if item.path == sym::doc => { item.meta(self.span).and_then(|meta| meta.value_str()) } _ => None, @@ -263,14 +263,14 @@ impl Attribute { pub fn get_normal_item(&self) -> &AttrItem { match self.kind { - AttrKind::Normal(ref item) => item, + AttrKind::Normal(ref item, _) => item, AttrKind::DocComment(..) => panic!("unexpected doc comment"), } } pub fn unwrap_normal_item(self) -> AttrItem { match self.kind { - AttrKind::Normal(item) => item, + AttrKind::Normal(item, _) => item, AttrKind::DocComment(..) => panic!("unexpected doc comment"), } } @@ -278,10 +278,22 @@ impl Attribute { /// Extracts the MetaItem from inside this Attribute. pub fn meta(&self) -> Option { match self.kind { - AttrKind::Normal(ref item) => item.meta(self.span), + AttrKind::Normal(ref item, _) => item.meta(self.span), AttrKind::DocComment(..) 
=> None, } } + + pub fn tokens(&self) -> TokenStream { + match self.kind { + AttrKind::Normal(_, ref tokens) => tokens + .as_ref() + .unwrap_or_else(|| panic!("attribute is missing tokens: {:?}", self)) + .create_token_stream(), + AttrKind::DocComment(comment_kind, data) => TokenStream::from(TokenTree::Token( + Token::new(token::DocComment(comment_kind, self.style, data), self.span), + )), + } + } } /* Constructors */ @@ -321,11 +333,16 @@ crate fn mk_attr_id() -> AttrId { } pub fn mk_attr(style: AttrStyle, path: Path, args: MacArgs, span: Span) -> Attribute { - mk_attr_from_item(style, AttrItem { path, args, tokens: None }, span) + mk_attr_from_item(AttrItem { path, args, tokens: None }, None, style, span) } -pub fn mk_attr_from_item(style: AttrStyle, item: AttrItem, span: Span) -> Attribute { - Attribute { kind: AttrKind::Normal(item), id: mk_attr_id(), style, span, tokens: None } +pub fn mk_attr_from_item( + item: AttrItem, + tokens: Option, + style: AttrStyle, + span: Span, +) -> Attribute { + Attribute { kind: AttrKind::Normal(item, tokens), id: mk_attr_id(), style, span } } /// Returns an inner attribute with the given value and span. @@ -344,13 +361,7 @@ pub fn mk_doc_comment( data: Symbol, span: Span, ) -> Attribute { - Attribute { - kind: AttrKind::DocComment(comment_kind, data), - id: mk_attr_id(), - style, - span, - tokens: None, - } + Attribute { kind: AttrKind::DocComment(comment_kind, data), id: mk_attr_id(), style, span } } pub fn list_contains_name(items: &[NestedMetaItem], name: Symbol) -> bool { diff --git a/compiler/rustc_ast/src/mut_visit.rs b/compiler/rustc_ast/src/mut_visit.rs index 517717eebd9..ddae0ab03e4 100644 --- a/compiler/rustc_ast/src/mut_visit.rs +++ b/compiler/rustc_ast/src/mut_visit.rs @@ -210,11 +210,8 @@ pub trait MutVisitor: Sized { noop_visit_local(l, self); } - fn visit_mac(&mut self, _mac: &mut MacCall) { - panic!("visit_mac disabled by default"); - // N.B., see note about macros above. If you really want a visitor that - // works on macros, use this definition in your trait impl: - // mut_visit::noop_visit_mac(_mac, self); + fn visit_mac_call(&mut self, mac: &mut MacCall) { + noop_visit_mac(mac, self); } fn visit_macro_def(&mut self, def: &mut MacroDef) { @@ -494,7 +491,7 @@ pub fn noop_visit_ty(ty: &mut P, vis: &mut T) { vis.visit_id(id); visit_vec(bounds, |bound| vis.visit_param_bound(bound)); } - TyKind::MacCall(mac) => vis.visit_mac(mac), + TyKind::MacCall(mac) => vis.visit_mac_call(mac), } vis.visit_span(span); visit_lazy_tts(tokens, vis); @@ -589,17 +586,17 @@ pub fn noop_visit_local(local: &mut P, vis: &mut T) { } pub fn noop_visit_attribute(attr: &mut Attribute, vis: &mut T) { - let Attribute { kind, id: _, style: _, span, tokens } = attr; + let Attribute { kind, id: _, style: _, span } = attr; match kind { - AttrKind::Normal(AttrItem { path, args, tokens }) => { + AttrKind::Normal(AttrItem { path, args, tokens }, attr_tokens) => { vis.visit_path(path); visit_mac_args(args, vis); visit_lazy_tts(tokens, vis); + visit_lazy_tts(attr_tokens, vis); } AttrKind::DocComment(..) 
=> {} } vis.visit_span(span); - visit_lazy_tts(tokens, vis); } pub fn noop_visit_mac(mac: &mut MacCall, vis: &mut T) { @@ -962,7 +959,7 @@ pub fn noop_visit_item_kind(kind: &mut ItemKind, vis: &mut T) { vis.visit_generics(generics); visit_bounds(bounds, vis); } - ItemKind::MacCall(m) => vis.visit_mac(m), + ItemKind::MacCall(m) => vis.visit_mac_call(m), ItemKind::MacroDef(def) => vis.visit_macro_def(def), } } @@ -991,7 +988,7 @@ pub fn noop_flat_map_assoc_item( visit_bounds(bounds, visitor); visit_opt(ty, |ty| visitor.visit_ty(ty)); } - AssocItemKind::MacCall(mac) => visitor.visit_mac(mac), + AssocItemKind::MacCall(mac) => visitor.visit_mac_call(mac), } visitor.visit_span(span); visit_lazy_tts(tokens, visitor); @@ -1081,7 +1078,7 @@ pub fn noop_flat_map_foreign_item( visit_bounds(bounds, visitor); visit_opt(ty, |ty| visitor.visit_ty(ty)); } - ForeignItemKind::MacCall(mac) => visitor.visit_mac(mac), + ForeignItemKind::MacCall(mac) => visitor.visit_mac_call(mac), } visitor.visit_span(span); visit_lazy_tts(tokens, visitor); @@ -1121,7 +1118,7 @@ pub fn noop_visit_pat(pat: &mut P, vis: &mut T) { visit_vec(elems, |elem| vis.visit_pat(elem)) } PatKind::Paren(inner) => vis.visit_pat(inner), - PatKind::MacCall(mac) => vis.visit_mac(mac), + PatKind::MacCall(mac) => vis.visit_mac_call(mac), } vis.visit_span(span); visit_lazy_tts(tokens, vis); @@ -1235,6 +1232,7 @@ pub fn noop_visit_expr( visit_opt(e1, |e1| vis.visit_expr(e1)); visit_opt(e2, |e2| vis.visit_expr(e2)); } + ExprKind::Underscore => {} ExprKind::Path(qself, path) => { vis.visit_qself(qself); vis.visit_path(path); @@ -1287,11 +1285,15 @@ pub fn noop_visit_expr( } visit_vec(inputs, |(_c, expr)| vis.visit_expr(expr)); } - ExprKind::MacCall(mac) => vis.visit_mac(mac), + ExprKind::MacCall(mac) => vis.visit_mac_call(mac), ExprKind::Struct(path, fields, expr) => { vis.visit_path(path); fields.flat_map_in_place(|field| vis.flat_map_field(field)); - visit_opt(expr, |expr| vis.visit_expr(expr)); + match expr { + StructRest::Base(expr) => vis.visit_expr(expr), + StructRest::Rest(_span) => {} + StructRest::None => {} + } } ExprKind::Paren(expr) => { vis.visit_expr(expr); @@ -1350,7 +1352,7 @@ pub fn noop_flat_map_stmt_kind( StmtKind::Empty => smallvec![StmtKind::Empty], StmtKind::MacCall(mut mac) => { let MacCallStmt { mac: mac_, style: _, attrs } = mac.deref_mut(); - vis.visit_mac(mac_); + vis.visit_mac_call(mac_); visit_thin_attrs(attrs, vis); smallvec![StmtKind::MacCall(mac)] } diff --git a/compiler/rustc_ast/src/tokenstream.rs b/compiler/rustc_ast/src/tokenstream.rs index 1e7001c2b23..fe67b905bf3 100644 --- a/compiler/rustc_ast/src/tokenstream.rs +++ b/compiler/rustc_ast/src/tokenstream.rs @@ -221,7 +221,7 @@ impl TokenStream { } } if let Some((pos, comma, sp)) = suggestion { - let mut new_stream = vec![]; + let mut new_stream = Vec::with_capacity(self.0.len() + 1); let parts = self.0.split_at(pos + 1); new_stream.extend_from_slice(parts.0); new_stream.push(comma); diff --git a/compiler/rustc_ast/src/visit.rs b/compiler/rustc_ast/src/visit.rs index 2ab6667ac3c..560064182e1 100644 --- a/compiler/rustc_ast/src/visit.rs +++ b/compiler/rustc_ast/src/visit.rs @@ -176,13 +176,8 @@ pub trait Visitor<'ast>: Sized { fn visit_lifetime(&mut self, lifetime: &'ast Lifetime) { walk_lifetime(self, lifetime) } - fn visit_mac(&mut self, _mac: &'ast MacCall) { - panic!("visit_mac disabled by default"); - // N.B., see note about macros above. 
- // if you really want a visitor that - // works on macros, use this - // definition in your trait impl: - // visit::walk_mac(self, _mac) + fn visit_mac_call(&mut self, mac: &'ast MacCall) { + walk_mac(self, mac) } fn visit_mac_def(&mut self, _mac: &'ast MacroDef, _id: NodeId) { // Nothing to do @@ -346,7 +341,7 @@ pub fn walk_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a Item) { visitor.visit_generics(generics); walk_list!(visitor, visit_param_bound, bounds); } - ItemKind::MacCall(ref mac) => visitor.visit_mac(mac), + ItemKind::MacCall(ref mac) => visitor.visit_mac_call(mac), ItemKind::MacroDef(ref ts) => visitor.visit_mac_def(ts, item.id), } walk_list!(visitor, visit_attribute, &item.attrs); @@ -414,7 +409,7 @@ pub fn walk_ty<'a, V: Visitor<'a>>(visitor: &mut V, typ: &'a Ty) { } TyKind::Typeof(ref expression) => visitor.visit_anon_const(expression), TyKind::Infer | TyKind::ImplicitSelf | TyKind::Err => {} - TyKind::MacCall(ref mac) => visitor.visit_mac(mac), + TyKind::MacCall(ref mac) => visitor.visit_mac_call(mac), TyKind::Never | TyKind::CVarArgs => {} } } @@ -532,7 +527,7 @@ pub fn walk_pat<'a, V: Visitor<'a>>(visitor: &mut V, pattern: &'a Pat) { PatKind::Tuple(ref elems) | PatKind::Slice(ref elems) | PatKind::Or(ref elems) => { walk_list!(visitor, visit_pat, elems); } - PatKind::MacCall(ref mac) => visitor.visit_mac(mac), + PatKind::MacCall(ref mac) => visitor.visit_mac_call(mac), } } @@ -557,7 +552,7 @@ pub fn walk_foreign_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a ForeignI walk_list!(visitor, visit_ty, ty); } ForeignItemKind::MacCall(mac) => { - visitor.visit_mac(mac); + visitor.visit_mac_call(mac); } } } @@ -662,7 +657,7 @@ pub fn walk_assoc_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a AssocItem, walk_list!(visitor, visit_ty, ty); } AssocItemKind::MacCall(mac) => { - visitor.visit_mac(mac); + visitor.visit_mac_call(mac); } } } @@ -692,7 +687,7 @@ pub fn walk_stmt<'a, V: Visitor<'a>>(visitor: &mut V, statement: &'a Stmt) { StmtKind::Empty => {} StmtKind::MacCall(ref mac) => { let MacCallStmt { ref mac, style: _, ref attrs } = **mac; - visitor.visit_mac(mac); + visitor.visit_mac_call(mac); for attr in attrs.iter() { visitor.visit_attribute(attr); } @@ -724,7 +719,11 @@ pub fn walk_expr<'a, V: Visitor<'a>>(visitor: &mut V, expression: &'a Expr) { ExprKind::Struct(ref path, ref fields, ref optional_base) => { visitor.visit_path(path, expression.id); walk_list!(visitor, visit_field, fields); - walk_list!(visitor, visit_expr, optional_base); + match optional_base { + StructRest::Base(expr) => visitor.visit_expr(expr), + StructRest::Rest(_span) => {} + StructRest::None => {} + } } ExprKind::Tup(ref subexpressions) => { walk_list!(visitor, visit_expr, subexpressions); @@ -807,6 +806,7 @@ pub fn walk_expr<'a, V: Visitor<'a>>(visitor: &mut V, expression: &'a Expr) { walk_list!(visitor, visit_expr, start); walk_list!(visitor, visit_expr, end); } + ExprKind::Underscore => {} ExprKind::Path(ref maybe_qself, ref path) => { if let Some(ref qself) = *maybe_qself { visitor.visit_ty(&qself.ty); @@ -823,7 +823,7 @@ pub fn walk_expr<'a, V: Visitor<'a>>(visitor: &mut V, expression: &'a Expr) { ExprKind::Ret(ref optional_expression) => { walk_list!(visitor, visit_expr, optional_expression); } - ExprKind::MacCall(ref mac) => visitor.visit_mac(mac), + ExprKind::MacCall(ref mac) => visitor.visit_mac_call(mac), ExprKind::Paren(ref subexpression) => visitor.visit_expr(subexpression), ExprKind::InlineAsm(ref ia) => { for (op, _) in &ia.operands { @@ -886,7 +886,7 @@ pub fn walk_vis<'a, 
V: Visitor<'a>>(visitor: &mut V, vis: &'a Visibility) { pub fn walk_attribute<'a, V: Visitor<'a>>(visitor: &mut V, attr: &'a Attribute) { match attr.kind { - AttrKind::Normal(ref item) => walk_mac_args(visitor, &item.args), + AttrKind::Normal(ref item, ref _tokens) => walk_mac_args(visitor, &item.args), AttrKind::DocComment(..) => {} } } diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs index 1f2aba2b27e..f83fc29577b 100644 --- a/compiler/rustc_ast_lowering/src/expr.rs +++ b/compiler/rustc_ast_lowering/src/expr.rs @@ -164,6 +164,16 @@ impl<'hir> LoweringContext<'_, 'hir> { ExprKind::Range(ref e1, ref e2, lims) => { self.lower_expr_range(e.span, e1.as_deref(), e2.as_deref(), lims) } + ExprKind::Underscore => { + self.sess + .struct_span_err( + e.span, + "in expressions, `_` can only be used on the left-hand side of an assignment", + ) + .span_label(e.span, "`_` not allowed here") + .emit(); + hir::ExprKind::Err + } ExprKind::Path(ref qself, ref path) => { let qpath = self.lower_qpath( e.id, @@ -187,8 +197,18 @@ impl<'hir> LoweringContext<'_, 'hir> { } ExprKind::InlineAsm(ref asm) => self.lower_expr_asm(e.span, asm), ExprKind::LlvmInlineAsm(ref asm) => self.lower_expr_llvm_asm(asm), - ExprKind::Struct(ref path, ref fields, ref maybe_expr) => { - let maybe_expr = maybe_expr.as_ref().map(|x| self.lower_expr(x)); + ExprKind::Struct(ref path, ref fields, ref rest) => { + let rest = match rest { + StructRest::Base(e) => Some(self.lower_expr(e)), + StructRest::Rest(sp) => { + self.sess + .struct_span_err(*sp, "base expression required after `..`") + .span_label(*sp, "add a base expression here") + .emit(); + Some(&*self.arena.alloc(self.expr_err(*sp))) + } + StructRest::None => None, + }; hir::ExprKind::Struct( self.arena.alloc(self.lower_qpath( e.id, @@ -198,7 +218,7 @@ impl<'hir> LoweringContext<'_, 'hir> { ImplTraitContext::disallowed(), )), self.arena.alloc_from_iter(fields.iter().map(|x| self.lower_field(x))), - maybe_expr, + rest, ) } ExprKind::Yield(ref opt_expr) => self.lower_expr_yield(e.span, opt_expr.as_deref()), @@ -851,20 +871,25 @@ impl<'hir> LoweringContext<'_, 'hir> { whole_span: Span, ) -> hir::ExprKind<'hir> { // Return early in case of an ordinary assignment. - fn is_ordinary(lhs: &Expr) -> bool { + fn is_ordinary(lower_ctx: &mut LoweringContext<'_, '_>, lhs: &Expr) -> bool { match &lhs.kind { - ExprKind::Tup(..) => false, + ExprKind::Array(..) + | ExprKind::Struct(..) + | ExprKind::Tup(..) + | ExprKind::Underscore => false, + // Check for tuple struct constructor. + ExprKind::Call(callee, ..) => lower_ctx.extract_tuple_struct_path(callee).is_none(), ExprKind::Paren(e) => { match e.kind { // We special-case `(..)` for consistency with patterns. ExprKind::Range(None, None, RangeLimits::HalfOpen) => false, - _ => is_ordinary(e), + _ => is_ordinary(lower_ctx, e), } } _ => true, } } - if is_ordinary(lhs) { + if is_ordinary(self, lhs) { return hir::ExprKind::Assign(self.lower_expr(lhs), self.lower_expr(rhs), eq_sign_span); } if !self.sess.features_untracked().destructuring_assignment { @@ -902,6 +927,26 @@ impl<'hir> LoweringContext<'_, 'hir> { hir::ExprKind::Block(&self.block_all(whole_span, stmts, None), None) } + /// If the given expression is a path to a tuple struct, returns that path. + /// It is not a complete check, but just tries to reject most paths early + /// if they are not tuple structs. + /// Type checking will take care of the full validation later. 
+ fn extract_tuple_struct_path<'a>(&mut self, expr: &'a Expr) -> Option<&'a Path> { + // For tuple struct destructuring, it must be a non-qualified path (like in patterns). + if let ExprKind::Path(None, path) = &expr.kind { + // Does the path resolves to something disallowed in a tuple struct/variant pattern? + if let Some(partial_res) = self.resolver.get_partial_res(expr.id) { + if partial_res.unresolved_segments() == 0 + && !partial_res.base_res().expected_in_tuple_struct_pat() + { + return None; + } + } + return Some(path); + } + None + } + /// Convert the LHS of a destructuring assignment to a pattern. /// Each sub-assignment is recorded in `assignments`. fn destructure_assign( @@ -911,6 +956,90 @@ impl<'hir> LoweringContext<'_, 'hir> { assignments: &mut Vec>, ) -> &'hir hir::Pat<'hir> { match &lhs.kind { + // Underscore pattern. + ExprKind::Underscore => { + return self.pat_without_dbm(lhs.span, hir::PatKind::Wild); + } + // Slice patterns. + ExprKind::Array(elements) => { + let (pats, rest) = + self.destructure_sequence(elements, "slice", eq_sign_span, assignments); + let slice_pat = if let Some((i, span)) = rest { + let (before, after) = pats.split_at(i); + hir::PatKind::Slice( + before, + Some(self.pat_without_dbm(span, hir::PatKind::Wild)), + after, + ) + } else { + hir::PatKind::Slice(pats, None, &[]) + }; + return self.pat_without_dbm(lhs.span, slice_pat); + } + // Tuple structs. + ExprKind::Call(callee, args) => { + if let Some(path) = self.extract_tuple_struct_path(callee) { + let (pats, rest) = self.destructure_sequence( + args, + "tuple struct or variant", + eq_sign_span, + assignments, + ); + let qpath = self.lower_qpath( + callee.id, + &None, + path, + ParamMode::Optional, + ImplTraitContext::disallowed(), + ); + // Destructure like a tuple struct. + let tuple_struct_pat = + hir::PatKind::TupleStruct(qpath, pats, rest.map(|r| r.0)); + return self.pat_without_dbm(lhs.span, tuple_struct_pat); + } + } + // Structs. + ExprKind::Struct(path, fields, rest) => { + let field_pats = self.arena.alloc_from_iter(fields.iter().map(|f| { + let pat = self.destructure_assign(&f.expr, eq_sign_span, assignments); + hir::FieldPat { + hir_id: self.next_id(), + ident: f.ident, + pat, + is_shorthand: f.is_shorthand, + span: f.span, + } + })); + let qpath = self.lower_qpath( + lhs.id, + &None, + path, + ParamMode::Optional, + ImplTraitContext::disallowed(), + ); + let fields_omitted = match rest { + StructRest::Base(e) => { + self.sess + .struct_span_err( + e.span, + "functional record updates are not allowed in destructuring \ + assignments", + ) + .span_suggestion( + e.span, + "consider removing the trailing pattern", + String::new(), + rustc_errors::Applicability::MachineApplicable, + ) + .emit(); + true + } + StructRest::Rest(_) => true, + StructRest::None => false, + }; + let struct_pat = hir::PatKind::Struct(qpath, field_pats, fields_omitted); + return self.pat_without_dbm(lhs.span, struct_pat); + } // Tuples. ExprKind::Tup(elements) => { let (pats, rest) = @@ -1255,14 +1384,18 @@ impl<'hir> LoweringContext<'_, 'hir> { let mut used_input_regs = FxHashMap::default(); let mut used_output_regs = FxHashMap::default(); + let mut required_features: Vec<&str> = vec![]; for (idx, op) in operands.iter().enumerate() { let op_sp = asm.operands[idx].1; if let Some(reg) = op.reg() { + // Make sure we don't accidentally carry features from the + // previous iteration. + required_features.clear(); + // Validate register classes against currently enabled target // features. 
We check that at least one type is available for // the current target. let reg_class = reg.reg_class(); - let mut required_features: Vec<&str> = vec![]; for &(_, feature) in reg_class.supported_types(asm_arch) { if let Some(feature) = feature { if self.sess.target_features.contains(&Symbol::intern(feature)) { diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs index 617cacee0e7..d353bc19f7a 100644 --- a/compiler/rustc_ast_lowering/src/item.rs +++ b/compiler/rustc_ast_lowering/src/item.rs @@ -1096,8 +1096,18 @@ impl<'hir> LoweringContext<'_, 'hir> { // Check if this is a binding pattern, if so, we can optimize and avoid adding a // `let = __argN;` statement. In this case, we do not rename the parameter. let (ident, is_simple_parameter) = match parameter.pat.kind { - hir::PatKind::Binding(hir::BindingAnnotation::Unannotated, _, ident, _) => { - (ident, true) + hir::PatKind::Binding( + hir::BindingAnnotation::Unannotated | hir::BindingAnnotation::Mutable, + _, + ident, + _, + ) => (ident, true), + // For `ref mut` or wildcard arguments, we can't reuse the binding, but + // we can keep the same name for the parameter. + // This lets rustdoc render it correctly in documentation. + hir::PatKind::Binding(_, _, ident, _) => (ident, false), + hir::PatKind::Wild => { + (Ident::with_dummy_span(rustc_span::symbol::kw::Underscore), false) } _ => { // Replace the ident for bindings that aren't simple. diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs index af2f96d5e62..d93655e5905 100644 --- a/compiler/rustc_ast_lowering/src/lib.rs +++ b/compiler/rustc_ast_lowering/src/lib.rs @@ -53,7 +53,6 @@ use rustc_hir::definitions::{DefKey, DefPathData, Definitions}; use rustc_hir::intravisit; use rustc_hir::{ConstArg, GenericArg, ParamName}; use rustc_index::vec::{Idx, IndexVec}; -use rustc_session::config::nightly_options; use rustc_session::lint::{builtin::BARE_TRAIT_OBJECTS, BuiltinLintDiagnostics, LintBuffer}; use rustc_session::parse::ParseSess; use rustc_session::Session; @@ -966,17 +965,20 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { // Note that we explicitly do not walk the path. Since we don't really // lower attributes (we use the AST version) there is nowhere to keep // the `HirId`s. We don't actually need HIR version of attributes anyway. + // Tokens are also not needed after macro expansion and parsing. 
let kind = match attr.kind { - AttrKind::Normal(ref item) => AttrKind::Normal(AttrItem { - path: item.path.clone(), - args: self.lower_mac_args(&item.args), - tokens: None, - }), + AttrKind::Normal(ref item, _) => AttrKind::Normal( + AttrItem { + path: item.path.clone(), + args: self.lower_mac_args(&item.args), + tokens: None, + }, + None, + ), AttrKind::DocComment(comment_kind, data) => AttrKind::DocComment(comment_kind, data), }; - // Tokens aren't needed after macro expansion and parsing - Attribute { kind, id: attr.id, style: attr.style, span: attr.span, tokens: None } + Attribute { kind, id: attr.id, style: attr.style, span: attr.span } } fn lower_mac_args(&mut self, args: &MacArgs) -> MacArgs { @@ -1395,8 +1397,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { "`impl Trait` not allowed outside of {}", allowed_in, ); - if pos == ImplTraitPosition::Binding && nightly_options::is_nightly_build() - { + if pos == ImplTraitPosition::Binding && self.sess.is_nightly_build() { err.help( "add `#![feature(impl_trait_in_bindings)]` to the crate \ attributes to enable", @@ -2008,17 +2009,17 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { // // For the "output" lifetime parameters, we just want to // generate `'_`. - let mut generic_args: Vec<_> = lifetime_params[..input_lifetimes_count] - .iter() - .map(|&(span, hir_name)| { + let mut generic_args = Vec::with_capacity(lifetime_params.len()); + generic_args.extend(lifetime_params[..input_lifetimes_count].iter().map( + |&(span, hir_name)| { // Input lifetime like `'a` or `'1`: GenericArg::Lifetime(hir::Lifetime { hir_id: self.next_id(), span, name: hir::LifetimeName::Param(hir_name), }) - }) - .collect(); + }, + )); generic_args.extend(lifetime_params[input_lifetimes_count..].iter().map(|&(span, _)| // Output lifetime like `'_`. 
GenericArg::Lifetime(hir::Lifetime { @@ -2309,29 +2310,30 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { } fn lower_block_noalloc(&mut self, b: &Block, targeted_by_break: bool) -> hir::Block<'hir> { - let mut stmts = vec![]; let mut expr: Option<&'hir _> = None; - for (index, stmt) in b.stmts.iter().enumerate() { - if index == b.stmts.len() - 1 { - if let StmtKind::Expr(ref e) = stmt.kind { - expr = Some(self.lower_expr(e)); - } else { - stmts.extend(self.lower_stmt(stmt)); - } - } else { - stmts.extend(self.lower_stmt(stmt)); - } - } + let stmts = self.arena.alloc_from_iter( + b.stmts + .iter() + .enumerate() + .filter_map(|(index, stmt)| { + if index == b.stmts.len() - 1 { + if let StmtKind::Expr(ref e) = stmt.kind { + expr = Some(self.lower_expr(e)); + None + } else { + Some(self.lower_stmt(stmt)) + } + } else { + Some(self.lower_stmt(stmt)) + } + }) + .flatten(), + ); + let rules = self.lower_block_check_mode(&b.rules); + let hir_id = self.lower_node_id(b.id); - hir::Block { - hir_id: self.lower_node_id(b.id), - stmts: self.arena.alloc_from_iter(stmts), - expr, - rules: self.lower_block_check_mode(&b.rules), - span: b.span, - targeted_by_break, - } + hir::Block { hir_id, stmts, expr, rules, span: b.span, targeted_by_break } } /// Lowers a block directly to an expression, presuming that it diff --git a/compiler/rustc_ast_passes/src/ast_validation.rs b/compiler/rustc_ast_passes/src/ast_validation.rs index d6585bcc425..08ebcbf381a 100644 --- a/compiler/rustc_ast_passes/src/ast_validation.rs +++ b/compiler/rustc_ast_passes/src/ast_validation.rs @@ -522,7 +522,7 @@ impl<'a> AstValidator<'a> { self.err_handler() .struct_span_err(ident.span, "functions in `extern` blocks cannot have qualifiers") .span_label(self.current_extern_span(), "in this `extern` block") - .span_suggestion( + .span_suggestion_verbose( span.until(ident.span.shrink_to_lo()), "remove the qualifiers", "fn ".to_string(), @@ -796,7 +796,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> { fn visit_expr(&mut self, expr: &'a Expr) { match &expr.kind { - ExprKind::LlvmInlineAsm(..) if !self.session.target.options.allow_asm => { + ExprKind::LlvmInlineAsm(..) if !self.session.target.allow_asm => { struct_span_err!( self.session, expr.span, diff --git a/compiler/rustc_ast_passes/src/feature_gate.rs b/compiler/rustc_ast_passes/src/feature_gate.rs index f2008449767..181783441f3 100644 --- a/compiler/rustc_ast_passes/src/feature_gate.rs +++ b/compiler/rustc_ast_passes/src/feature_gate.rs @@ -630,6 +630,11 @@ pub fn check_crate(krate: &ast::Crate, sess: &Session) { gate_all!(const_trait_impl, "const trait impls are experimental"); gate_all!(half_open_range_patterns, "half-open range patterns are unstable"); gate_all!(inline_const, "inline-const is experimental"); + if sess.parse_sess.span_diagnostic.err_count() == 0 { + // Errors for `destructuring_assignment` can get quite noisy, especially where `_` is + // involved, so we only emit errors where there are no other parsing errors. + gate_all!(destructuring_assignment, "destructuring assignments are unstable"); + } // All uses of `gate_all!` below this point were added in #65742, // and subsequently disabled (with the non-early gating readded). 
diff --git a/compiler/rustc_ast_passes/src/node_count.rs b/compiler/rustc_ast_passes/src/node_count.rs index 706dca2b7f4..6efc78c8842 100644 --- a/compiler/rustc_ast_passes/src/node_count.rs +++ b/compiler/rustc_ast_passes/src/node_count.rs @@ -114,9 +114,9 @@ impl<'ast> Visitor<'ast> for NodeCounter { self.count += 1; walk_lifetime(self, lifetime) } - fn visit_mac(&mut self, _mac: &MacCall) { + fn visit_mac_call(&mut self, mac: &MacCall) { self.count += 1; - walk_mac(self, _mac) + walk_mac(self, mac) } fn visit_path(&mut self, path: &Path, _id: NodeId) { self.count += 1; diff --git a/compiler/rustc_ast_passes/src/show_span.rs b/compiler/rustc_ast_passes/src/show_span.rs index 053aba86222..6cef26a13e6 100644 --- a/compiler/rustc_ast_passes/src/show_span.rs +++ b/compiler/rustc_ast_passes/src/show_span.rs @@ -54,10 +54,6 @@ impl<'a> Visitor<'a> for ShowSpanVisitor<'a> { } visit::walk_ty(self, t); } - - fn visit_mac(&mut self, mac: &'a ast::MacCall) { - visit::walk_mac(self, mac); - } } pub fn run(span_diagnostic: &rustc_errors::Handler, mode: &str, krate: &ast::Crate) { diff --git a/compiler/rustc_ast_pretty/src/pprust/state.rs b/compiler/rustc_ast_pretty/src/pprust/state.rs index 9fcba902443..fdb129d9e2a 100644 --- a/compiler/rustc_ast_pretty/src/pprust/state.rs +++ b/compiler/rustc_ast_pretty/src/pprust/state.rs @@ -109,7 +109,6 @@ pub fn print_crate<'a>( ann: &'a dyn PpAnn, is_expanded: bool, edition: Edition, - has_injected_crate: bool, ) -> String { let mut s = State { s: pp::mk_printer(), @@ -119,7 +118,7 @@ pub fn print_crate<'a>( insert_extra_parens: true, }; - if is_expanded && has_injected_crate { + if is_expanded && !krate.attrs.iter().any(|attr| attr.has_name(sym::no_core)) { // We need to print `#![no_std]` (and its feature gate) so that // compiling pretty-printed source won't inject libstd again. 
// However, we don't want these attributes in the AST because @@ -426,7 +425,7 @@ pub trait PrintState<'a>: std::ops::Deref + std::ops::Dere } self.maybe_print_comment(attr.span.lo()); match attr.kind { - ast::AttrKind::Normal(ref item) => { + ast::AttrKind::Normal(ref item, _) => { match attr.style { ast::AttrStyle::Inner => self.word("#!["), ast::AttrStyle::Outer => self.word("#["), @@ -1729,7 +1728,7 @@ impl<'a> State<'a> { &mut self, path: &ast::Path, fields: &[ast::Field], - wth: &Option>, + rest: &ast::StructRest, attrs: &[ast::Attribute], ) { self.print_path(path, true, 0); @@ -1750,22 +1749,21 @@ impl<'a> State<'a> { }, |f| f.span, ); - match *wth { - Some(ref expr) => { + match rest { + ast::StructRest::Base(_) | ast::StructRest::Rest(_) => { self.ibox(INDENT_UNIT); if !fields.is_empty() { self.s.word(","); self.s.space(); } self.s.word(".."); - self.print_expr(expr); - self.end(); - } - _ => { - if !fields.is_empty() { - self.s.word(",") + if let ast::StructRest::Base(ref expr) = *rest { + self.print_expr(expr); } + self.end(); } + ast::StructRest::None if !fields.is_empty() => self.s.word(","), + _ => {} } self.s.word("}"); } @@ -1891,8 +1889,8 @@ impl<'a> State<'a> { ast::ExprKind::Repeat(ref element, ref count) => { self.print_expr_repeat(element, count, attrs); } - ast::ExprKind::Struct(ref path, ref fields, ref wth) => { - self.print_expr_struct(path, &fields[..], wth, attrs); + ast::ExprKind::Struct(ref path, ref fields, ref rest) => { + self.print_expr_struct(path, &fields[..], rest, attrs); } ast::ExprKind::Tup(ref exprs) => { self.print_expr_tup(&exprs[..], attrs); @@ -2069,6 +2067,7 @@ impl<'a> State<'a> { self.print_expr_maybe_paren(e, fake_prec); } } + ast::ExprKind::Underscore => self.s.word("_"), ast::ExprKind::Path(None, ref path) => self.print_path(path, true, 0), ast::ExprKind::Path(Some(ref qself), ref path) => self.print_qpath(path, qself, true), ast::ExprKind::Break(opt_label, ref opt_expr) => { @@ -2328,11 +2327,12 @@ impl<'a> State<'a> { self.print_path(path, false, depth); } self.s.word(">"); - self.s.word("::"); - let item_segment = path.segments.last().unwrap(); - self.print_ident(item_segment.ident); - if let Some(ref args) = item_segment.args { - self.print_generic_args(args, colons_before_params) + for item_segment in &path.segments[qself.position..] 
{ + self.s.word("::"); + self.print_ident(item_segment.ident); + if let Some(ref args) = item_segment.args { + self.print_generic_args(args, colons_before_params) + } } } diff --git a/compiler/rustc_attr/src/builtin.rs b/compiler/rustc_attr/src/builtin.rs index 2fd625c2a6c..364a3a1eeb5 100644 --- a/compiler/rustc_attr/src/builtin.rs +++ b/compiler/rustc_attr/src/builtin.rs @@ -67,7 +67,7 @@ fn handle_errors(sess: &ParseSess, span: Span, error: AttrError) { } } -#[derive(Clone, PartialEq, Encodable, Decodable)] +#[derive(Copy, Clone, PartialEq, Encodable, Decodable)] pub enum InlineAttr { None, Hint, diff --git a/compiler/rustc_builtin_macros/src/assert.rs b/compiler/rustc_builtin_macros/src/assert.rs index 5bfd8a2bf56..bb6d3f6a007 100644 --- a/compiler/rustc_builtin_macros/src/assert.rs +++ b/compiler/rustc_builtin_macros/src/assert.rs @@ -1,8 +1,8 @@ use rustc_errors::{Applicability, DiagnosticBuilder}; use rustc_ast::ptr::P; -use rustc_ast::token::{self, TokenKind}; -use rustc_ast::tokenstream::{DelimSpan, TokenStream, TokenTree}; +use rustc_ast::token; +use rustc_ast::tokenstream::{DelimSpan, TokenStream}; use rustc_ast::{self as ast, *}; use rustc_ast_pretty::pprust; use rustc_expand::base::*; @@ -26,31 +26,39 @@ pub fn expand_assert<'cx>( // `core::panic` and `std::panic` are different macros, so we use call-site // context to pick up whichever is currently in scope. let sp = cx.with_call_site_ctxt(sp); - let tokens = custom_message.unwrap_or_else(|| { - TokenStream::from(TokenTree::token( - TokenKind::lit( - token::Str, + + let panic_call = if let Some(tokens) = custom_message { + // Pass the custom message to panic!(). + cx.expr( + sp, + ExprKind::MacCall(MacCall { + path: Path::from_ident(Ident::new(sym::panic, sp)), + args: P(MacArgs::Delimited( + DelimSpan::from_single(sp), + MacDelimiter::Parenthesis, + tokens, + )), + prior_type_ascription: None, + }), + ) + } else { + // Pass our own message directly to $crate::panicking::panic(), + // because it might contain `{` and `}` that should always be + // passed literally. + cx.expr_call_global( + sp, + cx.std_path(&[sym::panicking, sym::panic]), + vec![cx.expr_str( + DUMMY_SP, Symbol::intern(&format!( "assertion failed: {}", pprust::expr_to_string(&cond_expr).escape_debug() )), - None, - ), - DUMMY_SP, - )) - }); - let args = P(MacArgs::Delimited(DelimSpan::from_single(sp), MacDelimiter::Parenthesis, tokens)); - let panic_call = MacCall { - path: Path::from_ident(Ident::new(sym::panic, sp)), - args, - prior_type_ascription: None, + )], + ) }; - let if_expr = cx.expr_if( - sp, - cx.expr(sp, ExprKind::Unary(UnOp::Not, cond_expr)), - cx.expr(sp, ExprKind::MacCall(panic_call)), - None, - ); + let if_expr = + cx.expr_if(sp, cx.expr(sp, ExprKind::Unary(UnOp::Not, cond_expr)), panic_call, None); MacEager::expr(if_expr) } diff --git a/compiler/rustc_builtin_macros/src/cfg_accessible.rs b/compiler/rustc_builtin_macros/src/cfg_accessible.rs index 75f4b077640..09ed1af3456 100644 --- a/compiler/rustc_builtin_macros/src/cfg_accessible.rs +++ b/compiler/rustc_builtin_macros/src/cfg_accessible.rs @@ -1,7 +1,7 @@ //! Implementation of the `#[cfg_accessible(path)]` attribute macro. 
use rustc_ast as ast; -use rustc_expand::base::{Annotatable, ExpandResult, ExtCtxt, MultiItemModifier}; +use rustc_expand::base::{Annotatable, ExpandResult, ExtCtxt, Indeterminate, MultiItemModifier}; use rustc_feature::AttributeTemplate; use rustc_parse::validate_attr; use rustc_span::symbol::sym; @@ -31,7 +31,7 @@ impl MultiItemModifier for Expander { fn expand( &self, ecx: &mut ExtCtxt<'_>, - _span: Span, + span: Span, meta_item: &ast::MetaItem, item: Annotatable, ) -> ExpandResult, Annotatable> { @@ -49,11 +49,14 @@ impl MultiItemModifier for Expander { None => return ExpandResult::Ready(Vec::new()), }; - let failure_msg = "cannot determine whether the path is accessible or not"; match ecx.resolver.cfg_accessible(ecx.current_expansion.id, path) { Ok(true) => ExpandResult::Ready(vec![item]), Ok(false) => ExpandResult::Ready(Vec::new()), - Err(_) => ExpandResult::Retry(item, failure_msg.into()), + Err(Indeterminate) if ecx.force_mode => { + ecx.span_err(span, "cannot determine whether the path is accessible or not"); + ExpandResult::Ready(vec![item]) + } + Err(Indeterminate) => ExpandResult::Retry(item), } } } diff --git a/compiler/rustc_builtin_macros/src/deriving/debug.rs b/compiler/rustc_builtin_macros/src/deriving/debug.rs index d84b3956475..9381264f498 100644 --- a/compiler/rustc_builtin_macros/src/deriving/debug.rs +++ b/compiler/rustc_builtin_macros/src/deriving/debug.rs @@ -66,7 +66,7 @@ fn show_substructure(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_> let fmt = substr.nonself_args[0].clone(); - let mut stmts = vec![]; + let mut stmts = Vec::with_capacity(fields.len() + 2); match vdata { ast::VariantData::Tuple(..) | ast::VariantData::Unit(..) => { // tuple struct/"normal" variant diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs index 2e52d2a3923..a767de53dae 100644 --- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs +++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs @@ -358,7 +358,7 @@ fn find_type_parameters( visit::walk_ty(self, ty) } - fn visit_mac(&mut self, mac: &ast::MacCall) { + fn visit_mac_call(&mut self, mac: &ast::MacCall) { self.cx.span_err(mac.span(), "`derive` cannot be used on items with type macros"); } } @@ -407,13 +407,7 @@ impl<'a> TraitDef<'a> { _ => false, }) } - _ => { - // Non-ADT derive is an error, but it should have been - // set earlier; see - // librustc_expand/expand.rs:MacroExpander::fully_expand_fragment() - // librustc_expand/base.rs:Annotatable::derive_allowed() - return; - } + _ => unreachable!(), }; let container_id = cx.current_expansion.id.expn_data().parent; let always_copy = has_no_type_params && cx.resolver.has_derive_copy(container_id); @@ -475,12 +469,7 @@ impl<'a> TraitDef<'a> { ); push(Annotatable::Item(P(ast::Item { attrs, ..(*newitem).clone() }))) } - _ => { - // Non-Item derive is an error, but it should have been - // set earlier; see - // librustc_expand/expand.rs:MacroExpander::fully_expand_fragment() - // librustc_expand/base.rs:Annotatable::derive_allowed() - } + _ => unreachable!(), } } diff --git a/compiler/rustc_builtin_macros/src/deriving/mod.rs b/compiler/rustc_builtin_macros/src/deriving/mod.rs index bf950934928..72d94af4694 100644 --- a/compiler/rustc_builtin_macros/src/deriving/mod.rs +++ b/compiler/rustc_builtin_macros/src/deriving/mod.rs @@ -98,13 +98,7 @@ fn inject_impl_of_structural_trait( ) { let item = match *item { Annotatable::Item(ref item) => item, - _ => { - // Non-Item derive is an error, 
but it should have been - // set earlier; see - // librustc_expand/expand.rs:MacroExpander::fully_expand_fragment() - // librustc_expand/base.rs:Annotatable::derive_allowed() - return; - } + _ => unreachable!(), }; let generics = match item.kind { diff --git a/compiler/rustc_builtin_macros/src/proc_macro_harness.rs b/compiler/rustc_builtin_macros/src/proc_macro_harness.rs index c6ab3faf568..4e91436199a 100644 --- a/compiler/rustc_builtin_macros/src/proc_macro_harness.rs +++ b/compiler/rustc_builtin_macros/src/proc_macro_harness.rs @@ -344,10 +344,6 @@ impl<'a> Visitor<'a> for CollectProcMacros<'a> { visit::walk_item(self, item); self.in_root = prev_in_root; } - - fn visit_mac(&mut self, mac: &'a ast::MacCall) { - visit::walk_mac(self, mac) - } } // Creates a new module which looks like: diff --git a/compiler/rustc_builtin_macros/src/standard_library_imports.rs b/compiler/rustc_builtin_macros/src/standard_library_imports.rs index e801b5c7b0c..91566ec1ef2 100644 --- a/compiler/rustc_builtin_macros/src/standard_library_imports.rs +++ b/compiler/rustc_builtin_macros/src/standard_library_imports.rs @@ -13,12 +13,12 @@ pub fn inject( resolver: &mut dyn ResolverExpand, sess: &Session, alt_std_name: Option, -) -> (ast::Crate, Option) { +) -> ast::Crate { let rust_2018 = sess.parse_sess.edition >= Edition::Edition2018; // the first name in this list is the crate name of the crate with the prelude let names: &[Symbol] = if sess.contains_name(&krate.attrs, sym::no_core) { - return (krate, None); + return krate; } else if sess.contains_name(&krate.attrs, sym::no_std) { if sess.contains_name(&krate.attrs, sym::compiler_builtins) { &[sym::core] @@ -81,5 +81,5 @@ pub fn inject( krate.module.items.insert(0, use_item); - (krate, Some(name)) + krate } diff --git a/compiler/rustc_builtin_macros/src/test_harness.rs b/compiler/rustc_builtin_macros/src/test_harness.rs index da74f0aeaa1..9976140d6bd 100644 --- a/compiler/rustc_builtin_macros/src/test_harness.rs +++ b/compiler/rustc_builtin_macros/src/test_harness.rs @@ -37,7 +37,7 @@ struct TestCtxt<'a> { pub fn inject(sess: &Session, resolver: &mut dyn ResolverExpand, krate: &mut ast::Crate) { let span_diagnostic = sess.diagnostic(); let panic_strategy = sess.panic_strategy(); - let platform_panic_strategy = sess.target.options.panic_strategy; + let platform_panic_strategy = sess.target.panic_strategy; // Check for #![reexport_test_harness_main = "some_name"] which gives the // main test function the name `some_name` without hygiene. This needs to be @@ -130,10 +130,6 @@ impl<'a> MutVisitor for TestHarnessGenerator<'a> { } smallvec![P(item)] } - - fn visit_mac(&mut self, _mac: &mut ast::MacCall) { - // Do nothing. - } } // Beware, this is duplicated in librustc_passes/entry.rs (with @@ -201,10 +197,6 @@ impl<'a> MutVisitor for EntryPointCleaner<'a> { smallvec![item] } - - fn visit_mac(&mut self, _mac: &mut ast::MacCall) { - // Do nothing. 
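The `cfg_accessible` hunk near the top of this section shows the general retry protocol used during expansion: if name resolution cannot yet tell whether the path exists, the expander asks to be retried later, unless expansion is running in force mode, in which case it reports an error and makes progress anyway. A condensed sketch of that control flow, using simplified stand-in types rather than the real `rustc_expand` API:

    // Sketch only: `ExpandResult` and `Indeterminate` here are simplified stand-ins.
    enum ExpandResult<T, U> {
        Ready(T),
        Retry(U),
    }
    struct Indeterminate;

    fn expand_cfg_accessible(
        force_mode: bool,
        accessible: Result<bool, Indeterminate>,
        item: String,
    ) -> ExpandResult<Vec<String>, String> {
        match accessible {
            // The path resolved and is accessible: keep the annotated item.
            Ok(true) => ExpandResult::Ready(vec![item]),
            // The path resolved but is not accessible: drop the item.
            Ok(false) => ExpandResult::Ready(Vec::new()),
            // Unresolved, but we must not stall forever: report and keep the item.
            Err(Indeterminate) if force_mode => {
                eprintln!("cannot determine whether the path is accessible or not");
                ExpandResult::Ready(vec![item])
            }
            // Unresolved and retrying is still allowed: hand the item back for a later pass.
            Err(Indeterminate) => ExpandResult::Retry(item),
        }
    }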
- } } /// Crawl over the crate, inserting test reexports and the test main function @@ -290,7 +282,7 @@ fn mk_main(cx: &mut TestCtxt<'_>) -> P { let mut test_runner = cx .test_runner .clone() - .unwrap_or(ecx.path(sp, vec![test_id, Ident::from_str_and_span(runner_name, sp)])); + .unwrap_or_else(|| ecx.path(sp, vec![test_id, Ident::from_str_and_span(runner_name, sp)])); test_runner.span = sp; diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs index 81091728692..ac076789f2e 100644 --- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs +++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs @@ -216,7 +216,7 @@ pub(crate) fn get_function_name_and_sig<'tcx>( assert!(!inst.substs.needs_infer()); let fn_sig = tcx.normalize_erasing_late_bound_regions( ParamEnv::reveal_all(), - &fn_sig_for_fn_abi(tcx, inst), + fn_sig_for_fn_abi(tcx, inst), ); if fn_sig.c_variadic && !support_vararg { tcx.sess.span_fatal( @@ -372,7 +372,7 @@ pub(crate) fn codegen_fn_prelude<'tcx>( .mir .args_iter() .map(|local| { - let arg_ty = fx.monomorphize(&fx.mir.local_decls[local].ty); + let arg_ty = fx.monomorphize(fx.mir.local_decls[local].ty); // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482 if Some(local) == fx.mir.spread_arg { @@ -470,7 +470,7 @@ pub(crate) fn codegen_fn_prelude<'tcx>( } for local in fx.mir.vars_and_temps_iter() { - let ty = fx.monomorphize(&fx.mir.local_decls[local].ty); + let ty = fx.monomorphize(fx.mir.local_decls[local].ty); let layout = fx.layout_of(ty); let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa; @@ -492,10 +492,10 @@ pub(crate) fn codegen_terminator_call<'tcx>( args: &[Operand<'tcx>], destination: Option<(Place<'tcx>, BasicBlock)>, ) { - let fn_ty = fx.monomorphize(&func.ty(fx.mir, fx.tcx)); + let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx)); let fn_sig = fx .tcx - .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), &fn_ty.fn_sig(fx.tcx)); + .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx)); let destination = destination.map(|(place, bb)| (codegen_place(fx, place), bb)); @@ -711,7 +711,7 @@ pub(crate) fn codegen_drop<'tcx>( let drop_fn_ty = drop_fn.ty(fx.tcx, ParamEnv::reveal_all()); let fn_sig = fx.tcx.normalize_erasing_late_bound_regions( ParamEnv::reveal_all(), - &drop_fn_ty.fn_sig(fx.tcx), + drop_fn_ty.fn_sig(fx.tcx), ); assert_eq!(fn_sig.output(), fx.tcx.mk_unit()); diff --git a/compiler/rustc_codegen_cranelift/src/analyze.rs b/compiler/rustc_codegen_cranelift/src/analyze.rs index fd25b19a583..adf5c7ac4fe 100644 --- a/compiler/rustc_codegen_cranelift/src/analyze.rs +++ b/compiler/rustc_codegen_cranelift/src/analyze.rs @@ -17,7 +17,7 @@ pub(crate) fn analyze(fx: &FunctionCx<'_, '_, impl Module>) -> IndexVec ArchiveBuilder<'a> for ArArchiveBuilder<'a> { sess, dst: output.to_path_buf(), lib_search_paths: archive_search_paths(sess), - use_gnu_style_archive: sess.target.options.archive_format == "gnu", + use_gnu_style_archive: sess.target.archive_format == "gnu", // FIXME fix builtin ranlib on macOS - no_builtin_ranlib: sess.target.options.is_like_osx, + no_builtin_ranlib: sess.target.is_like_osx, src_archives, entries, diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs index bfe5514b6d3..a4df371c88a 100644 --- a/compiler/rustc_codegen_cranelift/src/base.rs +++ b/compiler/rustc_codegen_cranelift/src/base.rs @@ -445,43 +445,43 @@ 
fn codegen_stmt<'tcx>( StatementKind::Assign(to_place_and_rval) => { let lval = codegen_place(fx, to_place_and_rval.0); let dest_layout = lval.layout(); - match &to_place_and_rval.1 { - Rvalue::Use(operand) => { + match to_place_and_rval.1 { + Rvalue::Use(ref operand) => { let val = codegen_operand(fx, operand); lval.write_cvalue(fx, val); } Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => { - let place = codegen_place(fx, *place); + let place = codegen_place(fx, place); let ref_ = place.place_ref(fx, lval.layout()); lval.write_cvalue(fx, ref_); } Rvalue::ThreadLocalRef(def_id) => { - let val = crate::constant::codegen_tls_ref(fx, *def_id, lval.layout()); + let val = crate::constant::codegen_tls_ref(fx, def_id, lval.layout()); lval.write_cvalue(fx, val); } - Rvalue::BinaryOp(bin_op, lhs, rhs) => { + Rvalue::BinaryOp(bin_op, ref lhs, ref rhs) => { let lhs = codegen_operand(fx, lhs); let rhs = codegen_operand(fx, rhs); - let res = crate::num::codegen_binop(fx, *bin_op, lhs, rhs); + let res = crate::num::codegen_binop(fx, bin_op, lhs, rhs); lval.write_cvalue(fx, res); } - Rvalue::CheckedBinaryOp(bin_op, lhs, rhs) => { + Rvalue::CheckedBinaryOp(bin_op, ref lhs, ref rhs) => { let lhs = codegen_operand(fx, lhs); let rhs = codegen_operand(fx, rhs); let res = if !fx.tcx.sess.overflow_checks() { let val = - crate::num::codegen_int_binop(fx, *bin_op, lhs, rhs).load_scalar(fx); + crate::num::codegen_int_binop(fx, bin_op, lhs, rhs).load_scalar(fx); let is_overflow = fx.bcx.ins().iconst(types::I8, 0); CValue::by_val_pair(val, is_overflow, lval.layout()) } else { - crate::num::codegen_checked_int_binop(fx, *bin_op, lhs, rhs) + crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs) }; lval.write_cvalue(fx, res); } - Rvalue::UnaryOp(un_op, operand) => { + Rvalue::UnaryOp(un_op, ref operand) => { let operand = codegen_operand(fx, operand); let layout = operand.layout(); let val = operand.load_scalar(fx); @@ -509,8 +509,8 @@ fn codegen_stmt<'tcx>( }; lval.write_cvalue(fx, res); } - Rvalue::Cast(CastKind::Pointer(PointerCast::ReifyFnPointer), operand, to_ty) => { - let from_ty = fx.monomorphize(&operand.ty(&fx.mir.local_decls, fx.tcx)); + Rvalue::Cast(CastKind::Pointer(PointerCast::ReifyFnPointer), ref operand, to_ty) => { + let from_ty = fx.monomorphize(operand.ty(&fx.mir.local_decls, fx.tcx)); let to_layout = fx.layout_of(fx.monomorphize(to_ty)); match *from_ty.kind() { ty::FnDef(def_id, substs) => { @@ -530,14 +530,14 @@ fn codegen_stmt<'tcx>( _ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", from_ty), } } - Rvalue::Cast(CastKind::Pointer(PointerCast::UnsafeFnPointer), operand, to_ty) - | Rvalue::Cast(CastKind::Pointer(PointerCast::MutToConstPointer), operand, to_ty) - | Rvalue::Cast(CastKind::Pointer(PointerCast::ArrayToPointer), operand, to_ty) => { + Rvalue::Cast(CastKind::Pointer(PointerCast::UnsafeFnPointer), ref operand, to_ty) + | Rvalue::Cast(CastKind::Pointer(PointerCast::MutToConstPointer), ref operand, to_ty) + | Rvalue::Cast(CastKind::Pointer(PointerCast::ArrayToPointer), ref operand, to_ty) => { let to_layout = fx.layout_of(fx.monomorphize(to_ty)); let operand = codegen_operand(fx, operand); lval.write_cvalue(fx, operand.cast_pointer_to(to_layout)); } - Rvalue::Cast(CastKind::Misc, operand, to_ty) => { + Rvalue::Cast(CastKind::Misc, ref operand, to_ty) => { let operand = codegen_operand(fx, operand); let from_ty = operand.layout().ty; let to_ty = fx.monomorphize(to_ty); @@ -577,12 +577,12 @@ fn codegen_stmt<'tcx>( use rustc_target::abi::{Int, TagEncoding, Variants}; - 
match &operand.layout().variants { + match operand.layout().variants { Variants::Single { index } => { let discr = operand .layout() .ty - .discriminant_for_variant(fx.tcx, *index) + .discriminant_for_variant(fx.tcx, index) .unwrap(); let discr = if discr.ty.is_signed() { fx.layout_of(discr.ty).size.sign_extend(discr.val) @@ -595,7 +595,7 @@ fn codegen_stmt<'tcx>( lval.write_cvalue(fx, discr); } Variants::Multiple { - tag, + ref tag, tag_field, tag_encoding: TagEncoding::Direct, variants: _, @@ -604,7 +604,7 @@ fn codegen_stmt<'tcx>( // Read the tag/niche-encoded discriminant from memory. let encoded_discr = - operand.value_field(fx, mir::Field::new(*tag_field)); + operand.value_field(fx, mir::Field::new(tag_field)); let encoded_discr = encoded_discr.load_scalar(fx); // Decode the discriminant (specifically if it's niche-encoded). @@ -634,7 +634,7 @@ fn codegen_stmt<'tcx>( } Rvalue::Cast( CastKind::Pointer(PointerCast::ClosureFnPointer(_)), - operand, + ref operand, _to_ty, ) => { let operand = codegen_operand(fx, operand); @@ -654,18 +654,18 @@ fn codegen_stmt<'tcx>( _ => bug!("{} cannot be cast to a fn ptr", operand.layout().ty), } } - Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), operand, _to_ty) => { + Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), ref operand, _to_ty) => { let operand = codegen_operand(fx, operand); operand.unsize_value(fx, lval); } Rvalue::Discriminant(place) => { - let place = codegen_place(fx, *place); + let place = codegen_place(fx, place); let value = place.to_cvalue(fx); let discr = crate::discriminant::codegen_get_discriminant(fx, value, dest_layout); lval.write_cvalue(fx, discr); } - Rvalue::Repeat(operand, times) => { + Rvalue::Repeat(ref operand, times) => { let operand = codegen_operand(fx, operand); let times = fx .monomorphize(times) @@ -704,7 +704,7 @@ fn codegen_stmt<'tcx>( } } Rvalue::Len(place) => { - let place = codegen_place(fx, *place); + let place = codegen_place(fx, place); let usize_layout = fx.layout_of(fx.tcx.types.usize); let len = codegen_array_len(fx, place); lval.write_cvalue(fx, CValue::by_val(len, usize_layout)); @@ -749,7 +749,7 @@ fn codegen_stmt<'tcx>( CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), ty_size.into()); lval.write_cvalue(fx, val); } - Rvalue::Aggregate(kind, operands) => match **kind { + Rvalue::Aggregate(ref kind, ref operands) => match kind.as_ref() { AggregateKind::Array(_ty) => { for (i, operand) in operands.iter().enumerate() { let operand = codegen_operand(fx, operand); @@ -877,8 +877,7 @@ fn codegen_array_len<'tcx>( match *place.layout().ty.kind() { ty::Array(_elem_ty, len) => { let len = fx - .monomorphize(&len) - .eval(fx.tcx, ParamEnv::reveal_all()) + .monomorphize(len) .eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64; fx.bcx.ins().iconst(fx.pointer_type, len) } diff --git a/compiler/rustc_codegen_cranelift/src/bin/cg_clif.rs b/compiler/rustc_codegen_cranelift/src/bin/cg_clif.rs index 71ef4d22673..cd01acc9a83 100644 --- a/compiler/rustc_codegen_cranelift/src/bin/cg_clif.rs +++ b/compiler/rustc_codegen_cranelift/src/bin/cg_clif.rs @@ -27,8 +27,8 @@ impl rustc_driver::Callbacks for CraneliftPassesCallbacks { config.opts.cg.panic = Some(PanicStrategy::Abort); config.opts.debugging_opts.panic_abort_tests = true; config.opts.maybe_sysroot = Some( - config.opts.maybe_sysroot.clone().unwrap_or( - std::env::current_exe() + config.opts.maybe_sysroot.clone().unwrap_or_else( + || std::env::current_exe() .unwrap() .parent() .unwrap() diff --git a/compiler/rustc_codegen_cranelift/src/common.rs 
b/compiler/rustc_codegen_cranelift/src/common.rs index 466758f2f86..d7d6c3e1677 100644 --- a/compiler/rustc_codegen_cranelift/src/common.rs +++ b/compiler/rustc_codegen_cranelift/src/common.rs @@ -357,7 +357,7 @@ impl<'tcx, M: Module> HasTargetSpec for FunctionCx<'_, 'tcx, M> { } impl<'tcx, M: Module> FunctionCx<'_, 'tcx, M> { - pub(crate) fn monomorphize(&self, value: &T) -> T + pub(crate) fn monomorphize(&self, value: T) -> T where T: TypeFoldable<'tcx> + Copy, { diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs index 41cfae4ca6e..351bb6ecd23 100644 --- a/compiler/rustc_codegen_cranelift/src/constant.rs +++ b/compiler/rustc_codegen_cranelift/src/constant.rs @@ -38,7 +38,7 @@ impl ConstantCx { pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, impl Module>) { for constant in &fx.mir.required_consts { - let const_ = fx.monomorphize(&constant.literal); + let const_ = fx.monomorphize(constant.literal); match const_.val { ConstKind::Value(_) => {} ConstKind::Unevaluated(def, ref substs, promoted) => { @@ -110,7 +110,7 @@ pub(crate) fn codegen_constant<'tcx>( fx: &mut FunctionCx<'_, 'tcx, impl Module>, constant: &Constant<'tcx>, ) -> CValue<'tcx> { - let const_ = fx.monomorphize(&constant.literal); + let const_ = fx.monomorphize(constant.literal); let const_val = match const_.val { ConstKind::Value(const_val) => const_val, ConstKind::Unevaluated(def, ref substs, promoted) if fx.tcx.is_static(def.did) => { @@ -466,7 +466,7 @@ pub(crate) fn mir_operand_get_const_val<'tcx>( match operand { Operand::Copy(_) | Operand::Move(_) => None, Operand::Constant(const_) => Some( - fx.monomorphize(&const_.literal) + fx.monomorphize(const_.literal) .eval(fx.tcx, ParamEnv::reveal_all()), ), } diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs index cbf9522b1d7..a6f4ded41b6 100644 --- a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs +++ b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs @@ -50,7 +50,7 @@ impl<'tcx> DebugContext<'tcx> { // TODO: this should be configurable // macOS doesn't seem to support DWARF > 3 // 5 version is required for md5 file hash - version: if tcx.sess.target.options.is_like_osx { + version: if tcx.sess.target.is_like_osx { 3 } else { // FIXME change to version 5 once the gdb and lldb shipping with the latest debian @@ -365,7 +365,7 @@ impl<'tcx> DebugContext<'tcx> { let ty = self.tcx.subst_and_normalize_erasing_regions( instance.substs, ty::ParamEnv::reveal_all(), - &mir.local_decls[local].ty, + mir.local_decls[local].ty, ); let var_id = self.define_local(entry_id, format!("{:?}", local), ty); diff --git a/compiler/rustc_codegen_cranelift/src/driver/aot.rs b/compiler/rustc_codegen_cranelift/src/driver/aot.rs index ff0b994c9a9..c0245aa1e02 100644 --- a/compiler/rustc_codegen_cranelift/src/driver/aot.rs +++ b/compiler/rustc_codegen_cranelift/src/driver/aot.rs @@ -320,8 +320,8 @@ fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) { } if cfg!(not(feature = "inline_asm")) - || tcx.sess.target.options.is_like_osx - || tcx.sess.target.options.is_like_windows + || tcx.sess.target.is_like_osx + || tcx.sess.target.is_like_windows { if global_asm.contains("__rust_probestack") { return; diff --git a/compiler/rustc_codegen_cranelift/src/main_shim.rs b/compiler/rustc_codegen_cranelift/src/main_shim.rs index 10f515e38ea..6c472e6774f 100644 --- a/compiler/rustc_codegen_cranelift/src/main_shim.rs +++ 
b/compiler/rustc_codegen_cranelift/src/main_shim.rs @@ -50,7 +50,7 @@ pub(crate) fn maybe_create_entry_wrapper( // late-bound regions, since late-bound // regions must appear in the argument // listing. - let main_ret_ty = tcx.erase_regions(&main_ret_ty.no_bound_vars().unwrap()); + let main_ret_ty = tcx.erase_regions(main_ret_ty.no_bound_vars().unwrap()); let cmain_sig = Signature { params: vec![ diff --git a/compiler/rustc_codegen_cranelift/src/metadata.rs b/compiler/rustc_codegen_cranelift/src/metadata.rs index cda2a187ff9..2e3b9fb8364 100644 --- a/compiler/rustc_codegen_cranelift/src/metadata.rs +++ b/compiler/rustc_codegen_cranelift/src/metadata.rs @@ -101,7 +101,7 @@ pub(crate) fn write_metadata( product.add_rustc_section( rustc_middle::middle::exported_symbols::metadata_symbol_name(tcx), compressed, - tcx.sess.target.options.is_like_osx, + tcx.sess.target.is_like_osx, ); metadata diff --git a/compiler/rustc_codegen_cranelift/src/pretty_clif.rs b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs index ff878af7f5e..a9f060e51d8 100644 --- a/compiler/rustc_codegen_cranelift/src/pretty_clif.rs +++ b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs @@ -80,7 +80,7 @@ impl CommentWriter { "sig {:?}", tcx.normalize_erasing_late_bound_regions( ParamEnv::reveal_all(), - &crate::abi::fn_sig_for_fn_abi(tcx, instance) + crate::abi::fn_sig_for_fn_abi(tcx, instance) ) ), String::new(), diff --git a/compiler/rustc_codegen_cranelift/src/toolchain.rs b/compiler/rustc_codegen_cranelift/src/toolchain.rs index 463afaf7cc5..735c59d70c1 100644 --- a/compiler/rustc_codegen_cranelift/src/toolchain.rs +++ b/compiler/rustc_codegen_cranelift/src/toolchain.rs @@ -91,7 +91,7 @@ fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { } else if stem == "link" || stem == "lld-link" { LinkerFlavor::Msvc } else if stem == "lld" || stem == "rust-lld" { - LinkerFlavor::Lld(sess.target.options.lld_flavor) + LinkerFlavor::Lld(sess.target.lld_flavor) } else { // fall back to the value in the target spec sess.target.linker_flavor @@ -115,7 +115,7 @@ fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { if let Some(ret) = infer_from( sess, - sess.target.options.linker.clone().map(PathBuf::from), + sess.target.linker.clone().map(PathBuf::from), Some(sess.target.linker_flavor), ) { return ret; diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs index 0000866c4f6..cb40d4ed9a6 100644 --- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs +++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs @@ -455,7 +455,7 @@ impl<'tcx> CPlace<'tcx> { from_ty: Ty<'tcx>, to_ty: Ty<'tcx>, ) { - match (&from_ty.kind(), &to_ty.kind()) { + match (from_ty.kind(), to_ty.kind()) { (ty::Ref(_, a, _), ty::Ref(_, b, _)) | ( ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), @@ -466,11 +466,11 @@ impl<'tcx> CPlace<'tcx> { (ty::FnPtr(_), ty::FnPtr(_)) => { let from_sig = fx.tcx.normalize_erasing_late_bound_regions( ParamEnv::reveal_all(), - &from_ty.fn_sig(fx.tcx), + from_ty.fn_sig(fx.tcx), ); let to_sig = fx.tcx.normalize_erasing_late_bound_regions( ParamEnv::reveal_all(), - &to_ty.fn_sig(fx.tcx), + to_ty.fn_sig(fx.tcx), ); assert_eq!( from_sig, to_sig, @@ -479,7 +479,7 @@ impl<'tcx> CPlace<'tcx> { ); // fn(&T) -> for<'l> fn(&'l T) is allowed } - (ty::Dynamic(from_traits, _), ty::Dynamic(to_traits, _)) => { + (&ty::Dynamic(from_traits, _), &ty::Dynamic(to_traits, _)) => { let from_traits = fx .tcx 
.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from_traits); diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 7857ccb613b..915dd3d9eda 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -36,17 +36,17 @@ impl ArgAttributeExt for ArgAttribute { where F: FnMut(llvm::Attribute), { - for_each_kind!(self, f, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg) + for_each_kind!(self, f, NoAlias, NoCapture, NonNull, ReadOnly, InReg) } } pub trait ArgAttributesExt { - fn apply_llfn(&self, idx: AttributePlace, llfn: &Value, ty: Option<&Type>); - fn apply_callsite(&self, idx: AttributePlace, callsite: &Value, ty: Option<&Type>); + fn apply_attrs_to_llfn(&self, idx: AttributePlace, llfn: &Value); + fn apply_attrs_to_callsite(&self, idx: AttributePlace, callsite: &Value); } impl ArgAttributesExt for ArgAttributes { - fn apply_llfn(&self, idx: AttributePlace, llfn: &Value, ty: Option<&Type>) { + fn apply_attrs_to_llfn(&self, idx: AttributePlace, llfn: &Value) { let mut regular = self.regular; unsafe { let deref = self.pointee_size.bytes(); @@ -61,14 +61,20 @@ impl ArgAttributesExt for ArgAttributes { if let Some(align) = self.pointee_align { llvm::LLVMRustAddAlignmentAttr(llfn, idx.as_uint(), align.bytes() as u32); } - if regular.contains(ArgAttribute::ByVal) { - llvm::LLVMRustAddByValAttr(llfn, idx.as_uint(), ty.unwrap()); - } regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn)); + match self.arg_ext { + ArgExtension::None => {} + ArgExtension::Zext => { + llvm::Attribute::ZExt.apply_llfn(idx, llfn); + } + ArgExtension::Sext => { + llvm::Attribute::SExt.apply_llfn(idx, llfn); + } + } } } - fn apply_callsite(&self, idx: AttributePlace, callsite: &Value, ty: Option<&Type>) { + fn apply_attrs_to_callsite(&self, idx: AttributePlace, callsite: &Value) { let mut regular = self.regular; unsafe { let deref = self.pointee_size.bytes(); @@ -91,10 +97,16 @@ impl ArgAttributesExt for ArgAttributes { align.bytes() as u32, ); } - if regular.contains(ArgAttribute::ByVal) { - llvm::LLVMRustAddByValCallSiteAttr(callsite, idx.as_uint(), ty.unwrap()); - } regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite)); + match self.arg_ext { + ArgExtension::None => {} + ArgExtension::Zext => { + llvm::Attribute::ZExt.apply_callsite(idx, callsite); + } + ArgExtension::Sext => { + llvm::Attribute::SExt.apply_callsite(idx, callsite); + } + } } } } @@ -146,7 +158,7 @@ impl LlvmType for CastTarget { .prefix .iter() .flat_map(|option_kind| { - option_kind.map(|kind| Reg { kind, size: self.prefix_chunk }.llvm_type(cx)) + option_kind.map(|kind| Reg { kind, size: self.prefix_chunk_size }.llvm_type(cx)) }) .chain((0..rest_count).map(|_| rest_ll_unit)) .collect(); @@ -267,10 +279,12 @@ impl ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { PassMode::Pair(..) 
=> { OperandValue::Pair(next(), next()).store(bx, dst); } - PassMode::Indirect(_, Some(_)) => { + PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => { OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst); } - PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => { + PassMode::Direct(_) + | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } + | PassMode::Cast(_) => { let next_arg = next(); self.store(bx, next_arg, dst); } @@ -315,14 +329,14 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> { if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 } ).sum(); let mut llargument_tys = Vec::with_capacity( - if let PassMode::Indirect(..) = self.ret.mode { 1 } else { 0 } + args_capacity, + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 } + args_capacity, ); let llreturn_ty = match self.ret.mode { PassMode::Ignore => cx.type_void(), PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx), PassMode::Cast(cast) => cast.llvm_type(cx), - PassMode::Indirect(..) => { + PassMode::Indirect { .. } => { llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx))); cx.type_void() } @@ -342,7 +356,7 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> { llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true)); continue; } - PassMode::Indirect(_, Some(_)) => { + PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => { let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty); let ptr_layout = cx.layout_of(ptr_ty); llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true)); @@ -350,7 +364,9 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> { continue; } PassMode::Cast(cast) => cast.llvm_type(cx), - PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)), + PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => { + cx.type_ptr_to(arg.memory_ty(cx)) + } }; llargument_tys.push(llarg_ty); } @@ -402,35 +418,54 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> { } let mut i = 0; - let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| { - attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn, ty); + let mut apply = |attrs: &ArgAttributes| { + attrs.apply_attrs_to_llfn(llvm::AttributePlace::Argument(i), llfn); i += 1; + i - 1 }; match self.ret.mode { PassMode::Direct(ref attrs) => { - attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn, None); + attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, llfn); + } + PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => { + assert!(!on_stack); + let i = apply(attrs); + llvm::Attribute::StructRet.apply_llfn(llvm::AttributePlace::Argument(i), llfn); } - PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.llvm_type(cx))), _ => {} } for arg in &self.args { if arg.pad.is_some() { - apply(&ArgAttributes::new(), None); + apply(&ArgAttributes::new()); } match arg.mode { PassMode::Ignore => {} - PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => { - apply(attrs, Some(arg.layout.llvm_type(cx))) + PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => { + let i = apply(attrs); + unsafe { + llvm::LLVMRustAddByValAttr( + llfn, + llvm::AttributePlace::Argument(i).as_uint(), + arg.layout.llvm_type(cx), + ); + } } - PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => { - apply(attrs, None); - apply(extra_attrs, None); + PassMode::Direct(ref attrs) + | PassMode::Indirect { ref attrs, extra_attrs: None, 
on_stack: false } => { + apply(attrs); + } + PassMode::Indirect { ref attrs, extra_attrs: Some(ref extra_attrs), on_stack } => { + assert!(!on_stack); + apply(attrs); + apply(extra_attrs); } PassMode::Pair(ref a, ref b) => { - apply(a, None); - apply(b, None); + apply(a); + apply(b); + } + PassMode::Cast(_) => { + apply(&ArgAttributes::new()); } - PassMode::Cast(_) => apply(&ArgAttributes::new(), None), } } } @@ -439,15 +474,21 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> { // FIXME(wesleywiser, eddyb): We should apply `nounwind` and `noreturn` as appropriate to this callsite. let mut i = 0; - let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| { - attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite, ty); + let mut apply = |attrs: &ArgAttributes| { + attrs.apply_attrs_to_callsite(llvm::AttributePlace::Argument(i), callsite); i += 1; + i - 1 }; match self.ret.mode { PassMode::Direct(ref attrs) => { - attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite, None); + attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, callsite); + } + PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => { + assert!(!on_stack); + let i = apply(attrs); + llvm::Attribute::StructRet + .apply_callsite(llvm::AttributePlace::Argument(i), callsite); } - PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.llvm_type(bx))), _ => {} } if let abi::Abi::Scalar(ref scalar) = self.ret.layout.abi { @@ -465,22 +506,39 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> { } for arg in &self.args { if arg.pad.is_some() { - apply(&ArgAttributes::new(), None); + apply(&ArgAttributes::new()); } match arg.mode { PassMode::Ignore => {} - PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => { - apply(attrs, Some(arg.layout.llvm_type(bx))) + PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => { + let i = apply(attrs); + unsafe { + llvm::LLVMRustAddByValCallSiteAttr( + callsite, + llvm::AttributePlace::Argument(i).as_uint(), + arg.layout.llvm_type(bx), + ); + } } - PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => { - apply(attrs, None); - apply(extra_attrs, None); + PassMode::Direct(ref attrs) + | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => { + apply(attrs); + } + PassMode::Indirect { + ref attrs, + extra_attrs: Some(ref extra_attrs), + on_stack: _, + } => { + apply(attrs); + apply(extra_attrs); } PassMode::Pair(ref a, ref b) => { - apply(a, None); - apply(b, None); + apply(a); + apply(b); + } + PassMode::Cast(_) => { + apply(&ArgAttributes::new()); } - PassMode::Cast(_) => apply(&ArgAttributes::new(), None), } } diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs index f7d82ff78fa..a5ea0b2a74c 100644 --- a/compiler/rustc_codegen_llvm/src/allocator.rs +++ b/compiler/rustc_codegen_llvm/src/allocator.rs @@ -57,7 +57,7 @@ pub(crate) unsafe fn codegen( let name = format!("__rust_{}", method.name); let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty); - if tcx.sess.target.options.default_hidden_visibility { + if tcx.sess.target.default_hidden_visibility { llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden); } if tcx.sess.must_emit_unwind_tables() { @@ -98,7 +98,7 @@ pub(crate) unsafe fn codegen( // -> ! 
DIFlagNoReturn llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn); - if tcx.sess.target.options.default_hidden_visibility { + if tcx.sess.target.default_hidden_visibility { llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden); } if tcx.sess.must_emit_unwind_tables() { diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index d856280158f..b5d279eeb6f 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -12,8 +12,8 @@ use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::*; use rustc_data_structures::fx::FxHashMap; use rustc_hir as hir; -use rustc_middle::span_bug; use rustc_middle::ty::layout::TyAndLayout; +use rustc_middle::{bug, span_bug}; use rustc_span::{Pos, Span}; use rustc_target::abi::*; use rustc_target::asm::*; @@ -260,6 +260,7 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { InlineAsmArch::Nvptx64 => {} InlineAsmArch::Hexagon => {} InlineAsmArch::Mips | InlineAsmArch::Mips64 => {} + InlineAsmArch::SpirV => {} } } if !options.contains(InlineAsmOptions::NOMEM) { @@ -518,6 +519,9 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'tcx>>) | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x", InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v", InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk", + InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { + bug!("LLVM backend does not support SPIR-V") + } } .to_string(), } @@ -580,6 +584,9 @@ fn modifier_to_llvm( _ => unreachable!(), }, InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None, + InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { + bug!("LLVM backend does not support SPIR-V") + } } } @@ -619,6 +626,9 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(), InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(), + InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { + bug!("LLVM backend does not support SPIR-V") + } } } diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs index d4872aedd70..62a7986c194 100644 --- a/compiler/rustc_codegen_llvm/src/attributes.rs +++ b/compiler/rustc_codegen_llvm/src/attributes.rs @@ -35,11 +35,7 @@ fn inline(cx: &CodegenCx<'ll, '_>, val: &'ll Value, inline: InlineAttr) { Attribute::NoInline.apply_llfn(Function, val); } } - None => { - Attribute::InlineHint.unapply_llfn(Function, val); - Attribute::AlwaysInline.unapply_llfn(Function, val); - Attribute::NoInline.unapply_llfn(Function, val); - } + None => {} }; } @@ -90,8 +86,7 @@ fn set_instrument_function(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) { // The function name varies on platforms. // See test/CodeGen/mcount.c in clang. 
- let mcount_name = - CString::new(cx.sess().target.options.target_mcount.as_str().as_bytes()).unwrap(); + let mcount_name = CString::new(cx.sess().target.mcount.as_str().as_bytes()).unwrap(); llvm::AddFunctionAttrStringValue( llfn, @@ -105,7 +100,7 @@ fn set_instrument_function(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) { fn set_probestack(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) { // Only use stack probes if the target specification indicates that we // should be using stack probes - if !cx.sess().target.options.stack_probes { + if !cx.sess().target.stack_probes { return; } @@ -145,25 +140,6 @@ fn set_probestack(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) { ); } -fn translate_obsolete_target_features(feature: &str) -> &str { - const LLVM9_FEATURE_CHANGES: &[(&str, &str)] = - &[("+fp-only-sp", "-fp64"), ("-fp-only-sp", "+fp64"), ("+d16", "-d32"), ("-d16", "+d32")]; - if llvm_util::get_major_version() >= 9 { - for &(old, new) in LLVM9_FEATURE_CHANGES { - if feature == old { - return new; - } - } - } else { - for &(old, new) in LLVM9_FEATURE_CHANGES { - if feature == new { - return old; - } - } - } - feature -} - pub fn llvm_target_features(sess: &Session) -> impl Iterator { const RUSTC_SPECIFIC_FEATURES: &[&str] = &["crt-static"]; @@ -173,13 +149,7 @@ pub fn llvm_target_features(sess: &Session) -> impl Iterator { .target_feature .split(',') .filter(|f| !RUSTC_SPECIFIC_FEATURES.iter().any(|s| f.contains(s))); - sess.target - .options - .features - .split(',') - .chain(cmdline) - .filter(|l| !l.is_empty()) - .map(translate_obsolete_target_features) + sess.target.features.split(',').chain(cmdline).filter(|l| !l.is_empty()) } pub fn apply_target_cpu_attr(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) { @@ -255,12 +225,14 @@ pub fn from_fn_attrs(cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value, instance: ty:: } } - // FIXME(eddyb) consolidate these two `inline` calls (and avoid overwrites). 
- if instance.def.requires_inline(cx.tcx) { - inline(cx, llfn, attributes::InlineAttr::Hint); - } - - inline(cx, llfn, codegen_fn_attrs.inline.clone()); + let inline_attr = if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) { + InlineAttr::Never + } else if codegen_fn_attrs.inline == InlineAttr::None && instance.def.requires_inline(cx.tcx) { + InlineAttr::Hint + } else { + codegen_fn_attrs.inline + }; + inline(cx, llfn, inline_attr); // The `uwtable` attribute according to LLVM is: // diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs index 595655b2ca2..4e7213853b0 100644 --- a/compiler/rustc_codegen_llvm/src/back/archive.rs +++ b/compiler/rustc_codegen_llvm/src/back/archive.rs @@ -206,7 +206,7 @@ impl<'a> LlvmArchiveBuilder<'a> { } fn llvm_archive_kind(&self) -> Result { - let kind = &*self.config.sess.target.options.archive_format; + let kind = &*self.config.sess.target.archive_format; kind.parse().map_err(|_| kind) } diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index f13c2d312df..6f956c3bcc1 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -129,13 +129,13 @@ pub fn target_machine_factory( let use_softfp = sess.opts.cg.soft_float; let ffunction_sections = - sess.opts.debugging_opts.function_sections.unwrap_or(sess.target.options.function_sections); + sess.opts.debugging_opts.function_sections.unwrap_or(sess.target.function_sections); let fdata_sections = ffunction_sections; let code_model = to_llvm_code_model(sess.code_model()); let features = attributes::llvm_target_features(sess).collect::>(); - let mut singlethread = sess.target.options.singlethread; + let mut singlethread = sess.target.singlethread; // On the wasm target once the `atomics` feature is enabled that means that // we're no longer single-threaded, or otherwise we don't want LLVM to @@ -151,22 +151,16 @@ pub fn target_machine_factory( let cpu = SmallCStr::new(llvm_util::target_cpu(sess)); let features = features.join(","); let features = CString::new(features).unwrap(); - let abi = SmallCStr::new(&sess.target.options.llvm_abiname); - let trap_unreachable = sess.target.options.trap_unreachable; + let abi = SmallCStr::new(&sess.target.llvm_abiname); + let trap_unreachable = sess.target.trap_unreachable; let emit_stack_size_section = sess.opts.debugging_opts.emit_stack_sizes; let asm_comments = sess.asm_comments(); - let relax_elf_relocations = sess - .opts - .debugging_opts - .relax_elf_relocations - .unwrap_or(sess.target.options.relax_elf_relocations); - - let use_init_array = !sess - .opts - .debugging_opts - .use_ctors_section - .unwrap_or(sess.target.options.use_ctors_section); + let relax_elf_relocations = + sess.opts.debugging_opts.relax_elf_relocations.unwrap_or(sess.target.relax_elf_relocations); + + let use_init_array = + !sess.opts.debugging_opts.use_ctors_section.unwrap_or(sess.target.use_ctors_section); Arc::new(move || { let tm = unsafe { @@ -383,11 +377,6 @@ fn get_pgo_use_path(config: &ModuleConfig) -> Option { } pub(crate) fn should_use_new_llvm_pass_manager(config: &ModuleConfig) -> bool { - // We only support the new pass manager starting with LLVM 9. - if llvm_util::get_major_version() < 9 { - return false; - } - // The new pass manager is disabled by default. 
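The `attributes.rs` hunk above folds two separate `inline` calls into a single computed attribute: `#[naked]` functions are never inlined, an instance that the compiler itself requires to be inlined gets a hint when the user asked for nothing, and otherwise the user's request wins. Restated as a small standalone function with a placeholder enum:

    // Placeholder mirroring the three-way choice above (the real InlineAttr has more variants).
    #[derive(Clone, Copy, PartialEq)]
    enum InlineAttr {
        None,
        Hint,
        Never,
    }

    fn choose_inline_attr(is_naked: bool, requested: InlineAttr, requires_inline: bool) -> InlineAttr {
        if is_naked {
            // Naked functions must keep their exact body; never inline them.
            InlineAttr::Never
        } else if requested == InlineAttr::None && requires_inline {
            // No explicit request, but the instance must be inlined: emit a hint.
            InlineAttr::Hint
        } else {
            requested
        }
    }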
config.new_llvm_pass_manager } @@ -931,9 +920,7 @@ unsafe fn embed_bitcode( || cgcx.opts.target_triple.triple().starts_with("asmjs") { // nothing to do here - } else if cgcx.opts.target_triple.triple().contains("windows") - || cgcx.opts.target_triple.triple().contains("uefi") - { + } else if cgcx.is_pe_coff { let asm = " .section .llvmbc,\"n\" .section .llvmcmd,\"n\" diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs index 9c42ff6251a..f2615819dd5 100644 --- a/compiler/rustc_codegen_llvm/src/base.rs +++ b/compiler/rustc_codegen_llvm/src/base.rs @@ -97,14 +97,12 @@ pub fn compile_codegen_unit( tcx: TyCtxt<'tcx>, cgu_name: Symbol, ) -> (ModuleCodegen, u64) { - let prof_timer = tcx.prof.generic_activity_with_arg("codegen_module", cgu_name.to_string()); let start_time = Instant::now(); let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx); let (module, _) = tcx.dep_graph.with_task(dep_node, tcx, cgu_name, module_codegen, dep_graph::hash_result); let time_to_codegen = start_time.elapsed(); - drop(prof_timer); // We assume that the cost to run LLVM on a CGU is proportional to // the time we needed for codegenning it. @@ -112,6 +110,10 @@ pub fn compile_codegen_unit( fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen { let cgu = tcx.codegen_unit(cgu_name); + let _prof_timer = tcx.prof.generic_activity_with_args( + "codegen_module", + &[cgu_name.to_string(), cgu.size_estimate().to_string()], + ); // Instantiate monomorphizations without filling out definitions yet... let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str()); { diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs index e2003472d12..367c1f4811c 100644 --- a/compiler/rustc_codegen_llvm/src/callee.rs +++ b/compiler/rustc_codegen_llvm/src/callee.rs @@ -176,7 +176,7 @@ pub fn get_fn(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> &'ll Value // should use dllimport for functions. if cx.use_dll_storage_attrs && tcx.is_dllimport_foreign_item(instance_def_id) - && tcx.sess.target.target_env != "gnu" + && tcx.sess.target.env != "gnu" { unsafe { llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport); diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 90a51f75e0e..14dd245625d 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -91,7 +91,7 @@ fn set_global_alignment(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Alig // The target may require greater alignment for globals than the type does. // Note: GCC and Clang also allow `__attribute__((aligned))` on variables, // which can force it to be smaller. Rust doesn't support this yet. - if let Some(min) = cx.sess().target.options.min_global_align { + if let Some(min) = cx.sess().target.min_global_align { match Align::from_bits(min) { Ok(min) => align = align.max(min), Err(err) => { @@ -283,7 +283,7 @@ impl CodegenCx<'ll, 'tcx> { // argument validation. debug_assert!( !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled() - && self.tcx.sess.target.options.is_like_windows + && self.tcx.sess.target.is_like_windows && self.tcx.sess.opts.cg.prefer_dynamic) ); @@ -435,7 +435,7 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> { // will use load-unaligned instructions instead, and thus avoiding the crash. // // We could remove this hack whenever we decide to drop macOS 10.10 support. 
- if self.tcx.sess.target.options.is_like_osx { + if self.tcx.sess.target.is_like_osx { // The `inspect` method is okay here because we checked relocations, and // because we are doing this access to inspect the final interpreter state // (not as part of the interpreter execution). diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 6f608e7fcbb..d36590bf325 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -103,11 +103,6 @@ fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode { } } -fn strip_function_ptr_alignment(data_layout: String) -> String { - // FIXME: Make this more general. - data_layout.replace("-Fi8-", "-") -} - fn strip_x86_address_spaces(data_layout: String) -> String { data_layout.replace("-p270:32:32-p271:32:32-p272:64:64-", "-") } @@ -122,9 +117,6 @@ pub unsafe fn create_module( let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx); let mut target_data_layout = sess.target.data_layout.clone(); - if llvm_util::get_major_version() < 9 { - target_data_layout = strip_function_ptr_alignment(target_data_layout); - } if llvm_util::get_major_version() < 10 && (sess.target.arch == "x86" || sess.target.arch == "x86_64") { @@ -132,7 +124,7 @@ pub unsafe fn create_module( } // Ensure the data-layout values hardcoded remain the defaults. - if sess.target.options.is_builtin { + if sess.target.is_builtin { let tm = crate::back::write::create_informational_target_machine(tcx.sess); llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm); llvm::LLVMRustDisposeTargetMachine(tm); @@ -193,7 +185,7 @@ pub unsafe fn create_module( } // Control Flow Guard is currently only supported by the MSVC linker on Windows. - if sess.target.options.is_like_msvc { + if sess.target.is_like_msvc { match sess.opts.cg.control_flow_guard { CFGuard::Disabled => {} CFGuard::NoChecks => { @@ -268,7 +260,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { // linker will take care of everything. Fixing this problem will likely // require adding a few attributes to Rust itself (feature gated at the // start) and then strongly recommending static linkage on Windows! 
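A pattern running through almost every file in this section is the rewrite of `sess.target.options.<field>` to `sess.target.<field>`. For those call sites to type-check, `Target` has to expose the `TargetOptions` fields directly; the usual way to get that ergonomics, and presumably what the accompanying `rustc_target` change provides, is a `Deref` impl along these lines (the field names below are illustrative only):

    use std::ops::Deref;

    struct TargetOptions {
        is_like_osx: bool,
        is_like_windows: bool,
    }

    struct Target {
        llvm_target: String,
        options: TargetOptions,
    }

    // With this impl, `target.is_like_osx` auto-derefs to `target.options.is_like_osx`,
    // which is exactly the shape of the call sites after this patch.
    impl Deref for Target {
        type Target = TargetOptions;

        fn deref(&self) -> &TargetOptions {
            &self.options
        }
    }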
- let use_dll_storage_attrs = tcx.sess.target.options.is_like_windows; + let use_dll_storage_attrs = tcx.sess.target.is_like_windows; let check_overflow = tcx.sess.overflow_checks(); @@ -858,7 +850,7 @@ impl CodegenCx<'b, 'tcx> { return eh_catch_typeinfo; } let tcx = self.tcx; - assert!(self.sess().target.options.is_like_emscripten); + assert!(self.sess().target.is_like_emscripten); let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() { Some(def_id) => self.get_static(def_id), _ => { diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs index 6737872f203..7673dfb744c 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs @@ -92,7 +92,7 @@ fn make_mir_scope( let callee = cx.tcx.subst_and_normalize_erasing_regions( instance.substs, ty::ParamEnv::reveal_all(), - &callee, + callee, ); let callee_fn_abi = FnAbi::of_instance(cx, callee, &[]); cx.dbg_scope_fn(callee, &callee_fn_abi, None) diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs index 79721ff7e2d..38f50a6d621 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs @@ -67,5 +67,5 @@ pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool { !omit_gdb_pretty_printer_section && cx.sess().opts.debuginfo != DebugInfo::None - && cx.sess().target.options.emit_debug_gdb_scripts + && cx.sess().target.emit_debug_gdb_scripts } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 454d43fd4e7..5e8ff14f0aa 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -189,7 +189,7 @@ impl TypeMap<'ll, 'tcx> { // something that provides more than the 64 bits of the DefaultHasher. let mut hasher = StableHasher::new(); let mut hcx = cx.tcx.create_stable_hashing_context(); - let type_ = cx.tcx.erase_regions(&type_); + let type_ = cx.tcx.erase_regions(type_); hcx.while_hashing_spans(false, |hcx| { hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { type_.hash_stable(hcx, &mut hasher); @@ -427,7 +427,7 @@ fn subroutine_type_metadata( span: Span, ) -> MetadataCreationResult<'ll> { let signature = - cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &signature); + cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), signature); let signature_metadata: Vec<_> = iter::once( // return type @@ -870,7 +870,7 @@ fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType { // When targeting MSVC, emit MSVC style type names for compatibility with // .natvis visualizers (and perhaps other existing native debuggers?) - let msvc_like_names = cx.tcx.sess.target.options.is_like_msvc; + let msvc_like_names = cx.tcx.sess.target.is_like_msvc; let (name, encoding) = match t.kind() { ty::Never => ("!", DW_ATE_unsigned), @@ -981,7 +981,7 @@ pub fn compile_unit_metadata( // if multiple object files with the same `DW_AT_name` are linked together. // As a workaround we generate unique names for each object file. Those do // not correspond to an actual source file but that should be harmless. 
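Another recurring change, here and in the cranelift backend earlier, is dropping a `&`: helpers such as `monomorphize`, `erase_regions` and `normalize_erasing_late_bound_regions` now take their argument by value, with a `Copy` bound (visible on `monomorphize` in the cranelift hunk) so call sites stay cheap. A minimal model of that signature change, with a stand-in trait for `TypeFoldable`:

    // Stand-in trait; only the by-value-plus-Copy signature matters for the illustration.
    trait Foldable {
        fn fold(self) -> Self;
    }

    // Before: fn monomorphize<T: Foldable>(value: &T) -> T, forcing callers to write `&x`.
    // After: take `value` by value; `Copy` keeps the call cheap and avoids the extra borrow.
    fn monomorphize<T: Foldable + Copy>(value: T) -> T {
        value.fold()
    }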
- if tcx.sess.target.options.is_like_osx { + if tcx.sess.target.is_like_osx { name_in_debuginfo.push("@"); name_in_debuginfo.push(codegen_unit_name); } @@ -1397,7 +1397,7 @@ fn prepare_union_metadata( /// on MSVC we have to use the fallback mode, because LLVM doesn't /// lower variant parts to PDB. fn use_enum_fallback(cx: &CodegenCx<'_, '_>) -> bool { - cx.sess().target.options.is_like_msvc + cx.sess().target.is_like_msvc } // FIXME(eddyb) maybe precompute this? Right now it's computed once diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs index 5e324b4352f..4035e9dfae3 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs @@ -126,12 +126,12 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) { // for macOS to understand. For more info see #11352 // This can be overridden using --llvm-opts -dwarf-version,N. // Android has the same issue (#22398) - if let Some(version) = cx.sess().target.options.dwarf_version { + if let Some(version) = cx.sess().target.dwarf_version { llvm::LLVMRustAddModuleFlag(cx.llmod, "Dwarf Version\0".as_ptr().cast(), version) } // Indicate that we want CodeView debug information on MSVC - if cx.sess().target.options.is_like_msvc { + if cx.sess().target.is_like_msvc { llvm::LLVMRustAddModuleFlag(cx.llmod, "CodeView\0".as_ptr().cast(), 1) } @@ -255,7 +255,7 @@ impl CodegenCx<'ll, '_> { // For MSVC, omit the column number. // Otherwise, emit it. This mimics clang behaviour. // See discussion in https://github.com/rust-lang/rust/issues/42921 - if self.sess().target.options.is_like_msvc { + if self.sess().target.is_like_msvc { DebugLoc { file, line, col: None } } else { DebugLoc { file, line, col } @@ -391,7 +391,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { }); // Arguments types - if cx.sess().target.options.is_like_msvc { + if cx.sess().target.is_like_msvc { // FIXME(#42800): // There is a bug in MSDIA that leads to a crash when it encounters // a fixed-size array of `u8` or something zero-sized in a @@ -505,7 +505,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions( instance.substs, ty::ParamEnv::reveal_all(), - &cx.tcx.type_of(impl_def_id), + cx.tcx.type_of(impl_def_id), ); // Only "class" methods are generally understood by LLVM, diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs index 9face778322..0591e0a5c12 100644 --- a/compiler/rustc_codegen_llvm/src/declare.rs +++ b/compiler/rustc_codegen_llvm/src/declare.rs @@ -42,7 +42,7 @@ fn declare_raw_fn( // be merged. 
llvm::SetUnnamedAddress(llfn, llvm::UnnamedAddr::Global); - if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.options.disable_redzone) { + if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.disable_redzone) { llvm::Attribute::NoRedZone.apply_llfn(Function, llfn); } diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index e9900e8bc10..ac423d01bf1 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -91,7 +91,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { }; let sig = callee_ty.fn_sig(tcx); - let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig); let arg_tys = sig.inputs(); let ret_ty = sig.output(); let name = tcx.item_name(def_id); @@ -367,7 +367,7 @@ fn try_intrinsic( bx.store(bx.const_i32(0), dest, ret_align); } else if wants_msvc_seh(bx.sess()) { codegen_msvc_try(bx, try_func, data, catch_func, dest); - } else if bx.sess().target.options.is_like_emscripten { + } else if bx.sess().target.is_like_emscripten { codegen_emcc_try(bx, try_func, data, catch_func, dest); } else { codegen_gnu_try(bx, try_func, data, catch_func, dest); @@ -777,8 +777,8 @@ fn generic_simd_intrinsic( } let tcx = bx.tcx(); - let sig = tcx - .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &callee_ty.fn_sig(tcx)); + let sig = + tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx)); let arg_tys = sig.inputs(); let name_str = &*name.as_str(); @@ -979,12 +979,14 @@ fn generic_simd_intrinsic( // Integer vector : let (i_xn, in_elem_bitwidth) = match in_elem.kind() { - ty::Int(i) => { - (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits())) - } - ty::Uint(i) => { - (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits())) - } + ty::Int(i) => ( + args[0].immediate(), + i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()), + ), + ty::Uint(i) => ( + args[0].immediate(), + i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()), + ), _ => return_error!( "vector argument `{}`'s element type `{}`, expected integer element type", in_ty, diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs index 9c1e1b8fac0..a3139ce5a34 100644 --- a/compiler/rustc_codegen_llvm/src/llvm_util.rs +++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs @@ -3,7 +3,6 @@ use crate::llvm; use libc::c_int; use rustc_codegen_ssa::target_features::supported_target_features; use rustc_data_structures::fx::FxHashSet; -use rustc_feature::UnstableFeatures; use rustc_middle::bug; use rustc_session::config::PrintRequest; use rustc_session::Session; @@ -46,7 +45,7 @@ fn require_inited() { } unsafe fn configure_llvm(sess: &Session) { - let n_args = sess.opts.cg.llvm_args.len() + sess.target.options.llvm_args.len(); + let n_args = sess.opts.cg.llvm_args.len() + sess.target.llvm_args.len(); let mut llvm_c_strs = Vec::with_capacity(n_args + 1); let mut llvm_args = Vec::with_capacity(n_args + 1); @@ -57,7 +56,7 @@ unsafe fn configure_llvm(sess: &Session) { } let cg_opts = sess.opts.cg.llvm_args.iter(); - let tg_opts = sess.target.options.llvm_args.iter(); + let tg_opts = sess.target.llvm_args.iter(); let sess_args = cg_opts.chain(tg_opts); let user_specified_args: FxHashSet<_> = @@ -84,19 +83,14 @@ unsafe fn 
configure_llvm(sess: &Session) { if !sess.opts.debugging_opts.no_generate_arange_section { add("-generate-arange-section", false); } - match sess - .opts - .debugging_opts - .merge_functions - .unwrap_or(sess.target.options.merge_functions) - { + match sess.opts.debugging_opts.merge_functions.unwrap_or(sess.target.merge_functions) { MergeFunctions::Disabled | MergeFunctions::Trampolines => {} MergeFunctions::Aliases => { add("-mergefunc-use-aliases", false); } } - if sess.target.target_os == "emscripten" && sess.panic_strategy() == PanicStrategy::Unwind { + if sess.target.os == "emscripten" && sess.panic_strategy() == PanicStrategy::Unwind { add("-enable-emscripten-cxx-exceptions", false); } @@ -109,7 +103,7 @@ unsafe fn configure_llvm(sess: &Session) { } } - if sess.opts.debugging_opts.llvm_time_trace && get_major_version() >= 9 { + if sess.opts.debugging_opts.llvm_time_trace { // time-trace is not thread safe and running it in parallel will cause seg faults. if !sess.opts.debugging_opts.no_parallel_llvm { bug!("`-Z llvm-time-trace` requires `-Z no-parallel-llvm") @@ -127,16 +121,21 @@ unsafe fn configure_llvm(sess: &Session) { pub fn time_trace_profiler_finish(file_name: &str) { unsafe { - if get_major_version() >= 9 { - let file_name = CString::new(file_name).unwrap(); - llvm::LLVMTimeTraceProfilerFinish(file_name.as_ptr()); - } + let file_name = CString::new(file_name).unwrap(); + llvm::LLVMTimeTraceProfilerFinish(file_name.as_ptr()); } } // WARNING: the features after applying `to_llvm_feature` must be known // to LLVM or the feature detection code will walk past the end of the feature // array, leading to crashes. +// To find a list of LLVM's names, check llvm-project/llvm/include/llvm/Support/*TargetParser.def +// where the * matches the architecture's name +// Beware to not use the llvm github project for this, but check the git submodule +// found in src/llvm-project +// Though note that Rust can also be build with an external precompiled version of LLVM +// which might lead to failures if the oldest tested / supported LLVM version +// doesn't yet support the relevant intrinsics pub fn to_llvm_feature<'a>(sess: &Session, s: &'a str) -> &'a str { let arch = if sess.target.arch == "x86_64" { "x86" } else { &*sess.target.arch }; match (arch, s) { @@ -144,6 +143,9 @@ pub fn to_llvm_feature<'a>(sess: &Session, s: &'a str) -> &'a str { ("x86", "rdrand") => "rdrnd", ("x86", "bmi1") => "bmi", ("x86", "cmpxchg16b") => "cx16", + ("x86", "avx512vaes") => "vaes", + ("x86", "avx512gfni") => "gfni", + ("x86", "avx512vpclmulqdq") => "vpclmulqdq", ("aarch64", "fp") => "fp-armv8", ("aarch64", "fp16") => "fullfp16", (_, s) => s, @@ -154,13 +156,11 @@ pub fn target_features(sess: &Session) -> Vec { let target_machine = create_informational_target_machine(sess); supported_target_features(sess) .iter() - .filter_map(|&(feature, gate)| { - if UnstableFeatures::from_environment().is_nightly_build() || gate.is_none() { - Some(feature) - } else { - None - } - }) + .filter_map( + |&(feature, gate)| { + if sess.is_nightly_build() || gate.is_none() { Some(feature) } else { None } + }, + ) .filter(|feature| { let llvm_feature = to_llvm_feature(sess, feature); let cstr = CString::new(llvm_feature).unwrap(); @@ -215,7 +215,7 @@ fn handle_native(name: &str) -> &str { pub fn target_cpu(sess: &Session) -> &str { let name = match sess.opts.cg.target_cpu { Some(ref s) => &**s, - None => &*sess.target.options.cpu, + None => &*sess.target.cpu, }; handle_native(name) diff --git 
a/compiler/rustc_codegen_llvm/src/metadata.rs b/compiler/rustc_codegen_llvm/src/metadata.rs index 9036428c04b..3912d6a3a48 100644 --- a/compiler/rustc_codegen_llvm/src/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/metadata.rs @@ -104,7 +104,7 @@ pub fn metadata_section_name(target: &Target) -> &'static str { // As a result, we choose a slightly shorter name! As to why // `.note.rustc` works on MinGW, that's another good question... - if target.options.is_like_osx { "__DATA,.rustc" } else { ".rustc" } + if target.is_like_osx { "__DATA,.rustc" } else { ".rustc" } } fn read_metadata_section_name(_target: &Target) -> &'static str { diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index e0754d21df1..8ea4768f77d 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -252,7 +252,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { // Make sure lifetimes are erased, to avoid generating distinct LLVM // types for Rust types that only differ in the choice of lifetimes. - let normal_ty = cx.tcx.erase_regions(&self.ty); + let normal_ty = cx.tcx.erase_regions(self.ty); let mut defer = None; let llty = if self.ty != normal_ty { diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs index b6a0516b8bc..3fc56eecdd0 100644 --- a/compiler/rustc_codegen_llvm/src/va_arg.rs +++ b/compiler/rustc_codegen_llvm/src/va_arg.rs @@ -52,7 +52,7 @@ fn emit_direct_ptr_va_arg( let next = bx.inbounds_gep(addr, &[full_direct_size]); bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi); - if size.bytes() < slot_size.bytes() && &*bx.tcx().sess.target.target_endian == "big" { + if size.bytes() < slot_size.bytes() && &*bx.tcx().sess.target.endian == "big" { let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32); let adjusted = bx.inbounds_gep(addr, &[adjusted_size]); (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align) @@ -105,7 +105,7 @@ fn emit_aapcs_va_arg( let mut end = bx.build_sibling_block("va_arg.end"); let zero = bx.const_i32(0); let offset_align = Align::from_bytes(4).unwrap(); - assert!(&*bx.tcx().sess.target.target_endian == "little"); + assert!(&*bx.tcx().sess.target.endian == "little"); let gr_type = target_ty.is_any_ptr() || target_ty.is_integral(); let (reg_off, reg_top_index, slot_size) = if gr_type { @@ -175,22 +175,22 @@ pub(super) fn emit_va_arg( let arch = &bx.cx.tcx.sess.target.arch; match &**arch { // Windows x86 - "x86" if target.options.is_like_windows => { + "x86" if target.is_like_windows => { emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false) } // Generic x86 "x86" => emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true), // Windows AArch64 - "aarch64" if target.options.is_like_windows => { + "aarch64" if target.is_like_windows => { emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false) } // macOS / iOS AArch64 - "aarch64" if target.options.is_like_osx => { + "aarch64" if target.is_like_osx => { emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true) } "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty), // Windows x86_64 - "x86_64" if target.options.is_like_windows => { + "x86_64" if target.is_like_windows => { let target_ty_size = bx.cx.size_of(target_ty).bytes(); let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two(); emit_ptr_va_arg(bx, addr, target_ty, 
indirect, Align::from_bytes(8).unwrap(), false) diff --git a/compiler/rustc_codegen_ssa/src/back/archive.rs b/compiler/rustc_codegen_ssa/src/back/archive.rs index ef722ecb599..c477ac6462a 100644 --- a/compiler/rustc_codegen_ssa/src/back/archive.rs +++ b/compiler/rustc_codegen_ssa/src/back/archive.rs @@ -7,10 +7,8 @@ use std::path::{Path, PathBuf}; pub fn find_library(name: Symbol, search_paths: &[PathBuf], sess: &Session) -> PathBuf { // On Windows, static libraries sometimes show up as libfoo.a and other // times show up as foo.lib - let oslibname = format!( - "{}{}{}", - sess.target.options.staticlib_prefix, name, sess.target.options.staticlib_suffix - ); + let oslibname = + format!("{}{}{}", sess.target.staticlib_prefix, name, sess.target.staticlib_suffix); let unixlibname = format!("lib{}.a", name); for path in search_paths { diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs index 88645a63c7a..b44c8b9d630 100644 --- a/compiler/rustc_codegen_ssa/src/back/link.rs +++ b/compiler/rustc_codegen_ssa/src/back/link.rs @@ -151,9 +151,7 @@ fn get_linker( Some(linker) if cfg!(windows) && linker.ends_with(".bat") => Command::bat_script(linker), _ => match flavor { LinkerFlavor::Lld(f) => Command::lld(linker, f), - LinkerFlavor::Msvc - if sess.opts.cg.linker.is_none() && sess.target.options.linker.is_none() => - { + LinkerFlavor::Msvc if sess.opts.cg.linker.is_none() && sess.target.linker.is_none() => { Command::new(msvc_tool.as_ref().map(|t| t.path()).unwrap_or(linker)) } _ => Command::new(linker), @@ -165,7 +163,7 @@ fn get_linker( // MSVC needs to link with the Store versions of the runtime libraries (vcruntime, msvcrt, etc). let t = &sess.target; if (flavor == LinkerFlavor::Msvc || flavor == LinkerFlavor::Lld(LldFlavor::Link)) - && t.target_vendor == "uwp" + && t.vendor == "uwp" { if let Some(ref tool) = msvc_tool { let original_path = tool.path(); @@ -197,7 +195,7 @@ fn get_linker( // PATH for the child. let mut new_path = sess.host_filesearch(PathKind::All).get_tools_search_paths(self_contained); let mut msvc_changed_path = false; - if sess.target.options.is_like_msvc { + if sess.target.is_like_msvc { if let Some(ref tool) = msvc_tool { cmd.args(tool.args()); for &(ref k, ref v) in tool.env() { @@ -365,7 +363,7 @@ fn link_rlib<'a, B: ArchiveBuilder<'a>>( // After adding all files to the archive, we need to update the // symbol table of the archive. This currently dies on macOS (see // #11162), and isn't necessary there anyway - if !sess.target.options.is_like_osx { + if !sess.target.is_like_osx { ab.update_symbols(); } } @@ -476,10 +474,10 @@ fn link_natively<'a, B: ArchiveBuilder<'a>>( linker::disable_localization(&mut cmd); - for &(ref k, ref v) in &sess.target.options.link_env { + for &(ref k, ref v) in &sess.target.link_env { cmd.env(k, v); } - for k in &sess.target.options.link_env_remove { + for k in &sess.target.link_env_remove { cmd.env_remove(k); } @@ -515,7 +513,7 @@ fn link_natively<'a, B: ArchiveBuilder<'a>>( // if the linker doesn't support -no-pie then it should not default to // linking executables as pie. Different versions of gcc seem to use // different quotes in the error message so don't check for them. 
- if sess.target.options.linker_is_gnu + if sess.target.linker_is_gnu && flavor != LinkerFlavor::Ld && (out.contains("unrecognized command line option") || out.contains("unknown argument")) @@ -535,7 +533,7 @@ fn link_natively<'a, B: ArchiveBuilder<'a>>( // Detect '-static-pie' used with an older version of gcc or clang not supporting it. // Fallback from '-static-pie' to '-static' in that case. - if sess.target.options.linker_is_gnu + if sess.target.linker_is_gnu && flavor != LinkerFlavor::Ld && (out.contains("unrecognized command line option") || out.contains("unknown argument")) @@ -548,7 +546,7 @@ fn link_natively<'a, B: ArchiveBuilder<'a>>( ); // Mirror `add_(pre,post)_link_objects` to replace CRT objects. let self_contained = crt_objects_fallback(sess, crate_type); - let opts = &sess.target.options; + let opts = &sess.target; let pre_objects = if self_contained { &opts.pre_link_objects_fallback } else { @@ -670,7 +668,7 @@ fn link_natively<'a, B: ArchiveBuilder<'a>>( // is not a Microsoft LNK error then suggest a way to fix or // install the Visual Studio build tools. if let Some(code) = prog.status.code() { - if sess.target.options.is_like_msvc + if sess.target.is_like_msvc && flavor == LinkerFlavor::Msvc // Respect the command line override && sess.opts.cg.linker.is_none() @@ -741,7 +739,7 @@ fn link_natively<'a, B: ArchiveBuilder<'a>>( linker_error.emit(); - if sess.target.options.is_like_msvc && linker_not_found { + if sess.target.is_like_msvc && linker_not_found { sess.note_without_error( "the msvc targets depend on the msvc linker \ but `link.exe` was not found", @@ -758,7 +756,7 @@ fn link_natively<'a, B: ArchiveBuilder<'a>>( // On macOS, debuggers need this utility to get run to do some munging of // the symbols. Note, though, that if the object files are being preserved // for their debug information there's no need for us to run dsymutil. - if sess.target.options.is_like_osx + if sess.target.is_like_osx && sess.opts.debuginfo != DebugInfo::None && !preserve_objects_for_their_debuginfo(sess) { @@ -775,9 +773,7 @@ fn link_sanitizers(sess: &Session, crate_type: CrateType, linker: &mut dyn Linke // executables only. let needs_runtime = match crate_type { CrateType::Executable => true, - CrateType::Dylib | CrateType::Cdylib | CrateType::ProcMacro => { - sess.target.options.is_like_osx - } + CrateType::Dylib | CrateType::Cdylib | CrateType::ProcMacro => sess.target.is_like_osx, CrateType::Rlib | CrateType::Staticlib => false, }; @@ -846,7 +842,7 @@ pub fn ignored_for_lto(sess: &Session, info: &CrateInfo, cnum: CrateNum) -> bool // If our target enables builtin function lowering in LLVM then the // crates providing these functions don't participate in LTO (e.g. // no_builtins or compiler builtins crates). 
- !sess.target.options.no_builtins + !sess.target.no_builtins && (info.compiler_builtins == Some(cnum) || info.is_no_builtins.contains(&cnum)) } @@ -906,7 +902,7 @@ fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { } else if stem == "link" || stem == "lld-link" { LinkerFlavor::Msvc } else if stem == "lld" || stem == "rust-lld" { - LinkerFlavor::Lld(sess.target.options.lld_flavor) + LinkerFlavor::Lld(sess.target.lld_flavor) } else { // fall back to the value in the target spec sess.target.linker_flavor @@ -926,7 +922,7 @@ fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { if let Some(ret) = infer_from( sess, - sess.target.options.linker.clone().map(PathBuf::from), + sess.target.linker.clone().map(PathBuf::from), Some(sess.target.linker_flavor), ) { return ret; @@ -962,7 +958,7 @@ fn preserve_objects_for_their_debuginfo(sess: &Session) -> bool { // Basically as a result this just means that if we're on OSX and we're // *not* running dsymutil then the object files are the only source of truth // for debug information, so we must preserve them. - if sess.target.options.is_like_osx { + if sess.target.is_like_osx { return !sess.opts.debugging_opts.run_dsymutil; } @@ -988,7 +984,7 @@ fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLib]) { NativeLibKind::StaticNoBundle | NativeLibKind::Dylib | NativeLibKind::Unspecified => { - if sess.target.options.is_like_msvc { + if sess.target.is_like_msvc { Some(format!("{}.lib", name)) } else { Some(format!("-l{}", name)) @@ -1070,13 +1066,13 @@ fn exec_linker( let mut args = String::new(); for arg in cmd2.take_args() { args.push_str( - &Escape { arg: arg.to_str().unwrap(), is_like_msvc: sess.target.options.is_like_msvc } + &Escape { arg: arg.to_str().unwrap(), is_like_msvc: sess.target.is_like_msvc } .to_string(), ); args.push('\n'); } let file = tmpdir.join("linker-arguments"); - let bytes = if sess.target.options.is_like_msvc { + let bytes = if sess.target.is_like_msvc { let mut out = Vec::with_capacity((1 + args.len()) * 2); // start the stream with a UTF-16 BOM for c in std::iter::once(0xFEFF).chain(args.encode_utf16()) { @@ -1192,7 +1188,7 @@ fn link_output_kind(sess: &Session, crate_type: CrateType) -> LinkOutputKind { }; // Adjust the output kind to target capabilities. - let opts = &sess.target.options; + let opts = &sess.target; let pic_exe_supported = opts.position_independent_executables; let static_pic_exe_supported = opts.static_position_independent_executables; let static_dylib_supported = opts.crt_static_allows_dylibs; @@ -1233,14 +1229,14 @@ fn crt_objects_fallback(sess: &Session, crate_type: CrateType) -> bool { return self_contained; } - match sess.target.options.crt_objects_fallback { + match sess.target.crt_objects_fallback { // FIXME: Find a better heuristic for "native musl toolchain is available", // based on host and linker path, for example. // (https://github.com/rust-lang/rust/pull/71769#issuecomment-626330237). Some(CrtObjectsFallback::Musl) => sess.crt_static(Some(crate_type)), Some(CrtObjectsFallback::Mingw) => { sess.host == sess.target - && sess.target.target_vendor != "uwp" + && sess.target.vendor != "uwp" && detect_self_contained_mingw(&sess) } // FIXME: Figure out cases in which WASM needs to link with a native toolchain. 
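The repeated `sess.target.options.*` to `sess.target.*` rewrites in the link.rs hunks above compile because `Target` forwards field access to its `TargetOptions`, presumably via a `Deref<Target = TargetOptions>` impl that is not part of the hunks shown here. A minimal sketch of that pattern, using made-up field subsets rather than the real structs:

use std::ops::Deref;

// Hypothetical, trimmed-down stand-ins for the target-spec types; only the
// Deref forwarding pattern is the point here.
struct TargetOptions {
    is_like_osx: bool,
    linker_is_gnu: bool,
}

struct Target {
    llvm_target: String,
    options: TargetOptions,
}

impl Deref for Target {
    type Target = TargetOptions;

    // Field access on `Target` falls through to `TargetOptions`, so
    // `target.is_like_osx` and `target.options.is_like_osx` agree.
    fn deref(&self) -> &TargetOptions {
        &self.options
    }
}

fn main() {
    let target = Target {
        llvm_target: "x86_64-unknown-linux-gnu".to_string(),
        options: TargetOptions { is_like_osx: false, linker_is_gnu: true },
    };
    assert_eq!(target.is_like_osx, target.options.is_like_osx);
    assert!(target.linker_is_gnu);
    println!("{}", target.llvm_target);
}

Auto-deref keeps the old `target.options.field` spelling valid while allowing the shorter `target.field` form that these hunks migrate to.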
@@ -1256,7 +1252,7 @@ fn add_pre_link_objects( link_output_kind: LinkOutputKind, self_contained: bool, ) { - let opts = &sess.target.options; + let opts = &sess.target; let objects = if self_contained { &opts.pre_link_objects_fallback } else { &opts.pre_link_objects }; for obj in objects.get(&link_output_kind).iter().copied().flatten() { @@ -1271,7 +1267,7 @@ fn add_post_link_objects( link_output_kind: LinkOutputKind, self_contained: bool, ) { - let opts = &sess.target.options; + let opts = &sess.target; let objects = if self_contained { &opts.post_link_objects_fallback } else { &opts.post_link_objects }; for obj in objects.get(&link_output_kind).iter().copied().flatten() { @@ -1282,7 +1278,7 @@ fn add_post_link_objects( /// Add arbitrary "pre-link" args defined by the target spec or from command line. /// FIXME: Determine where exactly these args need to be inserted. fn add_pre_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) { - if let Some(args) = sess.target.options.pre_link_args.get(&flavor) { + if let Some(args) = sess.target.pre_link_args.get(&flavor) { cmd.args(args); } cmd.args(&sess.opts.debugging_opts.pre_link_args); @@ -1290,9 +1286,9 @@ fn add_pre_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) /// Add a link script embedded in the target, if applicable. fn add_link_script(cmd: &mut dyn Linker, sess: &Session, tmpdir: &Path, crate_type: CrateType) { - match (crate_type, &sess.target.options.link_script) { + match (crate_type, &sess.target.link_script) { (CrateType::Cdylib | CrateType::Executable, Some(script)) => { - if !sess.target.options.linker_is_gnu { + if !sess.target.linker_is_gnu { sess.fatal("can only use link script when linking with GNU-like linker"); } @@ -1341,15 +1337,15 @@ fn add_late_link_args( *ty == crate_type && list.iter().any(|&linkage| linkage == Linkage::Dynamic) }); if any_dynamic_crate { - if let Some(args) = sess.target.options.late_link_args_dynamic.get(&flavor) { + if let Some(args) = sess.target.late_link_args_dynamic.get(&flavor) { cmd.args(args); } } else { - if let Some(args) = sess.target.options.late_link_args_static.get(&flavor) { + if let Some(args) = sess.target.late_link_args_static.get(&flavor) { cmd.args(args); } } - if let Some(args) = sess.target.options.late_link_args.get(&flavor) { + if let Some(args) = sess.target.late_link_args.get(&flavor) { cmd.args(args); } } @@ -1357,7 +1353,7 @@ fn add_late_link_args( /// Add arbitrary "post-link" args defined by the target spec. /// FIXME: Determine where exactly these args need to be inserted. fn add_post_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) { - if let Some(args) = sess.target.options.post_link_args.get(&flavor) { + if let Some(args) = sess.target.post_link_args.get(&flavor) { cmd.args(args); } } @@ -1459,7 +1455,7 @@ fn add_library_search_dirs(cmd: &mut dyn Linker, sess: &Session, self_contained: /// Add options making relocation sections in the produced ELF files read-only /// and suppressing lazy binding. 
fn add_relro_args(cmd: &mut dyn Linker, sess: &Session) { - match sess.opts.debugging_opts.relro_level.unwrap_or(sess.target.options.relro_level) { + match sess.opts.debugging_opts.relro_level.unwrap_or(sess.target.relro_level) { RelroLevel::Full => cmd.full_relro(), RelroLevel::Partial => cmd.partial_relro(), RelroLevel::Off => cmd.no_relro(), @@ -1490,9 +1486,9 @@ fn add_rpath_args( let mut rpath_config = RPathConfig { used_crates: &codegen_results.crate_info.used_crates_dynamic, out_filename: out_filename.to_path_buf(), - has_rpath: sess.target.options.has_rpath, - is_like_osx: sess.target.options.is_like_osx, - linker_is_gnu: sess.target.options.linker_is_gnu, + has_rpath: sess.target.has_rpath, + is_like_osx: sess.target.is_like_osx, + linker_is_gnu: sess.target.linker_is_gnu, get_install_prefix_lib_path: &mut get_install_prefix_lib_path, }; cmd.args(&rpath::get_rpath_flags(&mut rpath_config)); @@ -1520,7 +1516,7 @@ fn linker_with_args<'a, B: ArchiveBuilder<'a>>( let base_cmd = get_linker(sess, path, flavor, crt_objects_fallback); // FIXME: Move `/LIBPATH` addition for uwp targets from the linker construction // to the linker args construction. - assert!(base_cmd.get_args().is_empty() || sess.target.target_vendor == "uwp"); + assert!(base_cmd.get_args().is_empty() || sess.target.vendor == "uwp"); let cmd = &mut *codegen_results.linker_info.to_linker(base_cmd, &sess, flavor, target_cpu); let link_output_kind = link_output_kind(sess, crate_type); @@ -1534,7 +1530,7 @@ fn linker_with_args<'a, B: ArchiveBuilder<'a>>( add_link_script(cmd, sess, tmpdir, crate_type); // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER - if sess.target.options.is_like_fuchsia && crate_type == CrateType::Executable { + if sess.target.is_like_fuchsia && crate_type == CrateType::Executable { let prefix = if sess.opts.debugging_opts.sanitizer.contains(SanitizerSet::ADDRESS) { "asan/" } else { @@ -1544,7 +1540,7 @@ fn linker_with_args<'a, B: ArchiveBuilder<'a>>( } // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER - if sess.target.options.eh_frame_header { + if sess.target.eh_frame_header { cmd.add_eh_frame_header(); } @@ -1557,7 +1553,7 @@ fn linker_with_args<'a, B: ArchiveBuilder<'a>>( add_pre_link_objects(cmd, sess, link_output_kind, crt_objects_fallback); // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER - if sess.target.options.is_like_emscripten { + if sess.target.is_like_emscripten { cmd.arg("-s"); cmd.arg(if sess.panic_strategy() == PanicStrategy::Abort { "DISABLE_EXCEPTION_CATCHING=1" @@ -1585,7 +1581,7 @@ fn linker_with_args<'a, B: ArchiveBuilder<'a>>( cmd.output_filename(out_filename); // OBJECT-FILES-NO, AUDIT-ORDER - if crate_type == CrateType::Executable && sess.target.options.is_like_windows { + if crate_type == CrateType::Executable && sess.target.is_like_windows { if let Some(ref s) = codegen_results.windows_subsystem { cmd.subsystem(s); } @@ -1629,7 +1625,7 @@ fn linker_with_args<'a, B: ArchiveBuilder<'a>>( // OBJECT-FILES-NO, AUDIT-ORDER // We want to prevent the compiler from accidentally leaking in any system libraries, // so by default we tell linkers not to link to any default libraries. 
- if !sess.opts.cg.default_linker_libraries && sess.target.options.no_default_libraries { + if !sess.opts.cg.default_linker_libraries && sess.target.no_default_libraries { cmd.no_default_libraries(); } @@ -1849,7 +1845,7 @@ fn add_upstream_rust_crates<'a, B: ArchiveBuilder<'a>>( // Converts a library file-stem into a cc -l argument fn unlib<'a>(target: &Target, stem: &'a str) -> &'a str { - if stem.starts_with("lib") && !target.options.is_like_windows { &stem[3..] } else { stem } + if stem.starts_with("lib") && !target.is_like_windows { &stem[3..] } else { stem } } // Adds the static "rlib" versions of all crates to the command line. @@ -1944,7 +1940,7 @@ fn add_upstream_rust_crates<'a, B: ArchiveBuilder<'a>>( // though, so we let that object file slide. let skip_because_lto = are_upstream_rust_objects_already_included(sess) && is_rust_object - && (sess.target.options.no_builtins + && (sess.target.no_builtins || !codegen_results.crate_info.is_no_builtins.contains(&cnum)); if skip_because_cfg_say_so || skip_because_lto { @@ -2088,9 +2084,9 @@ fn are_upstream_rust_objects_already_included(sess: &Session) -> bool { fn add_apple_sdk(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) { let arch = &sess.target.arch; - let os = &sess.target.target_os; + let os = &sess.target.os; let llvm_target = &sess.target.llvm_target; - if sess.target.target_vendor != "apple" + if sess.target.vendor != "apple" || !matches!(os.as_str(), "ios" | "tvos") || flavor != LinkerFlavor::Gcc { diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs index f2317c14332..b3de517685d 100644 --- a/compiler/rustc_codegen_ssa/src/back/linker.rs +++ b/compiler/rustc_codegen_ssa/src/back/linker.rs @@ -185,7 +185,7 @@ impl<'a> GccLinker<'a> { // * On OSX they have their own linker, not binutils' // * For WebAssembly the only functional linker is LLD, which doesn't // support hint flags - !self.sess.target.options.is_like_osx && self.sess.target.arch != "wasm32" + !self.sess.target.is_like_osx && self.sess.target.arch != "wasm32" } // Some platforms take hints about whether a library is static or dynamic. 
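The `unlib` helper rewritten just above turns a library file stem into the name passed to the linker as `-l<name>`: on non-Windows targets a leading `lib` is stripped, while Windows-like targets keep the stem unchanged. A standalone sketch of the same logic, simplified to take a plain bool where the real function takes `&Target`:

// Simplified restatement of `unlib` from link.rs: convert a library file
// stem into the argument handed to `-l`. A bool stands in for the real
// `target.is_like_windows` check.
fn unlib(is_like_windows: bool, stem: &str) -> &str {
    if stem.starts_with("lib") && !is_like_windows { &stem[3..] } else { stem }
}

fn main() {
    // `libfoo.a` on ELF/Mach-O toolchains is requested as `-lfoo`...
    assert_eq!(unlib(false, "libfoo"), "foo");
    // ...while MSVC-style toolchains keep the stem as-is.
    assert_eq!(unlib(true, "libfoo"), "libfoo");
    println!("ok");
}

So a static library discovered on disk as `libfoo.a` is linked as `-lfoo` on GNU-like toolchains, and the stem passes through untouched elsewhere.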
@@ -233,7 +233,7 @@ impl<'a> GccLinker<'a> { fn build_dylib(&mut self, out_filename: &Path) { // On mac we need to tell the linker to let this library be rpathed - if self.sess.target.options.is_like_osx { + if self.sess.target.is_like_osx { self.cmd.arg("-dynamiclib"); self.linker_arg("-dylib"); @@ -249,7 +249,7 @@ impl<'a> GccLinker<'a> { } } else { self.cmd.arg("-shared"); - if self.sess.target.options.is_like_windows { + if self.sess.target.is_like_windows { // The output filename already contains `dll_suffix` so // the resulting import library will have a name in the // form of libfoo.dll.a @@ -257,9 +257,9 @@ impl<'a> GccLinker<'a> { out_filename.file_name().and_then(|file| file.to_str()).map(|file| { format!( "{}{}{}", - self.sess.target.options.staticlib_prefix, + self.sess.target.staticlib_prefix, file, - self.sess.target.options.staticlib_suffix + self.sess.target.staticlib_suffix ) }); if let Some(implib_name) = implib_name { @@ -281,7 +281,7 @@ impl<'a> Linker for GccLinker<'a> { fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path) { match output_kind { LinkOutputKind::DynamicNoPicExe => { - if !self.is_ld && self.sess.target.options.linker_is_gnu { + if !self.is_ld && self.sess.target.linker_is_gnu { self.cmd.arg("-no-pie"); } } @@ -292,7 +292,7 @@ impl<'a> Linker for GccLinker<'a> { LinkOutputKind::StaticNoPicExe => { // `-static` works for both gcc wrapper and ld. self.cmd.arg("-static"); - if !self.is_ld && self.sess.target.options.linker_is_gnu { + if !self.is_ld && self.sess.target.linker_is_gnu { self.cmd.arg("-no-pie"); } } @@ -321,7 +321,7 @@ impl<'a> Linker for GccLinker<'a> { // any `#[link]` attributes in the `libc` crate, see #72782 for details. // FIXME: Switch to using `#[link]` attributes in the `libc` crate // similarly to other targets. - if self.sess.target.target_os == "vxworks" + if self.sess.target.os == "vxworks" && matches!( output_kind, LinkOutputKind::StaticNoPicExe @@ -387,7 +387,7 @@ impl<'a> Linker for GccLinker<'a> { fn link_whole_staticlib(&mut self, lib: Symbol, search_path: &[PathBuf]) { self.hint_static(); let target = &self.sess.target; - if !target.options.is_like_osx { + if !target.is_like_osx { self.linker_arg("--whole-archive").cmd.arg(format!("-l{}", lib)); self.linker_arg("--no-whole-archive"); } else { @@ -401,7 +401,7 @@ impl<'a> Linker for GccLinker<'a> { fn link_whole_rlib(&mut self, lib: &Path) { self.hint_static(); - if self.sess.target.options.is_like_osx { + if self.sess.target.is_like_osx { self.linker_arg("-force_load"); self.linker_arg(&lib); } else { @@ -425,9 +425,9 @@ impl<'a> Linker for GccLinker<'a> { // -dead_strip can't be part of the pre_link_args because it's also used // for partial linking when using multiple codegen units (-r). So we // insert it here. 
- if self.sess.target.options.is_like_osx { + if self.sess.target.is_like_osx { self.linker_arg("-dead_strip"); - } else if self.sess.target.options.is_like_solaris { + } else if self.sess.target.is_like_solaris { self.linker_arg("-zignore"); // If we're building a dylib, we don't use --gc-sections because LLVM @@ -441,7 +441,7 @@ impl<'a> Linker for GccLinker<'a> { } fn optimize(&mut self) { - if !self.sess.target.options.linker_is_gnu { + if !self.sess.target.linker_is_gnu { return; } @@ -455,7 +455,7 @@ impl<'a> Linker for GccLinker<'a> { } fn pgo_gen(&mut self) { - if !self.sess.target.options.linker_is_gnu { + if !self.sess.target.linker_is_gnu { return; } @@ -504,8 +504,7 @@ impl<'a> Linker for GccLinker<'a> { fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType) { // Symbol visibility in object files typically takes care of this. - if crate_type == CrateType::Executable - && self.sess.target.options.override_export_symbols.is_none() + if crate_type == CrateType::Executable && self.sess.target.override_export_symbols.is_none() { return; } @@ -514,7 +513,7 @@ impl<'a> Linker for GccLinker<'a> { // The object files have far more public symbols than we actually want to export, // so we hide them all here. - if !self.sess.target.options.limit_rdylib_exports { + if !self.sess.target.limit_rdylib_exports { return; } @@ -522,13 +521,13 @@ impl<'a> Linker for GccLinker<'a> { return; } - let is_windows = self.sess.target.options.is_like_windows; + let is_windows = self.sess.target.is_like_windows; let mut arg = OsString::new(); let path = tmpdir.join(if is_windows { "list.def" } else { "list" }); debug!("EXPORTED SYMBOLS:"); - if self.sess.target.options.is_like_osx { + if self.sess.target.is_like_osx { // Write a plain, newline-separated list of symbols let res: io::Result<()> = try { let mut f = BufWriter::new(File::create(&path)?); @@ -574,12 +573,12 @@ impl<'a> Linker for GccLinker<'a> { } } - if self.sess.target.options.is_like_osx { + if self.sess.target.is_like_osx { if !self.is_ld { arg.push("-Wl,") } arg.push("-exported_symbols_list,"); - } else if self.sess.target.options.is_like_solaris { + } else if self.sess.target.is_like_solaris { if !self.is_ld { arg.push("-Wl,") } @@ -1220,7 +1219,7 @@ impl<'a> Linker for WasmLd<'a> { } fn exported_symbols(tcx: TyCtxt<'_>, crate_type: CrateType) -> Vec { - if let Some(ref exports) = tcx.sess.target.options.override_export_symbols { + if let Some(ref exports) = tcx.sess.target.override_export_symbols { return exports.clone(); } @@ -1310,7 +1309,7 @@ impl<'a> Linker for PtxLinker<'a> { // Provide the linker with fallback to internal `target-cpu`. self.cmd.arg("--fallback-arch").arg(match self.sess.opts.cg.target_cpu { Some(ref s) => s, - None => &self.sess.target.options.cpu, + None => &self.sess.target.cpu, }); } diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs index dd8d751d045..9a6f8cde1b2 100644 --- a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs +++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs @@ -229,8 +229,7 @@ fn exported_symbols_provider_local( // needs to be exported. // However, on platforms that don't allow for Rust dylibs, having // external linkage is enough for monomorphization to be linked to. 
- let need_visibility = - tcx.sess.target.options.dynamic_linking && !tcx.sess.target.options.only_cdylib; + let need_visibility = tcx.sess.target.dynamic_linking && !tcx.sess.target.only_cdylib; let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE); diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs index 4d2cea18dcc..7f2bb7b5bcd 100644 --- a/compiler/rustc_codegen_ssa/src/back/write.rs +++ b/compiler/rustc_codegen_ssa/src/back/write.rs @@ -139,7 +139,7 @@ impl ModuleConfig { let emit_obj = if !should_emit_obj { EmitObj::None - } else if sess.target.options.obj_is_bitcode + } else if sess.target.obj_is_bitcode || (sess.opts.cg.linker_plugin_lto.enabled() && !no_builtins) { // This case is selected if the target uses objects as bitcode, or @@ -221,11 +221,11 @@ impl ModuleConfig { false ), emit_obj, - bc_cmdline: sess.target.options.bitcode_llvm_cmdline.clone(), + bc_cmdline: sess.target.bitcode_llvm_cmdline.clone(), verify_llvm_ir: sess.verify_llvm_ir(), no_prepopulate_passes: sess.opts.cg.no_prepopulate_passes, - no_builtins: no_builtins || sess.target.options.no_builtins, + no_builtins: no_builtins || sess.target.no_builtins, // Exclude metadata and allocator modules from time_passes output, // since they throw off the "LLVM passes" measurement. @@ -252,7 +252,7 @@ impl ModuleConfig { .opts .debugging_opts .merge_functions - .unwrap_or(sess.target.options.merge_functions) + .unwrap_or(sess.target.merge_functions) { MergeFunctions::Disabled => false, MergeFunctions::Trampolines | MergeFunctions::Aliases => { @@ -307,6 +307,7 @@ pub struct CodegenContext { pub allocator_module_config: Arc, pub tm_factory: TargetMachineFactory, pub msvc_imps_needed: bool, + pub is_pe_coff: bool, pub target_pointer_width: u32, pub target_arch: String, pub debuginfo: config::DebugInfo, @@ -388,7 +389,7 @@ fn need_bitcode_in_object(sess: &Session) -> bool { let requested_for_rlib = sess.opts.cg.embed_bitcode && sess.crate_types().contains(&CrateType::Rlib) && sess.opts.output_types.contains_key(&OutputType::Exe); - let forced_by_target = sess.target.options.forces_embed_bitcode; + let forced_by_target = sess.target.forces_embed_bitcode; requested_for_rlib || forced_by_target } @@ -1022,6 +1023,7 @@ fn start_executing_work( tm_factory: TargetMachineFactory(backend.target_machine_factory(tcx.sess, ol)), total_cgus, msvc_imps_needed: msvc_imps_needed(tcx), + is_pe_coff: tcx.sess.target.is_like_windows, target_pointer_width: tcx.sess.target.pointer_width, target_arch: tcx.sess.target.arch.clone(), debuginfo: tcx.sess.opts.debuginfo, @@ -1865,11 +1867,11 @@ fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool { // something is wrong with commandline arg validation. assert!( !(tcx.sess.opts.cg.linker_plugin_lto.enabled() - && tcx.sess.target.options.is_like_windows + && tcx.sess.target.is_like_windows && tcx.sess.opts.cg.prefer_dynamic) ); - tcx.sess.target.options.is_like_windows && + tcx.sess.target.is_like_windows && tcx.sess.crate_types().iter().any(|ct| *ct == CrateType::Rlib) && // ThinLTO can't handle this workaround in all cases, so we don't // emit the `__imp_` symbols. 
Instead we make them unnecessary by disallowing diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index afb407b35be..21138f967a2 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -326,7 +326,7 @@ fn cast_shift_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( /// currently uses SEH-ish unwinding with DWARF info tables to the side (same as /// 64-bit MinGW) instead of "full SEH". pub fn wants_msvc_seh(sess: &Session) -> bool { - sess.target.options.is_like_msvc + sess.target.is_like_msvc } pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( @@ -387,7 +387,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( ) -> Bx::Function { // The entry function is either `int main(void)` or `int main(int argc, char **argv)`, // depending on whether the target needs `argc` and `argv` to be passed in. - let llfty = if cx.sess().target.options.main_needs_argc_argv { + let llfty = if cx.sess().target.main_needs_argc_argv { cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int()) } else { cx.type_func(&[], cx.type_int()) @@ -399,7 +399,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( // late-bound regions, since late-bound // regions must appear in the argument // listing. - let main_ret_ty = cx.tcx().erase_regions(&main_ret_ty.no_bound_vars().unwrap()); + let main_ret_ty = cx.tcx().erase_regions(main_ret_ty.no_bound_vars().unwrap()); let llfn = match cx.declare_c_main(llfty) { Some(llfn) => llfn, @@ -459,7 +459,7 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( cx: &'a Bx::CodegenCx, bx: &mut Bx, ) -> (Bx::Value, Bx::Value) { - if cx.sess().target.options.main_needs_argc_argv { + if cx.sess().target.main_needs_argc_argv { // Params from native `main()` used as args for rust start function let param_argc = bx.get_param(0); let param_argv = bx.get_param(1); diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs index c4c51d146a6..896af8a9191 100644 --- a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs +++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs @@ -33,7 +33,7 @@ pub fn push_debuginfo_type_name<'tcx>( ) { // When targeting MSVC, emit C++ style type names for compatibility with // .natvis visualizers (and perhaps other existing native debuggers?) - let cpp_like_names = tcx.sess.target.options.is_like_msvc; + let cpp_like_names = tcx.sess.target.is_like_msvc; match *t.kind() { ty::Bool => output.push_str("bool"), @@ -120,8 +120,8 @@ pub fn push_debuginfo_type_name<'tcx>( } ty::Dynamic(ref trait_data, ..) 
=> { if let Some(principal) = trait_data.principal() { - let principal = tcx - .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &principal); + let principal = + tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), principal); push_item_name(tcx, principal.def_id, false, output); push_type_params(tcx, principal.substs, output, visited); } else { @@ -159,7 +159,7 @@ pub fn push_debuginfo_type_name<'tcx>( output.push_str("fn("); - let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig); if !sig.inputs().is_empty() { for ¶meter_type in sig.inputs() { push_debuginfo_type_name(tcx, parameter_type, true, output, visited); diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs index bdde07d3fa9..44bb0deeae9 100644 --- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs +++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs @@ -24,7 +24,7 @@ pub fn non_ssa_locals<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( analyzer.visit_body(&mir); for (local, decl) in mir.local_decls.iter_enumerated() { - let ty = fx.monomorphize(&decl.ty); + let ty = fx.monomorphize(decl.ty); debug!("local {:?} has type `{}`", local, ty); let layout = fx.cx.spanned_layout_of(ty, decl.source_info.span); if fx.cx.is_backend_immediate(layout) { @@ -121,10 +121,10 @@ impl> LocalAnalyzer<'mir, 'a, 'tcx, Bx> { if is_consume { let base_ty = mir::Place::ty_from(place_ref.local, proj_base, self.fx.mir, cx.tcx()); - let base_ty = self.fx.monomorphize(&base_ty); + let base_ty = self.fx.monomorphize(base_ty); // ZSTs don't require any actual memory access. - let elem_ty = base_ty.projection_ty(cx.tcx(), self.fx.monomorphize(&elem)).ty; + let elem_ty = base_ty.projection_ty(cx.tcx(), self.fx.monomorphize(elem)).ty; let span = self.fx.mir.local_decls[place_ref.local].source_info.span; if cx.spanned_layout_of(elem_ty, span).is_zst() { return; @@ -313,7 +313,7 @@ impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx> PlaceContext::MutatingUse(MutatingUseContext::Drop) => { let ty = self.fx.mir.local_decls[local].ty; - let ty = self.fx.monomorphize(&ty); + let ty = self.fx.monomorphize(ty); // Only need the place if we're actually dropping it. if self.fx.cx.type_needs_drop(ty) { diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index 6507c1a5400..341e5fa905a 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -266,7 +266,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { return; } let llval = match self.fn_abi.ret.mode { - PassMode::Ignore | PassMode::Indirect(..) => { + PassMode::Ignore | PassMode::Indirect { .. 
} => { bx.ret_void(); return; } @@ -317,7 +317,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { unwind: Option, ) { let ty = location.ty(self.mir, bx.tcx()).ty; - let ty = self.monomorphize(&ty); + let ty = self.monomorphize(ty); let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty); if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { @@ -641,7 +641,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { .iter() .map(|op_arg| { let op_ty = op_arg.ty(self.mir, bx.tcx()); - self.monomorphize(&op_ty) + self.monomorphize(op_ty) }) .collect::>(); @@ -965,7 +965,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } mir::InlineAsmOperand::SymFn { ref value } => { - let literal = self.monomorphize(&value.literal); + let literal = self.monomorphize(value.literal); if let ty::FnDef(def_id, substs) = *literal.ty.kind() { let instance = ty::Instance::resolve_for_fn_ptr( bx.tcx(), @@ -1192,7 +1192,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Force by-ref if we have to load through a cast pointer. let (mut llval, align, by_ref) = match op.val { Immediate(_) | Pair(..) => match arg.mode { - PassMode::Indirect(..) | PassMode::Cast(_) => { + PassMode::Indirect { .. } | PassMode::Cast(_) => { let scratch = PlaceRef::alloca(bx, arg.layout); op.val.store(bx, scratch); (scratch.llval, scratch.align, true) diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs index 4943e279c7e..c8001b8daf0 100644 --- a/compiler/rustc_codegen_ssa/src/mir/constant.rs +++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs @@ -16,7 +16,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { constant: &mir::Constant<'tcx>, ) -> Result, ErrorHandled> { let val = self.eval_mir_constant(constant)?; - let ty = self.monomorphize(&constant.literal.ty); + let ty = self.monomorphize(constant.literal.ty); Ok(OperandRef::from_const(bx, val, ty)) } @@ -24,7 +24,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { &mut self, constant: &mir::Constant<'tcx>, ) -> Result, ErrorHandled> { - match self.monomorphize(&constant.literal).val { + match self.monomorphize(constant.literal).val { ty::ConstKind::Unevaluated(def, substs, promoted) => self .cx .tcx() @@ -83,7 +83,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { .unwrap_or_else(|_| { bx.tcx().sess.span_err(span, "could not evaluate shuffle_indices at compile time"); // We've errored, so we don't have to produce working code. - let ty = self.monomorphize(&ty); + let ty = self.monomorphize(ty); let llty = bx.backend_type(bx.layout_of(ty)); (bx.const_undef(llty), ty) }) diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs index 45296df0ee6..192c5a7101d 100644 --- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs +++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs @@ -160,7 +160,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // FIXME(eddyb) is this `+ 1` needed at all? 
let kind = VariableKind::ArgumentVariable(arg_index + 1); - let arg_ty = self.monomorphize(&decl.ty); + let arg_ty = self.monomorphize(decl.ty); self.cx.create_dbg_var(name, arg_ty, dbg_scope, kind, span) }, diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs index 2bf1ee43c73..94340e92048 100644 --- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs +++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs @@ -64,7 +64,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }; let sig = callee_ty.fn_sig(bx.tcx()); - let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig); let arg_tys = sig.inputs(); let ret_ty = sig.output(); let name = bx.tcx().item_name(def_id); diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs index 5a4eb250ac6..1265188fcce 100644 --- a/compiler/rustc_codegen_ssa/src/mir/mod.rs +++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs @@ -90,7 +90,7 @@ pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> { } impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { - pub fn monomorphize(&self, value: &T) -> T + pub fn monomorphize(&self, value: T) -> T where T: Copy + TypeFoldable<'tcx>, { @@ -223,7 +223,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( let mut allocate_local = |local| { let decl = &mir.local_decls[local]; - let layout = bx.layout_of(fx.monomorphize(&decl.ty)); + let layout = bx.layout_of(fx.monomorphize(decl.ty)); assert!(!layout.ty.has_erasable_regions()); if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() { @@ -389,7 +389,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( // to reconstruct it into a tuple local variable, from multiple // individual LLVM function arguments. - let arg_ty = fx.monomorphize(&arg_decl.ty); + let arg_ty = fx.monomorphize(arg_decl.ty); let tupled_arg_tys = match arg_ty.kind() { ty::Tuple(tys) => tys, _ => bug!("spread argument isn't a tuple?!"), @@ -410,7 +410,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( } if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() { - let arg_ty = fx.monomorphize(&arg_decl.ty); + let arg_ty = fx.monomorphize(arg_decl.ty); let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty)); bx.va_start(va_list.llval); diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs index bbd004be875..08a4ae3962b 100644 --- a/compiler/rustc_codegen_ssa/src/mir/operand.rs +++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs @@ -452,7 +452,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.abort(); // We still have to return an operand but it doesn't matter, // this code is unreachable. 
- let ty = self.monomorphize(&constant.literal.ty); + let ty = self.monomorphize(constant.literal.ty); let layout = bx.cx().layout_of(ty); bx.load_operand(PlaceRef::new_sized( bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(layout))), diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs index e1cc0268723..e4f4c884470 100644 --- a/compiler/rustc_codegen_ssa/src/mir/place.rs +++ b/compiler/rustc_codegen_ssa/src/mir/place.rs @@ -485,7 +485,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { cg_base.project_index(bx, bx.cx().const_usize(from as u64)); let projected_ty = PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, elem).ty; - subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty)); + subslice.layout = bx.cx().layout_of(self.monomorphize(projected_ty)); if subslice.layout.is_unsized() { assert!(from_end, "slice subslices should be `from_end`"); @@ -515,6 +515,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> { let tcx = self.cx.tcx(); let place_ty = mir::Place::ty_from(place_ref.local, place_ref.projection, self.mir, tcx); - self.monomorphize(&place_ty.ty) + self.monomorphize(place_ty.ty) } } diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index 7ce110dcbfc..2ad470c2693 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -106,7 +106,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } let count = - self.monomorphize(&count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all()); + self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all()); bx.write_operand_repeatedly(cg_elem, count, dest) } @@ -181,7 +181,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => { let operand = self.codegen_operand(&mut bx, source); debug!("cast operand is {:?}", operand); - let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty)); + let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty)); let val = match *kind { mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => { @@ -502,6 +502,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { + let ty = self.monomorphize(ty); assert!(bx.cx().type_is_sized(ty)); let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes()); let tcx = self.cx.tcx(); @@ -515,7 +516,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { - let content_ty = self.monomorphize(&content_ty); + let content_ty = self.monomorphize(content_ty); let content_layout = bx.cx().layout_of(content_ty); let llsize = bx.cx().const_usize(content_layout.size.bytes()); let llalign = bx.cx().const_usize(content_layout.align.abi.bytes()); @@ -553,7 +554,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // aggregate rvalues are allowed to be operands. 
let ty = rvalue.ty(self.mir, self.cx.tcx()); let operand = - OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(&ty))); + OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty))); (bx, operand) } } @@ -773,7 +774,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => { let ty = rvalue.ty(self.mir, self.cx.tcx()); - let ty = self.monomorphize(&ty); + let ty = self.monomorphize(ty); self.cx.spanned_layout_of(ty, span).is_zst() } } diff --git a/compiler/rustc_codegen_ssa/src/sir.rs b/compiler/rustc_codegen_ssa/src/sir.rs index ff9b9675e5d..1eeec3d1428 100644 --- a/compiler/rustc_codegen_ssa/src/sir.rs +++ b/compiler/rustc_codegen_ssa/src/sir.rs @@ -401,7 +401,7 @@ impl SirFuncCx<'tcx> { self.instance.subst_mir_and_normalize_erasing_regions( self.tcx, ty::ParamEnv::reveal_all(), - value, + *value, ) } diff --git a/compiler/rustc_codegen_ssa/src/target_features.rs b/compiler/rustc_codegen_ssa/src/target_features.rs index 000ddf42604..fd18f42f2dd 100644 --- a/compiler/rustc_codegen_ssa/src/target_features.rs +++ b/compiler/rustc_codegen_ssa/src/target_features.rs @@ -4,6 +4,11 @@ use rustc_session::Session; use rustc_span::symbol::sym; use rustc_span::symbol::Symbol; +// When adding features to the below lists +// check whether they're named already elsewhere in rust +// e.g. in stdarch and whether the given name matches LLVM's +// if it doesn't, to_llvm_feature in llvm_util in rustc_codegen_llvm needs to be adapted + const ARM_ALLOWED_FEATURES: &[(&str, Option)] = &[ ("aclass", Some(sym::arm_target_feature)), ("mclass", Some(sym::arm_target_feature)), @@ -50,15 +55,23 @@ const X86_ALLOWED_FEATURES: &[(&str, Option)] = &[ ("aes", None), ("avx", None), ("avx2", None), + ("avx512bf16", Some(sym::avx512_target_feature)), + ("avx512bitalg", Some(sym::avx512_target_feature)), ("avx512bw", Some(sym::avx512_target_feature)), ("avx512cd", Some(sym::avx512_target_feature)), ("avx512dq", Some(sym::avx512_target_feature)), ("avx512er", Some(sym::avx512_target_feature)), ("avx512f", Some(sym::avx512_target_feature)), + ("avx512gfni", Some(sym::avx512_target_feature)), ("avx512ifma", Some(sym::avx512_target_feature)), ("avx512pf", Some(sym::avx512_target_feature)), + ("avx512vaes", Some(sym::avx512_target_feature)), ("avx512vbmi", Some(sym::avx512_target_feature)), + ("avx512vbmi2", Some(sym::avx512_target_feature)), ("avx512vl", Some(sym::avx512_target_feature)), + ("avx512vnni", Some(sym::avx512_target_feature)), + ("avx512vp2intersect", Some(sym::avx512_target_feature)), + ("avx512vpclmulqdq", Some(sym::avx512_target_feature)), ("avx512vpopcntdq", Some(sym::avx512_target_feature)), ("bmi1", None), ("bmi2", None), diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs index 43bc0c83155..634a20bda9b 100644 --- a/compiler/rustc_codegen_ssa/src/traits/type_.rs +++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs @@ -51,11 +51,11 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { } fn type_int(&self) -> Self::Type { - match &self.sess().target.target_c_int_width[..] { + match &self.sess().target.c_int_width[..] 
{ "16" => self.type_i16(), "32" => self.type_i32(), "64" => self.type_i64(), - width => bug!("Unsupported target_c_int_width: {}", width), + width => bug!("Unsupported c_int_width: {}", width), } } diff --git a/compiler/rustc_data_structures/src/fingerprint.rs b/compiler/rustc_data_structures/src/fingerprint.rs index ec2f9597b18..01efcaf6f44 100644 --- a/compiler/rustc_data_structures/src/fingerprint.rs +++ b/compiler/rustc_data_structures/src/fingerprint.rs @@ -151,8 +151,67 @@ impl FingerprintDecoder for D { panic!("Cannot decode `Fingerprint` with `{}`", std::any::type_name::()); } } + impl FingerprintDecoder for opaque::Decoder<'_> { fn decode_fingerprint(&mut self) -> Result { Fingerprint::decode_opaque(self) } } + +// `PackedFingerprint` wraps a `Fingerprint`. Its purpose is to, on certain +// architectures, behave like a `Fingerprint` without alignment requirements. +// This behavior is only enabled on x86 and x86_64, where the impact of +// unaligned accesses is tolerable in small doses. +// +// This may be preferable to use in large collections of structs containing +// fingerprints, as it can reduce memory consumption by preventing the padding +// that the more strictly-aligned `Fingerprint` can introduce. An application of +// this is in the query dependency graph, which contains a large collection of +// `DepNode`s. As of this writing, the size of a `DepNode` decreases by ~30% +// (from 24 bytes to 17) by using the packed representation here, which +// noticeably decreases total memory usage when compiling large crates. +// +// The wrapped `Fingerprint` is private to reduce the chance of a client +// invoking undefined behavior by taking a reference to the packed field. +#[cfg_attr(any(target_arch = "x86", target_arch = "x86_64"), repr(packed))] +#[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Clone, Copy, Hash)] +pub struct PackedFingerprint(Fingerprint); + +impl std::fmt::Display for PackedFingerprint { + #[inline] + fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + // Copy to avoid taking reference to packed field. + let copy = self.0; + copy.fmt(formatter) + } +} + +impl Encodable for PackedFingerprint { + #[inline] + fn encode(&self, s: &mut E) -> Result<(), E::Error> { + // Copy to avoid taking reference to packed field. + let copy = self.0; + copy.encode(s) + } +} + +impl Decodable for PackedFingerprint { + #[inline] + fn decode(d: &mut D) -> Result { + Fingerprint::decode(d).map(|f| PackedFingerprint(f)) + } +} + +impl From for PackedFingerprint { + #[inline] + fn from(f: Fingerprint) -> PackedFingerprint { + PackedFingerprint(f) + } +} + +impl From for Fingerprint { + #[inline] + fn from(f: PackedFingerprint) -> Fingerprint { + f.0 + } +} diff --git a/compiler/rustc_data_structures/src/functor.rs b/compiler/rustc_data_structures/src/functor.rs new file mode 100644 index 00000000000..fe7a256d210 --- /dev/null +++ b/compiler/rustc_data_structures/src/functor.rs @@ -0,0 +1,82 @@ +use rustc_index::vec::{Idx, IndexVec}; +use std::mem; +use std::ptr; + +pub trait IdFunctor { + type Inner; + + fn map_id(self, f: F) -> Self + where + F: FnMut(Self::Inner) -> Self::Inner; +} + +impl IdFunctor for Box { + type Inner = T; + + #[inline] + fn map_id(self, mut f: F) -> Self + where + F: FnMut(Self::Inner) -> Self::Inner, + { + let raw = Box::into_raw(self); + unsafe { + // SAFETY: The raw pointer points to a valid value of type `T`. 
+ let value = ptr::read(raw); + // SAFETY: Converts `Box` to `Box>` which is the + // inverse of `Box::assume_init()` and should be safe. + let mut raw: Box> = Box::from_raw(raw.cast()); + // SAFETY: Write the mapped value back into the `Box`. + ptr::write(raw.as_mut_ptr(), f(value)); + // SAFETY: We just initialized `raw`. + raw.assume_init() + } + } +} + +impl IdFunctor for Vec { + type Inner = T; + + #[inline] + fn map_id(mut self, mut f: F) -> Self + where + F: FnMut(Self::Inner) -> Self::Inner, + { + // FIXME: We don't really care about panics here and leak + // far more than we should, but that should be fine for now. + let len = self.len(); + unsafe { + self.set_len(0); + let start = self.as_mut_ptr(); + for i in 0..len { + let p = start.add(i); + ptr::write(p, f(ptr::read(p))); + } + self.set_len(len); + } + self + } +} + +impl IdFunctor for Box<[T]> { + type Inner = T; + + #[inline] + fn map_id(self, f: F) -> Self + where + F: FnMut(Self::Inner) -> Self::Inner, + { + Vec::from(self).map_id(f).into() + } +} + +impl IdFunctor for IndexVec { + type Inner = T; + + #[inline] + fn map_id(self, f: F) -> Self + where + F: FnMut(Self::Inner) -> Self::Inner, + { + IndexVec::from_raw(self.raw.map_id(f)) + } +} diff --git a/compiler/rustc_data_structures/src/graph/scc/mod.rs b/compiler/rustc_data_structures/src/graph/scc/mod.rs index 486a9ba77b7..5b3d8233f3d 100644 --- a/compiler/rustc_data_structures/src/graph/scc/mod.rs +++ b/compiler/rustc_data_structures/src/graph/scc/mod.rs @@ -231,20 +231,30 @@ where let scc_indices = (0..num_nodes) .map(G::Node::new) - .map(|node| match this.walk_node(0, node) { + .map(|node| match this.start_walk_from(node) { WalkReturn::Complete { scc_index } => scc_index, - WalkReturn::Cycle { min_depth } => { - panic!("`walk_node(0, {:?})` returned cycle with depth {:?}", node, min_depth) - } + WalkReturn::Cycle { min_depth } => panic!( + "`start_walk_node({:?})` returned cycle with depth {:?}", + node, min_depth + ), }) .collect(); Sccs { scc_indices, scc_data: this.scc_data } } - /// Visits a node during the DFS. We first examine its current - /// state -- if it is not yet visited (`NotVisited`), we can push - /// it onto the stack and start walking its successors. + fn start_walk_from(&mut self, node: G::Node) -> WalkReturn { + if let Some(result) = self.inspect_node(node) { + result + } else { + self.walk_unvisited_node(node) + } + } + + /// Inspect a node during the DFS. We first examine its current + /// state -- if it is not yet visited (`NotVisited`), return `None` so + /// that the caller might push it onto the stack and start walking its + /// successors. /// /// If it is already on the DFS stack it will be in the state /// `BeingVisited`. In that case, we have found a cycle and we @@ -253,20 +263,19 @@ where /// Otherwise, we are looking at a node that has already been /// completely visited. We therefore return `WalkReturn::Complete` /// with its associated SCC index. 
- fn walk_node(&mut self, depth: usize, node: G::Node) -> WalkReturn { - debug!("walk_node(depth = {:?}, node = {:?})", depth, node); - match self.find_state(node) { + fn inspect_node(&mut self, node: G::Node) -> Option> { + Some(match self.find_state(node) { NodeState::InCycle { scc_index } => WalkReturn::Complete { scc_index }, NodeState::BeingVisited { depth: min_depth } => WalkReturn::Cycle { min_depth }, - NodeState::NotVisited => self.walk_unvisited_node(depth, node), + NodeState::NotVisited => return None, NodeState::InCycleWith { parent } => panic!( "`find_state` returned `InCycleWith({:?})`, which ought to be impossible", parent ), - } + }) } /// Fetches the state of the node `r`. If `r` is recorded as being @@ -274,104 +283,292 @@ where /// of `r2` (and updates `r` to reflect current result). This is /// basically the "find" part of a standard union-find algorithm /// (with path compression). - fn find_state(&mut self, r: G::Node) -> NodeState { - debug!("find_state(r = {:?} in state {:?})", r, self.node_states[r]); - match self.node_states[r] { - NodeState::InCycle { scc_index } => NodeState::InCycle { scc_index }, - NodeState::BeingVisited { depth } => NodeState::BeingVisited { depth }, - NodeState::NotVisited => NodeState::NotVisited, - NodeState::InCycleWith { parent } => { - let parent_state = self.find_state(parent); - debug!("find_state: parent_state = {:?}", parent_state); - match parent_state { - NodeState::InCycle { .. } => { - self.node_states[r] = parent_state; - parent_state - } + fn find_state(&mut self, mut node: G::Node) -> NodeState { + // To avoid recursion we temporarily reuse the `parent` of each + // InCycleWith link to encode a downwards link while compressing + // the path. After we have found the root or deepest node being + // visited, we traverse the reverse links and correct the node + // states on the way. + // + // **Note**: This mutation requires that this is a leaf function + // or at least that none of the called functions inspects the + // current node states. Luckily, we are a leaf. + + // Remember one previous link. The termination condition when + // following links downwards is then simply as soon as we have + // found the initial self-loop. + let mut previous_node = node; + + // Ultimately assigned by the parent when following + // `InCycleWith` upwards. + let node_state = loop { + debug!("find_state(r = {:?} in state {:?})", node, self.node_states[node]); + match self.node_states[node] { + NodeState::InCycle { scc_index } => break NodeState::InCycle { scc_index }, + NodeState::BeingVisited { depth } => break NodeState::BeingVisited { depth }, + NodeState::NotVisited => break NodeState::NotVisited, + NodeState::InCycleWith { parent } => { + // We test this, to be extremely sure that we never + // ever break our termination condition for the + // reverse iteration loop. + assert!(node != parent, "Node can not be in cycle with itself"); + // Store the previous node as an inverted list link + self.node_states[node] = NodeState::InCycleWith { parent: previous_node }; + // Update to parent node. + previous_node = node; + node = parent; + } + } + }; - NodeState::BeingVisited { depth } => { - self.node_states[r] = - NodeState::InCycleWith { parent: self.node_stack[depth] }; - parent_state - } + // The states form a graph where up to one outgoing link is stored at + // each node. 
Initially in general, + // + // E + // ^ + // | + // InCycleWith/BeingVisited/NotVisited + // | + // A-InCycleWith->B-InCycleWith…>C-InCycleWith->D-+ + // | + // = node, previous_node + // + // After the first loop, this will look like + // E + // ^ + // | + // InCycleWith/BeingVisited/NotVisited + // | + // +>A<-InCycleWith-B<…InCycleWith-C<-InCycleWith-D-+ + // | | | | + // | InCycleWith | = node + // +-+ =previous_node + // + // Note in particular that A will be linked to itself in a self-cycle + // and no other self-cycles occur due to how InCycleWith is assigned in + // the find phase implemented by `walk_unvisited_node`. + // + // We now want to compress the path, that is assign the state of the + // link D-E to all other links. + // + // We can then walk backwards, starting from `previous_node`, and assign + // each node in the list with the updated state. The loop terminates + // when we reach the self-cycle. + + // Move backwards until we found the node where we started. We + // will know when we hit the state where previous_node == node. + loop { + // Back at the beginning, we can return. + if previous_node == node { + return node_state; + } + // Update to previous node in the link. + match self.node_states[previous_node] { + NodeState::InCycleWith { parent: previous } => { + node = previous_node; + previous_node = previous; + } + // Only InCycleWith nodes were added to the reverse linked list. + other => panic!("Invalid previous link while compressing cycle: {:?}", other), + } - NodeState::NotVisited | NodeState::InCycleWith { .. } => { - panic!("invalid parent state: {:?}", parent_state) - } + debug!("find_state: parent_state = {:?}", node_state); + + // Update the node state from the parent state. The assigned + // state is actually a loop invariant but it will only be + // evaluated if there is at least one backlink to follow. + // Fully trusting llvm here to find this loop optimization. + match node_state { + // Path compression, make current node point to the same root. + NodeState::InCycle { .. } => { + self.node_states[node] = node_state; + } + // Still visiting nodes, compress to cycle to the node + // at that depth. + NodeState::BeingVisited { depth } => { + self.node_states[node] = + NodeState::InCycleWith { parent: self.node_stack[depth] }; + } + // These are never allowed as parent nodes. InCycleWith + // should have been followed to a real parent and + // NotVisited can not be part of a cycle since it should + // have instead gotten explored. + NodeState::NotVisited | NodeState::InCycleWith { .. } => { + panic!("invalid parent state: {:?}", node_state) } } } } /// Walks a node that has never been visited before. - fn walk_unvisited_node(&mut self, depth: usize, node: G::Node) -> WalkReturn { - debug!("walk_unvisited_node(depth = {:?}, node = {:?})", depth, node); - - debug_assert!(matches!(self.node_states[node], NodeState::NotVisited)); - - // Push `node` onto the stack. - self.node_states[node] = NodeState::BeingVisited { depth }; - self.node_stack.push(node); - - // Walk each successor of the node, looking to see if any of - // them can reach a node that is presently on the stack. If - // so, that means they can also reach us. 
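// Illustrative sketch (an assumption, simplified from the hunk above): the rewritten
// `find_state` is the "find" half of union-find done without recursion. The real code
// reverses the `InCycleWith` links in place and repairs them on the way back; here an
// explicit path vector plays the same role.
fn find_root(parent: &mut Vec<usize>, mut node: usize) -> usize {
    let mut path = Vec::new();
    // Follow parent links until we hit a self-parented root.
    while parent[node] != node {
        path.push(node);
        node = parent[node];
    }
    // Path compression: point every node we passed directly at the root.
    for n in path {
        parent[n] = node;
    }
    node
}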
- let mut min_depth = depth; - let mut min_cycle_root = node; - let successors_len = self.successors_stack.len(); - for successor_node in self.graph.successors(node) { - debug!("walk_unvisited_node: node = {:?} successor_ode = {:?}", node, successor_node); - match self.walk_node(depth + 1, successor_node) { - WalkReturn::Cycle { min_depth: successor_min_depth } => { - // Track the minimum depth we can reach. - assert!(successor_min_depth <= depth); - if successor_min_depth < min_depth { + /// + /// Call this method when `inspect_node` has returned `None`. Having the + /// caller decide avoids mutual recursion between the two methods and allows + /// us to maintain an allocated stack for nodes on the path between calls. + fn walk_unvisited_node(&mut self, initial: G::Node) -> WalkReturn { + struct VisitingNodeFrame { + node: G::Node, + iter: Option, + depth: usize, + min_depth: usize, + successors_len: usize, + min_cycle_root: G::Node, + successor_node: G::Node, + } + + // Move the stack to a local variable. We want to utilize the existing allocation and + // mutably borrow it without borrowing self at the same time. + let mut successors_stack = core::mem::take(&mut self.successors_stack); + debug_assert_eq!(successors_stack.len(), 0); + + let mut stack: Vec> = vec![VisitingNodeFrame { + node: initial, + depth: 0, + min_depth: 0, + iter: None, + successors_len: 0, + min_cycle_root: initial, + successor_node: initial, + }]; + + let mut return_value = None; + + 'recurse: while let Some(frame) = stack.last_mut() { + let VisitingNodeFrame { + node, + depth, + iter, + successors_len, + min_depth, + min_cycle_root, + successor_node, + } = frame; + + let node = *node; + let depth = *depth; + + let successors = match iter { + Some(iter) => iter, + None => { + // This None marks that we still have the initialize this node's frame. + debug!("walk_unvisited_node(depth = {:?}, node = {:?})", depth, node); + + debug_assert!(matches!(self.node_states[node], NodeState::NotVisited)); + + // Push `node` onto the stack. + self.node_states[node] = NodeState::BeingVisited { depth }; + self.node_stack.push(node); + + // Walk each successor of the node, looking to see if any of + // them can reach a node that is presently on the stack. If + // so, that means they can also reach us. + *successors_len = successors_stack.len(); + // Set and return a reference, this is currently empty. + iter.get_or_insert(self.graph.successors(node)) + } + }; + + // Now that iter is initialized, this is a constant for this frame. + let successors_len = *successors_len; + + // Construct iterators for the nodes and walk results. There are two cases: + // * The walk of a successor node returned. + // * The remaining successor nodes. + let returned_walk = + return_value.take().into_iter().map(|walk| (*successor_node, Some(walk))); + + let successor_walk = successors.by_ref().map(|successor_node| { + debug!( + "walk_unvisited_node: node = {:?} successor_ode = {:?}", + node, successor_node + ); + (successor_node, self.inspect_node(successor_node)) + }); + + for (successor_node, walk) in returned_walk.chain(successor_walk) { + match walk { + Some(WalkReturn::Cycle { min_depth: successor_min_depth }) => { + // Track the minimum depth we can reach. 
+ assert!(successor_min_depth <= depth); + if successor_min_depth < *min_depth { + debug!( + "walk_unvisited_node: node = {:?} successor_min_depth = {:?}", + node, successor_min_depth + ); + *min_depth = successor_min_depth; + *min_cycle_root = successor_node; + } + } + + Some(WalkReturn::Complete { scc_index: successor_scc_index }) => { + // Push the completed SCC indices onto + // the `successors_stack` for later. debug!( - "walk_unvisited_node: node = {:?} successor_min_depth = {:?}", - node, successor_min_depth + "walk_unvisited_node: node = {:?} successor_scc_index = {:?}", + node, successor_scc_index ); - min_depth = successor_min_depth; - min_cycle_root = successor_node; + successors_stack.push(successor_scc_index); } - } - WalkReturn::Complete { scc_index: successor_scc_index } => { - // Push the completed SCC indices onto - // the `successors_stack` for later. - debug!( - "walk_unvisited_node: node = {:?} successor_scc_index = {:?}", - node, successor_scc_index - ); - self.successors_stack.push(successor_scc_index); + None => { + let depth = depth + 1; + debug!("walk_node(depth = {:?}, node = {:?})", depth, successor_node); + // Remember which node the return value will come from. + frame.successor_node = successor_node; + // Start a new stack frame the step into it. + stack.push(VisitingNodeFrame { + node: successor_node, + depth, + iter: None, + successors_len: 0, + min_depth: depth, + min_cycle_root: successor_node, + successor_node: successor_node, + }); + continue 'recurse; + } } } - } - // Completed walk, remove `node` from the stack. - let r = self.node_stack.pop(); - debug_assert_eq!(r, Some(node)); - - // If `min_depth == depth`, then we are the root of the - // cycle: we can't reach anyone further down the stack. - if min_depth == depth { - // Note that successor stack may have duplicates, so we - // want to remove those: - let deduplicated_successors = { - let duplicate_set = &mut self.duplicate_set; - duplicate_set.clear(); - self.successors_stack - .drain(successors_len..) - .filter(move |&i| duplicate_set.insert(i)) - }; - let scc_index = self.scc_data.create_scc(deduplicated_successors); - self.node_states[node] = NodeState::InCycle { scc_index }; - WalkReturn::Complete { scc_index } - } else { - // We are not the head of the cycle. Return back to our - // caller. They will take ownership of the - // `self.successors` data that we pushed. - self.node_states[node] = NodeState::InCycleWith { parent: min_cycle_root }; - WalkReturn::Cycle { min_depth } + // Completed walk, remove `node` from the stack. + let r = self.node_stack.pop(); + debug_assert_eq!(r, Some(node)); + + // Remove the frame, it's done. + let frame = stack.pop().unwrap(); + + // If `min_depth == depth`, then we are the root of the + // cycle: we can't reach anyone further down the stack. + + // Pass the 'return value' down the stack. + // We return one frame at a time so there can't be another return value. + debug_assert!(return_value.is_none()); + return_value = Some(if frame.min_depth == depth { + // Note that successor stack may have duplicates, so we + // want to remove those: + let deduplicated_successors = { + let duplicate_set = &mut self.duplicate_set; + duplicate_set.clear(); + successors_stack + .drain(successors_len..) + .filter(move |&i| duplicate_set.insert(i)) + }; + let scc_index = self.scc_data.create_scc(deduplicated_successors); + self.node_states[node] = NodeState::InCycle { scc_index }; + WalkReturn::Complete { scc_index } + } else { + // We are not the head of the cycle. 
Return back to our + // caller. They will take ownership of the + // `self.successors` data that we pushed. + self.node_states[node] = NodeState::InCycleWith { parent: frame.min_cycle_root }; + WalkReturn::Cycle { min_depth: frame.min_depth } + }); } + + // Keep the allocation we used for successors_stack. + self.successors_stack = successors_stack; + debug_assert_eq!(self.successors_stack.len(), 0); + + return_value.unwrap() } } diff --git a/compiler/rustc_data_structures/src/graph/scc/tests.rs b/compiler/rustc_data_structures/src/graph/scc/tests.rs index 1d5f46ebab1..364005e67e6 100644 --- a/compiler/rustc_data_structures/src/graph/scc/tests.rs +++ b/compiler/rustc_data_structures/src/graph/scc/tests.rs @@ -1,3 +1,5 @@ +extern crate test; + use super::*; use crate::graph::tests::TestGraph; @@ -139,3 +141,73 @@ fn test_find_state_3() { assert_eq!(sccs.successors(0), &[]); assert_eq!(sccs.successors(1), &[0]); } + +#[test] +fn test_deep_linear() { + /* + 0 + | + v + 1 + | + v + 2 + | + v + … + */ + const NR_NODES: usize = 1 << 14; + let mut nodes = vec![]; + for i in 1..NR_NODES { + nodes.push((i - 1, i)); + } + let graph = TestGraph::new(0, nodes.as_slice()); + let sccs: Sccs<_, usize> = Sccs::new(&graph); + assert_eq!(sccs.num_sccs(), NR_NODES); + assert_eq!(sccs.scc(0), NR_NODES - 1); + assert_eq!(sccs.scc(NR_NODES - 1), 0); +} + +#[bench] +fn bench_sccc(b: &mut test::Bencher) { + // Like `test_three_sccs` but each state is replaced by a group of + // three or four to have some amount of test data. + /* + 0-3 + | + v + +->4-6 11-14 + | | | + | v | + +--7-10<-+ + */ + fn make_3_clique(slice: &mut [(usize, usize)], base: usize) { + slice[0] = (base + 0, base + 1); + slice[1] = (base + 1, base + 2); + slice[2] = (base + 2, base + 0); + } + // Not actually a clique but strongly connected. + fn make_4_clique(slice: &mut [(usize, usize)], base: usize) { + slice[0] = (base + 0, base + 1); + slice[1] = (base + 1, base + 2); + slice[2] = (base + 2, base + 3); + slice[3] = (base + 3, base + 0); + slice[4] = (base + 1, base + 3); + slice[5] = (base + 2, base + 1); + } + + let mut graph = [(0, 0); 6 + 3 + 6 + 3 + 4]; + make_4_clique(&mut graph[0..6], 0); + make_3_clique(&mut graph[6..9], 4); + make_4_clique(&mut graph[9..15], 7); + make_3_clique(&mut graph[15..18], 11); + graph[18] = (0, 4); + graph[19] = (5, 7); + graph[20] = (11, 10); + graph[21] = (7, 4); + let graph = TestGraph::new(0, &graph[..]); + b.iter(|| { + let sccs: Sccs<_, usize> = Sccs::new(&graph); + assert_eq!(sccs.num_sccs(), 3); + }); +} diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs index 7669b78834c..01604477c3e 100644 --- a/compiler/rustc_data_structures/src/lib.rs +++ b/compiler/rustc_data_structures/src/lib.rs @@ -27,9 +27,11 @@ #![feature(extend_one)] #![feature(const_panic)] #![feature(min_const_generics)] +#![feature(new_uninit)] #![feature(once_cell)] #![feature(maybe_uninit_uninit_array)] #![allow(rustc::default_hash_types)] +#![deny(unaligned_references)] #[macro_use] extern crate tracing; @@ -47,9 +49,9 @@ pub fn cold_path R, R>(f: F) -> R { #[macro_export] macro_rules! likely { ($e:expr) => { - #[allow(unused_unsafe)] - { - unsafe { std::intrinsics::likely($e) } + match $e { + #[allow(unused_unsafe)] + e => unsafe { std::intrinsics::likely(e) }, } }; } @@ -57,9 +59,9 @@ macro_rules! likely { #[macro_export] macro_rules! 
unlikely { ($e:expr) => { - #[allow(unused_unsafe)] - { - unsafe { std::intrinsics::unlikely($e) } + match $e { + #[allow(unused_unsafe)] + e => unsafe { std::intrinsics::unlikely(e) }, } }; } @@ -70,6 +72,7 @@ pub mod box_region; pub mod captures; pub mod const_cstr; pub mod flock; +pub mod functor; pub mod fx; pub mod graph; pub mod jobserver; @@ -102,6 +105,7 @@ pub mod work_queue; pub use atomic_ref::AtomicRef; pub mod frozen; pub mod sso; +pub mod steal; pub mod tagged_ptr; pub mod temp_dir; pub mod unhash; diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs index e598d7a683d..5d13b7f27c7 100644 --- a/compiler/rustc_data_structures/src/profiling.rs +++ b/compiler/rustc_data_structures/src/profiling.rs @@ -272,6 +272,28 @@ impl SelfProfilerRef { }) } + #[inline(always)] + pub fn generic_activity_with_args( + &self, + event_label: &'static str, + event_args: &[String], + ) -> TimingGuard<'_> { + self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| { + let builder = EventIdBuilder::new(&profiler.profiler); + let event_label = profiler.get_or_alloc_cached_string(event_label); + let event_id = if profiler.event_filter_mask.contains(EventFilter::FUNCTION_ARGS) { + let event_args: Vec<_> = event_args + .iter() + .map(|s| profiler.get_or_alloc_cached_string(&s[..])) + .collect(); + builder.from_label_and_args(event_label, &event_args) + } else { + builder.from_label(event_label) + }; + TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id) + }) + } + /// Start profiling a query provider. Profiling continues until the /// TimingGuard returned from this call is dropped. #[inline(always)] diff --git a/compiler/rustc_middle/src/ty/steal.rs b/compiler/rustc_data_structures/src/steal.rs similarity index 82% rename from compiler/rustc_middle/src/ty/steal.rs rename to compiler/rustc_data_structures/src/steal.rs index 224e76845d7..e532a84cea3 100644 --- a/compiler/rustc_middle/src/ty/steal.rs +++ b/compiler/rustc_data_structures/src/steal.rs @@ -1,4 +1,5 @@ -use rustc_data_structures::sync::{MappedReadGuard, ReadGuard, RwLock}; +use crate::stable_hasher::{HashStable, StableHasher}; +use crate::sync::{MappedReadGuard, ReadGuard, RwLock}; /// The `Steal` struct is intended to used as the value for a query. 
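// Illustrative sketch (an assumption, simplified): the steal-once pattern this doc
// comment describes, using std's RwLock in place of rustc's sync wrappers.
use std::sync::{RwLock, RwLockReadGuard};

pub struct StealSketch<T> {
    value: RwLock<Option<T>>,
}

impl<T> StealSketch<T> {
    pub fn new(value: T) -> Self {
        StealSketch { value: RwLock::new(Some(value)) }
    }

    pub fn borrow(&self) -> RwLockReadGuard<'_, Option<T>> {
        // Reading after the value has been stolen is a bug; in this diff that is now a
        // plain `panic!` instead of `bug!`, since the type moved out of rustc_middle.
        self.value.read().unwrap()
    }

    pub fn steal(&self) -> T {
        // The value can be taken out exactly once.
        self.value.write().unwrap().take().expect("attempt to steal from stolen value")
    }
}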
/// Specifically, we sometimes have queries (*cough* MIR *cough*) @@ -31,7 +32,7 @@ impl Steal { pub fn borrow(&self) -> MappedReadGuard<'_, T> { ReadGuard::map(self.value.borrow(), |opt| match *opt { - None => bug!("attempted to read from stolen value"), + None => panic!("attempted to read from stolen value"), Some(ref v) => v, }) } @@ -42,3 +43,9 @@ impl Steal { value.expect("attempt to read from stolen value") } } + +impl> HashStable for Steal { + fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) { + self.borrow().hash_stable(hcx, hasher); + } +} diff --git a/compiler/rustc_driver/src/lib.rs b/compiler/rustc_driver/src/lib.rs index f434673c39e..e49e456792b 100644 --- a/compiler/rustc_driver/src/lib.rs +++ b/compiler/rustc_driver/src/lib.rs @@ -20,7 +20,7 @@ use rustc_data_structures::profiling::print_time_passes_entry; use rustc_data_structures::sync::SeqCst; use rustc_errors::registry::{InvalidErrorCode, Registry}; use rustc_errors::{ErrorReported, PResult}; -use rustc_feature::{find_gated_cfg, UnstableFeatures}; +use rustc_feature::find_gated_cfg; use rustc_hir::def_id::LOCAL_CRATE; use rustc_interface::util::{self, collect_crate_types, get_builtin_codegen_backend}; use rustc_interface::{interface, Queries}; @@ -746,9 +746,6 @@ impl RustcDefaultCalls { } } Cfg => { - let allow_unstable_cfg = - UnstableFeatures::from_environment().is_nightly_build(); - let mut cfgs = sess .parse_sess .config @@ -763,7 +760,7 @@ impl RustcDefaultCalls { // it, this is intended to get into Cargo and then go // through to build scripts. if (name != sym::target_feature || value != Some(sym::crt_dash_static)) - && !allow_unstable_cfg + && !sess.is_nightly_build() && find_gated_cfg(|cfg_sym| cfg_sym == name).is_some() { return None; @@ -814,14 +811,14 @@ pub fn version(binary: &str, matches: &getopts::Matches) { } } -fn usage(verbose: bool, include_unstable_options: bool) { +fn usage(verbose: bool, include_unstable_options: bool, nightly_build: bool) { let groups = if verbose { config::rustc_optgroups() } else { config::rustc_short_optgroups() }; let mut options = getopts::Options::new(); for option in groups.iter().filter(|x| include_unstable_options || x.is_stable()) { (option.apply)(&mut options); } let message = "Usage: rustc [OPTIONS] INPUT"; - let nightly_help = if nightly_options::is_nightly_build() { + let nightly_help = if nightly_build { "\n -Z help Print unstable compiler options" } else { "" @@ -831,7 +828,7 @@ fn usage(verbose: bool, include_unstable_options: bool) { } else { "\n --help -v Print the full set of options rustc accepts" }; - let at_path = if verbose && nightly_options::is_nightly_build() { + let at_path = if verbose && nightly_build { " @path Read newline separated options from `path`\n" } else { "" @@ -1034,7 +1031,9 @@ pub fn handle_options(args: &[String]) -> Option { if args.is_empty() { // user did not write `-v` nor `-Z unstable-options`, so do not // include that extra information. - usage(false, false); + let nightly_build = + rustc_feature::UnstableFeatures::from_environment(None).is_nightly_build(); + usage(false, false, nightly_build); return None; } @@ -1063,7 +1062,9 @@ pub fn handle_options(args: &[String]) -> Option { if matches.opt_present("h") || matches.opt_present("help") { // Only show unstable options in --help if we accept unstable options. 
- usage(matches.opt_present("verbose"), nightly_options::is_unstable_enabled(&matches)); + let unstable_enabled = nightly_options::is_unstable_enabled(&matches); + let nightly_build = nightly_options::match_is_nightly_build(&matches); + usage(matches.opt_present("verbose"), unstable_enabled, nightly_build); return None; } @@ -1285,15 +1286,16 @@ pub fn init_env_logger(env: &str) { } let filter = tracing_subscriber::EnvFilter::from_env(env); let layer = tracing_tree::HierarchicalLayer::default() + .with_writer(io::stderr) .with_indent_lines(true) .with_ansi(true) .with_targets(true) - .with_thread_ids(true) - .with_thread_names(true) .with_wraparound(10) .with_verbose_exit(true) .with_verbose_entry(true) .with_indent_amount(2); + #[cfg(parallel_compiler)] + let layer = layer.with_thread_ids(true).with_thread_names(true); use tracing_subscriber::layer::SubscriberExt; let subscriber = tracing_subscriber::Registry::default().with(filter).with(layer); diff --git a/compiler/rustc_driver/src/pretty.rs b/compiler/rustc_driver/src/pretty.rs index b0fbf1e03f5..305fa838afa 100644 --- a/compiler/rustc_driver/src/pretty.rs +++ b/compiler/rustc_driver/src/pretty.rs @@ -32,9 +32,6 @@ use crate::abort_on_err; // Note that since the `&PrinterSupport` is freshly constructed on each // call, it would not make sense to try to attach the lifetime of `self` // to the lifetime of the `&PrinterObject`. -// -// (The `use_once_payload` is working around the current lack of once -// functions in the compiler.) /// Constructs a `PrinterSupport` object and passes it to `f`. fn call_with_pp_support<'tcx, A, F>( @@ -407,7 +404,6 @@ pub fn print_after_parsing( annotation.pp_ann(), false, parse.edition, - parse.injected_crate_name.get().is_some(), ) }) } else { @@ -449,7 +445,6 @@ pub fn print_after_hir_lowering<'tcx>( annotation.pp_ann(), true, parse.edition, - parse.injected_crate_name.get().is_some(), ) }) } diff --git a/compiler/rustc_error_codes/src/error_codes/E0744.md b/compiler/rustc_error_codes/src/error_codes/E0744.md index 14cff3613e0..45804ab266e 100644 --- a/compiler/rustc_error_codes/src/error_codes/E0744.md +++ b/compiler/rustc_error_codes/src/error_codes/E0744.md @@ -1,4 +1,4 @@ -A control-flow expression was used inside a const context. +An unsupported expression was used inside a const context. Erroneous code example: @@ -12,12 +12,15 @@ const _: i32 = { }; ``` -At the moment, `if` and `match`, as well as the looping constructs `for`, -`while`, and `loop`, are forbidden inside a `const`, `static`, or `const fn`. +At the moment, `for` loops, `.await`, and the `Try` operator (`?`) are forbidden +inside a `const`, `static`, or `const fn`. -This will be allowed at some point in the future, but the implementation is not -yet complete. See the tracking issue for [conditionals] or [loops] in a const -context for the current status. +This may be allowed at some point in the future, but the implementation is not +yet complete. See the tracking issues for [`async`] and [`?`] in `const fn`, and +(to support `for` loops in `const fn`) the tracking issues for [`impl const +Trait for Ty`] and [`&mut T`] in `const fn`. 
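// Illustrative sketch (an assumption): what E0744 does and does not cover after this
// rewording. `if`, `match`, `while`, and `loop` are accepted in const contexts on
// stable; `for` loops, `.await`, and the `?` operator are still rejected.
const fn count_up(n: u32) -> u32 {
    let mut i = 0;
    while i < n {
        // `while` and `loop` are fine in const fn.
        i += 1;
    }
    // for _ in 0..n { }   // a `for` loop here would still emit E0744
    i
}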
-[conditionals]: https://github.com/rust-lang/rust/issues/49146 -[loops]: https://github.com/rust-lang/rust/issues/52000 +[`async`]: https://github.com/rust-lang/rust/issues/69431 +[`?`]: https://github.com/rust-lang/rust/issues/74935 +[`impl const Trait for Ty`]: https://github.com/rust-lang/rust/issues/67792 +[`&mut T`]: https://github.com/rust-lang/rust/issues/57349 diff --git a/compiler/rustc_errors/src/diagnostic_builder.rs b/compiler/rustc_errors/src/diagnostic_builder.rs index 56acdf699ef..c9259a1502c 100644 --- a/compiler/rustc_errors/src/diagnostic_builder.rs +++ b/compiler/rustc_errors/src/diagnostic_builder.rs @@ -184,16 +184,18 @@ impl<'a> DiagnosticBuilder<'a> { self.cancel(); } - /// Adds a span/label to be included in the resulting snippet. + /// Appends a labeled span to the diagnostic. /// - /// This is pushed onto the [`MultiSpan`] that was created when the diagnostic - /// was first built. That means it will be shown together with the original - /// span/label, *not* a span added by one of the `span_{note,warn,help,suggestions}` methods. + /// Labels are used to convey additional context for the diagnostic's primary span. They will + /// be shown together with the original diagnostic's span, *not* with spans added by + /// `span_note`, `span_help`, etc. Therefore, if the primary span is not displayable (because + /// the span is `DUMMY_SP` or the source code isn't found), labels will not be displayed + /// either. /// - /// This span is *not* considered a ["primary span"][`MultiSpan`]; only - /// the `Span` supplied when creating the diagnostic is primary. - /// - /// [`MultiSpan`]: ../rustc_span/struct.MultiSpan.html + /// Implementation-wise, the label span is pushed onto the [`MultiSpan`] that was created when + /// the diagnostic was constructed. However, the label span is *not* considered a + /// ["primary span"][`MultiSpan`]; only the `Span` supplied when creating the diagnostic is + /// primary. pub fn span_label(&mut self, span: Span, label: impl Into) -> &mut Self { self.0.diagnostic.span_label(span, label); self diff --git a/compiler/rustc_errors/src/emitter.rs b/compiler/rustc_errors/src/emitter.rs index 302713a21db..32104e6f00d 100644 --- a/compiler/rustc_errors/src/emitter.rs +++ b/compiler/rustc_errors/src/emitter.rs @@ -200,6 +200,11 @@ pub trait Emitter { true } + /// Checks if we can use colors in the current output stream. + fn supports_color(&self) -> bool { + false + } + fn source_map(&self) -> Option<&Lrc>; /// Formats the substitutions of the primary_span @@ -504,6 +509,10 @@ impl Emitter for EmitterWriter { fn should_show_explain(&self) -> bool { !self.short_message } + + fn supports_color(&self) -> bool { + self.dst.supports_color() + } } /// An emitter that does nothing when emitting a diagnostic. @@ -2057,6 +2066,14 @@ impl Destination { Destination::Raw(ref mut t, true) => WritableDst::ColoredRaw(Ansi::new(t)), } } + + fn supports_color(&self) -> bool { + match *self { + Self::Terminal(ref stream) => stream.supports_color(), + Self::Buffered(ref buffer) => buffer.buffer().supports_color(), + Self::Raw(_, supports_color) => supports_color, + } + } } impl<'a> WritableDst<'a> { diff --git a/compiler/rustc_expand/src/base.rs b/compiler/rustc_expand/src/base.rs index b435def87ac..1c76c31e1a7 100644 --- a/compiler/rustc_expand/src/base.rs +++ b/compiler/rustc_expand/src/base.rs @@ -251,8 +251,7 @@ pub enum ExpandResult { /// Expansion produced a result (possibly dummy). Ready(T), /// Expansion could not produce a result and needs to be retried. 
- /// The string is an explanation that will be printed if we are stuck in an infinite retry loop. - Retry(U, String), + Retry(U), } // `meta_item` is the attribute, and `item` is the item being modified. @@ -889,8 +888,10 @@ pub trait ResolverExpand { /// Some parent node that is close enough to the given macro call. fn lint_node_id(&mut self, expn_id: ExpnId) -> NodeId; + // Resolver interfaces for specific built-in macros. + /// Does `#[derive(...)]` attribute with the given `ExpnId` have built-in `Copy` inside it? fn has_derive_copy(&self, expn_id: ExpnId) -> bool; - fn add_derive_copy(&mut self, expn_id: ExpnId); + /// Path resolution logic for `#[cfg_accessible(path)]`. fn cfg_accessible(&mut self, expn_id: ExpnId, path: &ast::Path) -> Result; } @@ -919,6 +920,9 @@ pub struct ExtCtxt<'a> { pub root_path: PathBuf, pub resolver: &'a mut dyn ResolverExpand, pub current_expansion: ExpansionData, + /// Error recovery mode entered when expansion is stuck + /// (or during eager expansion, but that's a hack). + pub force_mode: bool, pub expansions: FxHashMap>, /// Called directly after having parsed an external `mod foo;` in expansion. pub(super) extern_mod_loaded: Option<&'a dyn Fn(&ast::Crate)>, @@ -945,6 +949,7 @@ impl<'a> ExtCtxt<'a> { directory_ownership: DirectoryOwnership::Owned { relative: None }, prior_type_ascription: None, }, + force_mode: false, expansions: FxHashMap::default(), } } diff --git a/compiler/rustc_expand/src/build.rs b/compiler/rustc_expand/src/build.rs index 1c9bfb902d6..30f0fc6cddf 100644 --- a/compiler/rustc_expand/src/build.rs +++ b/compiler/rustc_expand/src/build.rs @@ -298,7 +298,7 @@ impl<'a> ExtCtxt<'a> { path: ast::Path, fields: Vec, ) -> P { - self.expr(span, ast::ExprKind::Struct(path, fields, None)) + self.expr(span, ast::ExprKind::Struct(path, fields, ast::StructRest::None)) } pub fn expr_struct_ident( &self, diff --git a/compiler/rustc_expand/src/config.rs b/compiler/rustc_expand/src/config.rs index c124ab64218..563783c5b79 100644 --- a/compiler/rustc_expand/src/config.rs +++ b/compiler/rustc_expand/src/config.rs @@ -1,5 +1,7 @@ //! Conditional compilation stripping. +use crate::base::Annotatable; + use rustc_ast::attr::HasAttrs; use rustc_ast::mut_visit::*; use rustc_ast::ptr::P; @@ -291,8 +293,7 @@ impl<'a> StripUnconfigured<'a> { expanded_attrs .into_iter() .flat_map(|(item, span)| { - let orig_tokens = - attr.tokens.as_ref().unwrap_or_else(|| panic!("Missing tokens for {:?}", attr)); + let orig_tokens = attr.tokens(); // We are taking an attribute of the form `#[cfg_attr(pred, attr)]` // and producing an attribute of the form `#[attr]`. We @@ -302,7 +303,7 @@ impl<'a> StripUnconfigured<'a> { // Use the `#` in `#[cfg_attr(pred, attr)]` as the `#` token // for `attr` when we expand it to `#[attr]` - let pound_token = orig_tokens.create_token_stream().trees().next().unwrap(); + let pound_token = orig_tokens.trees().next().unwrap(); if !matches!(pound_token, TokenTree::Token(Token { kind: TokenKind::Pound, .. 
})) { panic!("Bad tokens for attribute {:?}", attr); } @@ -316,13 +317,12 @@ impl<'a> StripUnconfigured<'a> { .unwrap_or_else(|| panic!("Missing tokens for {:?}", item)) .create_token_stream(), ); - - let mut attr = attr::mk_attr_from_item(attr.style, item, span); - attr.tokens = Some(LazyTokenStream::new(TokenStream::new(vec![ + let tokens = Some(LazyTokenStream::new(TokenStream::new(vec![ (pound_token, Spacing::Alone), (bracket_group, Spacing::Alone), ]))); - self.process_cfg_attr(attr) + + self.process_cfg_attr(attr::mk_attr_from_item(item, tokens, attr.style, span)) }) .collect() } @@ -498,6 +498,49 @@ impl<'a> StripUnconfigured<'a> { pub fn configure_fn_decl(&mut self, fn_decl: &mut ast::FnDecl) { fn_decl.inputs.flat_map_in_place(|arg| self.configure(arg)); } + + pub fn fully_configure(&mut self, item: Annotatable) -> Annotatable { + // Since the item itself has already been configured by the InvocationCollector, + // we know that fold result vector will contain exactly one element + match item { + Annotatable::Item(item) => Annotatable::Item(self.flat_map_item(item).pop().unwrap()), + Annotatable::TraitItem(item) => { + Annotatable::TraitItem(self.flat_map_trait_item(item).pop().unwrap()) + } + Annotatable::ImplItem(item) => { + Annotatable::ImplItem(self.flat_map_impl_item(item).pop().unwrap()) + } + Annotatable::ForeignItem(item) => { + Annotatable::ForeignItem(self.flat_map_foreign_item(item).pop().unwrap()) + } + Annotatable::Stmt(stmt) => { + Annotatable::Stmt(stmt.map(|stmt| self.flat_map_stmt(stmt).pop().unwrap())) + } + Annotatable::Expr(mut expr) => Annotatable::Expr({ + self.visit_expr(&mut expr); + expr + }), + Annotatable::Arm(arm) => Annotatable::Arm(self.flat_map_arm(arm).pop().unwrap()), + Annotatable::Field(field) => { + Annotatable::Field(self.flat_map_field(field).pop().unwrap()) + } + Annotatable::FieldPat(fp) => { + Annotatable::FieldPat(self.flat_map_field_pattern(fp).pop().unwrap()) + } + Annotatable::GenericParam(param) => { + Annotatable::GenericParam(self.flat_map_generic_param(param).pop().unwrap()) + } + Annotatable::Param(param) => { + Annotatable::Param(self.flat_map_param(param).pop().unwrap()) + } + Annotatable::StructField(sf) => { + Annotatable::StructField(self.flat_map_struct_field(sf).pop().unwrap()) + } + Annotatable::Variant(v) => { + Annotatable::Variant(self.flat_map_variant(v).pop().unwrap()) + } + } + } } impl<'a> MutVisitor for StripUnconfigured<'a> { @@ -547,11 +590,6 @@ impl<'a> MutVisitor for StripUnconfigured<'a> { noop_flat_map_assoc_item(configure!(self, item), self) } - fn visit_mac(&mut self, _mac: &mut ast::MacCall) { - // Don't configure interpolated AST (cf. issue #34171). - // Interpolated AST will get configured once the surrounding tokens are parsed. - } - fn visit_pat(&mut self, pat: &mut P) { self.configure_pat(pat); noop_visit_pat(pat, self) diff --git a/compiler/rustc_expand/src/expand.rs b/compiler/rustc_expand/src/expand.rs index f6959591b56..5be2fee8b38 100644 --- a/compiler/rustc_expand/src/expand.rs +++ b/compiler/rustc_expand/src/expand.rs @@ -209,6 +209,28 @@ impl AstFragmentKind { self.make_from(DummyResult::any(span)).expect("couldn't create a dummy AST fragment") } + /// Fragment supports macro expansion and not just inert attributes, `cfg` and `cfg_attr`. 
+ pub fn supports_macro_expansion(self) -> bool { + match self { + AstFragmentKind::OptExpr + | AstFragmentKind::Expr + | AstFragmentKind::Pat + | AstFragmentKind::Ty + | AstFragmentKind::Stmts + | AstFragmentKind::Items + | AstFragmentKind::TraitItems + | AstFragmentKind::ImplItems + | AstFragmentKind::ForeignItems => true, + AstFragmentKind::Arms + | AstFragmentKind::Fields + | AstFragmentKind::FieldPats + | AstFragmentKind::GenericParams + | AstFragmentKind::Params + | AstFragmentKind::StructFields + | AstFragmentKind::Variants => false, + } + } + fn expect_from_annotatables>( self, items: I, @@ -404,6 +426,7 @@ impl<'a, 'b> MacroExpander<'a, 'b> { // Recursively expand all macro invocations in this AST fragment. pub fn fully_expand_fragment(&mut self, input_fragment: AstFragment) -> AstFragment { let orig_expansion_data = self.cx.current_expansion.clone(); + let orig_force_mode = self.cx.force_mode; self.cx.current_expansion.depth = 0; // Collect all macro invocations and replace them with placeholders. @@ -432,6 +455,12 @@ impl<'a, 'b> MacroExpander<'a, 'b> { } invocations = mem::take(&mut undetermined_invocations); force = !mem::replace(&mut progress, false); + if force && self.monotonic { + self.cx.sess.delay_span_bug( + invocations.last().unwrap().0.span(), + "expansion entered force mode without producing any errors", + ); + } continue; }; @@ -460,18 +489,19 @@ impl<'a, 'b> MacroExpander<'a, 'b> { let ExpansionData { depth, id: expn_id, .. } = invoc.expansion_data; self.cx.current_expansion = invoc.expansion_data.clone(); + self.cx.force_mode = force; // FIXME(jseyfried): Refactor out the following logic + let fragment_kind = invoc.fragment_kind; let (expanded_fragment, new_invocations) = match res { InvocationRes::Single(ext) => match self.expand_invoc(invoc, &ext.kind) { ExpandResult::Ready(fragment) => self.collect_invocations(fragment, &[]), - ExpandResult::Retry(invoc, explanation) => { + ExpandResult::Retry(invoc) => { if force { - // We are stuck, stop retrying and produce a dummy fragment. - let span = invoc.span(); - self.cx.span_err(span, &explanation); - let fragment = invoc.fragment_kind.dummy(span); - self.collect_invocations(fragment, &[]) + self.cx.span_bug( + invoc.span(), + "expansion entered force mode but is still stuck", + ); } else { // Cannot expand, will retry this invocation later. undetermined_invocations @@ -483,36 +513,45 @@ impl<'a, 'b> MacroExpander<'a, 'b> { InvocationRes::DeriveContainer(_exts) => { // FIXME: Consider using the derive resolutions (`_exts`) immediately, // instead of enqueuing the derives to be resolved again later. 
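// Illustrative sketch (an assumption, simplified): the shape of the expansion driver
// after this change. Invocations are retried until a full pass makes no progress, at
// which point "force mode" is entered; being forced *and* still stuck is now treated
// as a compiler bug rather than reported through a user-facing explanation string.
fn drive<T>(mut pending: Vec<T>, mut try_expand: impl FnMut(&T, bool) -> bool) {
    let mut force = false;
    while !pending.is_empty() {
        let mut progress = false;
        pending.retain(|inv| {
            if try_expand(inv, force) {
                progress = true;
                false // expanded: drop it from the queue
            } else if force {
                panic!("expansion entered force mode but is still stuck");
            } else {
                true // keep it for the next pass
            }
        });
        // Force mode is only entered when a whole pass made no progress.
        force = !progress;
    }
}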
- let (derives, item) = match invoc.kind { + let (derives, mut item) = match invoc.kind { InvocationKind::DeriveContainer { derives, item } => (derives, item), _ => unreachable!(), }; - if !item.derive_allowed() { + let (item, derive_placeholders) = if !item.derive_allowed() { self.error_derive_forbidden_on_non_adt(&derives, &item); - } + item.visit_attrs(|attrs| attrs.retain(|a| !a.has_name(sym::derive))); + (item, Vec::new()) + } else { + let mut item = StripUnconfigured { + sess: self.cx.sess, + features: self.cx.ecfg.features, + } + .fully_configure(item); + item.visit_attrs(|attrs| attrs.retain(|a| !a.has_name(sym::derive))); + + invocations.reserve(derives.len()); + let derive_placeholders = derives + .into_iter() + .map(|path| { + let expn_id = ExpnId::fresh(None); + invocations.push(( + Invocation { + kind: InvocationKind::Derive { path, item: item.clone() }, + fragment_kind, + expansion_data: ExpansionData { + id: expn_id, + ..self.cx.current_expansion.clone() + }, + }, + None, + )); + NodeId::placeholder_from_expn_id(expn_id) + }) + .collect::>(); + (item, derive_placeholders) + }; - let mut item = self.fully_configure(item); - item.visit_attrs(|attrs| attrs.retain(|a| !a.has_name(sym::derive))); - - let mut derive_placeholders = Vec::with_capacity(derives.len()); - invocations.reserve(derives.len()); - for path in derives { - let expn_id = ExpnId::fresh(None); - derive_placeholders.push(NodeId::placeholder_from_expn_id(expn_id)); - invocations.push(( - Invocation { - kind: InvocationKind::Derive { path, item: item.clone() }, - fragment_kind: invoc.fragment_kind, - expansion_data: ExpansionData { - id: expn_id, - ..invoc.expansion_data.clone() - }, - }, - None, - )); - } - let fragment = - invoc.fragment_kind.expect_from_annotatables(::std::iter::once(item)); + let fragment = fragment_kind.expect_from_annotatables(::std::iter::once(item)); self.collect_invocations(fragment, &derive_placeholders) } }; @@ -526,6 +565,7 @@ impl<'a, 'b> MacroExpander<'a, 'b> { } self.cx.current_expansion = orig_expansion_data; + self.cx.force_mode = orig_force_mode; // Finally incorporate all the expanded macros into the input AST fragment. 
let mut placeholder_expander = PlaceholderExpander::new(self.cx, self.monotonic); @@ -601,48 +641,6 @@ impl<'a, 'b> MacroExpander<'a, 'b> { (fragment, invocations) } - fn fully_configure(&mut self, item: Annotatable) -> Annotatable { - let mut cfg = StripUnconfigured { sess: &self.cx.sess, features: self.cx.ecfg.features }; - // Since the item itself has already been configured by the InvocationCollector, - // we know that fold result vector will contain exactly one element - match item { - Annotatable::Item(item) => Annotatable::Item(cfg.flat_map_item(item).pop().unwrap()), - Annotatable::TraitItem(item) => { - Annotatable::TraitItem(cfg.flat_map_trait_item(item).pop().unwrap()) - } - Annotatable::ImplItem(item) => { - Annotatable::ImplItem(cfg.flat_map_impl_item(item).pop().unwrap()) - } - Annotatable::ForeignItem(item) => { - Annotatable::ForeignItem(cfg.flat_map_foreign_item(item).pop().unwrap()) - } - Annotatable::Stmt(stmt) => { - Annotatable::Stmt(stmt.map(|stmt| cfg.flat_map_stmt(stmt).pop().unwrap())) - } - Annotatable::Expr(mut expr) => Annotatable::Expr({ - cfg.visit_expr(&mut expr); - expr - }), - Annotatable::Arm(arm) => Annotatable::Arm(cfg.flat_map_arm(arm).pop().unwrap()), - Annotatable::Field(field) => { - Annotatable::Field(cfg.flat_map_field(field).pop().unwrap()) - } - Annotatable::FieldPat(fp) => { - Annotatable::FieldPat(cfg.flat_map_field_pattern(fp).pop().unwrap()) - } - Annotatable::GenericParam(param) => { - Annotatable::GenericParam(cfg.flat_map_generic_param(param).pop().unwrap()) - } - Annotatable::Param(param) => { - Annotatable::Param(cfg.flat_map_param(param).pop().unwrap()) - } - Annotatable::StructField(sf) => { - Annotatable::StructField(cfg.flat_map_struct_field(sf).pop().unwrap()) - } - Annotatable::Variant(v) => Annotatable::Variant(cfg.flat_map_variant(v).pop().unwrap()), - } - } - fn error_recursion_limit_reached(&mut self) { let expn_data = self.cx.current_expansion.id.expn_data(); let suggested_limit = self.cx.ecfg.recursion_limit * 2; @@ -735,20 +733,17 @@ impl<'a, 'b> MacroExpander<'a, 'b> { Ok(meta) => { let items = match expander.expand(self.cx, span, &meta, item) { ExpandResult::Ready(items) => items, - ExpandResult::Retry(item, explanation) => { + ExpandResult::Retry(item) => { // Reassemble the original invocation for retrying. - return ExpandResult::Retry( - Invocation { - kind: InvocationKind::Attr { - attr, - item, - derives, - after_derive, - }, - ..invoc + return ExpandResult::Retry(Invocation { + kind: InvocationKind::Attr { + attr, + item, + derives, + after_derive, }, - explanation, - ); + ..invoc + }); } }; fragment_kind.expect_from_annotatables(items) @@ -772,24 +767,18 @@ impl<'a, 'b> MacroExpander<'a, 'b> { InvocationKind::Derive { path, item } => match ext { SyntaxExtensionKind::Derive(expander) | SyntaxExtensionKind::LegacyDerive(expander) => { - if !item.derive_allowed() { - return ExpandResult::Ready(fragment_kind.dummy(span)); - } if let SyntaxExtensionKind::Derive(..) = ext { self.gate_proc_macro_input(&item); } let meta = ast::MetaItem { kind: ast::MetaItemKind::Word, span, path }; let items = match expander.expand(self.cx, span, &meta, item) { ExpandResult::Ready(items) => items, - ExpandResult::Retry(item, explanation) => { + ExpandResult::Retry(item) => { // Reassemble the original invocation for retrying. 
- return ExpandResult::Retry( - Invocation { - kind: InvocationKind::Derive { path: meta.path, item }, - ..invoc - }, - explanation, - ); + return ExpandResult::Retry(Invocation { + kind: InvocationKind::Derive { path: meta.path, item }, + ..invoc + }); } }; fragment_kind.expect_from_annotatables(items) @@ -850,8 +839,6 @@ impl<'a, 'b> MacroExpander<'a, 'b> { visit::walk_item(self, item); } - - fn visit_mac(&mut self, _: &'ast ast::MacCall) {} } if !self.cx.ecfg.proc_macro_hygiene() { @@ -1036,11 +1023,9 @@ impl<'a, 'b> InvocationCollector<'a, 'b> { fn collect_attr( &mut self, - attr: Option, - derives: Vec, + (attr, derives, after_derive): (Option, Vec, bool), item: Annotatable, kind: AstFragmentKind, - after_derive: bool, ) -> AstFragment { self.collect( kind, @@ -1056,7 +1041,7 @@ impl<'a, 'b> InvocationCollector<'a, 'b> { attrs: &mut Vec, after_derive: &mut bool, ) -> Option { - let attr = attrs + attrs .iter() .position(|a| { if a.has_name(sym::derive) { @@ -1064,29 +1049,14 @@ impl<'a, 'b> InvocationCollector<'a, 'b> { } !self.cx.sess.is_attr_known(a) && !is_builtin_attr(a) }) - .map(|i| attrs.remove(i)); - if let Some(attr) = &attr { - if !self.cx.ecfg.custom_inner_attributes() - && attr.style == ast::AttrStyle::Inner - && !attr.has_name(sym::test) - { - feature_err( - &self.cx.sess.parse_sess, - sym::custom_inner_attributes, - attr.span, - "non-builtin inner attributes are unstable", - ) - .emit(); - } - } - attr + .map(|i| attrs.remove(i)) } /// If `item` is an attr invocation, remove and return the macro attribute and derive traits. - fn classify_item( + fn take_first_attr( &mut self, item: &mut impl HasAttrs, - ) -> (Option, Vec, /* after_derive */ bool) { + ) -> Option<(Option, Vec, /* after_derive */ bool)> { let (mut attr, mut traits, mut after_derive) = (None, Vec::new(), false); item.visit_attrs(|mut attrs| { @@ -1094,23 +1064,23 @@ impl<'a, 'b> InvocationCollector<'a, 'b> { traits = collect_derives(&mut self.cx, &mut attrs); }); - (attr, traits, after_derive) + if attr.is_some() || !traits.is_empty() { Some((attr, traits, after_derive)) } else { None } } - /// Alternative to `classify_item()` that ignores `#[derive]` so invocations fallthrough + /// Alternative to `take_first_attr()` that ignores `#[derive]` so invocations fallthrough /// to the unused-attributes lint (making it an error on statements and expressions /// is a breaking change) - fn classify_nonitem( + fn take_first_attr_no_derive( &mut self, nonitem: &mut impl HasAttrs, - ) -> (Option, /* after_derive */ bool) { + ) -> Option<(Option, Vec, /* after_derive */ bool)> { let (mut attr, mut after_derive) = (None, false); nonitem.visit_attrs(|mut attrs| { attr = self.find_attr_invoc(&mut attrs, &mut after_derive); }); - (attr, after_derive) + attr.map(|attr| (Some(attr), Vec::new(), after_derive)) } fn configure(&mut self, node: T) -> Option { @@ -1154,23 +1124,14 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { visit_clobber(expr.deref_mut(), |mut expr| { self.cfg.configure_expr_kind(&mut expr.kind); - // ignore derives so they remain unused - let (attr, after_derive) = self.classify_nonitem(&mut expr); - - if let Some(ref attr_value) = attr { + if let Some(attr) = self.take_first_attr_no_derive(&mut expr) { // Collect the invoc regardless of whether or not attributes are permitted here // expansion will eat the attribute so it won't error later. 
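// Illustrative sketch (an assumption): `take_first_attr` now returns an `Option`
// instead of a tuple of maybe-empty fields, so every caller below collapses to a
// single `if let` rather than checking `attr.is_some() || !traits.is_empty()`.
struct Attr {
    is_macro_attr: bool,
}

fn take_first_attr(attrs: &mut Vec<Attr>) -> Option<Attr> {
    let pos = attrs.iter().position(|a| a.is_macro_attr)?;
    Some(attrs.remove(pos))
}

fn example(attrs: &mut Vec<Attr>) {
    if let Some(_attr) = take_first_attr(attrs) {
        // collect the attribute invocation here
    }
}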
- self.cfg.maybe_emit_expr_attr_err(attr_value); + attr.0.as_ref().map(|attr| self.cfg.maybe_emit_expr_attr_err(attr)); // AstFragmentKind::Expr requires the macro to emit an expression. return self - .collect_attr( - attr, - vec![], - Annotatable::Expr(P(expr)), - AstFragmentKind::Expr, - after_derive, - ) + .collect_attr(attr, Annotatable::Expr(P(expr)), AstFragmentKind::Expr) .make_expr() .into_inner(); } @@ -1188,16 +1149,9 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { fn flat_map_arm(&mut self, arm: ast::Arm) -> SmallVec<[ast::Arm; 1]> { let mut arm = configure!(self, arm); - let (attr, traits, after_derive) = self.classify_item(&mut arm); - if attr.is_some() || !traits.is_empty() { + if let Some(attr) = self.take_first_attr(&mut arm) { return self - .collect_attr( - attr, - traits, - Annotatable::Arm(arm), - AstFragmentKind::Arms, - after_derive, - ) + .collect_attr(attr, Annotatable::Arm(arm), AstFragmentKind::Arms) .make_arms(); } @@ -1207,16 +1161,9 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { fn flat_map_field(&mut self, field: ast::Field) -> SmallVec<[ast::Field; 1]> { let mut field = configure!(self, field); - let (attr, traits, after_derive) = self.classify_item(&mut field); - if attr.is_some() || !traits.is_empty() { + if let Some(attr) = self.take_first_attr(&mut field) { return self - .collect_attr( - attr, - traits, - Annotatable::Field(field), - AstFragmentKind::Fields, - after_derive, - ) + .collect_attr(attr, Annotatable::Field(field), AstFragmentKind::Fields) .make_fields(); } @@ -1226,16 +1173,9 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { fn flat_map_field_pattern(&mut self, fp: ast::FieldPat) -> SmallVec<[ast::FieldPat; 1]> { let mut fp = configure!(self, fp); - let (attr, traits, after_derive) = self.classify_item(&mut fp); - if attr.is_some() || !traits.is_empty() { + if let Some(attr) = self.take_first_attr(&mut fp) { return self - .collect_attr( - attr, - traits, - Annotatable::FieldPat(fp), - AstFragmentKind::FieldPats, - after_derive, - ) + .collect_attr(attr, Annotatable::FieldPat(fp), AstFragmentKind::FieldPats) .make_field_patterns(); } @@ -1245,16 +1185,9 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { fn flat_map_param(&mut self, p: ast::Param) -> SmallVec<[ast::Param; 1]> { let mut p = configure!(self, p); - let (attr, traits, after_derive) = self.classify_item(&mut p); - if attr.is_some() || !traits.is_empty() { + if let Some(attr) = self.take_first_attr(&mut p) { return self - .collect_attr( - attr, - traits, - Annotatable::Param(p), - AstFragmentKind::Params, - after_derive, - ) + .collect_attr(attr, Annotatable::Param(p), AstFragmentKind::Params) .make_params(); } @@ -1264,16 +1197,9 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { fn flat_map_struct_field(&mut self, sf: ast::StructField) -> SmallVec<[ast::StructField; 1]> { let mut sf = configure!(self, sf); - let (attr, traits, after_derive) = self.classify_item(&mut sf); - if attr.is_some() || !traits.is_empty() { + if let Some(attr) = self.take_first_attr(&mut sf) { return self - .collect_attr( - attr, - traits, - Annotatable::StructField(sf), - AstFragmentKind::StructFields, - after_derive, - ) + .collect_attr(attr, Annotatable::StructField(sf), AstFragmentKind::StructFields) .make_struct_fields(); } @@ -1283,16 +1209,9 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { fn flat_map_variant(&mut self, variant: ast::Variant) -> SmallVec<[ast::Variant; 1]> { let mut variant = configure!(self, variant); - let (attr, 
traits, after_derive) = self.classify_item(&mut variant); - if attr.is_some() || !traits.is_empty() { + if let Some(attr) = self.take_first_attr(&mut variant) { return self - .collect_attr( - attr, - traits, - Annotatable::Variant(variant), - AstFragmentKind::Variants, - after_derive, - ) + .collect_attr(attr, Annotatable::Variant(variant), AstFragmentKind::Variants) .make_variants(); } @@ -1304,20 +1223,11 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { expr.filter_map(|mut expr| { self.cfg.configure_expr_kind(&mut expr.kind); - // Ignore derives so they remain unused. - let (attr, after_derive) = self.classify_nonitem(&mut expr); - - if let Some(ref attr_value) = attr { - self.cfg.maybe_emit_expr_attr_err(attr_value); + if let Some(attr) = self.take_first_attr_no_derive(&mut expr) { + attr.0.as_ref().map(|attr| self.cfg.maybe_emit_expr_attr_err(attr)); return self - .collect_attr( - attr, - vec![], - Annotatable::Expr(P(expr)), - AstFragmentKind::OptExpr, - after_derive, - ) + .collect_attr(attr, Annotatable::Expr(P(expr)), AstFragmentKind::OptExpr) .make_opt_expr() .map(|expr| expr.into_inner()); } @@ -1356,25 +1266,13 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { // we'll expand attributes on expressions separately if !stmt.is_expr() { - let (attr, derives, after_derive) = if stmt.is_item() { - // FIXME: Handle custom attributes on statements (#15701) - (None, vec![], false) - } else { - // ignore derives on non-item statements so it falls through - // to the unused-attributes lint - let (attr, after_derive) = self.classify_nonitem(&mut stmt); - (attr, vec![], after_derive) - }; + // FIXME: Handle custom attributes on statements (#15701). + let attr = + if stmt.is_item() { None } else { self.take_first_attr_no_derive(&mut stmt) }; - if attr.is_some() || !derives.is_empty() { + if let Some(attr) = attr { return self - .collect_attr( - attr, - derives, - Annotatable::Stmt(P(stmt)), - AstFragmentKind::Stmts, - after_derive, - ) + .collect_attr(attr, Annotatable::Stmt(P(stmt)), AstFragmentKind::Stmts) .make_stmts(); } } @@ -1414,16 +1312,9 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { fn flat_map_item(&mut self, item: P) -> SmallVec<[P; 1]> { let mut item = configure!(self, item); - let (attr, traits, after_derive) = self.classify_item(&mut item); - if attr.is_some() || !traits.is_empty() { + if let Some(attr) = self.take_first_attr(&mut item) { return self - .collect_attr( - attr, - traits, - Annotatable::Item(item), - AstFragmentKind::Items, - after_derive, - ) + .collect_attr(attr, Annotatable::Item(item), AstFragmentKind::Items) .make_items(); } @@ -1517,16 +1408,9 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { fn flat_map_trait_item(&mut self, item: P) -> SmallVec<[P; 1]> { let mut item = configure!(self, item); - let (attr, traits, after_derive) = self.classify_item(&mut item); - if attr.is_some() || !traits.is_empty() { + if let Some(attr) = self.take_first_attr(&mut item) { return self - .collect_attr( - attr, - traits, - Annotatable::TraitItem(item), - AstFragmentKind::TraitItems, - after_derive, - ) + .collect_attr(attr, Annotatable::TraitItem(item), AstFragmentKind::TraitItems) .make_trait_items(); } @@ -1547,16 +1431,9 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { fn flat_map_impl_item(&mut self, item: P) -> SmallVec<[P; 1]> { let mut item = configure!(self, item); - let (attr, traits, after_derive) = self.classify_item(&mut item); - if attr.is_some() || !traits.is_empty() { + if let Some(attr) = 
self.take_first_attr(&mut item) { return self - .collect_attr( - attr, - traits, - Annotatable::ImplItem(item), - AstFragmentKind::ImplItems, - after_derive, - ) + .collect_attr(attr, Annotatable::ImplItem(item), AstFragmentKind::ImplItems) .make_impl_items(); } @@ -1597,16 +1474,12 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { &mut self, mut foreign_item: P, ) -> SmallVec<[P; 1]> { - let (attr, traits, after_derive) = self.classify_item(&mut foreign_item); - - if attr.is_some() || !traits.is_empty() { + if let Some(attr) = self.take_first_attr(&mut foreign_item) { return self .collect_attr( attr, - traits, Annotatable::ForeignItem(foreign_item), AstFragmentKind::ForeignItems, - after_derive, ) .make_foreign_items(); } @@ -1641,15 +1514,12 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { ) -> SmallVec<[ast::GenericParam; 1]> { let mut param = configure!(self, param); - let (attr, traits, after_derive) = self.classify_item(&mut param); - if attr.is_some() || !traits.is_empty() { + if let Some(attr) = self.take_first_attr(&mut param) { return self .collect_attr( attr, - traits, Annotatable::GenericParam(param), AstFragmentKind::GenericParams, - after_derive, ) .make_generic_params(); } @@ -1778,15 +1648,13 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { let meta = attr::mk_list_item(Ident::with_dummy_span(sym::doc), items); *at = ast::Attribute { - kind: ast::AttrKind::Normal(AttrItem { - path: meta.path, - args: meta.kind.mac_args(meta.span), - tokens: None, - }), + kind: ast::AttrKind::Normal( + AttrItem { path: meta.path, args: meta.kind.mac_args(meta.span), tokens: None }, + None, + ), span: at.span, id: at.id, style: at.style, - tokens: None, }; } else { noop_visit_attribute(at, self) @@ -1834,7 +1702,4 @@ impl<'feat> ExpansionConfig<'feat> { fn proc_macro_hygiene(&self) -> bool { self.features.map_or(false, |features| features.proc_macro_hygiene) } - fn custom_inner_attributes(&self) -> bool { - self.features.map_or(false, |features| features.custom_inner_attributes) - } } diff --git a/compiler/rustc_expand/src/mbe/macro_rules.rs b/compiler/rustc_expand/src/mbe/macro_rules.rs index a074af0189a..66463eeb907 100644 --- a/compiler/rustc_expand/src/mbe/macro_rules.rs +++ b/compiler/rustc_expand/src/mbe/macro_rules.rs @@ -1173,7 +1173,8 @@ fn quoted_tt_to_string(tt: &mbe::TokenTree) -> String { mbe::TokenTree::MetaVar(_, name) => format!("${}", name), mbe::TokenTree::MetaVarDecl(_, name, kind) => format!("${}:{}", name, kind), _ => panic!( - "unexpected mbe::TokenTree::{{Sequence or Delimited}} \ + "{}", + "unexpected mbe::TokenTree::{Sequence or Delimited} \ in follow set checker" ), } diff --git a/compiler/rustc_expand/src/mbe/transcribe.rs b/compiler/rustc_expand/src/mbe/transcribe.rs index 629e0e702b6..dde65d998d8 100644 --- a/compiler/rustc_expand/src/mbe/transcribe.rs +++ b/compiler/rustc_expand/src/mbe/transcribe.rs @@ -5,7 +5,6 @@ use crate::mbe::macro_parser::{MatchedNonterminal, MatchedSeq, NamedMatch}; use rustc_ast::mut_visit::{self, MutVisitor}; use rustc_ast::token::{self, NtTT, Token}; use rustc_ast::tokenstream::{DelimSpan, TokenStream, TokenTree, TreeAndSpacing}; -use rustc_ast::MacCall; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sync::Lrc; use rustc_errors::{pluralize, PResult}; @@ -27,10 +26,6 @@ impl MutVisitor for Marker { fn visit_span(&mut self, span: &mut Span) { *span = span.apply_mark(self.0, self.1) } - - fn visit_mac(&mut self, mac: &mut MacCall) { - mut_visit::noop_visit_mac(mac, self) - } } /// 
An iterator over the token trees in a delimited token tree (`{ ... }`) or a sequence (`$(...)`). diff --git a/compiler/rustc_expand/src/mut_visit/tests.rs b/compiler/rustc_expand/src/mut_visit/tests.rs index 9e65fc2eca7..be0300bad98 100644 --- a/compiler/rustc_expand/src/mut_visit/tests.rs +++ b/compiler/rustc_expand/src/mut_visit/tests.rs @@ -1,7 +1,7 @@ use crate::tests::{matches_codepattern, string_to_crate}; use rustc_ast as ast; -use rustc_ast::mut_visit::{self, MutVisitor}; +use rustc_ast::mut_visit::MutVisitor; use rustc_ast_pretty::pprust; use rustc_span::symbol::Ident; use rustc_span::with_default_session_globals; @@ -21,9 +21,6 @@ impl MutVisitor for ToZzIdentMutVisitor { fn visit_ident(&mut self, ident: &mut Ident) { *ident = Ident::from_str("zz"); } - fn visit_mac(&mut self, mac: &mut ast::MacCall) { - mut_visit::noop_visit_mac(mac, self) - } } // Maybe add to `expand.rs`. diff --git a/compiler/rustc_expand/src/placeholders.rs b/compiler/rustc_expand/src/placeholders.rs index e413564fb3f..f0e5826f403 100644 --- a/compiler/rustc_expand/src/placeholders.rs +++ b/compiler/rustc_expand/src/placeholders.rs @@ -385,8 +385,4 @@ impl<'a, 'b> MutVisitor for PlaceholderExpander<'a, 'b> { |item| !matches!(item.kind, ast::ItemKind::MacCall(_) if !self.cx.ecfg.keep_macs), ); } - - fn visit_mac(&mut self, _mac: &mut ast::MacCall) { - // Do nothing. - } } diff --git a/compiler/rustc_expand/src/proc_macro.rs b/compiler/rustc_expand/src/proc_macro.rs index 4c95f19b96d..dea167740ed 100644 --- a/compiler/rustc_expand/src/proc_macro.rs +++ b/compiler/rustc_expand/src/proc_macro.rs @@ -75,38 +75,9 @@ impl MultiItemModifier for ProcMacroDerive { item: Annotatable, ) -> ExpandResult, Annotatable> { let item = match item { - Annotatable::Arm(..) - | Annotatable::Field(..) - | Annotatable::FieldPat(..) - | Annotatable::GenericParam(..) - | Annotatable::Param(..) - | Annotatable::StructField(..) - | Annotatable::Variant(..) => panic!("unexpected annotatable"), - Annotatable::Item(item) => item, - Annotatable::ImplItem(_) - | Annotatable::TraitItem(_) - | Annotatable::ForeignItem(_) - | Annotatable::Stmt(_) - | Annotatable::Expr(_) => { - ecx.span_err( - span, - "proc-macro derives may only be applied to a struct, enum, or union", - ); - return ExpandResult::Ready(Vec::new()); - } + Annotatable::Item(item) => token::NtItem(item), + _ => unreachable!(), }; - match item.kind { - ItemKind::Struct(..) | ItemKind::Enum(..) | ItemKind::Union(..) => {} - _ => { - ecx.span_err( - span, - "proc-macro derives may only be applied to a struct, enum, or union", - ); - return ExpandResult::Ready(Vec::new()); - } - } - - let item = token::NtItem(item); let input = if item.pretty_printing_compatibility_hack() { TokenTree::token(token::Interpolated(Lrc::new(item)), DUMMY_SP).into() } else { diff --git a/compiler/rustc_feature/src/active.rs b/compiler/rustc_feature/src/active.rs index 84114fc7735..a0355079247 100644 --- a/compiler/rustc_feature/src/active.rs +++ b/compiler/rustc_feature/src/active.rs @@ -613,6 +613,12 @@ declare_features! ( /// Allows the use of destructuring assignments. (active, destructuring_assignment, "1.49.0", Some(71126), None), + /// Enables `#[cfg(panic = "...")]` config key. + (active, cfg_panic, "1.49.0", Some(77443), None), + + /// Allows capturing disjoint fields in a closure/generator (RFC 2229). 
+ (active, capture_disjoint_fields, "1.49.0", Some(53488), None), + // ------------------------------------------------------------------------- // feature-group-end: actual feature gates // ------------------------------------------------------------------------- @@ -636,6 +642,7 @@ pub const INCOMPLETE_FEATURES: &[Symbol] = &[ sym::inline_const, sym::repr128, sym::unsized_locals, + sym::capture_disjoint_fields, ]; /// Some features are not allowed to be used together at the same time, if diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs index 84e76f9382a..f444ebad5cc 100644 --- a/compiler/rustc_feature/src/builtin_attrs.rs +++ b/compiler/rustc_feature/src/builtin_attrs.rs @@ -33,6 +33,7 @@ const GATED_CFGS: &[GatedCfg] = &[ ), (sym::sanitize, sym::cfg_sanitize, cfg_fn!(cfg_sanitize)), (sym::version, sym::cfg_version, cfg_fn!(cfg_version)), + (sym::panic, sym::cfg_panic, cfg_fn!(cfg_panic)), ]; /// Find a gated cfg determined by the `pred`icate which is given the cfg's name. @@ -552,6 +553,7 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ // ========================================================================== rustc_attr!(TEST, rustc_outlives, Normal, template!(Word)), + rustc_attr!(TEST, rustc_capture_analysis, Normal, template!(Word)), rustc_attr!(TEST, rustc_variance, Normal, template!(Word)), rustc_attr!(TEST, rustc_layout, Normal, template!(List: "field1, field2, ...")), rustc_attr!(TEST, rustc_regions, Normal, template!(Word)), diff --git a/compiler/rustc_feature/src/lib.rs b/compiler/rustc_feature/src/lib.rs index 68ac2841fed..f965f7fdefe 100644 --- a/compiler/rustc_feature/src/lib.rs +++ b/compiler/rustc_feature/src/lib.rs @@ -59,7 +59,7 @@ pub enum Stability { Deprecated(&'static str, Option<&'static str>), } -#[derive(Clone, Copy, Hash)] +#[derive(Clone, Copy, Debug, Hash)] pub enum UnstableFeatures { /// Hard errors for unstable features are active, as on beta/stable channels. Disallow, @@ -73,11 +73,20 @@ pub enum UnstableFeatures { } impl UnstableFeatures { - pub fn from_environment() -> UnstableFeatures { + /// This takes into account `RUSTC_BOOTSTRAP`. + /// + /// If `krate` is [`Some`], then setting `RUSTC_BOOTSTRAP=krate` will enable the nightly features. + /// Otherwise, only `RUSTC_BOOTSTRAP=1` will work. + pub fn from_environment(krate: Option<&str>) -> Self { // `true` if this is a feature-staged build, i.e., on the beta or stable channel. let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some(); + // Returns whether `krate` should be counted as unstable + let is_unstable_crate = |var: &str| { + krate.map_or(false, |name| var.split(',').any(|new_krate| new_krate == name)) + }; // `true` if we should enable unstable features for bootstrapping. 
- let bootstrap = std::env::var("RUSTC_BOOTSTRAP").is_ok(); + let bootstrap = std::env::var("RUSTC_BOOTSTRAP") + .map_or(false, |var| var == "1" || is_unstable_crate(&var)); match (disable_unstable_features, bootstrap) { (_, true) => UnstableFeatures::Cheat, (true, _) => UnstableFeatures::Disallow, @@ -140,3 +149,30 @@ pub use builtin_attrs::{ AttributeType, BuiltinAttribute, GatedCfg, BUILTIN_ATTRIBUTES, BUILTIN_ATTRIBUTE_MAP, }; pub use removed::{REMOVED_FEATURES, STABLE_REMOVED_FEATURES}; + +#[cfg(test)] +mod test { + use super::UnstableFeatures; + + #[test] + fn rustc_bootstrap_parsing() { + let is_bootstrap = |env, krate| { + std::env::set_var("RUSTC_BOOTSTRAP", env); + matches!(UnstableFeatures::from_environment(krate), UnstableFeatures::Cheat) + }; + assert!(is_bootstrap("1", None)); + assert!(is_bootstrap("1", Some("x"))); + // RUSTC_BOOTSTRAP allows specifying a specific crate + assert!(is_bootstrap("x", Some("x"))); + // RUSTC_BOOTSTRAP allows multiple comma-delimited crates + assert!(is_bootstrap("x,y,z", Some("x"))); + assert!(is_bootstrap("x,y,z", Some("y"))); + // Crate that aren't specified do not get unstable features + assert!(!is_bootstrap("x", Some("a"))); + assert!(!is_bootstrap("x,y,z", Some("a"))); + assert!(!is_bootstrap("x,y,z", None)); + + // this is technically a breaking change, but there are no stability guarantees for RUSTC_BOOTSTRAP + assert!(!is_bootstrap("0", None)); + } +} diff --git a/compiler/rustc_hir/src/def.rs b/compiler/rustc_hir/src/def.rs index 193247af584..4ede9d67b74 100644 --- a/compiler/rustc_hir/src/def.rs +++ b/compiler/rustc_hir/src/def.rs @@ -39,6 +39,9 @@ pub enum NonMacroAttrKind { Tool, /// Single-segment custom attribute registered by a derive macro (`#[serde(default)]`). DeriveHelper, + /// Single-segment custom attribute registered by a derive macro + /// but used before that derive macro was expanded (deprecated). + DeriveHelperCompat, /// Single-segment custom attribute registered with `#[register_attr]`. Registered, } @@ -370,7 +373,9 @@ impl NonMacroAttrKind { match self { NonMacroAttrKind::Builtin => "built-in attribute", NonMacroAttrKind::Tool => "tool attribute", - NonMacroAttrKind::DeriveHelper => "derive helper attribute", + NonMacroAttrKind::DeriveHelper | NonMacroAttrKind::DeriveHelperCompat => { + "derive helper attribute" + } NonMacroAttrKind::Registered => "explicitly registered attribute", } } @@ -385,7 +390,9 @@ impl NonMacroAttrKind { /// Users of some attributes cannot mark them as used, so they are considered always used. 
pub fn is_used(self) -> bool { match self { - NonMacroAttrKind::Tool | NonMacroAttrKind::DeriveHelper => true, + NonMacroAttrKind::Tool + | NonMacroAttrKind::DeriveHelper + | NonMacroAttrKind::DeriveHelperCompat => true, NonMacroAttrKind::Builtin | NonMacroAttrKind::Registered => false, } } @@ -484,4 +491,9 @@ impl Res { pub fn matches_ns(&self, ns: Namespace) -> bool { self.ns().map_or(true, |actual_ns| actual_ns == ns) } + + /// Returns whether such a resolved path can occur in a tuple struct/variant pattern + pub fn expected_in_tuple_struct_pat(&self) -> bool { + matches!(self, Res::Def(DefKind::Ctor(_, CtorKind::Fn), _) | Res::SelfCtor(..)) + } } diff --git a/compiler/rustc_hir/src/definitions.rs b/compiler/rustc_hir/src/definitions.rs index 3f109376a3e..d5ade86593e 100644 --- a/compiler/rustc_hir/src/definitions.rs +++ b/compiler/rustc_hir/src/definitions.rs @@ -409,7 +409,7 @@ impl Definitions { } pub fn expansion_that_defined(&self, id: LocalDefId) -> ExpnId { - self.expansions_that_defined.get(&id).copied().unwrap_or(ExpnId::root()) + self.expansions_that_defined.get(&id).copied().unwrap_or_else(ExpnId::root) } pub fn parent_module_of_macro_def(&self, expn_id: ExpnId) -> DefId { diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs index 3c28b48795f..4497c8c0eaa 100644 --- a/compiler/rustc_hir/src/hir.rs +++ b/compiler/rustc_hir/src/hir.rs @@ -282,6 +282,14 @@ impl GenericArg<'_> { GenericArg::Const(_) => "constant", } } + + pub fn short_descr(&self) -> &'static str { + match self { + GenericArg::Lifetime(_) => "lifetime", + GenericArg::Type(_) => "type", + GenericArg::Const(_) => "const", + } + } } #[derive(Debug, HashStable_Generic)] diff --git a/compiler/rustc_incremental/src/persist/file_format.rs b/compiler/rustc_incremental/src/persist/file_format.rs index 048a81b81ba..e185ee24d17 100644 --- a/compiler/rustc_incremental/src/persist/file_format.rs +++ b/compiler/rustc_incremental/src/persist/file_format.rs @@ -15,7 +15,6 @@ use std::io::{self, Read}; use std::path::Path; use rustc_serialize::opaque::Encoder; -use rustc_session::config::nightly_options; /// The first few bytes of files generated by incremental compilation. const FILE_MAGIC: &[u8] = b"RSIC"; @@ -28,12 +27,12 @@ const HEADER_FORMAT_VERSION: u16 = 0; /// the Git commit hash. 
const RUSTC_VERSION: Option<&str> = option_env!("CFG_VERSION"); -pub fn write_file_header(stream: &mut Encoder) { +pub fn write_file_header(stream: &mut Encoder, nightly_build: bool) { stream.emit_raw_bytes(FILE_MAGIC); stream .emit_raw_bytes(&[(HEADER_FORMAT_VERSION >> 0) as u8, (HEADER_FORMAT_VERSION >> 8) as u8]); - let rustc_version = rustc_version(); + let rustc_version = rustc_version(nightly_build); assert_eq!(rustc_version.len(), (rustc_version.len() as u8) as usize); stream.emit_raw_bytes(&[rustc_version.len() as u8]); stream.emit_raw_bytes(rustc_version.as_bytes()); @@ -51,6 +50,7 @@ pub fn write_file_header(stream: &mut Encoder) { pub fn read_file( report_incremental_info: bool, path: &Path, + nightly_build: bool, ) -> io::Result, usize)>> { if !path.exists() { return Ok(None); @@ -93,7 +93,7 @@ pub fn read_file( let mut buffer = vec![0; rustc_version_str_len]; file.read_exact(&mut buffer)?; - if buffer != rustc_version().as_bytes() { + if buffer != rustc_version(nightly_build).as_bytes() { report_format_mismatch(report_incremental_info, path, "Different compiler version"); return Ok(None); } @@ -115,8 +115,8 @@ fn report_format_mismatch(report_incremental_info: bool, file: &Path, message: & } } -fn rustc_version() -> String { - if nightly_options::is_nightly_build() { +fn rustc_version(nightly_build: bool) -> String { + if nightly_build { if let Some(val) = env::var_os("RUSTC_FORCE_INCR_COMP_ARTIFACT_HEADER") { return val.to_string_lossy().into_owned(); } diff --git a/compiler/rustc_incremental/src/persist/load.rs b/compiler/rustc_incremental/src/persist/load.rs index 966faa9639d..578c045a2b4 100644 --- a/compiler/rustc_incremental/src/persist/load.rs +++ b/compiler/rustc_incremental/src/persist/load.rs @@ -53,8 +53,12 @@ impl LoadResult<(PreviousDepGraph, WorkProductMap)> { } } -fn load_data(report_incremental_info: bool, path: &Path) -> LoadResult<(Vec, usize)> { - match file_format::read_file(report_incremental_info, path) { +fn load_data( + report_incremental_info: bool, + path: &Path, + nightly_build: bool, +) -> LoadResult<(Vec, usize)> { + match file_format::read_file(report_incremental_info, path, nightly_build) { Ok(Some(data_and_pos)) => LoadResult::Ok { data: data_and_pos }, Ok(None) => { // The file either didn't exist or was produced by an incompatible @@ -111,13 +115,14 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture { let expected_hash = sess.opts.dep_tracking_hash(); let mut prev_work_products = FxHashMap::default(); + let nightly_build = sess.is_nightly_build(); // If we are only building with -Zquery-dep-graph but without an actual // incr. comp. session directory, we skip this. Otherwise we'd fail // when trying to load work products. 
if sess.incr_comp_session_dir_opt().is_some() { let work_products_path = work_products_path(sess); - let load_result = load_data(report_incremental_info, &work_products_path); + let load_result = load_data(report_incremental_info, &work_products_path, nightly_build); if let LoadResult::Ok { data: (work_products_data, start_pos) } = load_result { // Decode the list of work_products @@ -163,7 +168,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture { MaybeAsync::Async(std::thread::spawn(move || { let _prof_timer = prof.generic_activity("incr_comp_load_dep_graph"); - match load_data(report_incremental_info, &path) { + match load_data(report_incremental_info, &path, nightly_build) { LoadResult::DataOutOfDate => LoadResult::DataOutOfDate, LoadResult::Error { message } => LoadResult::Error { message }, LoadResult::Ok { data: (bytes, start_pos) } => { @@ -201,7 +206,11 @@ pub fn load_query_result_cache(sess: &Session) -> OnDiskCache<'_> { let _prof_timer = sess.prof.generic_activity("incr_comp_load_query_result_cache"); - match load_data(sess.opts.debugging_opts.incremental_info, &query_cache_path(sess)) { + match load_data( + sess.opts.debugging_opts.incremental_info, + &query_cache_path(sess), + sess.is_nightly_build(), + ) { LoadResult::Ok { data: (bytes, start_pos) } => OnDiskCache::new(sess, bytes, start_pos), _ => OnDiskCache::new_empty(sess.source_map()), } diff --git a/compiler/rustc_incremental/src/persist/save.rs b/compiler/rustc_incremental/src/persist/save.rs index 45cef479a4f..102a77e8e79 100644 --- a/compiler/rustc_incremental/src/persist/save.rs +++ b/compiler/rustc_incremental/src/persist/save.rs @@ -119,7 +119,7 @@ where // generate the data in a memory buffer let mut encoder = Encoder::new(Vec::new()); - file_format::write_file_header(&mut encoder); + file_format::write_file_header(&mut encoder, sess.is_nightly_build()); encode(&mut encoder); // write the data out diff --git a/compiler/rustc_index/src/lib.rs b/compiler/rustc_index/src/lib.rs index 7ee881b0639..eaef4c7b54a 100644 --- a/compiler/rustc_index/src/lib.rs +++ b/compiler/rustc_index/src/lib.rs @@ -1,5 +1,4 @@ #![feature(allow_internal_unstable)] -#![feature(bool_to_option)] #![feature(const_fn)] #![feature(const_panic)] #![feature(extend_one)] diff --git a/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs b/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs index 871fc4fafe2..6781fbc95c0 100644 --- a/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs +++ b/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs @@ -38,7 +38,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { /// [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html#canonicalizing-the-query pub fn canonicalize_query( &self, - value: &V, + value: V, query_state: &mut OriginalQueryValues<'tcx>, ) -> Canonicalized<'tcx, V> where @@ -80,7 +80,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { /// out the [chapter in the rustc dev guide][c]. 
/// /// [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html#canonicalizing-the-query-result - pub fn canonicalize_response(&self, value: &V) -> Canonicalized<'tcx, V> + pub fn canonicalize_response(&self, value: V) -> Canonicalized<'tcx, V> where V: TypeFoldable<'tcx>, { @@ -94,7 +94,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { ) } - pub fn canonicalize_user_type_annotation(&self, value: &V) -> Canonicalized<'tcx, V> + pub fn canonicalize_user_type_annotation(&self, value: V) -> Canonicalized<'tcx, V> where V: TypeFoldable<'tcx>, { @@ -123,7 +123,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { // and just use `canonicalize_query`. pub fn canonicalize_hr_query_hack( &self, - value: &V, + value: V, query_state: &mut OriginalQueryValues<'tcx>, ) -> Canonicalized<'tcx, V> where @@ -277,7 +277,7 @@ impl CanonicalizeRegionMode for CanonicalizeFreeRegionsOtherThanStatic { struct Canonicalizer<'cx, 'tcx> { infcx: Option<&'cx InferCtxt<'cx, 'tcx>>, tcx: TyCtxt<'tcx>, - variables: SmallVec<[CanonicalVarInfo; 8]>, + variables: SmallVec<[CanonicalVarInfo<'tcx>; 8]>, query_state: &'cx mut OriginalQueryValues<'tcx>, // Note that indices is only used once `var_values` is big enough to be // heap-allocated. @@ -293,7 +293,7 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for Canonicalizer<'cx, 'tcx> { self.tcx } - fn fold_binder(&mut self, t: &ty::Binder) -> ty::Binder + fn fold_binder(&mut self, t: ty::Binder) -> ty::Binder where T: TypeFoldable<'tcx>, { @@ -479,7 +479,7 @@ impl<'cx, 'tcx> Canonicalizer<'cx, 'tcx> { /// The main `canonicalize` method, shared impl of /// `canonicalize_query` and `canonicalize_response`. fn canonicalize( - value: &V, + value: V, infcx: Option<&InferCtxt<'_, 'tcx>>, tcx: TyCtxt<'tcx>, canonicalize_region_mode: &dyn CanonicalizeRegionMode, @@ -542,7 +542,7 @@ impl<'cx, 'tcx> Canonicalizer<'cx, 'tcx> { /// or returns an existing variable if `kind` has already been /// seen. `kind` is expected to be an unbound variable (or /// potentially a free region). - fn canonical_var(&mut self, info: CanonicalVarInfo, kind: GenericArg<'tcx>) -> BoundVar { + fn canonical_var(&mut self, info: CanonicalVarInfo<'tcx>, kind: GenericArg<'tcx>) -> BoundVar { let Canonicalizer { variables, query_state, indices, .. } = self; let var_values = &mut query_state.var_values; @@ -621,7 +621,7 @@ impl<'cx, 'tcx> Canonicalizer<'cx, 'tcx> { /// representing the region `r`; return a region referencing it. fn canonical_var_for_region( &mut self, - info: CanonicalVarInfo, + info: CanonicalVarInfo<'tcx>, r: ty::Region<'tcx>, ) -> ty::Region<'tcx> { let var = self.canonical_var(info, r.into()); @@ -633,7 +633,7 @@ impl<'cx, 'tcx> Canonicalizer<'cx, 'tcx> { /// if `ty_var` is bound to anything; if so, canonicalize /// *that*. Otherwise, create a new canonical variable for /// `ty_var`. - fn canonicalize_ty_var(&mut self, info: CanonicalVarInfo, ty_var: Ty<'tcx>) -> Ty<'tcx> { + fn canonicalize_ty_var(&mut self, info: CanonicalVarInfo<'tcx>, ty_var: Ty<'tcx>) -> Ty<'tcx> { let infcx = self.infcx.expect("encountered ty-var without infcx"); let bound_to = infcx.shallow_resolve(ty_var); if bound_to != ty_var { @@ -650,7 +650,7 @@ impl<'cx, 'tcx> Canonicalizer<'cx, 'tcx> { /// `const_var`. 
fn canonicalize_const_var( &mut self, - info: CanonicalVarInfo, + info: CanonicalVarInfo<'tcx>, const_var: &'tcx ty::Const<'tcx>, ) -> &'tcx ty::Const<'tcx> { let infcx = self.infcx.expect("encountered const-var without infcx"); diff --git a/compiler/rustc_infer/src/infer/canonical/mod.rs b/compiler/rustc_infer/src/infer/canonical/mod.rs index 2b8c46f1de4..0c26639e9b0 100644 --- a/compiler/rustc_infer/src/infer/canonical/mod.rs +++ b/compiler/rustc_infer/src/infer/canonical/mod.rs @@ -82,7 +82,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { fn instantiate_canonical_vars( &self, span: Span, - variables: &List, + variables: &List>, universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex, ) -> CanonicalVarValues<'tcx> { let var_values: IndexVec> = variables @@ -100,7 +100,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { fn instantiate_canonical_var( &self, span: Span, - cv_info: CanonicalVarInfo, + cv_info: CanonicalVarInfo<'tcx>, universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex, ) -> GenericArg<'tcx> { match cv_info.kind { @@ -154,7 +154,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { self.tcx .mk_const(ty::Const { val: ty::ConstKind::Placeholder(placeholder_mapped), - ty: self.tcx.ty_error(), // FIXME(const_generics) + ty: name.ty, }) .into() } diff --git a/compiler/rustc_infer/src/infer/canonical/query_response.rs b/compiler/rustc_infer/src/infer/canonical/query_response.rs index 93e19521893..c8d66cbb695 100644 --- a/compiler/rustc_infer/src/infer/canonical/query_response.rs +++ b/compiler/rustc_infer/src/infer/canonical/query_response.rs @@ -59,7 +59,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { Canonical<'tcx, QueryResponse<'tcx, T>>: ArenaAllocatable<'tcx>, { let query_response = self.make_query_response(inference_vars, answer, fulfill_cx)?; - let canonical_result = self.canonicalize_response(&query_response); + let canonical_result = self.canonicalize_response(query_response); debug!("make_canonicalized_query_response: canonical_result = {:#?}", canonical_result); @@ -83,7 +83,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { where T: Debug + TypeFoldable<'tcx>, { - self.canonicalize_response(&QueryResponse { + self.canonicalize_response(QueryResponse { var_values: inference_vars, region_constraints: QueryRegionConstraints::default(), certainty: Certainty::Proven, // Ambiguities are OK! @@ -176,7 +176,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { )); let user_result: R = - query_response.substitute_projected(self.tcx, &result_subst, |q_r| &q_r.value); + query_response.substitute_projected(self.tcx, &result_subst, |q_r| q_r.value.clone()); Ok(InferOk { value: user_result, obligations }) } @@ -238,7 +238,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { for (index, original_value) in original_values.var_values.iter().enumerate() { // ...with the value `v_r` of that variable from the query. let result_value = query_response.substitute_projected(self.tcx, &result_subst, |v| { - &v.var_values[BoundVar::new(index)] + v.var_values[BoundVar::new(index)] }); match (original_value.unpack(), result_value.unpack()) { ( @@ -296,7 +296,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { // ...also include the other query region constraints from the query. 
output_query_region_constraints.outlives.extend( - query_response.value.region_constraints.outlives.iter().filter_map(|r_c| { + query_response.value.region_constraints.outlives.iter().filter_map(|&r_c| { let r_c = substitute_value(self.tcx, &result_subst, r_c); // Screen out `'a: 'a` cases -- we skip the binder here but @@ -314,11 +314,11 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { .region_constraints .member_constraints .iter() - .map(|p_c| substitute_value(self.tcx, &result_subst, p_c)), + .map(|p_c| substitute_value(self.tcx, &result_subst, p_c.clone())), ); let user_result: R = - query_response.substitute_projected(self.tcx, &result_subst, |q_r| &q_r.value); + query_response.substitute_projected(self.tcx, &result_subst, |q_r| q_r.value.clone()); Ok(InferOk { value: user_result, obligations }) } @@ -502,7 +502,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { // `query_response.var_values` after applying the substitution // `result_subst`. let substituted_query_response = |index: BoundVar| -> GenericArg<'tcx> { - query_response.substitute_projected(self.tcx, &result_subst, |v| &v.var_values[index]) + query_response.substitute_projected(self.tcx, &result_subst, |v| v.var_values[index]) }; // Unify the original value for each variable with the value @@ -524,7 +524,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { unsubstituted_region_constraints: &'a [QueryOutlivesConstraint<'tcx>], result_subst: &'a CanonicalVarValues<'tcx>, ) -> impl Iterator> + 'a + Captures<'tcx> { - unsubstituted_region_constraints.iter().map(move |constraint| { + unsubstituted_region_constraints.iter().map(move |&constraint| { let ty::OutlivesPredicate(k1, r2) = substitute_value(self.tcx, result_subst, constraint).skip_binder(); diff --git a/compiler/rustc_infer/src/infer/canonical/substitute.rs b/compiler/rustc_infer/src/infer/canonical/substitute.rs index 65791f6fc65..cd4f1fa3bc3 100644 --- a/compiler/rustc_infer/src/infer/canonical/substitute.rs +++ b/compiler/rustc_infer/src/infer/canonical/substitute.rs @@ -28,7 +28,7 @@ pub(super) trait CanonicalExt<'tcx, V> { &self, tcx: TyCtxt<'tcx>, var_values: &CanonicalVarValues<'tcx>, - projection_fn: impl FnOnce(&V) -> &T, + projection_fn: impl FnOnce(&V) -> T, ) -> T where T: TypeFoldable<'tcx>; @@ -39,14 +39,14 @@ impl<'tcx, V> CanonicalExt<'tcx, V> for Canonical<'tcx, V> { where V: TypeFoldable<'tcx>, { - self.substitute_projected(tcx, var_values, |value| value) + self.substitute_projected(tcx, var_values, |value| value.clone()) } fn substitute_projected( &self, tcx: TyCtxt<'tcx>, var_values: &CanonicalVarValues<'tcx>, - projection_fn: impl FnOnce(&V) -> &T, + projection_fn: impl FnOnce(&V) -> T, ) -> T where T: TypeFoldable<'tcx>, @@ -60,16 +60,16 @@ impl<'tcx, V> CanonicalExt<'tcx, V> for Canonical<'tcx, V> { /// Substitute the values from `var_values` into `value`. `var_values` /// must be values for the set of canonical variables that appear in /// `value`. 
-pub(super) fn substitute_value<'a, 'tcx, T>( +pub(super) fn substitute_value<'tcx, T>( tcx: TyCtxt<'tcx>, var_values: &CanonicalVarValues<'tcx>, - value: &'a T, + value: T, ) -> T where T: TypeFoldable<'tcx>, { if var_values.var_values.is_empty() { - value.clone() + value } else { let fld_r = |br: ty::BoundRegion| match var_values.var_values[br.assert_bound_var()].unpack() { diff --git a/compiler/rustc_infer/src/infer/error_reporting/mod.rs b/compiler/rustc_infer/src/infer/error_reporting/mod.rs index 524efd04cfc..183fb314a00 100644 --- a/compiler/rustc_infer/src/infer/error_reporting/mod.rs +++ b/compiler/rustc_infer/src/infer/error_reporting/mod.rs @@ -389,7 +389,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { member_region, span, } => { - let hidden_ty = self.resolve_vars_if_possible(&hidden_ty); + let hidden_ty = self.resolve_vars_if_possible(hidden_ty); unexpected_hidden_region_diagnostic( self.tcx, span, @@ -590,7 +590,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { ) { match cause.code { ObligationCauseCode::Pattern { origin_expr: true, span: Some(span), root_ty } => { - let ty = self.resolve_vars_if_possible(&root_ty); + let ty = self.resolve_vars_if_possible(root_ty); if ty.is_suggestable() { // don't show type `_` err.span_label(span, format!("this expression has type `{}`", ty)); @@ -661,7 +661,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { } _ => { // `last_ty` can be `!`, `expected` will have better info when present. - let t = self.resolve_vars_if_possible(&match exp_found { + let t = self.resolve_vars_if_possible(match exp_found { Some(ty::error::ExpectedFound { expected, .. }) => expected, _ => last_ty, }); @@ -1498,7 +1498,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { } impl<'tcx> ty::fold::TypeVisitor<'tcx> for OpaqueTypesVisitor<'tcx> { - fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> { + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { if let Some((kind, def_id)) = TyCategory::from_ty(t) { let span = self.tcx.def_span(def_id); // Avoid cluttering the output when the "found" and error span overlap: @@ -1547,7 +1547,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { ValuePairs::TraitRefs(_) => (false, Mismatch::Fixed("trait")), _ => (false, Mismatch::Fixed("type")), }; - let vals = match self.values_str(&values) { + let vals = match self.values_str(values) { Some((expected, found)) => Some((expected, found)), None => { // Derived error. Cancel the emitter. 
@@ -1893,32 +1893,32 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { fn values_str( &self, - values: &ValuePairs<'tcx>, + values: ValuePairs<'tcx>, ) -> Option<(DiagnosticStyledString, DiagnosticStyledString)> { - match *values { - infer::Types(ref exp_found) => self.expected_found_str_ty(exp_found), - infer::Regions(ref exp_found) => self.expected_found_str(exp_found), - infer::Consts(ref exp_found) => self.expected_found_str(exp_found), - infer::TraitRefs(ref exp_found) => { + match values { + infer::Types(exp_found) => self.expected_found_str_ty(exp_found), + infer::Regions(exp_found) => self.expected_found_str(exp_found), + infer::Consts(exp_found) => self.expected_found_str(exp_found), + infer::TraitRefs(exp_found) => { let pretty_exp_found = ty::error::ExpectedFound { expected: exp_found.expected.print_only_trait_path(), found: exp_found.found.print_only_trait_path(), }; - self.expected_found_str(&pretty_exp_found) + self.expected_found_str(pretty_exp_found) } - infer::PolyTraitRefs(ref exp_found) => { + infer::PolyTraitRefs(exp_found) => { let pretty_exp_found = ty::error::ExpectedFound { expected: exp_found.expected.print_only_trait_path(), found: exp_found.found.print_only_trait_path(), }; - self.expected_found_str(&pretty_exp_found) + self.expected_found_str(pretty_exp_found) } } } fn expected_found_str_ty( &self, - exp_found: &ty::error::ExpectedFound>, + exp_found: ty::error::ExpectedFound>, ) -> Option<(DiagnosticStyledString, DiagnosticStyledString)> { let exp_found = self.resolve_vars_if_possible(exp_found); if exp_found.references_error() { @@ -1931,7 +1931,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { /// Returns a string of the form "expected `{}`, found `{}`". fn expected_found_str>( &self, - exp_found: &ty::error::ExpectedFound, + exp_found: ty::error::ExpectedFound, ) -> Option<(DiagnosticStyledString, DiagnosticStyledString)> { let exp_found = self.resolve_vars_if_possible(exp_found); if exp_found.references_error() { @@ -2180,7 +2180,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { "...", ); if let Some(infer::RelateParamBound(_, t)) = origin { - let t = self.resolve_vars_if_possible(&t); + let t = self.resolve_vars_if_possible(t); match t.kind() { // We've got: // fn get_later(g: G, dest: &mut T) -> impl FnOnce() + '_ @@ -2237,7 +2237,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { debug!("report_sub_sup_conflict: sub_trace.values={:?}", sub_trace.values); if let (Some((sup_expected, sup_found)), Some((sub_expected, sub_found))) = - (self.values_str(&sup_trace.values), self.values_str(&sub_trace.values)) + (self.values_str(sup_trace.values), self.values_str(sub_trace.values)) { if sub_expected == sup_expected && sub_found == sup_found { note_and_explain_region( diff --git a/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs b/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs index 868989539d4..fd8f46a6926 100644 --- a/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs +++ b/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs @@ -49,7 +49,7 @@ impl<'a, 'tcx> FindHirNodeVisitor<'a, 'tcx> { .and_then(|typeck_results| typeck_results.borrow().node_type_opt(hir_id)); match ty_opt { Some(ty) => { - let ty = self.infcx.resolve_vars_if_possible(&ty); + let ty = self.infcx.resolve_vars_if_possible(ty); if ty.walk().any(|inner| { inner == self.target || match (inner.unpack(), self.target.unpack()) { @@ -343,7 +343,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { arg: GenericArg<'tcx>, error_code: TypeAnnotationNeeded, ) -> DiagnosticBuilder<'tcx> { - 
let arg = self.resolve_vars_if_possible(&arg); + let arg = self.resolve_vars_if_possible(arg); let arg_data = self.extract_inference_diagnostics_data(arg, None); let kind_str = match arg.unpack() { GenericArgKind::Type(_) => "type", @@ -686,7 +686,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { span: Span, ty: Ty<'tcx>, ) -> DiagnosticBuilder<'tcx> { - let ty = self.resolve_vars_if_possible(&ty); + let ty = self.resolve_vars_if_possible(ty); let data = self.extract_inference_diagnostics_data(ty.into(), None); let mut err = struct_span_err!( diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs index 7ab18e54f7e..59786059fae 100644 --- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs +++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs @@ -102,43 +102,89 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> { None => String::new(), }; - let (span_1, span_2, main_label, span_label) = match (sup_is_ret_type, sub_is_ret_type) { - (None, None) => { - let (main_label_1, span_label_1) = if ty_sup.hir_id == ty_sub.hir_id { + let (span_1, span_2, main_label, span_label, future_return_type) = + match (sup_is_ret_type, sub_is_ret_type) { + (None, None) => { + let (main_label_1, span_label_1) = if ty_sup.hir_id == ty_sub.hir_id { + ( + "this type is declared with multiple lifetimes...".to_owned(), + "...but data with one lifetime flows into the other here".to_owned(), + ) + } else { + ( + "these two types are declared with different lifetimes...".to_owned(), + format!("...but data{} flows{} here", span_label_var1, span_label_var2), + ) + }; + (ty_sup.span, ty_sub.span, main_label_1, span_label_1, None) + } + + (Some(ret_span), _) => { + let sup_future = self.future_return_type(scope_def_id_sup); + let (return_type, action) = if let Some(_) = sup_future { + ("returned future", "held across an await point") + } else { + ("return type", "returned") + }; + ( - "this type is declared with multiple lifetimes...".to_owned(), - "...but data with one lifetime flows into the other here".to_owned(), + ty_sub.span, + ret_span, + format!( + "this parameter and the {} are declared with different lifetimes...", + return_type + ), + format!("...but data{} is {} here", span_label_var1, action), + sup_future, ) - } else { + } + (_, Some(ret_span)) => { + let sub_future = self.future_return_type(scope_def_id_sub); + let (return_type, action) = if let Some(_) = sub_future { + ("returned future", "held across an await point") + } else { + ("return type", "returned") + }; + ( - "these two types are declared with different lifetimes...".to_owned(), - format!("...but data{} flows{} here", span_label_var1, span_label_var2), + ty_sup.span, + ret_span, + format!( + "this parameter and the {} are declared with different lifetimes...", + return_type + ), + format!("...but data{} is {} here", span_label_var1, action), + sub_future, ) - }; - (ty_sup.span, ty_sub.span, main_label_1, span_label_1) - } - - (Some(ret_span), _) => ( - ty_sub.span, - ret_span, - "this parameter and the return type are declared with different lifetimes..." - .to_owned(), - format!("...but data{} is returned here", span_label_var1), - ), - (_, Some(ret_span)) => ( - ty_sup.span, - ret_span, - "this parameter and the return type are declared with different lifetimes..." 
- .to_owned(), - format!("...but data{} is returned here", span_label_var1), - ), - }; - - struct_span_err!(self.tcx().sess, span, E0623, "lifetime mismatch") - .span_label(span_1, main_label) - .span_label(span_2, String::new()) - .span_label(span, span_label) - .emit(); + } + }; + + let mut e = struct_span_err!(self.tcx().sess, span, E0623, "lifetime mismatch"); + + e.span_label(span_1, main_label); + e.span_label(span_2, String::new()); + e.span_label(span, span_label); + + if let Some(t) = future_return_type { + let snip = self + .tcx() + .sess + .source_map() + .span_to_snippet(t.span) + .ok() + .and_then(|s| match (&t.kind, s.as_str()) { + (rustc_hir::TyKind::Tup(&[]), "") => Some("()".to_string()), + (_, "") => None, + _ => Some(s), + }) + .unwrap_or("{unnamed_type}".to_string()); + + e.span_label( + t.span, + &format!("this `async fn` implicitly returns an `impl Future`", snip), + ); + } + e.emit(); Some(ErrorReported) } } diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs index 2187064ec5e..e8e0326d978 100644 --- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs +++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs @@ -234,14 +234,13 @@ impl NiceRegionError<'me, 'tcx> { false }; - let expected_trait_ref = self.infcx.resolve_vars_if_possible(&ty::TraitRef { + let expected_trait_ref = self.infcx.resolve_vars_if_possible(ty::TraitRef { def_id: trait_def_id, substs: expected_substs, }); - let actual_trait_ref = self.infcx.resolve_vars_if_possible(&ty::TraitRef { - def_id: trait_def_id, - substs: actual_substs, - }); + let actual_trait_ref = self + .infcx + .resolve_vars_if_possible(ty::TraitRef { def_id: trait_def_id, substs: actual_substs }); // Search the expected and actual trait references to see (a) // whether the sub/sup placeholders appear in them (sometimes diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs index df3dbfca01d..5264854d8eb 100644 --- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs +++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs @@ -414,7 +414,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> { tcx, ctxt.param_env, ctxt.assoc_item.def_id, - self.infcx.resolve_vars_if_possible(&ctxt.substs), + self.infcx.resolve_vars_if_possible(ctxt.substs), ) { Ok(Some(instance)) => instance, _ => return false, @@ -474,7 +474,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> { struct TraitObjectVisitor(Vec); impl TypeVisitor<'_> for TraitObjectVisitor { - fn visit_ty(&mut self, t: Ty<'_>) -> ControlFlow<()> { + fn visit_ty(&mut self, t: Ty<'_>) -> ControlFlow { match t.kind() { ty::Dynamic(preds, RegionKind::ReStatic) => { if let Some(def_id) = preds.principal_def_id() { diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs index c061f485c1c..4d3217a9c0b 100644 --- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs +++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs @@ -86,7 +86,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> { } if let 
Some((expected, found)) = - self.infcx.expected_found_str_ty(&ExpectedFound { expected, found }) + self.infcx.expected_found_str_ty(ExpectedFound { expected, found }) { // Highlighted the differences when showing the "expected/found" note. err.note_expected_found(&"", expected, &"", found); diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs index c055fed43f6..61fad8863e7 100644 --- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs +++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs @@ -55,12 +55,12 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> { let owner_id = hir.body_owner(body_id); let fn_decl = hir.fn_decl_by_hir_id(owner_id).unwrap(); let poly_fn_sig = self.tcx().fn_sig(id); - let fn_sig = self.tcx().liberate_late_bound_regions(id, &poly_fn_sig); + let fn_sig = self.tcx().liberate_late_bound_regions(id, poly_fn_sig); body.params.iter().enumerate().find_map(|(index, param)| { // May return None; sometimes the tables are not yet populated. let ty = fn_sig.inputs()[index]; let mut found_anon_region = false; - let new_param_ty = self.tcx().fold_regions(&ty, &mut false, |r, _| { + let new_param_ty = self.tcx().fold_regions(ty, &mut false, |r, _| { if *r == *anon_region { found_anon_region = true; replace_region @@ -85,6 +85,60 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> { }) } + pub(super) fn future_return_type( + &self, + local_def_id: LocalDefId, + ) -> Option<&rustc_hir::Ty<'_>> { + if let Some(hir::IsAsync::Async) = self.asyncness(local_def_id) { + if let rustc_middle::ty::Opaque(def_id, _) = + self.tcx().type_of(local_def_id).fn_sig(self.tcx()).output().skip_binder().kind() + { + match self.tcx().hir().get_if_local(*def_id) { + Some(hir::Node::Item(hir::Item { + kind: + hir::ItemKind::OpaqueTy(hir::OpaqueTy { + bounds, + origin: hir::OpaqueTyOrigin::AsyncFn, + .. + }), + .. + })) => { + for b in bounds.iter() { + if let hir::GenericBound::LangItemTrait( + hir::LangItem::Future, + _span, + _hir_id, + generic_args, + ) = b + { + for type_binding in generic_args.bindings.iter() { + if type_binding.ident.name == rustc_span::sym::Output { + if let hir::TypeBindingKind::Equality { ty } = + type_binding.kind + { + return Some(ty); + } + } + } + } + } + } + _ => {} + } + } + } + None + } + + pub(super) fn asyncness(&self, local_def_id: LocalDefId) -> Option { + // similar to the asyncness fn in rustc_ty_utils::ty + let hir_id = self.tcx().hir().local_def_id_to_hir_id(local_def_id); + let node = self.tcx().hir().get(hir_id); + let fn_like = rustc_middle::hir::map::blocks::FnLikeNode::from_node(node)?; + + Some(fn_like.asyncness()) + } + // Here, we check for the case where the anonymous region // is in the return type. // FIXME(#42703) - Need to handle certain cases here. 
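For context on the `different_lifetimes.rs` and `util.rs` changes above: they teach the E0623 "lifetime mismatch" report to recognise when the escaping region belongs to an `async fn`'s desugared return value, in which case the labels now speak of the "returned future" being "held across an await point" and point at the implicit `impl Future` return type found via the new `future_return_type`/`asyncness` helpers. A minimal sketch of the kind of user code that reaches this path (a hypothetical repro, not taken from this patch or its tests, assuming edition 2018; it intentionally fails to compile):

// Hypothetical repro, edition 2018; not taken from this patch or its tests.
// The `async fn` implicitly returns `impl Future<Output = LockedMarket<'_>>`,
// which captures the anonymous lifetime of `market`, so region resolution
// reports E0623. With this change the labels explain that the data is held
// across an await point in the returned future, instead of claiming that it
// is "returned here" at a line that returns nothing.
use std::sync::{Mutex, MutexGuard};

struct Market;
struct LockedMarket<'a>(MutexGuard<'a, Market>);

async fn buy_lock(market: &Mutex<Market>) -> LockedMarket<'_> {
    LockedMarket(market.lock().unwrap())
}

The exact spans and wording come from the rewritten labels in `different_lifetimes.rs`; the snippet only illustrates where that wording now applies.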
diff --git a/compiler/rustc_infer/src/infer/error_reporting/note.rs b/compiler/rustc_infer/src/infer/error_reporting/note.rs index 9ac27030ade..7fb94332cad 100644 --- a/compiler/rustc_infer/src/infer/error_reporting/note.rs +++ b/compiler/rustc_infer/src/infer/error_reporting/note.rs @@ -24,7 +24,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { }; match *origin { infer::Subtype(ref trace) => { - if let Some((expected, found)) = self.values_str(&trace.values) { + if let Some((expected, found)) = self.values_str(trace.values) { label_or_note( trace.cause.span, &format!("...so that the {}", trace.cause.as_requirement_str()), diff --git a/compiler/rustc_infer/src/infer/fudge.rs b/compiler/rustc_infer/src/infer/fudge.rs index d7bc636db8f..c292b2bdb30 100644 --- a/compiler/rustc_infer/src/infer/fudge.rs +++ b/compiler/rustc_infer/src/infer/fudge.rs @@ -105,7 +105,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { let (mut fudger, value) = self.probe(|_| { match f() { Ok(value) => { - let value = self.resolve_vars_if_possible(&value); + let value = self.resolve_vars_if_possible(value); // At this point, `value` could in principle refer // to inference variables that have been created during diff --git a/compiler/rustc_infer/src/infer/higher_ranked/mod.rs b/compiler/rustc_infer/src/infer/higher_ranked/mod.rs index e3365e8590b..9d9ecf5b384 100644 --- a/compiler/rustc_infer/src/infer/higher_ranked/mod.rs +++ b/compiler/rustc_infer/src/infer/higher_ranked/mod.rs @@ -33,14 +33,14 @@ impl<'a, 'tcx> CombineFields<'a, 'tcx> { self.infcx.commit_if_ok(|_| { // First, we instantiate each bound region in the supertype with a // fresh placeholder region. - let b_prime = self.infcx.replace_bound_vars_with_placeholders(&b); + let b_prime = self.infcx.replace_bound_vars_with_placeholders(b); // Next, we instantiate each bound region in the subtype // with a fresh region variable. These region variables -- // but no other pre-existing region variables -- can name // the placeholders. let (a_prime, _) = - self.infcx.replace_bound_vars_with_fresh_vars(span, HigherRankedType, &a); + self.infcx.replace_bound_vars_with_fresh_vars(span, HigherRankedType, a); debug!("a_prime={:?}", a_prime); debug!("b_prime={:?}", b_prime); @@ -66,7 +66,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { /// the [rustc dev guide]. 
/// /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/hrtb.html - pub fn replace_bound_vars_with_placeholders(&self, binder: &ty::Binder) -> T + pub fn replace_bound_vars_with_placeholders(&self, binder: ty::Binder) -> T where T: TypeFoldable<'tcx>, { @@ -95,7 +95,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { self.tcx.mk_const(ty::Const { val: ty::ConstKind::Placeholder(ty::PlaceholderConst { universe: next_universe, - name: bound_var, + name: ty::BoundConst { var: bound_var, ty }, }), ty, }) @@ -113,10 +113,9 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { debug!( "replace_bound_vars_with_placeholders(\ next_universe={:?}, \ - binder={:?}, \ result={:?}, \ map={:?})", - next_universe, binder, result, map, + next_universe, result, map, ); result diff --git a/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs b/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs index fcf1949933b..d7b2ce7ee20 100644 --- a/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs +++ b/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs @@ -1001,7 +1001,7 @@ impl<'tcx> LexicalRegionResolutions<'tcx> { where T: TypeFoldable<'tcx>, { - tcx.fold_regions(&value, &mut false, |r, _db| match r { + tcx.fold_regions(value, &mut false, |r, _db| match r { ty::ReVar(rid) => self.resolve_var(*rid), _ => r, }) diff --git a/compiler/rustc_infer/src/infer/mod.rs b/compiler/rustc_infer/src/infer/mod.rs index acded5351f8..6affe0e5463 100644 --- a/compiler/rustc_infer/src/infer/mod.rs +++ b/compiler/rustc_infer/src/infer/mod.rs @@ -1,5 +1,3 @@ -//! See the Book for more information. - pub use self::freshen::TypeFreshener; pub use self::LateBoundRegionConversionTime::*; pub use self::RegionVariableOrigin::*; @@ -345,7 +343,7 @@ pub struct InferCtxt<'a, 'tcx> { } /// See the `error_reporting` module for more details. -#[derive(Clone, Debug, PartialEq, Eq, TypeFoldable)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, TypeFoldable)] pub enum ValuePairs<'tcx> { Types(ExpectedFound>), Regions(ExpectedFound>), @@ -955,7 +953,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { Some(self.commit_if_ok(|_snapshot| { let ty::SubtypePredicate { a_is_expected, a, b } = - self.replace_bound_vars_with_placeholders(&predicate); + self.replace_bound_vars_with_placeholders(predicate); let ok = self.at(cause, param_env).sub_exp(a_is_expected, a, b)?; @@ -970,7 +968,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { ) -> UnitResult<'tcx> { self.commit_if_ok(|_snapshot| { let ty::OutlivesPredicate(r_a, r_b) = - self.replace_bound_vars_with_placeholders(&predicate); + self.replace_bound_vars_with_placeholders(predicate); let origin = SubregionOrigin::from_obligation_cause(cause, || { RelateRegionParamBound(cause.span) }); @@ -1266,7 +1264,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { } pub fn ty_to_string(&self, t: Ty<'tcx>) -> String { - self.resolve_vars_if_possible(&t).to_string() + self.resolve_vars_if_possible(t).to_string() } pub fn tys_to_string(&self, ts: &[Ty<'tcx>]) -> String { @@ -1274,7 +1272,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { format!("({})", tstrs.join(", ")) } - pub fn trait_ref_to_string(&self, t: &ty::TraitRef<'tcx>) -> String { + pub fn trait_ref_to_string(&self, t: ty::TraitRef<'tcx>) -> String { self.resolve_vars_if_possible(t).print_only_trait_path().to_string() } @@ -1314,7 +1312,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { /// is left as is. This is an idempotent operation that does /// not affect inference state in any way and so you can do it /// at will. 
- pub fn resolve_vars_if_possible(&self, value: &T) -> T + pub fn resolve_vars_if_possible(&self, value: T) -> T where T: TypeFoldable<'tcx>, { @@ -1334,9 +1332,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { where T: TypeFoldable<'tcx>, { - let mut r = resolve::UnresolvedTypeFinder::new(self); - value.visit_with(&mut r); - r.first_unresolved + value.visit_with(&mut resolve::UnresolvedTypeFinder::new(self)).break_value() } pub fn probe_const_var( @@ -1349,7 +1345,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { } } - pub fn fully_resolve>(&self, value: &T) -> FixupResult<'tcx, T> { + pub fn fully_resolve>(&self, value: T) -> FixupResult<'tcx, T> { /*! * Attempts to resolve all type/region/const variables in * `value`. Region inference must have been run already (e.g., @@ -1383,7 +1379,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { where M: FnOnce(String) -> DiagnosticBuilder<'tcx>, { - let actual_ty = self.resolve_vars_if_possible(&actual_ty); + let actual_ty = self.resolve_vars_if_possible(actual_ty); debug!("type_error_struct_with_diag({:?}, {:?})", sp, actual_ty); // Don't report an error if actual type is `Error`. @@ -1420,7 +1416,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { &self, span: Span, lbrct: LateBoundRegionConversionTime, - value: &ty::Binder, + value: ty::Binder, ) -> (T, BTreeMap>) where T: TypeFoldable<'tcx>, @@ -1508,7 +1504,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { span: Option, ) -> EvalToConstValueResult<'tcx> { let mut original_values = OriginalQueryValues::default(); - let canonical = self.canonicalize_query(&(param_env, substs), &mut original_values); + let canonical = self.canonicalize_query((param_env, substs), &mut original_values); let (param_env, substs) = canonical.value; // The return value is the evaluated value which doesn't contain any reference to inference diff --git a/compiler/rustc_infer/src/infer/nll_relate/mod.rs b/compiler/rustc_infer/src/infer/nll_relate/mod.rs index 9b2ffc7a920..0b2847658f7 100644 --- a/compiler/rustc_infer/src/infer/nll_relate/mod.rs +++ b/compiler/rustc_infer/src/infer/nll_relate/mod.rs @@ -741,7 +741,10 @@ struct ScopeInstantiator<'me, 'tcx> { } impl<'me, 'tcx> TypeVisitor<'tcx> for ScopeInstantiator<'me, 'tcx> { - fn visit_binder>(&mut self, t: &ty::Binder) -> ControlFlow<()> { + fn visit_binder>( + &mut self, + t: &ty::Binder, + ) -> ControlFlow { self.target_index.shift_in(1); t.super_visit_with(self); self.target_index.shift_out(1); @@ -749,7 +752,7 @@ impl<'me, 'tcx> TypeVisitor<'tcx> for ScopeInstantiator<'me, 'tcx> { ControlFlow::CONTINUE } - fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> { + fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow { let ScopeInstantiator { bound_region_scope, next_region, .. 
} = self; match r { diff --git a/compiler/rustc_infer/src/infer/outlives/obligations.rs b/compiler/rustc_infer/src/infer/outlives/obligations.rs index eb1a7806256..16d86e6243d 100644 --- a/compiler/rustc_infer/src/infer/outlives/obligations.rs +++ b/compiler/rustc_infer/src/infer/outlives/obligations.rs @@ -167,7 +167,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { sup_type, sub_region, origin ); - let sup_type = self.resolve_vars_if_possible(&sup_type); + let sup_type = self.resolve_vars_if_possible(sup_type); if let Some(region_bound_pairs) = region_bound_pairs_map.get(&body_id) { let outlives = &mut TypeOutlives::new( @@ -205,7 +205,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> { implicit_region_bound, param_env, ); - let ty = self.resolve_vars_if_possible(&ty); + let ty = self.resolve_vars_if_possible(ty); outlives.type_must_outlive(origin, ty, region); } } diff --git a/compiler/rustc_infer/src/infer/outlives/verify.rs b/compiler/rustc_infer/src/infer/outlives/verify.rs index 2b827f4f4ed..f69212c599b 100644 --- a/compiler/rustc_infer/src/infer/outlives/verify.rs +++ b/compiler/rustc_infer/src/infer/outlives/verify.rs @@ -124,10 +124,10 @@ impl<'cx, 'tcx> VerifyBoundCx<'cx, 'tcx> { projection_ty: ty::ProjectionTy<'tcx>, ) -> Vec, ty::Region<'tcx>>> { let projection_ty = GenericKind::Projection(projection_ty).to_ty(self.tcx); - let erased_projection_ty = self.tcx.erase_regions(&projection_ty); + let erased_projection_ty = self.tcx.erase_regions(projection_ty); self.declared_generic_bounds_from_env_with_compare_fn(|ty| { if let ty::Projection(..) = ty.kind() { - let erased_ty = self.tcx.erase_regions(&ty); + let erased_ty = self.tcx.erase_regions(ty); erased_ty == erased_projection_ty } else { false diff --git a/compiler/rustc_infer/src/infer/resolve.rs b/compiler/rustc_infer/src/infer/resolve.rs index fe4ba5aa4e8..d72be0134fb 100644 --- a/compiler/rustc_infer/src/infer/resolve.rs +++ b/compiler/rustc_infer/src/infer/resolve.rs @@ -111,19 +111,17 @@ impl<'a, 'tcx> TypeFolder<'tcx> for OpportunisticRegionResolver<'a, 'tcx> { /// involve some hashing and so forth). pub struct UnresolvedTypeFinder<'a, 'tcx> { infcx: &'a InferCtxt<'a, 'tcx>, - - /// Used to find the type parameter name and location for error reporting. - pub first_unresolved: Option<(Ty<'tcx>, Option)>, } impl<'a, 'tcx> UnresolvedTypeFinder<'a, 'tcx> { pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> Self { - UnresolvedTypeFinder { infcx, first_unresolved: None } + UnresolvedTypeFinder { infcx } } } impl<'a, 'tcx> TypeVisitor<'tcx> for UnresolvedTypeFinder<'a, 'tcx> { - fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> { + type BreakTy = (Ty<'tcx>, Option); + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { let t = self.infcx.shallow_resolve(t); if t.has_infer_types() { if let ty::Infer(infer_ty) = *t.kind() { @@ -144,8 +142,7 @@ impl<'a, 'tcx> TypeVisitor<'tcx> for UnresolvedTypeFinder<'a, 'tcx> { } else { None }; - self.first_unresolved = Some((t, ty_var_span)); - ControlFlow::BREAK + ControlFlow::Break((t, ty_var_span)) } else { // Otherwise, visit its contents. t.super_visit_with(self) @@ -164,7 +161,7 @@ impl<'a, 'tcx> TypeVisitor<'tcx> for UnresolvedTypeFinder<'a, 'tcx> { /// Full type resolution replaces all type and region variables with /// their concrete results. If any variable cannot be replaced (never unified, etc) /// then an `Err` result is returned. 
-pub fn fully_resolve<'a, 'tcx, T>(infcx: &InferCtxt<'a, 'tcx>, value: &T) -> FixupResult<'tcx, T> +pub fn fully_resolve<'a, 'tcx, T>(infcx: &InferCtxt<'a, 'tcx>, value: T) -> FixupResult<'tcx, T> where T: TypeFoldable<'tcx>, { diff --git a/compiler/rustc_infer/src/traits/structural_impls.rs b/compiler/rustc_infer/src/traits/structural_impls.rs index 1a1c2637a6f..c4a2ecee096 100644 --- a/compiler/rustc_infer/src/traits/structural_impls.rs +++ b/compiler/rustc_infer/src/traits/structural_impls.rs @@ -60,16 +60,16 @@ impl<'tcx> fmt::Debug for traits::MismatchedProjectionTypes<'tcx> { // TypeFoldable implementations. impl<'tcx, O: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Obligation<'tcx, O> { - fn super_fold_with>(&self, folder: &mut F) -> Self { + fn super_fold_with>(self, folder: &mut F) -> Self { traits::Obligation { - cause: self.cause.clone(), + cause: self.cause, recursion_depth: self.recursion_depth, predicate: self.predicate.fold_with(folder), param_env: self.param_env.fold_with(folder), } } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.predicate.visit_with(visitor) } } diff --git a/compiler/rustc_infer/src/traits/util.rs b/compiler/rustc_infer/src/traits/util.rs index f6ef9840788..b0b0e4372b8 100644 --- a/compiler/rustc_infer/src/traits/util.rs +++ b/compiler/rustc_infer/src/traits/util.rs @@ -9,7 +9,7 @@ pub fn anonymize_predicate<'tcx>( tcx: TyCtxt<'tcx>, pred: ty::Predicate<'tcx>, ) -> ty::Predicate<'tcx> { - match pred.kind() { + match *pred.kind() { ty::PredicateKind::ForAll(binder) => { let new = ty::PredicateKind::ForAll(tcx.anonymize_late_bound_regions(binder)); tcx.reuse_or_mk_predicate(pred, new) diff --git a/compiler/rustc_interface/Cargo.toml b/compiler/rustc_interface/Cargo.toml index e214493a567..2481a27dee7 100644 --- a/compiler/rustc_interface/Cargo.toml +++ b/compiler/rustc_interface/Cargo.toml @@ -41,7 +41,7 @@ rustc_plugin_impl = { path = "../rustc_plugin_impl" } rustc_privacy = { path = "../rustc_privacy" } rustc_resolve = { path = "../rustc_resolve" } rustc_trait_selection = { path = "../rustc_trait_selection" } -rustc_ty = { path = "../rustc_ty" } +rustc_ty_utils = { path = "../rustc_ty_utils" } tempfile = "3.0.5" [target.'cfg(windows)'.dependencies] diff --git a/compiler/rustc_interface/src/lib.rs b/compiler/rustc_interface/src/lib.rs index 88d2efe96d1..0935eb2bd71 100644 --- a/compiler/rustc_interface/src/lib.rs +++ b/compiler/rustc_interface/src/lib.rs @@ -1,6 +1,6 @@ #![feature(bool_to_option)] #![feature(box_syntax)] -#![feature(set_stdio)] +#![feature(internal_output_capture)] #![feature(nll)] #![feature(generator_trait)] #![feature(generators)] diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs index 548b6c03daa..5fd560d7eff 100644 --- a/compiler/rustc_interface/src/passes.rs +++ b/compiler/rustc_interface/src/passes.rs @@ -6,6 +6,7 @@ use rustc_ast::mut_visit::MutVisitor; use rustc_ast::{self as ast, visit}; use rustc_codegen_ssa::back::link::emit_metadata; use rustc_codegen_ssa::traits::CodegenBackend; +use rustc_data_structures::steal::Steal; use rustc_data_structures::sync::{par_iter, Lrc, OnceCell, ParallelIterator, WorkerLocal}; use rustc_data_structures::temp_dir::MaybeTempDir; use rustc_data_structures::{box_region_allow_access, declare_box_region_type, parallel}; @@ -20,7 +21,6 @@ use rustc_middle::dep_graph::DepGraph; use rustc_middle::middle; use rustc_middle::middle::cstore::{CrateStore, MetadataLoader, 
MetadataLoaderDyn}; use rustc_middle::ty::query::Providers; -use rustc_middle::ty::steal::Steal; use rustc_middle::ty::{self, GlobalCtxt, ResolverOutputs, TyCtxt}; use rustc_mir as mir; use rustc_mir_build as mir_build; @@ -239,16 +239,12 @@ fn configure_and_expand_inner<'a>( krate = sess.time("crate_injection", || { let alt_std_name = sess.opts.alt_std_name.as_ref().map(|s| Symbol::intern(s)); - let (krate, name) = rustc_builtin_macros::standard_library_imports::inject( + rustc_builtin_macros::standard_library_imports::inject( krate, &mut resolver, &sess, alt_std_name, - ); - if let Some(name) = name { - sess.parse_sess.injected_crate_name.set(name).expect("not yet initialized"); - } - krate + ) }); util::check_attr_crate_type(&sess, &krate.attrs, &mut resolver.lint_buffer()); @@ -703,7 +699,7 @@ pub static DEFAULT_QUERY_PROVIDERS: SyncLazy = SyncLazy::new(|| { rustc_passes::provide(providers); rustc_resolve::provide(providers); rustc_traits::provide(providers); - rustc_ty::provide(providers); + rustc_ty_utils::provide(providers); rustc_metadata::provide(providers); rustc_lint::provide(providers); rustc_symbol_mangling::provide(providers); diff --git a/compiler/rustc_interface/src/queries.rs b/compiler/rustc_interface/src/queries.rs index 1de7350a3e2..a2704c3adbf 100644 --- a/compiler/rustc_interface/src/queries.rs +++ b/compiler/rustc_interface/src/queries.rs @@ -3,6 +3,7 @@ use crate::passes::{self, BoxedResolver, QueryContext}; use rustc_ast as ast; use rustc_codegen_ssa::traits::CodegenBackend; +use rustc_data_structures::steal::Steal; use rustc_data_structures::svh::Svh; use rustc_data_structures::sync::{Lrc, OnceCell, WorkerLocal}; use rustc_errors::ErrorReported; @@ -12,7 +13,6 @@ use rustc_incremental::DepGraphFuture; use rustc_lint::LintStore; use rustc_middle::arena::Arena; use rustc_middle::dep_graph::DepGraph; -use rustc_middle::ty::steal::Steal; use rustc_middle::ty::{GlobalCtxt, ResolverOutputs, TyCtxt}; use rustc_serialize::json; use rustc_session::config::{self, OutputFilenames, OutputType}; diff --git a/compiler/rustc_interface/src/tests.rs b/compiler/rustc_interface/src/tests.rs index a1ba1be88a1..fd8052c2ecf 100644 --- a/compiler/rustc_interface/src/tests.rs +++ b/compiler/rustc_interface/src/tests.rs @@ -555,6 +555,8 @@ fn test_debugging_options_tracking_hash() { tracked!(function_sections, Some(false)); tracked!(human_readable_cgu_names, true); tracked!(inline_in_all_cgus, Some(true)); + tracked!(inline_mir_threshold, 123); + tracked!(inline_mir_hint_threshold, 123); tracked!(insert_sideeffect, true); tracked!(instrument_coverage, true); tracked!(instrument_mcount, true); diff --git a/compiler/rustc_interface/src/util.rs b/compiler/rustc_interface/src/util.rs index 3ed7d20ae45..20a7b47313e 100644 --- a/compiler/rustc_interface/src/util.rs +++ b/compiler/rustc_interface/src/util.rs @@ -25,7 +25,7 @@ use rustc_span::symbol::{sym, Symbol}; use smallvec::SmallVec; use std::env; use std::env::consts::{DLL_PREFIX, DLL_SUFFIX}; -use std::io::{self, Write}; +use std::io; use std::lazy::SyncOnceCell; use std::mem; use std::ops::DerefMut; @@ -106,21 +106,6 @@ fn get_stack_size() -> Option { env::var_os("RUST_MIN_STACK").is_none().then_some(STACK_SIZE) } -struct Sink(Arc>>); -impl Write for Sink { - fn write(&mut self, data: &[u8]) -> io::Result { - Write::write(&mut *self.0.lock().unwrap(), data) - } - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} -impl io::LocalOutput for Sink { - fn clone_box(&self) -> Box { - Box::new(Self(self.0.clone())) - } -} - /// Like a 
`thread::Builder::spawn` followed by a `join()`, but avoids the need /// for `'static` bounds. #[cfg(not(parallel_compiler))] @@ -163,9 +148,7 @@ pub fn setup_callbacks_and_run_in_thread_pool_with_globals R + Se let main_handler = move || { rustc_span::with_session_globals(edition, || { - if let Some(stderr) = stderr { - io::set_panic(Some(box Sink(stderr.clone()))); - } + io::set_output_capture(stderr.clone()); f() }) }; @@ -203,9 +186,7 @@ pub fn setup_callbacks_and_run_in_thread_pool_with_globals R + Se // on the new threads. let main_handler = move |thread: rayon::ThreadBuilder| { rustc_span::SESSION_GLOBALS.set(session_globals, || { - if let Some(stderr) = stderr { - io::set_panic(Some(box Sink(stderr.clone()))); - } + io::set_output_capture(stderr.clone()); thread.run() }) }; @@ -880,12 +861,6 @@ impl<'a> MutVisitor for ReplaceBodyWithLoop<'a, '_> { }) } } - - // in general the pretty printer processes unexpanded code, so - // we override the default `visit_mac` method which panics. - fn visit_mac(&mut self, mac: &mut ast::MacCall) { - noop_visit_mac(mac, self) - } } /// Returns a version string such as "rustc 1.46.0 (04488afe3 2020-08-24)" diff --git a/compiler/rustc_lint/Cargo.toml b/compiler/rustc_lint/Cargo.toml index 760a8e385d6..c56eb09b634 100644 --- a/compiler/rustc_lint/Cargo.toml +++ b/compiler/rustc_lint/Cargo.toml @@ -20,3 +20,4 @@ rustc_feature = { path = "../rustc_feature" } rustc_index = { path = "../rustc_index" } rustc_session = { path = "../rustc_session" } rustc_trait_selection = { path = "../rustc_trait_selection" } +rustc_parse_format = { path = "../rustc_parse_format" } diff --git a/compiler/rustc_lint/src/early.rs b/compiler/rustc_lint/src/early.rs index 9aeeb627792..08c147ec3ac 100644 --- a/compiler/rustc_lint/src/early.rs +++ b/compiler/rustc_lint/src/early.rs @@ -270,15 +270,9 @@ impl<'a, T: EarlyLintPass> ast_visit::Visitor<'a> for EarlyContextAndPass<'a, T> self.check_id(id); } - fn visit_mac(&mut self, mac: &'a ast::MacCall) { - // FIXME(#54110): So, this setup isn't really right. I think - // that (a) the librustc_ast visitor ought to be doing this as - // part of `walk_mac`, and (b) we should be calling - // `visit_path`, *but* that would require a `NodeId`, and I - // want to get #53686 fixed quickly. 
-nmatsakis - ast_visit::walk_path(self, &mac.path); - + fn visit_mac_call(&mut self, mac: &'a ast::MacCall) { run_early_pass!(self, check_mac, mac); + ast_visit::walk_mac(self, mac); } } diff --git a/compiler/rustc_lint/src/levels.rs b/compiler/rustc_lint/src/levels.rs index aca28988364..02da85d25d5 100644 --- a/compiler/rustc_lint/src/levels.rs +++ b/compiler/rustc_lint/src/levels.rs @@ -30,6 +30,8 @@ fn lint_levels(tcx: TyCtxt<'_>, cnum: CrateNum) -> LintLevelMap { let mut builder = LintLevelMapBuilder { levels, tcx, store }; let krate = tcx.hir().krate(); + builder.levels.id_to_set.reserve(krate.exported_macros.len() + 1); + let push = builder.levels.push(&krate.item.attrs, &store, true); builder.levels.register_id(hir::CRATE_HIR_ID); for macro_def in krate.exported_macros { diff --git a/compiler/rustc_lint/src/lib.rs b/compiler/rustc_lint/src/lib.rs index 24bfdad970a..81549be4b09 100644 --- a/compiler/rustc_lint/src/lib.rs +++ b/compiler/rustc_lint/src/lib.rs @@ -55,6 +55,7 @@ mod levels; mod methods; mod non_ascii_idents; mod nonstandard_style; +mod panic_fmt; mod passes; mod redundant_semicolon; mod traits; @@ -80,6 +81,7 @@ use internal::*; use methods::*; use non_ascii_idents::*; use nonstandard_style::*; +use panic_fmt::PanicFmt; use redundant_semicolon::*; use traits::*; use types::*; @@ -166,6 +168,7 @@ macro_rules! late_lint_passes { ClashingExternDeclarations: ClashingExternDeclarations::new(), DropTraitConstraints: DropTraitConstraints, TemporaryCStringAsPtr: TemporaryCStringAsPtr, + PanicFmt: PanicFmt, ] ); }; diff --git a/compiler/rustc_lint/src/nonstandard_style.rs b/compiler/rustc_lint/src/nonstandard_style.rs index f117ce1f805..dd2627f7bc1 100644 --- a/compiler/rustc_lint/src/nonstandard_style.rs +++ b/compiler/rustc_lint/src/nonstandard_style.rs @@ -127,14 +127,20 @@ impl NonCamelCaseTypes { if !is_camel_case(name) { cx.struct_span_lint(NON_CAMEL_CASE_TYPES, ident.span, |lint| { let msg = format!("{} `{}` should have an upper camel case name", sort, name); - lint.build(&msg) - .span_suggestion( + let mut err = lint.build(&msg); + let cc = to_camel_case(name); + // We cannot provide meaningful suggestions + // if the characters are in the category of "Lowercase Letter". + if name.to_string() != cc { + err.span_suggestion( ident.span, "convert the identifier to upper camel case", to_camel_case(name), Applicability::MaybeIncorrect, - ) - .emit() + ); + } + + err.emit(); }) } } @@ -263,17 +269,21 @@ impl NonSnakeCase { let sc = NonSnakeCase::to_snake_case(name); let msg = format!("{} `{}` should have a snake case name", sort, name); let mut err = lint.build(&msg); - // We have a valid span in almost all cases, but we don't have one when linting a crate - // name provided via the command line. - if !ident.span.is_dummy() { - err.span_suggestion( - ident.span, - "convert the identifier to snake case", - sc, - Applicability::MaybeIncorrect, - ); - } else { - err.help(&format!("convert the identifier to snake case: `{}`", sc)); + // We cannot provide meaningful suggestions + // if the characters are in the category of "Uppercase Letter". + if name.to_string() != sc { + // We have a valid span in almost all cases, but we don't have one when linting a crate + // name provided via the command line. 
+ if !ident.span.is_dummy() { + err.span_suggestion( + ident.span, + "convert the identifier to snake case", + sc, + Applicability::MaybeIncorrect, + ); + } else { + err.help(&format!("convert the identifier to snake case: `{}`", sc)); + } } err.emit(); @@ -441,14 +451,20 @@ impl NonUpperCaseGlobals { if name.chars().any(|c| c.is_lowercase()) { cx.struct_span_lint(NON_UPPER_CASE_GLOBALS, ident.span, |lint| { let uc = NonSnakeCase::to_snake_case(&name).to_uppercase(); - lint.build(&format!("{} `{}` should have an upper case name", sort, name)) - .span_suggestion( + let mut err = + lint.build(&format!("{} `{}` should have an upper case name", sort, name)); + // We cannot provide meaningful suggestions + // if the characters are in the category of "Lowercase Letter". + if name.to_string() != uc { + err.span_suggestion( ident.span, "convert the identifier to upper case", uc, Applicability::MaybeIncorrect, - ) - .emit(); + ); + } + + err.emit(); }) } } diff --git a/compiler/rustc_lint/src/panic_fmt.rs b/compiler/rustc_lint/src/panic_fmt.rs new file mode 100644 index 00000000000..0d2b20989b0 --- /dev/null +++ b/compiler/rustc_lint/src/panic_fmt.rs @@ -0,0 +1,150 @@ +use crate::{LateContext, LateLintPass, LintContext}; +use rustc_ast as ast; +use rustc_errors::{pluralize, Applicability}; +use rustc_hir as hir; +use rustc_middle::ty; +use rustc_parse_format::{ParseMode, Parser, Piece}; +use rustc_span::{sym, InnerSpan}; + +declare_lint! { + /// The `panic_fmt` lint detects `panic!("..")` with `{` or `}` in the string literal. + /// + /// ### Example + /// + /// ```rust,no_run + /// panic!("{}"); + /// ``` + /// + /// {{produces}} + /// + /// ### Explanation + /// + /// `panic!("{}")` panics with the message `"{}"`, as a `panic!()` invocation + /// with a single argument does not use `format_args!()`. + /// A future edition of Rust will interpret this string as format string, + /// which would break this. + PANIC_FMT, + Warn, + "detect braces in single-argument panic!() invocations", + report_in_external_macro +} + +declare_lint_pass!(PanicFmt => [PANIC_FMT]); + +impl<'tcx> LateLintPass<'tcx> for PanicFmt { + fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx hir::Expr<'tcx>) { + if let hir::ExprKind::Call(f, [arg]) = &expr.kind { + if let &ty::FnDef(def_id, _) = cx.typeck_results().expr_ty(f).kind() { + if Some(def_id) == cx.tcx.lang_items().begin_panic_fn() + || Some(def_id) == cx.tcx.lang_items().panic_fn() + { + check_panic(cx, f, arg); + } + } + } + } +} + +fn check_panic<'tcx>(cx: &LateContext<'tcx>, f: &'tcx hir::Expr<'tcx>, arg: &'tcx hir::Expr<'tcx>) { + if let hir::ExprKind::Lit(lit) = &arg.kind { + if let ast::LitKind::Str(sym, _) = lit.node { + let mut expn = f.span.ctxt().outer_expn_data(); + if let Some(id) = expn.macro_def_id { + if cx.tcx.is_diagnostic_item(sym::std_panic_macro, id) + || cx.tcx.is_diagnostic_item(sym::core_panic_macro, id) + { + let fmt = sym.as_str(); + if !fmt.contains(&['{', '}'][..]) { + return; + } + + let fmt_span = arg.span.source_callsite(); + + let (snippet, style) = + match cx.sess().parse_sess.source_map().span_to_snippet(fmt_span) { + Ok(snippet) => { + // Count the number of `#`s between the `r` and `"`. 
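The `style` value computed just above is the number of `#`s in a raw string literal, or `None` for an ordinary string literal; `rustc_parse_format` uses it to map placeholder positions back to spans inside the literal. A standalone sketch of the same computation (the helper name is illustrative, not part of the compiler):

```rust
/// Returns the number of `#`s in a raw string literal snippet, or `None`
/// for an ordinary string literal (no `r` prefix). Illustrative helper only.
fn raw_str_hash_count(snippet: &str) -> Option<usize> {
    snippet.strip_prefix('r').and_then(|s| s.find('"'))
}

fn main() {
    // `"{}"` -- not a raw string, so no style.
    assert_eq!(raw_str_hash_count("\"{}\""), None);
    // `r"{}"` -- raw string with zero `#`s.
    assert_eq!(raw_str_hash_count("r\"{}\""), Some(0));
    // `r##"{}"##` -- raw string with two `#`s.
    assert_eq!(raw_str_hash_count("r##\"{}\"##"), Some(2));
}
```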
+ let style = snippet.strip_prefix('r').and_then(|s| s.find('"')); + (Some(snippet), style) + } + Err(_) => (None, None), + }; + + let mut fmt_parser = + Parser::new(fmt.as_ref(), style, snippet.clone(), false, ParseMode::Format); + let n_arguments = + (&mut fmt_parser).filter(|a| matches!(a, Piece::NextArgument(_))).count(); + + // Unwrap another level of macro expansion if this panic!() + // was expanded from assert!() or debug_assert!(). + for &assert in &[sym::assert_macro, sym::debug_assert_macro] { + let parent = expn.call_site.ctxt().outer_expn_data(); + if parent + .macro_def_id + .map_or(false, |id| cx.tcx.is_diagnostic_item(assert, id)) + { + expn = parent; + } + } + + if n_arguments > 0 && fmt_parser.errors.is_empty() { + let arg_spans: Vec<_> = match &fmt_parser.arg_places[..] { + [] => vec![fmt_span], + v => v.iter().map(|span| fmt_span.from_inner(*span)).collect(), + }; + cx.struct_span_lint(PANIC_FMT, arg_spans, |lint| { + let mut l = lint.build(match n_arguments { + 1 => "panic message contains an unused formatting placeholder", + _ => "panic message contains unused formatting placeholders", + }); + l.note("this message is not used as a format string when given without arguments, but will be in a future Rust edition"); + if expn.call_site.contains(arg.span) { + l.span_suggestion( + arg.span.shrink_to_hi(), + &format!("add the missing argument{}", pluralize!(n_arguments)), + ", ...".into(), + Applicability::HasPlaceholders, + ); + l.span_suggestion( + arg.span.shrink_to_lo(), + "or add a \"{}\" format string to use the message literally", + "\"{}\", ".into(), + Applicability::MachineApplicable, + ); + } + l.emit(); + }); + } else { + let brace_spans: Option> = snippet + .filter(|s| s.starts_with('"') || s.starts_with("r#")) + .map(|s| { + s.char_indices() + .filter(|&(_, c)| c == '{' || c == '}') + .map(|(i, _)| { + fmt_span.from_inner(InnerSpan { start: i, end: i + 1 }) + }) + .collect() + }); + let msg = match &brace_spans { + Some(v) if v.len() == 1 => "panic message contains a brace", + _ => "panic message contains braces", + }; + cx.struct_span_lint(PANIC_FMT, brace_spans.unwrap_or(vec![expn.call_site]), |lint| { + let mut l = lint.build(msg); + l.note("this message is not used as a format string, but will be in a future Rust edition"); + if expn.call_site.contains(arg.span) { + l.span_suggestion( + arg.span.shrink_to_lo(), + "add a \"{}\" format string to use the message literally", + "\"{}\", ".into(), + Applicability::MachineApplicable, + ); + } + l.emit(); + }); + } + } + } + } + } +} diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs index 467a3a42590..38c71e6e925 100644 --- a/compiler/rustc_lint/src/types.rs +++ b/compiler/rustc_lint/src/types.rs @@ -880,7 +880,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { return FfiSafe; } - match ty.kind() { + match *ty.kind() { ty::Adt(def, _) if def.is_box() && matches!(self.mode, CItemKind::Definition) => { FfiSafe } @@ -1044,7 +1044,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { }; } - let sig = tcx.erase_late_bound_regions(&sig); + let sig = tcx.erase_late_bound_regions(sig); if !sig.output().is_unit() { let r = self.check_type_for_ffi(cache, sig.output()); match r { @@ -1131,16 +1131,14 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { fn check_for_opaque_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool { struct ProhibitOpaqueTypes<'a, 'tcx> { cx: &'a LateContext<'tcx>, - ty: Option>, }; impl<'a, 'tcx> ty::fold::TypeVisitor<'tcx> for ProhibitOpaqueTypes<'a, 'tcx> { - fn 
visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> { + type BreakTy = Ty<'tcx>; + + fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow { match ty.kind() { - ty::Opaque(..) => { - self.ty = Some(ty); - ControlFlow::BREAK - } + ty::Opaque(..) => ControlFlow::Break(ty), // Consider opaque types within projections FFI-safe if they do not normalize // to more opaque types. ty::Projection(..) => { @@ -1159,9 +1157,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { } } - let mut visitor = ProhibitOpaqueTypes { cx: self.cx, ty: None }; - ty.visit_with(&mut visitor); - if let Some(ty) = visitor.ty { + if let Some(ty) = ty.visit_with(&mut ProhibitOpaqueTypes { cx: self.cx }).break_value() { self.emit_ffi_unsafe_type_lint(ty, sp, "opaque types have no C equivalent", None); true } else { @@ -1218,7 +1214,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { fn check_foreign_fn(&mut self, id: hir::HirId, decl: &hir::FnDecl<'_>) { let def_id = self.cx.tcx.hir().local_def_id(id); let sig = self.cx.tcx.fn_sig(def_id); - let sig = self.cx.tcx.erase_late_bound_regions(&sig); + let sig = self.cx.tcx.erase_late_bound_regions(sig); for (input_ty, input_hir) in sig.inputs().iter().zip(decl.inputs) { self.check_type_for_ffi_and_report_errors(input_hir.span, input_ty, false, false); @@ -1295,7 +1291,7 @@ impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences { if let hir::ItemKind::Enum(ref enum_definition, _) = it.kind { let item_def_id = cx.tcx.hir().local_def_id(it.hir_id); let t = cx.tcx.type_of(item_def_id); - let ty = cx.tcx.erase_regions(&t); + let ty = cx.tcx.erase_regions(t); let layout = match cx.layout_of(ty) { Ok(layout) => layout, Err( diff --git a/compiler/rustc_lint/src/unused.rs b/compiler/rustc_lint/src/unused.rs index 4bbc180b226..2a5ad5e6c98 100644 --- a/compiler/rustc_lint/src/unused.rs +++ b/compiler/rustc_lint/src/unused.rs @@ -1152,7 +1152,7 @@ declare_lint! { /// ```rust /// #![feature(box_syntax)] /// fn main() { - /// let a = (box [1,2,3]).len(); + /// let a = (box [1, 2, 3]).len(); /// } /// ``` /// diff --git a/compiler/rustc_lint_defs/src/builtin.rs b/compiler/rustc_lint_defs/src/builtin.rs index 1d0d6980b7a..fa82dce0ae2 100644 --- a/compiler/rustc_lint_defs/src/builtin.rs +++ b/compiler/rustc_lint_defs/src/builtin.rs @@ -1719,7 +1719,7 @@ declare_lint! 
{ /// /// impl MyIterator for T where T: Iterator { } /// - /// let x = vec![1,2,3]; + /// let x = vec![1, 2, 3]; /// let _ = x.iter().is_sorted(); /// ``` /// diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp index 71ca4f23bbb..01d76bb3e94 100644 --- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp @@ -16,9 +16,7 @@ #include "llvm/Object/ObjectFile.h" #include "llvm/Object/IRObjectFile.h" #include "llvm/Passes/PassBuilder.h" -#if LLVM_VERSION_GE(9, 0) #include "llvm/Passes/StandardInstrumentations.h" -#endif #include "llvm/Support/CBindingWrapping.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/Host.h" @@ -31,15 +29,11 @@ #include "llvm-c/Transforms/PassManagerBuilder.h" #include "llvm/Transforms/Instrumentation.h" -#if LLVM_VERSION_GE(9, 0) #include "llvm/Transforms/Instrumentation/AddressSanitizer.h" #include "llvm/Support/TimeProfiler.h" -#endif #include "llvm/Transforms/Instrumentation/ThreadSanitizer.h" #include "llvm/Transforms/Instrumentation/MemorySanitizer.h" -#if LLVM_VERSION_GE(9, 0) #include "llvm/Transforms/Utils/CanonicalizeAliases.h" -#endif #include "llvm/Transforms/Utils/NameAnonGlobals.h" using namespace llvm; @@ -73,20 +67,18 @@ extern "C" void LLVMTimeTraceProfilerInitialize() { timeTraceProfilerInitialize( /* TimeTraceGranularity */ 0, /* ProcName */ "rustc"); -#elif LLVM_VERSION_GE(9, 0) +#else timeTraceProfilerInitialize(); #endif } extern "C" void LLVMTimeTraceProfilerFinish(const char* FileName) { -#if LLVM_VERSION_GE(9, 0) StringRef FN(FileName); std::error_code EC; raw_fd_ostream OS(FN, EC, sys::fs::CD_CreateAlways); timeTraceProfilerWrite(OS); timeTraceProfilerCleanup(); -#endif } enum class LLVMRustPassKind { @@ -127,22 +119,14 @@ extern "C" LLVMPassRef LLVMRustCreateAddressSanitizerFunctionPass(bool Recover) extern "C" LLVMPassRef LLVMRustCreateModuleAddressSanitizerPass(bool Recover) { const bool CompileKernel = false; -#if LLVM_VERSION_GE(9, 0) return wrap(createModuleAddressSanitizerLegacyPassPass(CompileKernel, Recover)); -#else - return wrap(createAddressSanitizerModulePass(CompileKernel, Recover)); -#endif } extern "C" LLVMPassRef LLVMRustCreateMemorySanitizerPass(int TrackOrigins, bool Recover) { -#if LLVM_VERSION_GE(9, 0) const bool CompileKernel = false; return wrap(createMemorySanitizerLegacyPassPass( MemorySanitizerOptions{TrackOrigins, Recover, CompileKernel})); -#else - return wrap(createMemorySanitizerLegacyPassPass(TrackOrigins, Recover)); -#endif } extern "C" LLVMPassRef LLVMRustCreateThreadSanitizerPass() { @@ -657,8 +641,6 @@ extern "C" typedef void (*LLVMRustSelfProfileBeforePassCallback)(void*, // LlvmS const char*); // IR name extern "C" typedef void (*LLVMRustSelfProfileAfterPassCallback)(void*); // LlvmSelfProfiler -#if LLVM_VERSION_GE(9, 0) - std::string LLVMRustwrappedIrGetName(const llvm::Any &WrappedIr) { if (any_isa(WrappedIr)) return any_cast(WrappedIr)->getName().str(); @@ -706,7 +688,6 @@ void LLVMSelfProfileInitializeCallbacks( AfterPassCallback(LlvmSelfProfiler); }); } -#endif enum class LLVMRustOptStage { PreLinkNoLTO, @@ -739,7 +720,6 @@ LLVMRustOptimizeWithNewPassManager( void* LlvmSelfProfiler, LLVMRustSelfProfileBeforePassCallback BeforePassCallback, LLVMRustSelfProfileAfterPassCallback AfterPassCallback) { -#if LLVM_VERSION_GE(9, 0) Module *TheModule = unwrap(ModuleRef); TargetMachine *TM = unwrap(TMRef); PassBuilder::OptimizationLevel OptLevel = fromRust(OptLevelRust); @@ -970,11 +950,6 
@@ LLVMRustOptimizeWithNewPassManager( UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we remove MPM.run(*TheModule, MAM); -#else - // The new pass manager has been available for a long time, - // but we don't bother supporting it on old LLVM versions. - report_fatal_error("New pass manager only supported since LLVM 9"); -#endif } // Callback to demangle function name @@ -1325,12 +1300,9 @@ LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules, GlobalValue::LinkageTypes NewLinkage) { Ret->ResolvedODR[ModuleIdentifier][GUID] = NewLinkage; }; -#if LLVM_VERSION_GE(9, 0) + thinLTOResolvePrevailingInIndex(Ret->Index, isPrevailing, recordNewLinkage, Ret->GUIDPreservedSymbols); -#else - thinLTOResolvePrevailingInIndex(Ret->Index, isPrevailing, recordNewLinkage); -#endif // Here we calculate an `ExportedGUIDs` set for use in the `isExported` // callback below. This callback below will dictate the linkage for all diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp index e957e46499a..16057adcdf6 100644 --- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp @@ -125,9 +125,7 @@ extern "C" LLVMValueRef LLVMRustGetOrInsertFunction(LLVMModuleRef M, return wrap(unwrap(M) ->getOrInsertFunction(StringRef(Name, NameLen), unwrap(FunctionTy)) -#if LLVM_VERSION_GE(9, 0) .getCallee() -#endif ); } @@ -252,11 +250,7 @@ extern "C" void LLVMRustAddDereferenceableOrNullCallSiteAttr(LLVMValueRef Instr, extern "C" void LLVMRustAddByValCallSiteAttr(LLVMValueRef Instr, unsigned Index, LLVMTypeRef Ty) { CallBase *Call = unwrap(Instr); -#if LLVM_VERSION_GE(9, 0) Attribute Attr = Attribute::getWithByValType(Call->getContext(), unwrap(Ty)); -#else - Attribute Attr = Attribute::get(Call->getContext(), Attribute::ByVal); -#endif Call->addAttribute(Index, Attr); } @@ -297,11 +291,7 @@ extern "C" void LLVMRustAddDereferenceableOrNullAttr(LLVMValueRef Fn, extern "C" void LLVMRustAddByValAttr(LLVMValueRef Fn, unsigned Index, LLVMTypeRef Ty) { Function *F = unwrap(Fn); -#if LLVM_VERSION_GE(9, 0) Attribute Attr = Attribute::getWithByValType(F->getContext(), unwrap(Ty)); -#else - Attribute Attr = Attribute::get(F->getContext(), Attribute::ByVal); -#endif F->addAttribute(Index, Attr); } @@ -617,11 +607,9 @@ static DISubprogram::DISPFlags fromRust(LLVMRustDISPFlags SPFlags) { if (isSet(SPFlags & LLVMRustDISPFlags::SPFlagOptimized)) { Result |= DISubprogram::DISPFlags::SPFlagOptimized; } -#if LLVM_VERSION_GE(9, 0) if (isSet(SPFlags & LLVMRustDISPFlags::SPFlagMainSubprogram)) { Result |= DISubprogram::DISPFlags::SPFlagMainSubprogram; } -#endif return Result; } @@ -745,10 +733,6 @@ extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateFunction( DITemplateParameterArray(unwrap(TParam)); DISubprogram::DISPFlags llvmSPFlags = fromRust(SPFlags); DINode::DIFlags llvmFlags = fromRust(Flags); -#if LLVM_VERSION_LT(9, 0) - if (isSet(SPFlags & LLVMRustDISPFlags::SPFlagMainSubprogram)) - llvmFlags |= DINode::DIFlags::FlagMainSubprogram; -#endif DISubprogram *Sub = Builder->createFunction( unwrapDI(Scope), StringRef(Name, NameLen), diff --git a/compiler/rustc_macros/src/type_foldable.rs b/compiler/rustc_macros/src/type_foldable.rs index 8fa6e6a7101..082af087bf4 100644 --- a/compiler/rustc_macros/src/type_foldable.rs +++ b/compiler/rustc_macros/src/type_foldable.rs @@ -6,6 +6,12 @@ pub fn type_foldable_derive(mut s: synstructure::Structure<'_>) -> proc_macro2:: } s.add_bounds(synstructure::AddBounds::Generics); + let 
body_visit = s.each(|bind| { + quote! { + ::rustc_middle::ty::fold::TypeFoldable::visit_with(#bind, __folder)?; + } + }); + s.bind_with(|_| synstructure::BindStyle::Move); let body_fold = s.each_variant(|vi| { let bindings = vi.bindings(); vi.construct(|_, index| { @@ -16,26 +22,20 @@ pub fn type_foldable_derive(mut s: synstructure::Structure<'_>) -> proc_macro2:: }) }); - let body_visit = s.each(|bind| { - quote! { - ::rustc_middle::ty::fold::TypeFoldable::visit_with(#bind, __folder)?; - } - }); - s.bound_impl( quote!(::rustc_middle::ty::fold::TypeFoldable<'tcx>), quote! { fn super_fold_with<__F: ::rustc_middle::ty::fold::TypeFolder<'tcx>>( - &self, + self, __folder: &mut __F ) -> Self { - match *self { #body_fold } + match self { #body_fold } } fn super_visit_with<__F: ::rustc_middle::ty::fold::TypeVisitor<'tcx>>( &self, __folder: &mut __F - ) -> ::std::ops::ControlFlow<()> { + ) -> ::std::ops::ControlFlow<__F::BreakTy> { match *self { #body_visit } ::std::ops::ControlFlow::CONTINUE } diff --git a/compiler/rustc_metadata/src/dependency_format.rs b/compiler/rustc_metadata/src/dependency_format.rs index 44f57cfbe28..c3afc9f048c 100644 --- a/compiler/rustc_metadata/src/dependency_format.rs +++ b/compiler/rustc_metadata/src/dependency_format.rs @@ -127,7 +127,7 @@ fn calculate_type(tcx: TyCtxt<'_>, ty: CrateType) -> DependencyList { if ty == CrateType::Staticlib || (ty == CrateType::Executable && sess.crt_static(Some(ty)) - && !sess.target.options.crt_static_allows_dylibs) + && !sess.target.crt_static_allows_dylibs) { for &cnum in tcx.crates().iter() { if tcx.dep_kind(cnum).macros_only() { diff --git a/compiler/rustc_metadata/src/lib.rs b/compiler/rustc_metadata/src/lib.rs index 77766be7397..2560cfa7462 100644 --- a/compiler/rustc_metadata/src/lib.rs +++ b/compiler/rustc_metadata/src/lib.rs @@ -1,5 +1,4 @@ #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] -#![feature(bool_to_option)] #![feature(core_intrinsics)] #![feature(crate_visibility_modifier)] #![feature(drain_filter)] diff --git a/compiler/rustc_metadata/src/locator.rs b/compiler/rustc_metadata/src/locator.rs index d16985b9c2b..c4c025de8b3 100644 --- a/compiler/rustc_metadata/src/locator.rs +++ b/compiler/rustc_metadata/src/locator.rs @@ -373,11 +373,10 @@ impl<'a> CrateLocator<'a> { seen_paths: &mut FxHashSet, ) -> Result, CrateError> { // want: crate_name.dir_part() + prefix + crate_name.file_part + "-" - let dylib_prefix = - format!("{}{}{}", self.target.options.dll_prefix, self.crate_name, extra_prefix); + let dylib_prefix = format!("{}{}{}", self.target.dll_prefix, self.crate_name, extra_prefix); let rlib_prefix = format!("lib{}{}", self.crate_name, extra_prefix); let staticlib_prefix = - format!("{}{}{}", self.target.options.staticlib_prefix, self.crate_name, extra_prefix); + format!("{}{}{}", self.target.staticlib_prefix, self.crate_name, extra_prefix); let mut candidates: FxHashMap<_, (FxHashMap<_, _>, FxHashMap<_, _>, FxHashMap<_, _>)> = Default::default(); @@ -405,17 +404,14 @@ impl<'a> CrateLocator<'a> { (&file[(rlib_prefix.len())..(file.len() - ".rlib".len())], CrateFlavor::Rlib) } else if file.starts_with(&rlib_prefix) && file.ends_with(".rmeta") { (&file[(rlib_prefix.len())..(file.len() - ".rmeta".len())], CrateFlavor::Rmeta) - } else if file.starts_with(&dylib_prefix) - && file.ends_with(&self.target.options.dll_suffix) - { + } else if file.starts_with(&dylib_prefix) && file.ends_with(&self.target.dll_suffix) { ( - &file - [(dylib_prefix.len())..(file.len() - 
self.target.options.dll_suffix.len())], + &file[(dylib_prefix.len())..(file.len() - self.target.dll_suffix.len())], CrateFlavor::Dylib, ) } else { if file.starts_with(&staticlib_prefix) - && file.ends_with(&self.target.options.staticlib_suffix) + && file.ends_with(&self.target.staticlib_suffix) { staticlibs .push(CrateMismatch { path: spf.path.clone(), got: "static".to_string() }); @@ -679,8 +675,8 @@ impl<'a> CrateLocator<'a> { }; if file.starts_with("lib") && (file.ends_with(".rlib") || file.ends_with(".rmeta")) - || file.starts_with(&self.target.options.dll_prefix) - && file.ends_with(&self.target.options.dll_suffix) + || file.starts_with(&self.target.dll_prefix) + && file.ends_with(&self.target.dll_suffix) { // Make sure there's at most one rlib and at most one dylib. // Note to take care and match against the non-canonicalized name: @@ -712,8 +708,8 @@ impl<'a> CrateLocator<'a> { crate_name: self.crate_name, root: self.root.cloned(), triple: self.triple, - dll_prefix: self.target.options.dll_prefix.clone(), - dll_suffix: self.target.options.dll_suffix.clone(), + dll_prefix: self.target.dll_prefix.clone(), + dll_suffix: self.target.dll_suffix.clone(), rejected_via_hash: self.rejected_via_hash, rejected_via_triple: self.rejected_via_triple, rejected_via_kind: self.rejected_via_kind, diff --git a/compiler/rustc_metadata/src/native_libs.rs b/compiler/rustc_metadata/src/native_libs.rs index 5e65f075ea4..2f7c2c2c405 100644 --- a/compiler/rustc_metadata/src/native_libs.rs +++ b/compiler/rustc_metadata/src/native_libs.rs @@ -149,7 +149,7 @@ impl Collector<'tcx> { } return; } - let is_osx = self.tcx.sess.target.options.is_like_osx; + let is_osx = self.tcx.sess.target.is_like_osx; if lib.kind == NativeLibKind::Framework && !is_osx { let msg = "native frameworks are only available on macOS targets"; match span { diff --git a/compiler/rustc_metadata/src/rmeta/decoder.rs b/compiler/rustc_metadata/src/rmeta/decoder.rs index c031e0e2e19..19340dd51de 100644 --- a/compiler/rustc_metadata/src/rmeta/decoder.rs +++ b/compiler/rustc_metadata/src/rmeta/decoder.rs @@ -784,6 +784,7 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { } }; + let attrs: Vec<_> = self.get_item_attrs(id, sess).collect(); SyntaxExtension::new( sess, kind, @@ -791,7 +792,7 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { helper_attrs, self.root.edition, Symbol::intern(name), - &self.get_item_attrs(id, sess), + &attrs, ) } @@ -856,7 +857,7 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { .tables .children .get(self, index) - .unwrap_or(Lazy::empty()) + .unwrap_or_else(Lazy::empty) .decode(self) .map(|index| ty::FieldDef { did: self.local_def_id(index), @@ -888,7 +889,7 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { .tables .children .get(self, item_id) - .unwrap_or(Lazy::empty()) + .unwrap_or_else(Lazy::empty) .decode(self) .map(|index| self.get_variant(&self.kind(index), index, did, tcx.sess)) .collect() @@ -946,7 +947,12 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { } fn get_type(&self, id: DefIndex, tcx: TyCtxt<'tcx>) -> Ty<'tcx> { - self.root.tables.ty.get(self, id).unwrap().decode((self, tcx)) + self.root + .tables + .ty + .get(self, id) + .unwrap_or_else(|| panic!("Not a type: {:?}", id)) + .decode((self, tcx)) } fn get_stability(&self, id: DefIndex) -> Option { @@ -1075,7 +1081,7 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { // Iterate over all children. 
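The repeated switch from `unwrap_or(Lazy::empty())` to `unwrap_or_else(Lazy::empty)` in this decoder passes the constructor lazily, so the fallback is only built when the table entry is actually missing. A minimal, self-contained illustration of that difference (types and names here are made up, not compiler code):

```rust
fn build_fallback() -> Vec<u32> {
    println!("fallback constructed");
    Vec::new()
}

fn main() {
    let cached: Option<Vec<u32>> = Some(vec![1, 2, 3]);

    // `unwrap_or` evaluates its argument up front, even though it is discarded here.
    let _ = cached.clone().unwrap_or(build_fallback()); // prints "fallback constructed"

    // `unwrap_or_else` takes the constructor itself and only calls it on `None`.
    let _ = cached.unwrap_or_else(build_fallback); // prints nothing
}
```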
let macros_only = self.dep_kind.lock().macros_only(); - let children = self.root.tables.children.get(self, id).unwrap_or(Lazy::empty()); + let children = self.root.tables.children.get(self, id).unwrap_or_else(Lazy::empty); for child_index in children.decode((self, sess)) { if macros_only { continue; @@ -1098,7 +1104,7 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { .tables .children .get(self, child_index) - .unwrap_or(Lazy::empty()); + .unwrap_or_else(Lazy::empty); for child_index in child_children.decode((self, sess)) { let kind = self.def_kind(child_index); callback(Export { @@ -1157,7 +1163,8 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { // within the crate. We only need this for fictive constructors, // for other constructors correct visibilities // were already encoded in metadata. - let attrs = self.get_item_attrs(def_id.index, sess); + let attrs: Vec<_> = + self.get_item_attrs(def_id.index, sess).collect(); if sess.contains_name(&attrs, sym::non_exhaustive) { let crate_def_id = self.local_def_id(CRATE_DEF_INDEX); vis = ty::Visibility::Restricted(crate_def_id); @@ -1283,8 +1290,8 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { } } - fn get_item_variances(&self, id: DefIndex) -> Vec { - self.root.tables.variances.get(self, id).unwrap_or(Lazy::empty()).decode(self).collect() + fn get_item_variances(&'a self, id: DefIndex) -> impl Iterator + 'a { + self.root.tables.variances.get(self, id).unwrap_or_else(Lazy::empty).decode(self) } fn get_ctor_kind(&self, node_id: DefIndex) -> CtorKind { @@ -1308,7 +1315,11 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { } } - fn get_item_attrs(&self, node_id: DefIndex, sess: &Session) -> Vec { + fn get_item_attrs( + &'a self, + node_id: DefIndex, + sess: &'a Session, + ) -> impl Iterator + 'a { // The attributes for a tuple struct/variant are attached to the definition, not the ctor; // we assume that someone passing in a tuple struct ctor is actually wanting to // look at the definition @@ -1323,9 +1334,8 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { .tables .attributes .get(self, item_id) - .unwrap_or(Lazy::empty()) + .unwrap_or_else(Lazy::empty) .decode((self, sess)) - .collect::>() } fn get_struct_field_names(&self, id: DefIndex, sess: &Session) -> Vec> { @@ -1333,7 +1343,7 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { .tables .children .get(self, id) - .unwrap_or(Lazy::empty()) + .unwrap_or_else(Lazy::empty) .decode(self) .map(|index| respan(self.get_span(index, sess), self.item_ident(index, sess).name)) .collect() @@ -1349,7 +1359,7 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { .tables .inherent_impls .get(self, id) - .unwrap_or(Lazy::empty()) + .unwrap_or_else(Lazy::empty) .decode(self) .map(|index| self.local_def_id(index)), ) diff --git a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs index ddd85ab7aaa..85dc60d7eed 100644 --- a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs +++ b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs @@ -138,7 +138,7 @@ provide! 
{ <'tcx> tcx, def_id, other, cdata, cdata.get_deprecation(def_id.index).map(DeprecationEntry::external) } item_attrs => { tcx.arena.alloc_from_iter( - cdata.get_item_attrs(def_id.index, tcx.sess).into_iter() + cdata.get_item_attrs(def_id.index, tcx.sess) ) } fn_arg_names => { cdata.get_fn_param_names(tcx, def_id.index) } rendered_const => { cdata.get_rendered_const(def_id.index) } @@ -415,11 +415,7 @@ impl CStore { let span = data.get_span(id.index, sess); - // Mark the attrs as used - let attrs = data.get_item_attrs(id.index, sess); - for attr in attrs.iter() { - sess.mark_attr_used(attr); - } + let attrs = data.get_item_attrs(id.index, sess).collect(); let ident = data.item_ident(id.index, sess); @@ -428,7 +424,7 @@ impl CStore { ident, id: ast::DUMMY_NODE_ID, span, - attrs: attrs.to_vec(), + attrs, kind: ast::ItemKind::MacroDef(data.get_macro(id.index, sess)), vis: ast::Visibility { span: span.shrink_to_lo(), diff --git a/compiler/rustc_middle/Cargo.toml b/compiler/rustc_middle/Cargo.toml index 66532ea02f3..3250f1830de 100644 --- a/compiler/rustc_middle/Cargo.toml +++ b/compiler/rustc_middle/Cargo.toml @@ -26,7 +26,7 @@ rustc_index = { path = "../rustc_index" } rustc_serialize = { path = "../rustc_serialize" } rustc_ast = { path = "../rustc_ast" } rustc_span = { path = "../rustc_span" } -chalk-ir = "0.32.0" +chalk-ir = "0.36.0" smallvec = { version = "1.0", features = ["union", "may_dangle"] } measureme = "9.0.0" rustc_session = { path = "../rustc_session" } diff --git a/compiler/rustc_middle/src/arena.rs b/compiler/rustc_middle/src/arena.rs index f6570cc95d2..672073b1d34 100644 --- a/compiler/rustc_middle/src/arena.rs +++ b/compiler/rustc_middle/src/arena.rs @@ -14,10 +14,10 @@ macro_rules! arena_types { [] layouts: rustc_target::abi::Layout, // AdtDef are interned and compared by address [] adt_def: rustc_middle::ty::AdtDef, - [] steal_mir: rustc_middle::ty::steal::Steal>, + [] steal_mir: rustc_data_structures::steal::Steal>, [decode] mir: rustc_middle::mir::Body<$tcx>, [] steal_promoted: - rustc_middle::ty::steal::Steal< + rustc_data_structures::steal::Steal< rustc_index::vec::IndexVec< rustc_middle::mir::Promoted, rustc_middle::mir::Body<$tcx> diff --git a/compiler/rustc_middle/src/dep_graph/dep_node.rs b/compiler/rustc_middle/src/dep_graph/dep_node.rs index a61b9af9bac..38bc3b46b0f 100644 --- a/compiler/rustc_middle/src/dep_graph/dep_node.rs +++ b/compiler/rustc_middle/src/dep_graph/dep_node.rs @@ -193,6 +193,15 @@ macro_rules! define_dep_nodes { pub type DepNode = rustc_query_system::dep_graph::DepNode; + // We keep a lot of `DepNode`s in memory during compilation. It's not + // required that their size stay the same, but we don't want to change + // it inadvertently. This assert just ensures we're aware of any change. + #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + static_assert_size!(DepNode, 17); + + #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] + static_assert_size!(DepNode, 24); + pub trait DepNodeExt: Sized { /// Construct a DepNode from the given DepKind and DefPathHash. This /// method will assert that the given DepKind actually requires a @@ -227,7 +236,7 @@ macro_rules! define_dep_nodes { debug_assert!(kind.can_reconstruct_query_key() && kind.has_params()); DepNode { kind, - hash: def_path_hash.0, + hash: def_path_hash.0.into(), } } @@ -243,7 +252,7 @@ macro_rules! define_dep_nodes { /// has been removed. 
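The size assertion added above guards `DepNode` against accidental growth. A hedged sketch of the underlying array-length trick, using a made-up 17-byte stand-in type (the real `DepNode` layout differs):

```rust
use std::mem::size_of;

// Stand-in type; the real `DepNode` pairs a `DepKind` with a packed fingerprint.
struct ToyDepNode {
    kind: u8,
    hash: [u8; 16],
}

// The array type only checks out when the two lengths agree, so an accidental
// size change becomes a compile error rather than a silent regression.
const _: [(); 17] = [(); size_of::<ToyDepNode>()];

fn main() {
    println!("ToyDepNode is {} bytes", size_of::<ToyDepNode>());
}
```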
fn extract_def_id(&self, tcx: TyCtxt<'tcx>) -> Option { if self.kind.can_reconstruct_query_key() { - let def_path_hash = DefPathHash(self.hash); + let def_path_hash = DefPathHash(self.hash.into()); tcx.def_path_hash_to_def_id.as_ref()?.get(&def_path_hash).cloned() } else { None diff --git a/compiler/rustc_middle/src/hir/map/mod.rs b/compiler/rustc_middle/src/hir/map/mod.rs index d86e8987195..37ec3d3d1ca 100644 --- a/compiler/rustc_middle/src/hir/map/mod.rs +++ b/compiler/rustc_middle/src/hir/map/mod.rs @@ -806,25 +806,34 @@ impl<'hir> Map<'hir> { /// Given a node ID, gets a list of attributes associated with the AST /// corresponding to the node-ID. pub fn attrs(&self, id: HirId) -> &'hir [ast::Attribute] { - let attrs = match self.find_entry(id).map(|entry| entry.node) { - Some(Node::Param(a)) => Some(&a.attrs[..]), - Some(Node::Local(l)) => Some(&l.attrs[..]), - Some(Node::Item(i)) => Some(&i.attrs[..]), - Some(Node::ForeignItem(fi)) => Some(&fi.attrs[..]), - Some(Node::TraitItem(ref ti)) => Some(&ti.attrs[..]), - Some(Node::ImplItem(ref ii)) => Some(&ii.attrs[..]), - Some(Node::Variant(ref v)) => Some(&v.attrs[..]), - Some(Node::Field(ref f)) => Some(&f.attrs[..]), - Some(Node::Expr(ref e)) => Some(&*e.attrs), - Some(Node::Stmt(ref s)) => Some(s.kind.attrs(|id| self.item(id.id))), - Some(Node::Arm(ref a)) => Some(&*a.attrs), - Some(Node::GenericParam(param)) => Some(¶m.attrs[..]), + let attrs = self.find_entry(id).map(|entry| match entry.node { + Node::Param(a) => &a.attrs[..], + Node::Local(l) => &l.attrs[..], + Node::Item(i) => &i.attrs[..], + Node::ForeignItem(fi) => &fi.attrs[..], + Node::TraitItem(ref ti) => &ti.attrs[..], + Node::ImplItem(ref ii) => &ii.attrs[..], + Node::Variant(ref v) => &v.attrs[..], + Node::Field(ref f) => &f.attrs[..], + Node::Expr(ref e) => &*e.attrs, + Node::Stmt(ref s) => s.kind.attrs(|id| self.item(id.id)), + Node::Arm(ref a) => &*a.attrs, + Node::GenericParam(param) => ¶m.attrs[..], // Unit/tuple structs/variants take the attributes straight from // the struct/variant definition. - Some(Node::Ctor(..)) => return self.attrs(self.get_parent_item(id)), - Some(Node::Crate(item)) => Some(&item.attrs[..]), - _ => None, - }; + Node::Ctor(..) => self.attrs(self.get_parent_item(id)), + Node::Crate(item) => &item.attrs[..], + Node::MacroDef(def) => def.attrs, + Node::AnonConst(..) + | Node::PathSegment(..) + | Node::Ty(..) + | Node::Pat(..) + | Node::Binding(..) + | Node::TraitRef(..) + | Node::Block(..) + | Node::Lifetime(..) + | Node::Visibility(..) 
=> &[], + }); attrs.unwrap_or(&[]) } diff --git a/compiler/rustc_middle/src/ich/impls_syntax.rs b/compiler/rustc_middle/src/ich/impls_syntax.rs index cab2ca2919f..bfbe15749ee 100644 --- a/compiler/rustc_middle/src/ich/impls_syntax.rs +++ b/compiler/rustc_middle/src/ich/impls_syntax.rs @@ -40,8 +40,8 @@ impl<'ctx> rustc_ast::HashStableContext for StableHashingContext<'ctx> { debug_assert!(!attr.ident().map_or(false, |ident| self.is_ignored_attr(ident.name))); debug_assert!(!attr.is_doc_comment()); - let ast::Attribute { kind, id: _, style, span, tokens } = attr; - if let ast::AttrKind::Normal(item) = kind { + let ast::Attribute { kind, id: _, style, span } = attr; + if let ast::AttrKind::Normal(item, tokens) = kind { item.hash_stable(self, hasher); style.hash_stable(self, hasher); span.hash_stable(self, hasher); diff --git a/compiler/rustc_middle/src/ich/impls_ty.rs b/compiler/rustc_middle/src/ich/impls_ty.rs index 8f15c99f951..69bb4e23c4c 100644 --- a/compiler/rustc_middle/src/ich/impls_ty.rs +++ b/compiler/rustc_middle/src/ich/impls_ty.rs @@ -184,15 +184,6 @@ impl<'a> HashStable> for ty::FloatVid { } } -impl<'a, T> HashStable> for ty::steal::Steal -where - T: HashStable>, -{ - fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { - self.borrow().hash_stable(hcx, hasher); - } -} - impl<'a> HashStable> for crate::middle::privacy::AccessLevels { fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { diff --git a/compiler/rustc_middle/src/infer/canonical.rs b/compiler/rustc_middle/src/infer/canonical.rs index 1e15ae49a0c..6e5f95c4527 100644 --- a/compiler/rustc_middle/src/infer/canonical.rs +++ b/compiler/rustc_middle/src/infer/canonical.rs @@ -40,7 +40,7 @@ pub struct Canonical<'tcx, V> { pub value: V, } -pub type CanonicalVarInfos<'tcx> = &'tcx List; +pub type CanonicalVarInfos<'tcx> = &'tcx List>; /// A set of values corresponding to the canonical variables from some /// `Canonical`. You can give these values to @@ -88,11 +88,11 @@ impl Default for OriginalQueryValues<'tcx> { /// a copy of the canonical value in some other inference context, /// with fresh inference variables replacing the canonical values. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)] -pub struct CanonicalVarInfo { - pub kind: CanonicalVarKind, +pub struct CanonicalVarInfo<'tcx> { + pub kind: CanonicalVarKind<'tcx>, } -impl CanonicalVarInfo { +impl<'tcx> CanonicalVarInfo<'tcx> { pub fn universe(&self) -> ty::UniverseIndex { self.kind.universe() } @@ -113,7 +113,7 @@ impl CanonicalVarInfo { /// in the type-theory sense of the term -- i.e., a "meta" type system /// that analyzes type-like values. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)] -pub enum CanonicalVarKind { +pub enum CanonicalVarKind<'tcx> { /// Some kind of type inference variable. Ty(CanonicalTyVarKind), @@ -132,10 +132,10 @@ pub enum CanonicalVarKind { Const(ty::UniverseIndex), /// A "placeholder" that represents "any const". - PlaceholderConst(ty::PlaceholderConst), + PlaceholderConst(ty::PlaceholderConst<'tcx>), } -impl CanonicalVarKind { +impl<'tcx> CanonicalVarKind<'tcx> { pub fn universe(self) -> ty::UniverseIndex { match self { CanonicalVarKind::Ty(kind) => match kind { @@ -286,13 +286,15 @@ impl<'tcx, V> Canonical<'tcx, V> { pub type QueryOutlivesConstraint<'tcx> = ty::Binder, Region<'tcx>>>; -CloneTypeFoldableAndLiftImpls! 
{ - crate::infer::canonical::Certainty, - crate::infer::canonical::CanonicalVarInfo, - crate::infer::canonical::CanonicalVarKind, +TrivialTypeFoldableAndLiftImpls! { + for <'tcx> { + crate::infer::canonical::Certainty, + crate::infer::canonical::CanonicalVarInfo<'tcx>, + crate::infer::canonical::CanonicalVarKind<'tcx>, + } } -CloneTypeFoldableImpls! { +TrivialTypeFoldableImpls! { for <'tcx> { crate::infer::canonical::CanonicalVarInfos<'tcx>, } diff --git a/compiler/rustc_middle/src/lib.rs b/compiler/rustc_middle/src/lib.rs index 4a1d5459d1e..cdc5940d9ba 100644 --- a/compiler/rustc_middle/src/lib.rs +++ b/compiler/rustc_middle/src/lib.rs @@ -51,6 +51,7 @@ #![feature(half_open_range_patterns)] #![feature(exclusive_range_pattern)] #![feature(control_flow_enum)] +#![feature(associated_type_defaults)] #![recursion_limit = "512"] #[macro_use] diff --git a/compiler/rustc_middle/src/macros.rs b/compiler/rustc_middle/src/macros.rs index 921086366be..c0f2a76c19d 100644 --- a/compiler/rustc_middle/src/macros.rs +++ b/compiler/rustc_middle/src/macros.rs @@ -48,21 +48,21 @@ macro_rules! CloneLiftImpls { /// Used for types that are `Copy` and which **do not care arena /// allocated data** (i.e., don't need to be folded). #[macro_export] -macro_rules! CloneTypeFoldableImpls { +macro_rules! TrivialTypeFoldableImpls { (for <$tcx:lifetime> { $($ty:ty,)+ }) => { $( impl<$tcx> $crate::ty::fold::TypeFoldable<$tcx> for $ty { fn super_fold_with>( - &self, + self, _: &mut F ) -> $ty { - Clone::clone(self) + self } fn super_visit_with>( &self, _: &mut F) - -> ::std::ops::ControlFlow<()> + -> ::std::ops::ControlFlow { ::std::ops::ControlFlow::CONTINUE } @@ -71,7 +71,7 @@ macro_rules! CloneTypeFoldableImpls { }; ($($ty:ty,)+) => { - CloneTypeFoldableImpls! { + TrivialTypeFoldableImpls! { for <'tcx> { $($ty,)+ } @@ -80,9 +80,9 @@ macro_rules! CloneTypeFoldableImpls { } #[macro_export] -macro_rules! CloneTypeFoldableAndLiftImpls { +macro_rules! TrivialTypeFoldableAndLiftImpls { ($($t:tt)*) => { - CloneTypeFoldableImpls! { $($t)* } + TrivialTypeFoldableImpls! { $($t)* } CloneLiftImpls! { $($t)* } } } @@ -96,7 +96,7 @@ macro_rules! EnumTypeFoldableImpl { $(where $($wc)*)* { fn super_fold_with>( - &self, + self, folder: &mut V, ) -> Self { EnumTypeFoldableImpl!(@FoldVariants(self, folder) input($($variants)*) output()) @@ -105,7 +105,7 @@ macro_rules! 
EnumTypeFoldableImpl { fn super_visit_with>( &self, visitor: &mut V, - ) -> ::std::ops::ControlFlow<()> { + ) -> ::std::ops::ControlFlow { EnumTypeFoldableImpl!(@VisitVariants(self, visitor) input($($variants)*) output()) } } diff --git a/compiler/rustc_middle/src/middle/stability.rs b/compiler/rustc_middle/src/middle/stability.rs index 978f08927c6..47c140e0b18 100644 --- a/compiler/rustc_middle/src/middle/stability.rs +++ b/compiler/rustc_middle/src/middle/stability.rs @@ -4,7 +4,7 @@ pub use self::StabilityLevel::*; use crate::ty::{self, TyCtxt}; -use rustc_ast::CRATE_NODE_ID; +use rustc_ast::NodeId; use rustc_attr::{self as attr, ConstStability, Deprecation, Stability}; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_errors::{Applicability, DiagnosticBuilder}; @@ -211,13 +211,14 @@ pub fn early_report_deprecation( suggestion: Option, lint: &'static Lint, span: Span, + node_id: NodeId, ) { if span.in_derive_expansion() { return; } let diag = BuiltinLintDiagnostics::DeprecatedMacro(suggestion, span); - lint_buffer.buffer_lint_with_diagnostic(lint, CRATE_NODE_ID, span, message, diag); + lint_buffer.buffer_lint_with_diagnostic(lint, node_id, span, message, diag); } fn late_report_deprecation( diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs index e35ff6b996e..397d2ffd565 100644 --- a/compiler/rustc_middle/src/mir/interpret/error.rs +++ b/compiler/rustc_middle/src/mir/interpret/error.rs @@ -29,7 +29,7 @@ impl From for ErrorHandled { } } -CloneTypeFoldableAndLiftImpls! { +TrivialTypeFoldableAndLiftImpls! { ErrorHandled, } diff --git a/compiler/rustc_middle/src/mir/interpret/queries.rs b/compiler/rustc_middle/src/mir/interpret/queries.rs index f366681bc75..0517ec5bb1a 100644 --- a/compiler/rustc_middle/src/mir/interpret/queries.rs +++ b/compiler/rustc_middle/src/mir/interpret/queries.rs @@ -67,7 +67,7 @@ impl<'tcx> TyCtxt<'tcx> { ) -> EvalToConstValueResult<'tcx> { // Const-eval shouldn't depend on lifetimes at all, so we can erase them, which should // improve caching of queries. - let inputs = self.erase_regions(¶m_env.and(cid)); + let inputs = self.erase_regions(param_env.and(cid)); if let Some(span) = span { self.at(span).eval_to_const_value_raw(inputs) } else { diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs index 5fe7b0f647d..9289d4708de 100644 --- a/compiler/rustc_middle/src/mir/mod.rs +++ b/compiler/rustc_middle/src/mir/mod.rs @@ -420,7 +420,9 @@ impl<'tcx> Body<'tcx> { /// Returns an iterator over all user-defined variables and compiler-generated temporaries (all /// locals that are neither arguments nor the return place). #[inline] - pub fn vars_and_temps_iter(&self) -> impl Iterator + ExactSizeIterator { + pub fn vars_and_temps_iter( + &self, + ) -> impl DoubleEndedIterator + ExactSizeIterator { let arg_count = self.arg_count; let local_count = self.local_decls.len(); (arg_count + 1..local_count).map(Local::new) @@ -742,7 +744,7 @@ pub enum ImplicitSelfKind { None, } -CloneTypeFoldableAndLiftImpls! { BindingForm<'tcx>, } +TrivialTypeFoldableAndLiftImpls! { BindingForm<'tcx>, } mod binding_form_impl { use crate::ich::StableHashingContext; @@ -2452,32 +2454,20 @@ impl UserTypeProjection { } } -CloneTypeFoldableAndLiftImpls! { ProjectionKind, } +TrivialTypeFoldableAndLiftImpls! 
{ ProjectionKind, } impl<'tcx> TypeFoldable<'tcx> for UserTypeProjection { - fn super_fold_with>(&self, folder: &mut F) -> Self { - use crate::mir::ProjectionElem::*; - - let base = self.base.fold_with(folder); - let projs: Vec<_> = self - .projs - .iter() - .map(|&elem| match elem { - Deref => Deref, - Field(f, ()) => Field(f, ()), - Index(()) => Index(()), - Downcast(symbol, variantidx) => Downcast(symbol, variantidx), - ConstantIndex { offset, min_length, from_end } => { - ConstantIndex { offset, min_length, from_end } - } - Subslice { from, to, from_end } => Subslice { from, to, from_end }, - }) - .collect(); - - UserTypeProjection { base, projs } + fn super_fold_with>(self, folder: &mut F) -> Self { + UserTypeProjection { + base: self.base.fold_with(folder), + projs: self.projs.fold_with(folder), + } } - fn super_visit_with>(&self, visitor: &mut Vs) -> ControlFlow<()> { + fn super_visit_with>( + &self, + visitor: &mut Vs, + ) -> ControlFlow { self.base.visit_with(visitor) // Note: there's nothing in `self.proj` to visit. } diff --git a/compiler/rustc_middle/src/mir/predecessors.rs b/compiler/rustc_middle/src/mir/predecessors.rs index a8b74883355..fd6bb76dc43 100644 --- a/compiler/rustc_middle/src/mir/predecessors.rs +++ b/compiler/rustc_middle/src/mir/predecessors.rs @@ -75,6 +75,6 @@ impl HashStable for PredecessorCache { } } -CloneTypeFoldableAndLiftImpls! { +TrivialTypeFoldableAndLiftImpls! { PredecessorCache, } diff --git a/compiler/rustc_middle/src/mir/query.rs b/compiler/rustc_middle/src/mir/query.rs index 6022194342d..db0056e482b 100644 --- a/compiler/rustc_middle/src/mir/query.rs +++ b/compiler/rustc_middle/src/mir/query.rs @@ -233,14 +233,15 @@ pub struct BorrowCheckResult<'tcx> { /// The result of the `mir_const_qualif` query. /// -/// Each field corresponds to an implementer of the `Qualif` trait in -/// `librustc_mir/transform/check_consts/qualifs.rs`. See that file for more information on each +/// Each field (except `error_occured`) corresponds to an implementer of the `Qualif` trait in +/// `rustc_mir/src/transform/check_consts/qualifs.rs`. See that file for more information on each /// `Qualif`. #[derive(Clone, Copy, Debug, Default, TyEncodable, TyDecodable, HashStable)] pub struct ConstQualifs { pub has_mut_interior: bool, pub needs_drop: bool, pub custom_eq: bool, + pub error_occured: Option, } /// After we borrow check a closure, we are left with various diff --git a/compiler/rustc_middle/src/mir/type_foldable.rs b/compiler/rustc_middle/src/mir/type_foldable.rs index 391bd8be7e4..da8e189ba9d 100644 --- a/compiler/rustc_middle/src/mir/type_foldable.rs +++ b/compiler/rustc_middle/src/mir/type_foldable.rs @@ -2,8 +2,9 @@ use super::*; use crate::ty; +use rustc_data_structures::functor::IdFunctor; -CloneTypeFoldableAndLiftImpls! { +TrivialTypeFoldableAndLiftImpls! { BlockTailInfo, MirPhase, SourceInfo, @@ -15,34 +16,33 @@ CloneTypeFoldableAndLiftImpls! 
{ } impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { + fn super_fold_with>(self, folder: &mut F) -> Self { use crate::mir::TerminatorKind::*; let kind = match self.kind { Goto { target } => Goto { target }, - SwitchInt { ref discr, switch_ty, ref targets } => SwitchInt { + SwitchInt { discr, switch_ty, targets } => SwitchInt { discr: discr.fold_with(folder), switch_ty: switch_ty.fold_with(folder), - targets: targets.clone(), + targets, }, - Drop { ref place, target, unwind } => { + Drop { place, target, unwind } => { Drop { place: place.fold_with(folder), target, unwind } } - DropAndReplace { ref place, ref value, target, unwind } => DropAndReplace { + DropAndReplace { place, value, target, unwind } => DropAndReplace { place: place.fold_with(folder), value: value.fold_with(folder), target, unwind, }, - Yield { ref value, resume, ref resume_arg, drop } => Yield { + Yield { value, resume, resume_arg, drop } => Yield { value: value.fold_with(folder), resume, resume_arg: resume_arg.fold_with(folder), drop, }, - Call { ref func, ref args, ref destination, cleanup, from_hir_call, fn_span } => { - let dest = - destination.as_ref().map(|&(ref loc, dest)| (loc.fold_with(folder), dest)); + Call { func, args, destination, cleanup, from_hir_call, fn_span } => { + let dest = destination.map(|(loc, dest)| (loc.fold_with(folder), dest)); Call { func: func.fold_with(folder), @@ -53,17 +53,17 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { fn_span, } } - Assert { ref cond, expected, ref msg, target, cleanup } => { + Assert { cond, expected, msg, target, cleanup } => { use AssertKind::*; let msg = match msg { BoundsCheck { len, index } => { BoundsCheck { len: len.fold_with(folder), index: index.fold_with(folder) } } - Overflow(op, l, r) => Overflow(*op, l.fold_with(folder), r.fold_with(folder)), + Overflow(op, l, r) => Overflow(op, l.fold_with(folder), r.fold_with(folder)), OverflowNeg(op) => OverflowNeg(op.fold_with(folder)), DivisionByZero(op) => DivisionByZero(op.fold_with(folder)), RemainderByZero(op) => RemainderByZero(op.fold_with(folder)), - ResumedAfterReturn(_) | ResumedAfterPanic(_) => msg.clone(), + ResumedAfterReturn(_) | ResumedAfterPanic(_) => msg, }; Assert { cond: cond.fold_with(folder), expected, msg, target, cleanup } } @@ -76,7 +76,7 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { FalseEdge { real_target, imaginary_target } } FalseUnwind { real_target, unwind } => FalseUnwind { real_target, unwind }, - InlineAsm { template, ref operands, options, line_spans, destination } => InlineAsm { + InlineAsm { template, operands, options, line_spans, destination } => InlineAsm { template, operands: operands.fold_with(folder), options, @@ -87,7 +87,7 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { Terminator { source_info: self.source_info, kind } } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { use crate::mir::TerminatorKind::*; match self.kind { @@ -109,24 +109,21 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { args.visit_with(visitor) } Assert { ref cond, ref msg, .. 
} => { - if cond.visit_with(visitor).is_break() { - use AssertKind::*; - match msg { - BoundsCheck { ref len, ref index } => { - len.visit_with(visitor)?; - index.visit_with(visitor) - } - Overflow(_, l, r) => { - l.visit_with(visitor)?; - r.visit_with(visitor) - } - OverflowNeg(op) | DivisionByZero(op) | RemainderByZero(op) => { - op.visit_with(visitor) - } - ResumedAfterReturn(_) | ResumedAfterPanic(_) => ControlFlow::CONTINUE, + cond.visit_with(visitor)?; + use AssertKind::*; + match msg { + BoundsCheck { ref len, ref index } => { + len.visit_with(visitor)?; + index.visit_with(visitor) + } + Overflow(_, l, r) => { + l.visit_with(visitor)?; + r.visit_with(visitor) } - } else { - ControlFlow::CONTINUE + OverflowNeg(op) | DivisionByZero(op) | RemainderByZero(op) => { + op.visit_with(visitor) + } + ResumedAfterReturn(_) | ResumedAfterPanic(_) => ControlFlow::CONTINUE, } } InlineAsm { ref operands, .. } => operands.visit_with(visitor), @@ -143,61 +140,56 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { } impl<'tcx> TypeFoldable<'tcx> for GeneratorKind { - fn super_fold_with>(&self, _: &mut F) -> Self { - *self + fn super_fold_with>(self, _: &mut F) -> Self { + self } - fn super_visit_with>(&self, _: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, _: &mut V) -> ControlFlow { ControlFlow::CONTINUE } } impl<'tcx> TypeFoldable<'tcx> for Place<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { + fn super_fold_with>(self, folder: &mut F) -> Self { Place { local: self.local.fold_with(folder), projection: self.projection.fold_with(folder) } } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.local.visit_with(visitor)?; self.projection.visit_with(visitor) } } impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - let v = self.iter().map(|t| t.fold_with(folder)).collect::>(); - folder.tcx().intern_place_elems(&v) + fn super_fold_with>(self, folder: &mut F) -> Self { + ty::util::fold_list(self, folder, |tcx, v| tcx.intern_place_elems(v)) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.iter().try_for_each(|t| t.visit_with(visitor)) } } impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { + fn super_fold_with>(self, folder: &mut F) -> Self { use crate::mir::Rvalue::*; - match *self { - Use(ref op) => Use(op.fold_with(folder)), - Repeat(ref op, len) => Repeat(op.fold_with(folder), len.fold_with(folder)), + match self { + Use(op) => Use(op.fold_with(folder)), + Repeat(op, len) => Repeat(op.fold_with(folder), len.fold_with(folder)), ThreadLocalRef(did) => ThreadLocalRef(did.fold_with(folder)), - Ref(region, bk, ref place) => { - Ref(region.fold_with(folder), bk, place.fold_with(folder)) - } - AddressOf(mutability, ref place) => AddressOf(mutability, place.fold_with(folder)), - Len(ref place) => Len(place.fold_with(folder)), - Cast(kind, ref op, ty) => Cast(kind, op.fold_with(folder), ty.fold_with(folder)), - BinaryOp(op, ref rhs, ref lhs) => { - BinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)) - } - CheckedBinaryOp(op, ref rhs, ref lhs) => { + Ref(region, bk, place) => Ref(region.fold_with(folder), bk, place.fold_with(folder)), + AddressOf(mutability, place) => AddressOf(mutability, place.fold_with(folder)), + Len(place) => Len(place.fold_with(folder)), + Cast(kind, op, ty) => Cast(kind, 
op.fold_with(folder), ty.fold_with(folder)), + BinaryOp(op, rhs, lhs) => BinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)), + CheckedBinaryOp(op, rhs, lhs) => { CheckedBinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)) } - UnaryOp(op, ref val) => UnaryOp(op, val.fold_with(folder)), - Discriminant(ref place) => Discriminant(place.fold_with(folder)), + UnaryOp(op, val) => UnaryOp(op, val.fold_with(folder)), + Discriminant(place) => Discriminant(place.fold_with(folder)), NullaryOp(op, ty) => NullaryOp(op, ty.fold_with(folder)), - Aggregate(ref kind, ref fields) => { - let kind = box match **kind { + Aggregate(kind, fields) => { + let kind = kind.map_id(|kind| match kind { AggregateKind::Array(ty) => AggregateKind::Array(ty.fold_with(folder)), AggregateKind::Tuple => AggregateKind::Tuple, AggregateKind::Adt(def, v, substs, user_ty, n) => AggregateKind::Adt( @@ -213,13 +205,13 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> { AggregateKind::Generator(id, substs, movablity) => { AggregateKind::Generator(id, substs.fold_with(folder), movablity) } - }; + }); Aggregate(kind, fields.fold_with(folder)) } } } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { use crate::mir::Rvalue::*; match *self { Use(ref op) => op.visit_with(visitor), @@ -266,15 +258,15 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> { } impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - match *self { - Operand::Copy(ref place) => Operand::Copy(place.fold_with(folder)), - Operand::Move(ref place) => Operand::Move(place.fold_with(folder)), - Operand::Constant(ref c) => Operand::Constant(c.fold_with(folder)), + fn super_fold_with>(self, folder: &mut F) -> Self { + match self { + Operand::Copy(place) => Operand::Copy(place.fold_with(folder)), + Operand::Move(place) => Operand::Move(place.fold_with(folder)), + Operand::Constant(c) => Operand::Constant(c.fold_with(folder)), } } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { match *self { Operand::Copy(ref place) | Operand::Move(ref place) => place.visit_with(visitor), Operand::Constant(ref c) => c.visit_with(visitor), @@ -283,10 +275,10 @@ impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> { } impl<'tcx> TypeFoldable<'tcx> for PlaceElem<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { + fn super_fold_with>(self, folder: &mut F) -> Self { use crate::mir::ProjectionElem::*; - match *self { + match self { Deref => Deref, Field(f, ty) => Field(f, ty.fold_with(folder)), Index(v) => Index(v.fold_with(folder)), @@ -298,7 +290,10 @@ impl<'tcx> TypeFoldable<'tcx> for PlaceElem<'tcx> { } } - fn super_visit_with>(&self, visitor: &mut Vs) -> ControlFlow<()> { + fn super_visit_with>( + &self, + visitor: &mut Vs, + ) -> ControlFlow { use crate::mir::ProjectionElem::*; match self { @@ -310,41 +305,41 @@ impl<'tcx> TypeFoldable<'tcx> for PlaceElem<'tcx> { } impl<'tcx> TypeFoldable<'tcx> for Field { - fn super_fold_with>(&self, _: &mut F) -> Self { - *self + fn super_fold_with>(self, _: &mut F) -> Self { + self } - fn super_visit_with>(&self, _: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, _: &mut V) -> ControlFlow { ControlFlow::CONTINUE } } impl<'tcx> TypeFoldable<'tcx> for GeneratorSavedLocal { - fn super_fold_with>(&self, _: &mut F) -> Self { - *self + fn super_fold_with>(self, _: &mut F) -> Self { + self } - fn super_visit_with>(&self, 
_: &mut V) -> ControlFlow<()> { + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> { ControlFlow::CONTINUE } } impl<'tcx, R: Idx, C: Idx> TypeFoldable<'tcx> for BitMatrix<R, C> { - fn super_fold_with<F: TypeFolder<'tcx>>(&self, _: &mut F) -> Self { - self.clone() + fn super_fold_with<F: TypeFolder<'tcx>>(self, _: &mut F) -> Self { + self } - fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<()> { + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> { ControlFlow::CONTINUE } } impl<'tcx> TypeFoldable<'tcx> for Constant<'tcx> { - fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { + fn super_fold_with<F: TypeFolder<'tcx>>(self, folder: &mut F) -> Self { Constant { span: self.span, user_ty: self.user_ty.fold_with(folder), literal: self.literal.fold_with(folder), } } - fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { self.literal.visit_with(visitor) } } diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs index d8d639ab734..638dd8ce970 100644 --- a/compiler/rustc_middle/src/mir/visit.rs +++ b/compiler/rustc_middle/src/mir/visit.rs @@ -1,70 +1,70 @@ +//! # The MIR Visitor +//! +//! ## Overview +//! +//! There are two visitors, one for immutable and one for mutable references, +//! but both are generated by the following macro. The code is written according +//! to the following conventions: +//! +//! - introduce a `visit_foo` and a `super_foo` method for every MIR type +//! - `visit_foo`, by default, calls `super_foo` +//! - `super_foo`, by default, destructures the `foo` and calls `visit_foo` +//! +//! This allows you, as a user, to override `visit_foo` for the types you +//! are interested in and, within that method, call +//! `self.super_foo` to get the default behavior. Just as in an OO +//! language, you should never call `super` methods ordinarily except +//! in that circumstance. +//! +//! For the most part, we do not destructure things external to the +//! MIR, e.g., types, spans, etc., but simply visit them and stop. This +//! avoids duplication with other visitors like `TypeFoldable`. +//! +//! ## Updating +//! +//! The code is written in a very deliberate style intended to minimize +//! the chance of things being overlooked. You'll notice that we always +//! use pattern matching to reference fields and we ensure that all +//! matches are exhaustive. +//! +//! For example, the `super_basic_block_data` method begins like this: +//! +//! ```rust +//! fn super_basic_block_data(&mut self, +//! block: BasicBlock, +//! data: & $($mutability)? BasicBlockData<'tcx>) { +//! let BasicBlockData { +//! statements, +//! terminator, +//! is_cleanup: _ +//! } = *data; +//! +//! for statement in statements { +//! self.visit_statement(block, statement); +//! } +//! +//! ... +//! } +//! ``` +//! +//! Here we used `let BasicBlockData { <fields> } = *data` deliberately, +//! rather than writing `data.statements` in the body. This is because if one +//! adds a new field to `BasicBlockData`, one will be forced to revise this code, +//! and hence one will (hopefully) invoke the correct visit methods (if any). +//! +//! For this to work, ALL MATCHES MUST BE EXHAUSTIVE IN FIELDS AND VARIANTS. +//! That means you never write `..` to skip over fields, nor do you write `_` +//! to skip over variants in a `match`. +//! +//! The only place that `_` is acceptable is to match a field (or +//! variant argument) that does not require visiting, as in +//! `is_cleanup` above.
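The module docs above lay out the `visit_foo`/`super_foo` convention and the exhaustive-destructuring rule. As a standalone illustration (not rustc code), the sketch below models the same convention on a toy IR; the `Block`, `Stmt`, `ToyVisitor`, and `CountAssignments` names are invented for this example.

```rust
// Toy IR, standing in for MIR types such as BasicBlockData and Statement.
struct Block {
    stmts: Vec<Stmt>,
    is_cleanup: bool,
}

enum Stmt {
    Assign(String, i64),
    Nop,
}

// The visitor follows the convention from the docs above: `visit_*`
// defaults to calling `super_*`, and `super_*` destructures exhaustively
// before recursing.
trait ToyVisitor {
    fn visit_block(&mut self, block: &Block) {
        self.super_block(block);
    }

    fn visit_stmt(&mut self, stmt: &Stmt) {
        self.super_stmt(stmt);
    }

    fn super_block(&mut self, block: &Block) {
        // Exhaustive destructuring: adding a field to `Block` forces an
        // update here, so new data cannot be silently skipped.
        let Block { stmts, is_cleanup: _ } = block;
        for stmt in stmts {
            self.visit_stmt(stmt);
        }
    }

    fn super_stmt(&mut self, stmt: &Stmt) {
        // Exhaustive match: no `_` arm, so new variants must be handled.
        match stmt {
            Stmt::Assign(_, _) => {}
            Stmt::Nop => {}
        }
    }
}

// A user overrides only `visit_stmt` and calls `super_stmt` for the
// default recursion, mirroring how rustc visitors are written.
struct CountAssignments(usize);

impl ToyVisitor for CountAssignments {
    fn visit_stmt(&mut self, stmt: &Stmt) {
        if let Stmt::Assign(..) = stmt {
            self.0 += 1;
        }
        self.super_stmt(stmt);
    }
}

fn main() {
    let block = Block {
        stmts: vec![Stmt::Assign("x".into(), 1), Stmt::Nop, Stmt::Assign("y".into(), 2)],
        is_cleanup: false,
    };
    let mut counter = CountAssignments(0);
    counter.visit_block(&block);
    assert_eq!(counter.0, 2);
}
```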
+ use crate::mir::*; use crate::ty::subst::SubstsRef; use crate::ty::{CanonicalUserTypeAnnotation, Ty}; use rustc_span::Span; -// # The MIR Visitor -// -// ## Overview -// -// There are two visitors, one for immutable and one for mutable references, -// but both are generated by the following macro. The code is written according -// to the following conventions: -// -// - introduce a `visit_foo` and a `super_foo` method for every MIR type -// - `visit_foo`, by default, calls `super_foo` -// - `super_foo`, by default, destructures the `foo` and calls `visit_foo` -// -// This allows you as a user to override `visit_foo` for types are -// interested in, and invoke (within that method) call -// `self.super_foo` to get the default behavior. Just as in an OO -// language, you should never call `super` methods ordinarily except -// in that circumstance. -// -// For the most part, we do not destructure things external to the -// MIR, e.g., types, spans, etc, but simply visit them and stop. This -// avoids duplication with other visitors like `TypeFoldable`. -// -// ## Updating -// -// The code is written in a very deliberate style intended to minimize -// the chance of things being overlooked. You'll notice that we always -// use pattern matching to reference fields and we ensure that all -// matches are exhaustive. -// -// For example, the `super_basic_block_data` method begins like this: -// -// ```rust -// fn super_basic_block_data(&mut self, -// block: BasicBlock, -// data: & $($mutability)? BasicBlockData<'tcx>) { -// let BasicBlockData { -// statements, -// terminator, -// is_cleanup: _ -// } = *data; -// -// for statement in statements { -// self.visit_statement(block, statement); -// } -// -// ... -// } -// ``` -// -// Here we used `let BasicBlockData { } = *data` deliberately, -// rather than writing `data.statements` in the body. This is because if one -// adds a new field to `BasicBlockData`, one will be forced to revise this code, -// and hence one will (hopefully) invoke the correct visit methods (if any). -// -// For this to work, ALL MATCHES MUST BE EXHAUSTIVE IN FIELDS AND VARIANTS. -// That means you never write `..` to skip over fields, nor do you write `_` -// to skip over variants in a `match`. -// -// The only place that `_` is acceptable is to match a field (or -// variant argument) that does not require visiting, as in -// `is_cleanup` above. - macro_rules! make_mir_visitor { ($visitor_trait_name:ident, $($mutability:ident)?) => { pub trait $visitor_trait_name<'tcx> { diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs index 72360e219ec..634d50368bd 100644 --- a/compiler/rustc_middle/src/query/mod.rs +++ b/compiler/rustc_middle/src/query/mod.rs @@ -367,7 +367,7 @@ rustc_queries! { TypeChecking { /// Erases regions from `ty` to yield a new type. - /// Normally you would just use `tcx.erase_regions(&value)`, + /// Normally you would just use `tcx.erase_regions(value)`, /// however, which uses this query as a kind of cache. 
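The doc comment above notes that `tcx.erase_regions(value)` is the usual entry point and that the `erase_regions_ty` query behind it acts as a cache; `erase_regions` itself (in `erase_regions.rs`, later in this diff) first checks `has_type_flags` and returns the value unchanged when there is nothing to erase. A minimal standalone model of that "cheap flag check, then memoized slow path" shape, with invented names (`Value`, `Cache::erase`) rather than rustc APIs:

```rust
use std::collections::HashMap;

// Hypothetical value with a cheap-to-test flag, standing in for
// `TypeFlags::HAS_FREE_REGIONS` on a `Ty`.
#[derive(Clone, PartialEq, Eq, Hash)]
struct Value {
    text: String,
    has_regions: bool,
}

struct Cache {
    // Plays the role of the `erase_regions_ty` query cache.
    memo: HashMap<Value, Value>,
}

impl Cache {
    fn erase(&mut self, value: Value) -> Value {
        // Fast path: nothing to erase, so avoid the cache entirely,
        // mirroring the `has_type_flags` check in `erase_regions`.
        if !value.has_regions {
            return value;
        }
        if let Some(hit) = self.memo.get(&value) {
            return hit.clone();
        }
        // Slow path: do the actual work once and memoize it.
        let erased = Value { text: value.text.replace("'a ", ""), has_regions: false };
        self.memo.insert(value, erased.clone());
        erased
    }
}

fn main() {
    let mut cache = Cache { memo: HashMap::new() };
    let v = Value { text: "&'a str".to_string(), has_regions: true };
    assert_eq!(cache.erase(v).text, "&str");
    let plain = Value { text: "u32".to_string(), has_regions: false };
    assert_eq!(cache.erase(plain).text, "u32"); // fast path, no cache entry
}
```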
query erase_regions_ty(ty: Ty<'tcx>) -> Ty<'tcx> { // This query is not expected to have input -- as a result, it diff --git a/compiler/rustc_middle/src/traits/chalk.rs b/compiler/rustc_middle/src/traits/chalk.rs index d8507d08c1b..f864ad8ebcd 100644 --- a/compiler/rustc_middle/src/traits/chalk.rs +++ b/compiler/rustc_middle/src/traits/chalk.rs @@ -102,48 +102,6 @@ impl<'tcx> chalk_ir::interner::Interner for RustInterner<'tcx> { Some(write()) } - fn debug_application_ty( - application_ty: &chalk_ir::ApplicationTy, - fmt: &mut fmt::Formatter<'_>, - ) -> Option { - match application_ty.name { - chalk_ir::TypeName::Ref(mutbl) => { - let data = application_ty.substitution.interned(); - match (&**data[0].interned(), &**data[1].interned()) { - ( - chalk_ir::GenericArgData::Lifetime(lifetime), - chalk_ir::GenericArgData::Ty(ty), - ) => Some(match mutbl { - chalk_ir::Mutability::Not => write!(fmt, "(&{:?} {:?})", lifetime, ty), - chalk_ir::Mutability::Mut => write!(fmt, "(&{:?} mut {:?})", lifetime, ty), - }), - _ => unreachable!(), - } - } - chalk_ir::TypeName::Array => { - let data = application_ty.substitution.interned(); - match (&**data[0].interned(), &**data[1].interned()) { - (chalk_ir::GenericArgData::Ty(ty), chalk_ir::GenericArgData::Const(len)) => { - Some(write!(fmt, "[{:?}; {:?}]", ty, len)) - } - _ => unreachable!(), - } - } - chalk_ir::TypeName::Slice => { - let data = application_ty.substitution.interned(); - let ty = match &**data[0].interned() { - chalk_ir::GenericArgData::Ty(t) => t, - _ => unreachable!(), - }; - Some(write!(fmt, "[{:?}]", ty)) - } - _ => { - let chalk_ir::ApplicationTy { name, substitution } = application_ty; - Some(write!(fmt, "{:?}{:?}", name, chalk_ir::debug::Angle(substitution.interned()))) - } - } - } - fn debug_substitution( substitution: &chalk_ir::Substitution, fmt: &mut fmt::Formatter<'_>, @@ -174,6 +132,32 @@ impl<'tcx> chalk_ir::interner::Interner for RustInterner<'tcx> { Some(write!(fmt, "{:?}", clauses.interned())) } + fn debug_ty(ty: &chalk_ir::Ty, fmt: &mut fmt::Formatter<'_>) -> Option { + match &ty.interned().kind { + chalk_ir::TyKind::Ref(chalk_ir::Mutability::Not, lifetime, ty) => { + Some(write!(fmt, "(&{:?} {:?})", lifetime, ty)) + } + chalk_ir::TyKind::Ref(chalk_ir::Mutability::Mut, lifetime, ty) => { + Some(write!(fmt, "(&{:?} mut {:?})", lifetime, ty)) + } + chalk_ir::TyKind::Array(ty, len) => Some(write!(fmt, "[{:?}; {:?}]", ty, len)), + chalk_ir::TyKind::Slice(ty) => Some(write!(fmt, "[{:?}]", ty)), + chalk_ir::TyKind::Tuple(len, substs) => Some((|| { + write!(fmt, "(")?; + for (idx, substitution) in substs.interned().iter().enumerate() { + if idx == *len && *len != 1 { + // Don't add a trailing comma if the tuple has more than one element + write!(fmt, "{:?}", substitution)?; + } else { + write!(fmt, "{:?},", substitution)?; + } + } + write!(fmt, ")") + })()), + _ => None, + } + } + fn debug_alias( alias_ty: &chalk_ir::AliasTy, fmt: &mut fmt::Formatter<'_>, diff --git a/compiler/rustc_middle/src/traits/structural_impls.rs b/compiler/rustc_middle/src/traits/structural_impls.rs index b8f6675b8e2..194e275496e 100644 --- a/compiler/rustc_middle/src/traits/structural_impls.rs +++ b/compiler/rustc_middle/src/traits/structural_impls.rs @@ -105,7 +105,7 @@ impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceTraitAliasData<'tcx, /////////////////////////////////////////////////////////////////////////// // Lift implementations -CloneTypeFoldableAndLiftImpls! { +TrivialTypeFoldableAndLiftImpls! 
{ super::IfExpressionCause, super::ImplSourceDiscriminantKindData, } diff --git a/compiler/rustc_middle/src/ty/binding.rs b/compiler/rustc_middle/src/ty/binding.rs index 3237147c8ba..7ab192daf4b 100644 --- a/compiler/rustc_middle/src/ty/binding.rs +++ b/compiler/rustc_middle/src/ty/binding.rs @@ -8,7 +8,7 @@ pub enum BindingMode { BindByValue(Mutability), } -CloneTypeFoldableAndLiftImpls! { BindingMode, } +TrivialTypeFoldableAndLiftImpls! { BindingMode, } impl BindingMode { pub fn convert(ba: BindingAnnotation) -> BindingMode { diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs index aaf6a857043..1def4936860 100644 --- a/compiler/rustc_middle/src/ty/codec.rs +++ b/compiler/rustc_middle/src/ty/codec.rs @@ -278,7 +278,7 @@ impl<'tcx, D: TyDecoder<'tcx>> Decodable for ty::Region<'tcx> { impl<'tcx, D: TyDecoder<'tcx>> Decodable for CanonicalVarInfos<'tcx> { fn decode(decoder: &mut D) -> Result { let len = decoder.read_usize()?; - let interned: Result, _> = + let interned: Result>, _> = (0..len).map(|_| Decodable::decode(decoder)).collect(); Ok(decoder.tcx().intern_canonical_var_infos(interned?.as_slice())) } diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs index 126257a5b49..63e95f25bb7 100644 --- a/compiler/rustc_middle/src/ty/consts/int.rs +++ b/compiler/rustc_middle/src/ty/consts/int.rs @@ -168,7 +168,7 @@ impl ScalarInt { #[inline(always)] fn check_data(self) { // Using a block `{self.data}` here to force a copy instead of using `self.data` - // directly, because `assert_eq` takes references to its arguments and formatting + // directly, because `debug_assert_eq` takes references to its arguments and formatting // arguments and would thus borrow `self.data`. Since `Self` // is a packed struct, that would create a possibly unaligned reference, which // is UB. diff --git a/compiler/rustc_middle/src/ty/consts/kind.rs b/compiler/rustc_middle/src/ty/consts/kind.rs index ede28522000..ecf2837b3e4 100644 --- a/compiler/rustc_middle/src/ty/consts/kind.rs +++ b/compiler/rustc_middle/src/ty/consts/kind.rs @@ -23,7 +23,7 @@ pub enum ConstKind<'tcx> { Bound(ty::DebruijnIndex, ty::BoundVar), /// A placeholder const - universally quantified higher-ranked const. - Placeholder(ty::PlaceholderConst), + Placeholder(ty::PlaceholderConst<'tcx>), /// Used in the HIR by using `Unevaluated` everywhere and later normalizing to one of the other /// variants when the code is monomorphic enough for that. @@ -103,9 +103,9 @@ impl<'tcx> ConstKind<'tcx> { // so that we don't try to invoke this query with // any region variables. 
let param_env_and_substs = tcx - .erase_regions(¶m_env) + .erase_regions(param_env) .with_reveal_all_normalized(tcx) - .and(tcx.erase_regions(&substs)); + .and(tcx.erase_regions(substs)); // HACK(eddyb) when the query key would contain inference variables, // attempt using identity substs and `ParamEnv` instead, that will succeed diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index 1c6937e685c..36cbd36a770 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -14,7 +14,6 @@ use crate::mir::interpret::{self, Allocation, ConstValue, Scalar}; use crate::mir::{Body, Field, Local, Place, PlaceElem, ProjectionKind, Promoted}; use crate::traits; use crate::ty::query::{self, TyCtxtAt}; -use crate::ty::steal::Steal; use crate::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst, SubstsRef, UserSubsts}; use crate::ty::TyKind::*; use crate::ty::{ @@ -33,6 +32,7 @@ use rustc_data_structures::sharded::{IntoPointer, ShardedHashMap}; use rustc_data_structures::stable_hasher::{ hash_stable_hashmap, HashStable, StableHasher, StableVec, }; +use rustc_data_structures::steal::Steal; use rustc_data_structures::sync::{self, Lock, Lrc, WorkerLocal}; use rustc_data_structures::unhash::UnhashMap; use rustc_errors::ErrorReported; @@ -83,7 +83,7 @@ pub struct CtxtInterners<'tcx> { type_: InternedSet<'tcx, TyS<'tcx>>, type_list: InternedSet<'tcx, List>>, substs: InternedSet<'tcx, InternalSubsts<'tcx>>, - canonical_var_infos: InternedSet<'tcx, List>, + canonical_var_infos: InternedSet<'tcx, List>>, region: InternedSet<'tcx, RegionKind>, existential_predicates: InternedSet<'tcx, List>>, predicate: InternedSet<'tcx, PredicateInner<'tcx>>, @@ -415,9 +415,19 @@ pub struct TypeckResults<'tcx> { /// entire variable. pub closure_captures: ty::UpvarListMap, + /// Tracks the minimum captures required for a closure; + /// see `MinCaptureInformationMap` for more details. + pub closure_min_captures: ty::MinCaptureInformationMap<'tcx>, + /// Stores the type, expression, span and optional scope span of all types /// that are live across the yield of this generator (if a generator). pub generator_interior_types: Vec>, + + /// We sometimes treat byte string literals (which are of type `&[u8; N]`) + /// as `&[u8]`, depending on the pattern in which they are used. + /// This hashset records all instances where we behave + /// like this to allow `const_to_pat` to reliably handle this situation. 
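The new `treat_byte_string_as_slice` set records the places where a byte string literal, whose type is `&[u8; N]`, was used in a pattern position expecting `&[u8]`. The surface behavior being tracked can be seen in ordinary Rust; the `classify` function below is an invented illustration of that language behavior, not compiler code.

```rust
// A byte string literal such as b"ping" has type &[u8; 4], but in a pattern
// it can also match a value of type &[u8]. The compiler records where it
// applied this treatment so that later const-to-pattern conversion
// (`const_to_pat`) stays consistent with type checking.
fn classify(msg: &[u8]) -> &'static str {
    match msg {
        b"ping" => "ping", // &[u8; 4] literal matched against a &[u8] scrutinee
        b"pong" => "pong",
        _ => "unknown",
    }
}

fn main() {
    assert_eq!(classify(b"ping"), "ping");
    assert_eq!(classify(&[1, 2, 3]), "unknown");
}
```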
+ pub treat_byte_string_as_slice: ItemLocalSet, } impl<'tcx> TypeckResults<'tcx> { @@ -442,7 +452,9 @@ impl<'tcx> TypeckResults<'tcx> { tainted_by_errors: None, concrete_opaque_types: Default::default(), closure_captures: Default::default(), + closure_min_captures: Default::default(), generator_interior_types: Default::default(), + treat_byte_string_as_slice: Default::default(), } } @@ -676,7 +688,9 @@ impl<'a, 'tcx> HashStable> for TypeckResults<'tcx> { tainted_by_errors, ref concrete_opaque_types, ref closure_captures, + ref closure_min_captures, ref generator_interior_types, + ref treat_byte_string_as_slice, } = *self; hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { @@ -709,7 +723,9 @@ impl<'a, 'tcx> HashStable> for TypeckResults<'tcx> { tainted_by_errors.hash_stable(hcx, hasher); concrete_opaque_types.hash_stable(hcx, hasher); closure_captures.hash_stable(hcx, hasher); + closure_min_captures.hash_stable(hcx, hasher); generator_interior_types.hash_stable(hcx, hasher); + treat_byte_string_as_slice.hash_stable(hcx, hasher); }) } } @@ -1495,7 +1511,7 @@ impl<'tcx> TyCtxt<'tcx> { match ret_ty.kind() { ty::FnDef(_, _) => { let sig = ret_ty.fn_sig(self); - let output = self.erase_late_bound_regions(&sig.output()); + let output = self.erase_late_bound_regions(sig.output()); if output.is_impl_trait() { let fn_decl = self.hir().fn_decl_by_hir_id(hir_id).unwrap(); Some((output, fn_decl.output.span())) @@ -1613,7 +1629,7 @@ nop_lift! {predicate; &'a PredicateInner<'a> => &'tcx PredicateInner<'tcx>} nop_list_lift! {type_list; Ty<'a> => Ty<'tcx>} nop_list_lift! {existential_predicates; ExistentialPredicate<'a> => ExistentialPredicate<'tcx>} nop_list_lift! {predicates; Predicate<'a> => Predicate<'tcx>} -nop_list_lift! {canonical_var_infos; CanonicalVarInfo => CanonicalVarInfo} +nop_list_lift! {canonical_var_infos; CanonicalVarInfo<'a> => CanonicalVarInfo<'tcx>} nop_list_lift! {projs; ProjectionKind => ProjectionKind} // This is the impl for `&'a InternalSubsts<'a>`. @@ -2049,7 +2065,7 @@ macro_rules! slice_interners { slice_interners!( type_list: _intern_type_list(Ty<'tcx>), substs: _intern_substs(GenericArg<'tcx>), - canonical_var_infos: _intern_canonical_var_infos(CanonicalVarInfo), + canonical_var_infos: _intern_canonical_var_infos(CanonicalVarInfo<'tcx>), existential_predicates: _intern_existential_predicates(ExistentialPredicate<'tcx>), predicates: _intern_predicates(Predicate<'tcx>), projs: _intern_projs(ProjectionKind), @@ -2448,7 +2464,10 @@ impl<'tcx> TyCtxt<'tcx> { if ts.is_empty() { List::empty() } else { self._intern_place_elems(ts) } } - pub fn intern_canonical_var_infos(self, ts: &[CanonicalVarInfo]) -> CanonicalVarInfos<'tcx> { + pub fn intern_canonical_var_infos( + self, + ts: &[CanonicalVarInfo<'tcx>], + ) -> CanonicalVarInfos<'tcx> { if ts.is_empty() { List::empty() } else { self._intern_canonical_var_infos(ts) } } diff --git a/compiler/rustc_middle/src/ty/erase_regions.rs b/compiler/rustc_middle/src/ty/erase_regions.rs index 48d0fc1839e..4412ba9408c 100644 --- a/compiler/rustc_middle/src/ty/erase_regions.rs +++ b/compiler/rustc_middle/src/ty/erase_regions.rs @@ -15,17 +15,17 @@ impl<'tcx> TyCtxt<'tcx> { /// Returns an equivalent value with all free regions removed (note /// that late-bound regions remain, because they are important for /// subtyping, but they are anonymized and normalized as well).. 
- pub fn erase_regions(self, value: &T) -> T + pub fn erase_regions(self, value: T) -> T where T: TypeFoldable<'tcx>, { // If there's nothing to erase avoid performing the query at all if !value.has_type_flags(TypeFlags::HAS_RE_LATE_BOUND | TypeFlags::HAS_FREE_REGIONS) { - return value.clone(); + return value; } - + debug!("erase_regions({:?})", value); let value1 = value.fold_with(&mut RegionEraserVisitor { tcx: self }); - debug!("erase_regions({:?}) = {:?}", value, value1); + debug!("erase_regions = {:?}", value1); value1 } } @@ -43,7 +43,7 @@ impl TypeFolder<'tcx> for RegionEraserVisitor<'tcx> { if ty.needs_infer() { ty.super_fold_with(self) } else { self.tcx.erase_regions_ty(ty) } } - fn fold_binder(&mut self, t: &ty::Binder) -> ty::Binder + fn fold_binder(&mut self, t: ty::Binder) -> ty::Binder where T: TypeFoldable<'tcx>, { diff --git a/compiler/rustc_middle/src/ty/fold.rs b/compiler/rustc_middle/src/ty/fold.rs index 70a8157c04a..1883c89a151 100644 --- a/compiler/rustc_middle/src/ty/fold.rs +++ b/compiler/rustc_middle/src/ty/fold.rs @@ -44,13 +44,13 @@ use std::ops::ControlFlow; /// /// To implement this conveniently, use the derive macro located in librustc_macros. pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { - fn super_fold_with>(&self, folder: &mut F) -> Self; - fn fold_with>(&self, folder: &mut F) -> Self { + fn super_fold_with>(self, folder: &mut F) -> Self; + fn fold_with>(self, folder: &mut F) -> Self { self.super_fold_with(folder) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()>; - fn visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow; + fn visit_with>(&self, visitor: &mut V) -> ControlFlow { self.super_visit_with(visitor) } @@ -73,7 +73,7 @@ pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { } fn has_type_flags(&self, flags: TypeFlags) -> bool { - self.visit_with(&mut HasTypeFlagsVisitor { flags }).is_break() + self.visit_with(&mut HasTypeFlagsVisitor { flags }).break_value() == Some(FoundFlags) } fn has_projections(&self) -> bool { self.has_type_flags(TypeFlags::HAS_PROJECTION) @@ -142,26 +142,13 @@ pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { fn still_further_specializable(&self) -> bool { self.has_type_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE) } - - /// A visitor that does not recurse into types, works like `fn walk_shallow` in `Ty`. 
- fn visit_tys_shallow(&self, visit: impl FnMut(Ty<'tcx>) -> ControlFlow<()>) -> ControlFlow<()> { - pub struct Visitor(F); - - impl<'tcx, F: FnMut(Ty<'tcx>) -> ControlFlow<()>> TypeVisitor<'tcx> for Visitor { - fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> { - self.0(ty) - } - } - - self.visit_with(&mut Visitor(visit)) - } } impl TypeFoldable<'tcx> for hir::Constness { - fn super_fold_with>(&self, _: &mut F) -> Self { - *self + fn super_fold_with>(self, _: &mut F) -> Self { + self } - fn super_visit_with>(&self, _: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, _: &mut V) -> ControlFlow { ControlFlow::CONTINUE } } @@ -174,7 +161,7 @@ impl TypeFoldable<'tcx> for hir::Constness { pub trait TypeFolder<'tcx>: Sized { fn tcx<'a>(&'a self) -> TyCtxt<'tcx>; - fn fold_binder(&mut self, t: &Binder) -> Binder + fn fold_binder(&mut self, t: Binder) -> Binder where T: TypeFoldable<'tcx>, { @@ -195,23 +182,25 @@ pub trait TypeFolder<'tcx>: Sized { } pub trait TypeVisitor<'tcx>: Sized { - fn visit_binder>(&mut self, t: &Binder) -> ControlFlow<()> { + type BreakTy = !; + + fn visit_binder>(&mut self, t: &Binder) -> ControlFlow { t.super_visit_with(self) } - fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> { + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { t.super_visit_with(self) } - fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> { + fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow { r.super_visit_with(self) } - fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<()> { + fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow { c.super_visit_with(self) } - fn visit_predicate(&mut self, p: ty::Predicate<'tcx>) -> ControlFlow<()> { + fn visit_predicate(&mut self, p: ty::Predicate<'tcx>) -> ControlFlow { p.super_visit_with(self) } } @@ -266,7 +255,7 @@ impl<'tcx> TyCtxt<'tcx> { /// and skipped. 
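The change above gives `TypeVisitor` an associated `BreakTy`, so an early exit can carry a typed payload in `ControlFlow<Self::BreakTy>` instead of a bare `()`. Below is a self-contained model of the same pattern using `std::ops::ControlFlow`; the `Tree`, `TreeVisitor`, and `FindNegative` names are invented for illustration and are not the rustc API.

```rust
use std::ops::ControlFlow;

// A tiny recursive structure standing in for the types a TypeVisitor walks.
enum Tree {
    Leaf(i64),
    Node(Vec<Tree>),
}

trait TreeVisitor {
    // The payload carried out of an early exit, like `TypeVisitor::BreakTy`.
    type BreakTy;

    fn visit_tree(&mut self, tree: &Tree) -> ControlFlow<Self::BreakTy> {
        self.super_tree(tree)
    }

    // Default traversal; any Break is propagated upward unchanged.
    fn super_tree(&mut self, tree: &Tree) -> ControlFlow<Self::BreakTy> {
        match tree {
            Tree::Leaf(_) => ControlFlow::Continue(()),
            Tree::Node(children) => {
                for child in children {
                    if let ControlFlow::Break(b) = self.visit_tree(child) {
                        return ControlFlow::Break(b);
                    }
                }
                ControlFlow::Continue(())
            }
        }
    }
}

// The break value reports *which* leaf stopped the walk, playing the role
// of marker types such as `FoundFlags` or `FoundEscapingVars` in this file.
struct FindNegative;

impl TreeVisitor for FindNegative {
    type BreakTy = i64;

    fn visit_tree(&mut self, tree: &Tree) -> ControlFlow<i64> {
        if let Tree::Leaf(n) = tree {
            if *n < 0 {
                return ControlFlow::Break(*n);
            }
        }
        self.super_tree(tree)
    }
}

fn main() {
    let tree = Tree::Node(vec![Tree::Leaf(3), Tree::Node(vec![Tree::Leaf(-7), Tree::Leaf(2)])]);
    match FindNegative.visit_tree(&tree) {
        ControlFlow::Break(n) => assert_eq!(n, -7),
        ControlFlow::Continue(()) => panic!("expected to find a negative leaf"),
    }
}
```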
pub fn fold_regions( self, - value: &T, + value: T, skipped_regions: &mut bool, mut f: impl FnMut(ty::Region<'tcx>, ty::DebruijnIndex) -> ty::Region<'tcx>, ) -> T @@ -329,14 +318,19 @@ impl<'tcx> TyCtxt<'tcx> { where F: FnMut(ty::Region<'tcx>) -> bool, { - fn visit_binder>(&mut self, t: &Binder) -> ControlFlow<()> { + type BreakTy = (); + + fn visit_binder>( + &mut self, + t: &Binder, + ) -> ControlFlow { self.outer_index.shift_in(1); let result = t.as_ref().skip_binder().visit_with(self); self.outer_index.shift_out(1); result } - fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> { + fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow { match *r { ty::ReLateBound(debruijn, _) if debruijn < self.outer_index => { ControlFlow::CONTINUE @@ -351,7 +345,7 @@ impl<'tcx> TyCtxt<'tcx> { } } - fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> { + fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow { // We're only interested in types involving regions if ty.flags().intersects(TypeFlags::HAS_FREE_REGIONS) { ty.super_visit_with(self) @@ -406,7 +400,7 @@ impl<'a, 'tcx> TypeFolder<'tcx> for RegionFolder<'a, 'tcx> { self.tcx } - fn fold_binder>(&mut self, t: &ty::Binder) -> ty::Binder { + fn fold_binder>(&mut self, t: ty::Binder) -> ty::Binder { self.current_index.shift_in(1); let t = t.super_fold_with(self); self.current_index.shift_out(1); @@ -466,7 +460,7 @@ impl<'a, 'tcx> TypeFolder<'tcx> for BoundVarReplacer<'a, 'tcx> { self.tcx } - fn fold_binder>(&mut self, t: &ty::Binder) -> ty::Binder { + fn fold_binder>(&mut self, t: ty::Binder) -> ty::Binder { self.current_index.shift_in(1); let t = t.super_fold_with(self); self.current_index.shift_out(1); @@ -549,7 +543,7 @@ impl<'tcx> TyCtxt<'tcx> { /// contain escaping bound types. pub fn replace_late_bound_regions( self, - value: &Binder, + value: Binder, fld_r: F, ) -> (T, BTreeMap>) where @@ -561,7 +555,7 @@ impl<'tcx> TyCtxt<'tcx> { let fld_c = |bound_ct, ty| { self.mk_const(ty::Const { val: ty::ConstKind::Bound(ty::INNERMOST, bound_ct), ty }) }; - self.replace_escaping_bound_vars(value.as_ref().skip_binder(), fld_r, fld_t, fld_c) + self.replace_escaping_bound_vars(value.skip_binder(), fld_r, fld_t, fld_c) } /// Replaces all escaping bound vars. The `fld_r` closure replaces escaping @@ -569,7 +563,7 @@ impl<'tcx> TyCtxt<'tcx> { /// closure replaces escaping bound consts. pub fn replace_escaping_bound_vars( self, - value: &T, + value: T, mut fld_r: F, mut fld_t: G, mut fld_c: H, @@ -609,7 +603,7 @@ impl<'tcx> TyCtxt<'tcx> { /// types. pub fn replace_bound_vars( self, - value: &Binder, + value: Binder, fld_r: F, fld_t: G, fld_c: H, @@ -620,16 +614,12 @@ impl<'tcx> TyCtxt<'tcx> { H: FnMut(ty::BoundVar, Ty<'tcx>) -> &'tcx ty::Const<'tcx>, T: TypeFoldable<'tcx>, { - self.replace_escaping_bound_vars(value.as_ref().skip_binder(), fld_r, fld_t, fld_c) + self.replace_escaping_bound_vars(value.skip_binder(), fld_r, fld_t, fld_c) } /// Replaces any late-bound regions bound in `value` with /// free variants attached to `all_outlive_scope`. - pub fn liberate_late_bound_regions( - self, - all_outlive_scope: DefId, - value: &ty::Binder, - ) -> T + pub fn liberate_late_bound_regions(self, all_outlive_scope: DefId, value: ty::Binder) -> T where T: TypeFoldable<'tcx>, { @@ -683,7 +673,7 @@ impl<'tcx> TyCtxt<'tcx> { /// Replaces any late-bound regions bound in `value` with `'erased`. Useful in codegen but also /// method lookup and a few other places where precise region relationships are not required. 
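The binder helpers in this hunk (`replace_late_bound_regions`, `shift_vars`, and the `outer_index`/`current_index` bookkeeping) all revolve around de Bruijn indices: a bound variable is named by how many binders separate it from the binder that introduces it, and it "escapes" when that index reaches past the binders walked so far. The following compact, standalone illustration uses a toy lambda term; it is a simplified model, not rustc's representation.

```rust
// Toy lambda-calculus terms with de Bruijn indices: Var(0) refers to the
// innermost enclosing binder, Var(1) to the next one out, and so on.
enum Term {
    Var(u32),
    Lam(Box<Term>),
    App(Box<Term>, Box<Term>),
}

// Does the term contain a variable that escapes the binders inside it?
// `depth` plays the role of `outer_index`: it is shifted in when we enter
// a binder and compared against each variable we meet.
fn has_escaping_vars(term: &Term, depth: u32) -> bool {
    match term {
        Term::Var(i) => *i >= depth,
        Term::Lam(body) => has_escaping_vars(body, depth + 1), // "shift in"
        Term::App(f, a) => has_escaping_vars(f, depth) || has_escaping_vars(a, depth),
    }
}

fn main() {
    // \x. x -- fully closed, nothing escapes the binder.
    let closed = Term::Lam(Box::new(Term::Var(0)));
    assert!(!has_escaping_vars(&closed, 0));

    // \x. y -- Var(1) points past the single enclosing binder, so it escapes,
    // like a late-bound region whose debruijn index exceeds `outer_index`.
    let open = Term::Lam(Box::new(Term::Var(1)));
    assert!(has_escaping_vars(&open, 0));

    // (\x. x) y -- the argument Var(0) sits under no binder at all, so it escapes.
    let app = Term::App(Box::new(closed), Box::new(Term::Var(0)));
    assert!(has_escaping_vars(&app, 0));
}
```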
- pub fn erase_late_bound_regions(self, value: &Binder) -> T + pub fn erase_late_bound_regions(self, value: Binder) -> T where T: TypeFoldable<'tcx>, { @@ -698,7 +688,7 @@ impl<'tcx> TyCtxt<'tcx> { /// `FnSig`s or `TraitRef`s which are equivalent up to region naming will become /// structurally identical. For example, `for<'a, 'b> fn(&'a isize, &'b isize)` and /// `for<'a, 'b> fn(&'b isize, &'a isize)` will become identical after anonymization. - pub fn anonymize_late_bound_regions(self, sig: &Binder) -> Binder + pub fn anonymize_late_bound_regions(self, sig: Binder) -> Binder where T: TypeFoldable<'tcx>, { @@ -740,7 +730,7 @@ impl TypeFolder<'tcx> for Shifter<'tcx> { self.tcx } - fn fold_binder>(&mut self, t: &ty::Binder) -> ty::Binder { + fn fold_binder>(&mut self, t: ty::Binder) -> ty::Binder { self.current_index.shift_in(1); let t = t.super_fold_with(self); self.current_index.shift_out(1); @@ -804,7 +794,7 @@ pub fn shift_region<'tcx>( } } -pub fn shift_vars<'tcx, T>(tcx: TyCtxt<'tcx>, value: &T, amount: u32) -> T +pub fn shift_vars<'tcx, T>(tcx: TyCtxt<'tcx>, value: T, amount: u32) -> T where T: TypeFoldable<'tcx>, { @@ -813,6 +803,9 @@ where value.fold_with(&mut Shifter::new(tcx, amount)) } +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +struct FoundEscapingVars; + /// An "escaping var" is a bound var whose binder is not part of `t`. A bound var can be a /// bound region or a bound type. /// @@ -844,93 +837,114 @@ struct HasEscapingVarsVisitor { } impl<'tcx> TypeVisitor<'tcx> for HasEscapingVarsVisitor { - fn visit_binder>(&mut self, t: &Binder) -> ControlFlow<()> { + type BreakTy = FoundEscapingVars; + + fn visit_binder>(&mut self, t: &Binder) -> ControlFlow { self.outer_index.shift_in(1); let result = t.super_visit_with(self); self.outer_index.shift_out(1); result } - fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> { + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { // If the outer-exclusive-binder is *strictly greater* than // `outer_index`, that means that `t` contains some content // bound at `outer_index` or above (because // `outer_exclusive_binder` is always 1 higher than the // content in `t`). Therefore, `t` has some escaping vars. if t.outer_exclusive_binder > self.outer_index { - ControlFlow::BREAK + ControlFlow::Break(FoundEscapingVars) } else { ControlFlow::CONTINUE } } - fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> { + fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow { // If the region is bound by `outer_index` or anything outside // of outer index, then it escapes the binders we have // visited. if r.bound_at_or_above_binder(self.outer_index) { - ControlFlow::BREAK + ControlFlow::Break(FoundEscapingVars) } else { ControlFlow::CONTINUE } } - fn visit_const(&mut self, ct: &'tcx ty::Const<'tcx>) -> ControlFlow<()> { + fn visit_const(&mut self, ct: &'tcx ty::Const<'tcx>) -> ControlFlow { // we don't have a `visit_infer_const` callback, so we have to // hook in here to catch this case (annoying...), but // otherwise we do want to remember to visit the rest of the // const, as it has types/regions embedded in a lot of other // places. 
match ct.val { - ty::ConstKind::Bound(debruijn, _) if debruijn >= self.outer_index => ControlFlow::BREAK, + ty::ConstKind::Bound(debruijn, _) if debruijn >= self.outer_index => { + ControlFlow::Break(FoundEscapingVars) + } _ => ct.super_visit_with(self), } } - fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<()> { + fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow { if predicate.inner.outer_exclusive_binder > self.outer_index { - ControlFlow::BREAK + ControlFlow::Break(FoundEscapingVars) } else { ControlFlow::CONTINUE } } } +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +struct FoundFlags; + // FIXME: Optimize for checking for infer flags struct HasTypeFlagsVisitor { flags: ty::TypeFlags, } impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor { - fn visit_ty(&mut self, t: Ty<'_>) -> ControlFlow<()> { + type BreakTy = FoundFlags; + + fn visit_ty(&mut self, t: Ty<'_>) -> ControlFlow { debug!( "HasTypeFlagsVisitor: t={:?} t.flags={:?} self.flags={:?}", t, t.flags(), self.flags ); - if t.flags().intersects(self.flags) { ControlFlow::BREAK } else { ControlFlow::CONTINUE } + if t.flags().intersects(self.flags) { + ControlFlow::Break(FoundFlags) + } else { + ControlFlow::CONTINUE + } } - fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> { + fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow { let flags = r.type_flags(); debug!("HasTypeFlagsVisitor: r={:?} r.flags={:?} self.flags={:?}", r, flags, self.flags); - if flags.intersects(self.flags) { ControlFlow::BREAK } else { ControlFlow::CONTINUE } + if flags.intersects(self.flags) { + ControlFlow::Break(FoundFlags) + } else { + ControlFlow::CONTINUE + } } - fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<()> { + fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow { let flags = FlagComputation::for_const(c); debug!("HasTypeFlagsVisitor: c={:?} c.flags={:?} self.flags={:?}", c, flags, self.flags); - if flags.intersects(self.flags) { ControlFlow::BREAK } else { ControlFlow::CONTINUE } + if flags.intersects(self.flags) { + ControlFlow::Break(FoundFlags) + } else { + ControlFlow::CONTINUE + } } - fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<()> { + fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow { debug!( "HasTypeFlagsVisitor: predicate={:?} predicate.flags={:?} self.flags={:?}", predicate, predicate.inner.flags, self.flags ); if predicate.inner.flags.intersects(self.flags) { - ControlFlow::BREAK + ControlFlow::Break(FoundFlags) } else { ControlFlow::CONTINUE } @@ -964,14 +978,14 @@ impl LateBoundRegionsCollector { } impl<'tcx> TypeVisitor<'tcx> for LateBoundRegionsCollector { - fn visit_binder>(&mut self, t: &Binder) -> ControlFlow<()> { + fn visit_binder>(&mut self, t: &Binder) -> ControlFlow { self.current_index.shift_in(1); let result = t.super_visit_with(self); self.current_index.shift_out(1); result } - fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> { + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { // if we are only looking for "constrained" region, we have to // ignore the inputs to a projection, as they may not appear // in the normalized form @@ -984,7 +998,7 @@ impl<'tcx> TypeVisitor<'tcx> for LateBoundRegionsCollector { t.super_visit_with(self) } - fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<()> { + fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow { // if we are only looking for "constrained" region, we have to 
// ignore the inputs of an unevaluated const, as they may not appear // in the normalized form @@ -997,7 +1011,7 @@ impl<'tcx> TypeVisitor<'tcx> for LateBoundRegionsCollector { c.super_visit_with(self) } - fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> { + fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow { if let ty::ReLateBound(debruijn, br) = *r { if debruijn == self.current_index { self.regions.insert(br); diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs index 306cebd9cb7..f52466d85f8 100644 --- a/compiler/rustc_middle/src/ty/instance.rs +++ b/compiler/rustc_middle/src/ty/instance.rs @@ -359,15 +359,15 @@ impl<'tcx> Instance<'tcx> { // HACK(eddyb) erase regions in `substs` first, so that `param_env.and(...)` // below is more likely to ignore the bounds in scope (e.g. if the only // generic parameters mentioned by `substs` were lifetime ones). - let substs = tcx.erase_regions(&substs); + let substs = tcx.erase_regions(substs); // FIXME(eddyb) should this always use `param_env.with_reveal_all()`? if let Some((did, param_did)) = def.as_const_arg() { tcx.resolve_instance_of_const_arg( - tcx.erase_regions(¶m_env.and((did, param_did, substs))), + tcx.erase_regions(param_env.and((did, param_did, substs))), ) } else { - tcx.resolve_instance(tcx.erase_regions(¶m_env.and((def.did, substs)))) + tcx.resolve_instance(tcx.erase_regions(param_env.and((def.did, substs)))) } } @@ -452,7 +452,7 @@ impl<'tcx> Instance<'tcx> { let self_ty = tcx.mk_closure(closure_did, substs); let sig = substs.as_closure().sig(); - let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig); assert_eq!(sig.inputs().len(), 1); let substs = tcx.mk_substs_trait(self_ty, &[sig.inputs()[0].into()]); @@ -485,7 +485,7 @@ impl<'tcx> Instance<'tcx> { &self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>, - v: &T, + v: T, ) -> T where T: TypeFoldable<'tcx> + Clone, @@ -493,7 +493,7 @@ impl<'tcx> Instance<'tcx> { if let Some(substs) = self.substs_for_mir_body() { tcx.subst_and_normalize_erasing_regions(substs, param_env, v) } else { - tcx.normalize_erasing_regions(param_env, v.clone()) + tcx.normalize_erasing_regions(param_env, v) } } diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs index 91c3dcbfa81..fa0711193df 100644 --- a/compiler/rustc_middle/src/ty/layout.rs +++ b/compiler/rustc_middle/src/ty/layout.rs @@ -15,7 +15,7 @@ use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo}; use rustc_span::symbol::{Ident, Symbol}; use rustc_span::DUMMY_SP; use rustc_target::abi::call::{ - ArgAbi, ArgAttribute, ArgAttributes, Conv, FnAbi, PassMode, Reg, RegKind, + ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind, }; use rustc_target::abi::*; use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy}; @@ -176,7 +176,7 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> { match *self { LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty), LayoutError::SizeOverflow(ty) => { - write!(f, "the type `{}` is too big for the current architecture", ty) + write!(f, "values of the type `{}` are too big for the current architecture", ty) } } } @@ -1756,7 +1756,7 @@ impl<'tcx> SizeSkeleton<'tcx> { match tail.kind() { ty::Param(_) | ty::Projection(_) => { debug_assert!(tail.has_param_types_or_consts()); - Ok(SizeSkeleton::Pointer { 
non_zero, tail: tcx.erase_regions(&tail) }) + Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) }) } _ => bug!( "SizeSkeleton::compute({}): layout errored ({}), yet \ @@ -2545,7 +2545,7 @@ where ) -> Self { debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args); - let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig); use rustc_target::spec::abi::Abi::*; let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) { @@ -2601,15 +2601,14 @@ where }; let target = &cx.tcx().sess.target; - let target_env_gnu_like = matches!(&target.target_env[..], "gnu" | "musl"); - let win_x64_gnu = - target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu"; + let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl"); + let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu"; let linux_s390x_gnu_like = - target.target_os == "linux" && target.arch == "s390x" && target_env_gnu_like; + target.os == "linux" && target.arch == "s390x" && target_env_gnu_like; let linux_sparc64_gnu_like = - target.target_os == "linux" && target.arch == "sparc64" && target_env_gnu_like; + target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like; let linux_powerpc_gnu_like = - target.target_os == "linux" && target.arch == "powerpc" && target_env_gnu_like; + target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like; let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall); // Handle safe Rust thin and fat pointers. @@ -2620,7 +2619,7 @@ where is_return: bool| { // Booleans are always an i1 that needs to be zero-extended. if scalar.is_bool() { - attrs.set(ArgAttribute::ZExt); + attrs.ext(ArgExtension::Zext); return; } @@ -2775,7 +2774,7 @@ where // anyway, we control all calls to it in libstd. Abi::Vector { .. } if abi != SpecAbi::PlatformIntrinsic - && cx.tcx().sess.target.options.simd_types_indirect => + && cx.tcx().sess.target.simd_types_indirect => { arg.make_indirect(); return; @@ -2802,9 +2801,6 @@ where for arg in &mut self.args { fixup(arg, false); } - if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode { - attrs.set(ArgAttribute::StructRet); - } return; } diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs index 0042b4a3a42..6a67935cd98 100644 --- a/compiler/rustc_middle/src/ty/mod.rs +++ b/compiler/rustc_middle/src/ty/mod.rs @@ -6,6 +6,7 @@ pub use self::IntVarValue::*; pub use self::Variance::*; use crate::hir::exports::ExportMap; +use crate::hir::place::Place as HirPlace; use crate::ich::StableHashingContext; use crate::middle::cstore::CrateStoreDyn; use crate::middle::resolve_lifetime::ObjectLifetimeDefault; @@ -106,7 +107,6 @@ pub mod outlives; pub mod print; pub mod query; pub mod relate; -pub mod steal; pub mod subst; pub mod trait_def; pub mod util; @@ -611,6 +611,18 @@ pub struct TyS<'tcx> { outer_exclusive_binder: ty::DebruijnIndex, } +impl<'tcx> TyS<'tcx> { + /// A constructor used only for internal testing. + #[allow(rustc::usage_of_ty_tykind)] + pub fn make_for_test( + kind: TyKind<'tcx>, + flags: TypeFlags, + outer_exclusive_binder: ty::DebruijnIndex, + ) -> TyS<'tcx> { + TyS { kind, flags, outer_exclusive_binder } + } +} + // `TyS` is used a lot. Make sure it doesn't unintentionally get bigger. 
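The comment just above, together with the `static_assert_size!(TyS<'_>, 32)` call that immediately follows it, pins the size of a hot type at compile time so it cannot grow unnoticed. Below is a standalone approximation of that idea using a plain `const` assertion; `static_assert_size!` is a rustc-internal macro, the `Interned` type here is invented for illustration, and the const `assert!` form assumes a Rust version where assertions are usable in const contexts.

```rust
use std::mem::size_of;

// A frequently allocated type whose size we do not want to grow by accident.
#[allow(dead_code)]
struct Interned {
    kind: u32,
    flags: u32,
    index: u64,
}

// Compile-time check in the spirit of rustc's `static_assert_size!`: if a
// field is added and the size changes, the build fails here instead of
// silently regressing memory use.
const _: () = assert!(size_of::<Interned>() == 16);

fn main() {
    println!("Interned is {} bytes", size_of::<Interned>());
}
```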
#[cfg(target_arch = "x86_64")] static_assert_size!(TyS<'_>, 32); @@ -674,6 +686,12 @@ pub struct UpvarId { pub closure_expr_id: LocalDefId, } +impl UpvarId { + pub fn new(var_hir_id: hir::HirId, closure_def_id: LocalDefId) -> UpvarId { + UpvarId { var_path: UpvarPath { hir_id: var_hir_id }, closure_expr_id: closure_def_id } + } +} + #[derive(Clone, PartialEq, Debug, TyEncodable, TyDecodable, Copy, HashStable)] pub enum BorrowKind { /// Data must be immutable and is aliasable. @@ -756,6 +774,56 @@ pub struct UpvarBorrow<'tcx> { pub region: ty::Region<'tcx>, } +/// Given a closure's `DefId`, this map provides a map of its root variables to the minimum +/// set of `CapturedPlace`s that need to be tracked to support all captures of that closure. +pub type MinCaptureInformationMap<'tcx> = FxHashMap<DefId, RootVariableMinCaptureList<'tcx>>; + +/// Part of `MinCaptureInformationMap`; maps a root variable to its list of `CapturedPlace`s. +/// Used to track the minimum set of `Place`s that need to be captured to support all +/// places captured by the closure starting at a given root variable. +/// +/// This provides a convenient and quick way of checking if a variable being used within +/// a closure is a capture of a local variable. +pub type RootVariableMinCaptureList<'tcx> = FxIndexMap<hir::HirId, MinCaptureList<'tcx>>; + +/// Part of `MinCaptureInformationMap`; a list of `CapturedPlace`s. +pub type MinCaptureList<'tcx> = Vec<CapturedPlace<'tcx>>; + +/// A `Place` and the corresponding `CaptureInfo`. +#[derive(PartialEq, Clone, Debug, TyEncodable, TyDecodable, HashStable)] +pub struct CapturedPlace<'tcx> { + pub place: HirPlace<'tcx>, + pub info: CaptureInfo<'tcx>, +} + +/// Part of `MinCaptureInformationMap`; describes the capture kind (&, &mut, move) +/// for a particular capture as well as identifying the part of the source code +/// that triggered this capture to occur. +#[derive(PartialEq, Clone, Debug, Copy, TyEncodable, TyDecodable, HashStable)] pub struct CaptureInfo<'tcx> { + /// Expression id pointing to the use that resulted in selecting the current capture kind. + /// + /// If the user doesn't enable the `capture_disjoint_fields` feature (RFC 2229), it is + /// possible that we don't see the use of a particular place, resulting in `expr_id` being + /// `None`. In such a case we fall back on `upvars_mentioned` for the span. + /// + /// E.g.: + /// ```rust,no_run + /// let x = 5; + /// + /// let c = || { + ///     let _ = x; + /// }; + /// ``` + /// + /// In this example, if `capture_disjoint_fields` is **not** set, then `x` will be captured, + /// but we won't see it being used during capture analysis, since it's essentially a discard. + pub expr_id: Option<hir::HirId>, + + /// The capture mode that was selected. + pub capture_kind: UpvarCapture<'tcx>, +} + pub type UpvarListMap = FxHashMap<DefId, FxIndexMap<hir::HirId, UpvarId>>; pub type UpvarCaptureMap<'tcx> = FxHashMap<UpvarId, UpvarCapture<'tcx>>; @@ -1580,11 +1648,9 @@ impl UniverseIndex { } } -/// The "placeholder index" fully defines a placeholder region. -/// Placeholder regions are identified by both a **universe** as well -/// as a "bound-region" within that universe. The `bound_region` is -/// basically a name -- distinct bound regions within the same -/// universe are just two regions with an unknown relationship to one +/// The "placeholder index" fully defines a placeholder region, type, or const. Placeholders are +/// identified by both a universe and a name residing within that universe. Distinct bound +/// regions/types/consts within the same universe simply have an unknown relationship to one +/// another.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, PartialOrd, Ord)] pub struct Placeholder { @@ -1606,7 +1672,14 @@ pub type PlaceholderRegion = Placeholder; pub type PlaceholderType = Placeholder; -pub type PlaceholderConst = Placeholder; +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable)] +#[derive(TyEncodable, TyDecodable, PartialOrd, Ord)] +pub struct BoundConst<'tcx> { + pub var: BoundVar, + pub ty: Ty<'tcx>, +} + +pub type PlaceholderConst<'tcx> = Placeholder>; /// A `DefId` which is potentially bundled with its corresponding generic parameter /// in case `did` is a const argument. @@ -1772,11 +1845,11 @@ impl<'a, 'tcx> HashStable> for ParamEnv<'tcx> { } impl<'tcx> TypeFoldable<'tcx> for ParamEnv<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { + fn super_fold_with>(self, folder: &mut F) -> Self { ParamEnv::new(self.caller_bounds().fold_with(folder), self.reveal().fold_with(folder)) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.caller_bounds().visit_with(visitor)?; self.reveal().visit_with(visitor) } diff --git a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs index a594a8ad512..9d97815a5f1 100644 --- a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs +++ b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs @@ -30,7 +30,7 @@ impl<'tcx> TyCtxt<'tcx> { // Erase first before we do the real query -- this keeps the // cache from being too polluted. - let value = self.erase_regions(&value); + let value = self.erase_regions(value); if !value.has_projections() { value } else { @@ -49,7 +49,7 @@ impl<'tcx> TyCtxt<'tcx> { pub fn normalize_erasing_late_bound_regions( self, param_env: ty::ParamEnv<'tcx>, - value: &ty::Binder, + value: ty::Binder, ) -> T where T: TypeFoldable<'tcx>, @@ -65,7 +65,7 @@ impl<'tcx> TyCtxt<'tcx> { self, param_substs: SubstsRef<'tcx>, param_env: ty::ParamEnv<'tcx>, - value: &T, + value: T, ) -> T where T: TypeFoldable<'tcx>, diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs index 8ff4adda606..38f8e779f6a 100644 --- a/compiler/rustc_middle/src/ty/print/pretty.rs +++ b/compiler/rustc_middle/src/ty/print/pretty.rs @@ -63,7 +63,7 @@ thread_local! { /// Avoids running any queries during any prints that occur /// during the closure. This may alter the appearance of some /// types (e.g. forcing verbose printing for opaque types). -/// This method is used during some queries (e.g. `predicates_of` +/// This method is used during some queries (e.g. `explicit_item_bounds` /// for opaque types), to ensure that any debug printing that /// occurs during the query computation does not end up recursively /// calling the same query. 
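The doc comment closing the previous hunk describes a helper that disables queries for the duration of a closure, so that debug printing performed inside a query cannot recursively call that same query. That guard shape is essentially a thread-local flag plus a scoped setter; the sketch below models it with invented names (`NO_LOOKUPS`, `with_no_lookups`, `describe`) rather than the actual rustc internals.

```rust
use std::cell::Cell;

thread_local! {
    // While true, expensive lookups are disabled and a plain fallback is
    // used, mirroring the "no queries" guard described above.
    static NO_LOOKUPS: Cell<bool> = Cell::new(false);
}

// Run `f` with lookups disabled, restoring the previous state afterwards.
fn with_no_lookups<R>(f: impl FnOnce() -> R) -> R {
    NO_LOOKUPS.with(|flag| {
        let old = flag.replace(true);
        let result = f();
        flag.set(old);
        result
    })
}

fn describe(id: u32) -> String {
    NO_LOOKUPS.with(|flag| {
        if flag.get() {
            // Recursion-free fallback path.
            format!("id#{}", id)
        } else {
            // Imagine this branch consulted a cache or query that could in
            // turn call `describe` again; the guard keeps that from looping.
            format!("pretty name for {}", id)
        }
    })
}

fn main() {
    assert_eq!(describe(7), "pretty name for 7");
    assert_eq!(with_no_lookups(|| describe(7)), "id#7");
}
```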
@@ -1750,7 +1750,7 @@ impl FmtPrinter<'_, 'tcx, F> { define_scoped_cx!(self); let mut region_index = self.region_index; - let new_value = self.tcx.replace_late_bound_regions(value, |br| { + let new_value = self.tcx.replace_late_bound_regions(value.clone(), |br| { let _ = start_or_continue(&mut self, "for<", ", "); let br = match br { ty::BrNamed(_, name) => { @@ -1796,7 +1796,7 @@ impl FmtPrinter<'_, 'tcx, F> { { struct LateBoundRegionNameCollector<'a>(&'a mut FxHashSet); impl<'tcx> ty::fold::TypeVisitor<'tcx> for LateBoundRegionNameCollector<'_> { - fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> { + fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow { if let ty::ReLateBound(_, ty::BrNamed(_, name)) = *r { self.0.insert(name); } diff --git a/compiler/rustc_middle/src/ty/query/mod.rs b/compiler/rustc_middle/src/ty/query/mod.rs index 7ba4d5a14df..187f86a52f4 100644 --- a/compiler/rustc_middle/src/ty/query/mod.rs +++ b/compiler/rustc_middle/src/ty/query/mod.rs @@ -28,13 +28,13 @@ use crate::traits::query::{ }; use crate::traits::specialization_graph; use crate::traits::{self, ImplSource}; -use crate::ty::steal::Steal; use crate::ty::subst::{GenericArg, SubstsRef}; use crate::ty::util::AlwaysRequiresDrop; use crate::ty::{self, AdtSizedConstraint, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt}; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap}; use rustc_data_structures::stable_hasher::StableVec; +use rustc_data_structures::steal::Steal; use rustc_data_structures::svh::Svh; use rustc_data_structures::sync::Lrc; use rustc_errors::ErrorReported; diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs index 89fd803fe51..94e69a93a6b 100644 --- a/compiler/rustc_middle/src/ty/structural_impls.rs +++ b/compiler/rustc_middle/src/ty/structural_impls.rs @@ -7,12 +7,12 @@ use crate::mir::ProjectionKind; use crate::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; use crate::ty::print::{with_no_trimmed_paths, FmtPrinter, Printer}; use crate::ty::{self, InferConst, Lift, Ty, TyCtxt}; +use rustc_data_structures::functor::IdFunctor; use rustc_hir as hir; use rustc_hir::def::Namespace; use rustc_hir::def_id::CRATE_DEF_INDEX; use rustc_index::vec::{Idx, IndexVec}; -use smallvec::SmallVec; use std::fmt; use std::ops::ControlFlow; use std::rc::Rc; @@ -274,7 +274,7 @@ impl fmt::Debug for ty::PredicateAtom<'tcx> { // For things that don't carry any arena-allocated data (and are // copy...), just add them to this list. -CloneTypeFoldableAndLiftImpls! { +TrivialTypeFoldableAndLiftImpls! { (), bool, usize, @@ -725,21 +725,21 @@ impl<'a, 'tcx> Lift<'tcx> for ty::InstanceDef<'a> { /// AdtDefs are basically the same as a DefId. 
impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::AdtDef { - fn super_fold_with>(&self, _folder: &mut F) -> Self { - *self + fn super_fold_with>(self, _folder: &mut F) -> Self { + self } - fn super_visit_with>(&self, _visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, _visitor: &mut V) -> ControlFlow { ControlFlow::CONTINUE } } impl<'tcx, T: TypeFoldable<'tcx>, U: TypeFoldable<'tcx>> TypeFoldable<'tcx> for (T, U) { - fn super_fold_with>(&self, folder: &mut F) -> (T, U) { + fn super_fold_with>(self, folder: &mut F) -> (T, U) { (self.0.fold_with(folder), self.1.fold_with(folder)) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.0.visit_with(visitor)?; self.1.visit_with(visitor) } @@ -748,11 +748,11 @@ impl<'tcx, T: TypeFoldable<'tcx>, U: TypeFoldable<'tcx>> TypeFoldable<'tcx> for impl<'tcx, A: TypeFoldable<'tcx>, B: TypeFoldable<'tcx>, C: TypeFoldable<'tcx>> TypeFoldable<'tcx> for (A, B, C) { - fn super_fold_with>(&self, folder: &mut F) -> (A, B, C) { + fn super_fold_with>(self, folder: &mut F) -> (A, B, C) { (self.0.fold_with(folder), self.1.fold_with(folder), self.2.fold_with(folder)) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.0.visit_with(visitor)?; self.1.visit_with(visitor)?; self.2.visit_with(visitor) @@ -774,106 +774,107 @@ EnumTypeFoldableImpl! { } impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Rc { - fn super_fold_with>(&self, folder: &mut F) -> Self { - Rc::new((**self).fold_with(folder)) + fn super_fold_with>(self, folder: &mut F) -> Self { + // FIXME: Reuse the `Rc` here. + Rc::new((*self).clone().fold_with(folder)) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { (**self).visit_with(visitor) } } impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Arc { - fn super_fold_with>(&self, folder: &mut F) -> Self { - Arc::new((**self).fold_with(folder)) + fn super_fold_with>(self, folder: &mut F) -> Self { + // FIXME: Reuse the `Arc` here. 
+ Arc::new((*self).clone().fold_with(folder)) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { (**self).visit_with(visitor) } } impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box { - fn super_fold_with>(&self, folder: &mut F) -> Self { - let content: T = (**self).fold_with(folder); - box content + fn super_fold_with>(self, folder: &mut F) -> Self { + self.map_id(|value| value.fold_with(folder)) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { (**self).visit_with(visitor) } } impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Vec { - fn super_fold_with>(&self, folder: &mut F) -> Self { - self.iter().map(|t| t.fold_with(folder)).collect() + fn super_fold_with>(self, folder: &mut F) -> Self { + self.map_id(|t| t.fold_with(folder)) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.iter().try_for_each(|t| t.visit_with(visitor)) } } impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box<[T]> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - self.iter().map(|t| t.fold_with(folder)).collect::>().into_boxed_slice() + fn super_fold_with>(self, folder: &mut F) -> Self { + self.map_id(|t| t.fold_with(folder)) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.iter().try_for_each(|t| t.visit_with(visitor)) } } impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder { - fn super_fold_with>(&self, folder: &mut F) -> Self { - self.map_bound_ref(|ty| ty.fold_with(folder)) + fn super_fold_with>(self, folder: &mut F) -> Self { + self.map_bound(|ty| ty.fold_with(folder)) } - fn fold_with>(&self, folder: &mut F) -> Self { + fn fold_with>(self, folder: &mut F) -> Self { folder.fold_binder(self) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.as_ref().skip_binder().visit_with(visitor) } - fn visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn visit_with>(&self, visitor: &mut V) -> ControlFlow { visitor.visit_binder(self) } } impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - fold_list(*self, folder, |tcx, v| tcx.intern_existential_predicates(v)) + fn super_fold_with>(self, folder: &mut F) -> Self { + ty::util::fold_list(self, folder, |tcx, v| tcx.intern_existential_predicates(v)) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.iter().try_for_each(|p| p.visit_with(visitor)) } } impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - fold_list(*self, folder, |tcx, v| tcx.intern_type_list(v)) + fn super_fold_with>(self, folder: &mut F) -> Self { + ty::util::fold_list(self, folder, |tcx, v| tcx.intern_type_list(v)) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.iter().try_for_each(|t| t.visit_with(visitor)) } } impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List { - fn super_fold_with>(&self, folder: &mut F) -> Self { - fold_list(*self, folder, |tcx, v| tcx.intern_projs(v)) + fn super_fold_with>(self, folder: &mut F) -> Self { + 
ty::util::fold_list(self, folder, |tcx, v| tcx.intern_projs(v)) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.iter().try_for_each(|t| t.visit_with(visitor)) } } impl<'tcx> TypeFoldable<'tcx> for ty::instance::Instance<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { + fn super_fold_with>(self, folder: &mut F) -> Self { use crate::ty::InstanceDef::*; Self { substs: self.substs.fold_with(folder), @@ -893,7 +894,7 @@ impl<'tcx> TypeFoldable<'tcx> for ty::instance::Instance<'tcx> { } } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { use crate::ty::InstanceDef::*; self.substs.visit_with(visitor)?; match self.def { @@ -915,36 +916,36 @@ impl<'tcx> TypeFoldable<'tcx> for ty::instance::Instance<'tcx> { } impl<'tcx> TypeFoldable<'tcx> for interpret::GlobalId<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { + fn super_fold_with>(self, folder: &mut F) -> Self { Self { instance: self.instance.fold_with(folder), promoted: self.promoted } } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.instance.visit_with(visitor) } } impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - let kind = match self.kind() { + fn super_fold_with>(self, folder: &mut F) -> Self { + let kind = match *self.kind() { ty::RawPtr(tm) => ty::RawPtr(tm.fold_with(folder)), ty::Array(typ, sz) => ty::Array(typ.fold_with(folder), sz.fold_with(folder)), ty::Slice(typ) => ty::Slice(typ.fold_with(folder)), - ty::Adt(tid, substs) => ty::Adt(*tid, substs.fold_with(folder)), - ty::Dynamic(ref trait_ty, ref region) => { + ty::Adt(tid, substs) => ty::Adt(tid, substs.fold_with(folder)), + ty::Dynamic(trait_ty, region) => { ty::Dynamic(trait_ty.fold_with(folder), region.fold_with(folder)) } ty::Tuple(ts) => ty::Tuple(ts.fold_with(folder)), - ty::FnDef(def_id, substs) => ty::FnDef(*def_id, substs.fold_with(folder)), + ty::FnDef(def_id, substs) => ty::FnDef(def_id, substs.fold_with(folder)), ty::FnPtr(f) => ty::FnPtr(f.fold_with(folder)), - ty::Ref(ref r, ty, mutbl) => ty::Ref(r.fold_with(folder), ty.fold_with(folder), *mutbl), + ty::Ref(r, ty, mutbl) => ty::Ref(r.fold_with(folder), ty.fold_with(folder), mutbl), ty::Generator(did, substs, movability) => { - ty::Generator(*did, substs.fold_with(folder), *movability) + ty::Generator(did, substs.fold_with(folder), movability) } ty::GeneratorWitness(types) => ty::GeneratorWitness(types.fold_with(folder)), - ty::Closure(did, substs) => ty::Closure(*did, substs.fold_with(folder)), - ty::Projection(ref data) => ty::Projection(data.fold_with(folder)), - ty::Opaque(did, substs) => ty::Opaque(*did, substs.fold_with(folder)), + ty::Closure(did, substs) => ty::Closure(did, substs.fold_with(folder)), + ty::Projection(data) => ty::Projection(data.fold_with(folder)), + ty::Opaque(did, substs) => ty::Opaque(did, substs.fold_with(folder)), ty::Bool | ty::Char @@ -964,11 +965,11 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { if *self.kind() == kind { self } else { folder.tcx().mk_ty(kind) } } - fn fold_with>(&self, folder: &mut F) -> Self { - folder.fold_ty(*self) + fn fold_with>(self, folder: &mut F) -> Self { + folder.fold_ty(self) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { match 
self.kind() { ty::RawPtr(ref tm) => tm.visit_with(visitor), ty::Array(typ, sz) => { @@ -1010,40 +1011,40 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { } } - fn visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn visit_with>(&self, visitor: &mut V) -> ControlFlow { visitor.visit_ty(self) } } impl<'tcx> TypeFoldable<'tcx> for ty::Region<'tcx> { - fn super_fold_with>(&self, _folder: &mut F) -> Self { - *self + fn super_fold_with>(self, _folder: &mut F) -> Self { + self } - fn fold_with>(&self, folder: &mut F) -> Self { - folder.fold_region(*self) + fn fold_with>(self, folder: &mut F) -> Self { + folder.fold_region(self) } - fn super_visit_with>(&self, _visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, _visitor: &mut V) -> ControlFlow { ControlFlow::CONTINUE } - fn visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn visit_with>(&self, visitor: &mut V) -> ControlFlow { visitor.visit_region(*self) } } impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - let new = ty::PredicateKind::super_fold_with(&self.inner.kind, folder); - folder.tcx().reuse_or_mk_predicate(*self, new) + fn super_fold_with>(self, folder: &mut F) -> Self { + let new = ty::PredicateKind::super_fold_with(self.inner.kind, folder); + folder.tcx().reuse_or_mk_predicate(self, new) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { ty::PredicateKind::super_visit_with(&self.inner.kind, visitor) } - fn visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn visit_with>(&self, visitor: &mut V) -> ControlFlow { visitor.visit_predicate(*self) } @@ -1057,53 +1058,53 @@ impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> { } impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - fold_list(*self, folder, |tcx, v| tcx.intern_predicates(v)) + fn super_fold_with>(self, folder: &mut F) -> Self { + ty::util::fold_list(self, folder, |tcx, v| tcx.intern_predicates(v)) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.iter().try_for_each(|p| p.visit_with(visitor)) } } impl<'tcx, T: TypeFoldable<'tcx>, I: Idx> TypeFoldable<'tcx> for IndexVec { - fn super_fold_with>(&self, folder: &mut F) -> Self { - self.iter().map(|x| x.fold_with(folder)).collect() + fn super_fold_with>(self, folder: &mut F) -> Self { + self.map_id(|x| x.fold_with(folder)) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.iter().try_for_each(|t| t.visit_with(visitor)) } } impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Const<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { + fn super_fold_with>(self, folder: &mut F) -> Self { let ty = self.ty.fold_with(folder); let val = self.val.fold_with(folder); if ty != self.ty || val != self.val { folder.tcx().mk_const(ty::Const { ty, val }) } else { - *self + self } } - fn fold_with>(&self, folder: &mut F) -> Self { - folder.fold_const(*self) + fn fold_with>(self, folder: &mut F) -> Self { + folder.fold_const(self) } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.ty.visit_with(visitor)?; self.val.visit_with(visitor) } - fn visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn visit_with>(&self, visitor: &mut V) -> 
ControlFlow { visitor.visit_const(self) } } impl<'tcx> TypeFoldable<'tcx> for ty::ConstKind<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - match *self { + fn super_fold_with>(self, folder: &mut F) -> Self { + match self { ty::ConstKind::Infer(ic) => ty::ConstKind::Infer(ic.fold_with(folder)), ty::ConstKind::Param(p) => ty::ConstKind::Param(p.fold_with(folder)), ty::ConstKind::Unevaluated(did, substs, promoted) => { @@ -1112,11 +1113,11 @@ impl<'tcx> TypeFoldable<'tcx> for ty::ConstKind<'tcx> { ty::ConstKind::Value(_) | ty::ConstKind::Bound(..) | ty::ConstKind::Placeholder(..) - | ty::ConstKind::Error(_) => *self, + | ty::ConstKind::Error(_) => self, } } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { match *self { ty::ConstKind::Infer(ic) => ic.visit_with(visitor), ty::ConstKind::Param(p) => p.visit_with(visitor), @@ -1130,42 +1131,11 @@ impl<'tcx> TypeFoldable<'tcx> for ty::ConstKind<'tcx> { } impl<'tcx> TypeFoldable<'tcx> for InferConst<'tcx> { - fn super_fold_with>(&self, _folder: &mut F) -> Self { - *self + fn super_fold_with>(self, _folder: &mut F) -> Self { + self } - fn super_visit_with>(&self, _visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, _visitor: &mut V) -> ControlFlow { ControlFlow::CONTINUE } } - -// Does the equivalent of -// ``` -// let v = self.iter().map(|p| p.fold_with(folder)).collect::>(); -// folder.tcx().intern_*(&v) -// ``` -fn fold_list<'tcx, F, T>( - list: &'tcx ty::List, - folder: &mut F, - intern: impl FnOnce(TyCtxt<'tcx>, &[T]) -> &'tcx ty::List, -) -> &'tcx ty::List -where - F: TypeFolder<'tcx>, - T: TypeFoldable<'tcx> + PartialEq + Copy, -{ - let mut iter = list.iter(); - // Look for the first element that changed - if let Some((i, new_t)) = iter.by_ref().enumerate().find_map(|(i, t)| { - let new_t = t.fold_with(folder); - if new_t == t { None } else { Some((i, new_t)) } - }) { - // An element changed, prepare to intern the resulting list - let mut new_list = SmallVec::<[_; 8]>::with_capacity(list.len()); - new_list.extend_from_slice(&list[..i]); - new_list.push(new_t); - new_list.extend(iter.map(|t| t.fold_with(folder))); - intern(folder.tcx(), &new_list) - } else { - list - } -} diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs index 384d08f8348..4bf16436855 100644 --- a/compiler/rustc_middle/src/ty/sty.rs +++ b/compiler/rustc_middle/src/ty/sty.rs @@ -1001,7 +1001,7 @@ impl Binder { T: TypeFoldable<'tcx>, { if value.has_escaping_bound_vars() { - Binder::bind(super::fold::shift_vars(tcx, &value, 1)) + Binder::bind(super::fold::shift_vars(tcx, value, 1)) } else { Binder::dummy(value) } diff --git a/compiler/rustc_middle/src/ty/subst.rs b/compiler/rustc_middle/src/ty/subst.rs index 07f775cf8b1..5d1b976ae97 100644 --- a/compiler/rustc_middle/src/ty/subst.rs +++ b/compiler/rustc_middle/src/ty/subst.rs @@ -152,7 +152,7 @@ impl<'a, 'tcx> Lift<'tcx> for GenericArg<'a> { } impl<'tcx> TypeFoldable<'tcx> for GenericArg<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { + fn super_fold_with>(self, folder: &mut F) -> Self { match self.unpack() { GenericArgKind::Lifetime(lt) => lt.fold_with(folder).into(), GenericArgKind::Type(ty) => ty.fold_with(folder).into(), @@ -160,7 +160,7 @@ impl<'tcx> TypeFoldable<'tcx> for GenericArg<'tcx> { } } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { match self.unpack() { 
GenericArgKind::Lifetime(lt) => lt.visit_with(visitor), GenericArgKind::Type(ty) => ty.visit_with(visitor), @@ -363,7 +363,7 @@ impl<'a, 'tcx> InternalSubsts<'tcx> { } impl<'tcx> TypeFoldable<'tcx> for SubstsRef<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { + fn super_fold_with>(self, folder: &mut F) -> Self { // This code is hot enough that it's worth specializing for the most // common length lists, to avoid the overhead of `SmallVec` creation. // The match arms are in order of frequency. The 1, 2, and 0 cases are @@ -392,7 +392,7 @@ impl<'tcx> TypeFoldable<'tcx> for SubstsRef<'tcx> { } } - fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow<()> { + fn super_visit_with>(&self, visitor: &mut V) -> ControlFlow { self.iter().try_for_each(|t| t.visit_with(visitor)) } } @@ -405,12 +405,12 @@ impl<'tcx> TypeFoldable<'tcx> for SubstsRef<'tcx> { // there is more information available (for better errors). pub trait Subst<'tcx>: Sized { - fn subst(&self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> Self { + fn subst(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> Self { self.subst_spanned(tcx, substs, None) } fn subst_spanned( - &self, + self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>], span: Option, @@ -419,13 +419,13 @@ pub trait Subst<'tcx>: Sized { impl<'tcx, T: TypeFoldable<'tcx>> Subst<'tcx> for T { fn subst_spanned( - &self, + self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>], span: Option, ) -> T { let mut folder = SubstFolder { tcx, substs, span, binders_passed: 0 }; - (*self).fold_with(&mut folder) + self.fold_with(&mut folder) } } @@ -448,7 +448,7 @@ impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> { self.tcx } - fn fold_binder>(&mut self, t: &ty::Binder) -> ty::Binder { + fn fold_binder>(&mut self, t: ty::Binder) -> ty::Binder { self.binders_passed += 1; let t = t.super_fold_with(self); self.binders_passed -= 1; @@ -634,7 +634,7 @@ impl<'a, 'tcx> SubstFolder<'a, 'tcx> { return val; } - let result = ty::fold::shift_vars(self.tcx(), &val, self.binders_passed); + let result = ty::fold::shift_vars(self.tcx(), val, self.binders_passed); debug!("shift_vars: shifted result = {:?}", result); result diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs index 5f117e19eca..e23c3f51967 100644 --- a/compiler/rustc_middle/src/ty/util.rs +++ b/compiler/rustc_middle/src/ty/util.rs @@ -160,7 +160,7 @@ impl<'tcx> TyCtxt<'tcx> { // We want the type_id be independent of the types free regions, so we // erase them. The erase_regions() call will also anonymize bound // regions, which is desirable too. 
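The hunks above change `TypeFoldable::super_fold_with` (and the `Subst` trait) to take `self` by value instead of `&self`. A minimal, self-contained sketch of that pattern, using invented `Folder`/`Foldable` traits rather than rustc's real `TypeFolder`/`TypeFoldable`, shows why by-value folding lets a container rebuild itself from owned elements instead of cloning each one:

```
// Illustrative stand-ins only; not rustc's actual folding traits.
trait Folder {
    fn fold_leaf(&mut self, leaf: u32) -> u32;
}

trait Foldable: Sized {
    fn fold_with<F: Folder>(self, folder: &mut F) -> Self;
}

impl Foldable for u32 {
    fn fold_with<F: Folder>(self, folder: &mut F) -> Self {
        folder.fold_leaf(self)
    }
}

impl<T: Foldable> Foldable for Vec<T> {
    // By-value `self`: each element is moved out, folded, and collected back,
    // so nothing has to be cloned just to build the new container.
    fn fold_with<F: Folder>(self, folder: &mut F) -> Self {
        self.into_iter().map(|t| t.fold_with(folder)).collect()
    }
}

struct AddOne;

impl Folder for AddOne {
    fn fold_leaf(&mut self, leaf: u32) -> u32 {
        leaf + 1
    }
}

fn main() {
    assert_eq!(vec![1u32, 2, 3].fold_with(&mut AddOne), vec![2, 3, 4]);
}
```

The actual patch routes the container impls through a `map_id` helper; the sketch only illustrates the ownership change.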
- let ty = self.erase_regions(&ty); + let ty = self.erase_regions(ty); hcx.while_hashing_spans(false, |hcx| { hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { @@ -1130,6 +1130,37 @@ pub fn needs_drop_components( } } +// Does the equivalent of +// ``` +// let v = self.iter().map(|p| p.fold_with(folder)).collect::>(); +// folder.tcx().intern_*(&v) +// ``` +pub fn fold_list<'tcx, F, T>( + list: &'tcx ty::List, + folder: &mut F, + intern: impl FnOnce(TyCtxt<'tcx>, &[T]) -> &'tcx ty::List, +) -> &'tcx ty::List +where + F: TypeFolder<'tcx>, + T: TypeFoldable<'tcx> + PartialEq + Copy, +{ + let mut iter = list.iter(); + // Look for the first element that changed + if let Some((i, new_t)) = iter.by_ref().enumerate().find_map(|(i, t)| { + let new_t = t.fold_with(folder); + if new_t == t { None } else { Some((i, new_t)) } + }) { + // An element changed, prepare to intern the resulting list + let mut new_list = SmallVec::<[_; 8]>::with_capacity(list.len()); + new_list.extend_from_slice(&list[..i]); + new_list.push(new_t); + new_list.extend(iter.map(|t| t.fold_with(folder))); + intern(folder.tcx(), &new_list) + } else { + list + } +} + #[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)] pub struct AlwaysRequiresDrop; diff --git a/compiler/rustc_mir/Cargo.toml b/compiler/rustc_mir/Cargo.toml index 487668cfa11..9bfd1da0391 100644 --- a/compiler/rustc_mir/Cargo.toml +++ b/compiler/rustc_mir/Cargo.toml @@ -31,3 +31,6 @@ rustc_ast = { path = "../rustc_ast" } rustc_span = { path = "../rustc_span" } rustc_apfloat = { path = "../rustc_apfloat" } smallvec = { version = "1.0", features = ["union", "may_dangle"] } + +[dev-dependencies] +coverage_test_macros = { path = "src/transform/coverage/test_macros" } diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/mod.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/mod.rs index 4256f6e39d5..41f3edaa413 100644 --- a/compiler/rustc_mir/src/borrow_check/diagnostics/mod.rs +++ b/compiler/rustc_mir/src/borrow_check/diagnostics/mod.rs @@ -383,16 +383,14 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> { self.describe_field_from_ty(&ty, field, variant_index) } ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => { - // `tcx.upvars_mentioned(def_id)` returns an `Option`, which is `None` in case - // the closure comes from another crate. But in that case we wouldn't - // be borrowck'ing it, so we can just unwrap: - let (&var_id, _) = self - .infcx - .tcx - .upvars_mentioned(def_id) - .unwrap() - .get_index(field.index()) - .unwrap(); + // We won't be borrowck'ing here if the closure came from another crate, + // so it's safe to call `expect_local`. + // + // We know the field exists so it's safe to call operator[] and `unwrap` here. 
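The `fold_list` helper moved into `ty::util` above re-interns a list only when at least one element actually changes under folding. A hedged, standalone sketch of the same "return the original unless something changed" idea, with `Rc<Vec<T>>` standing in for the arena-interned `ty::List<T>` and a plain closure for the folder:

```
use std::rc::Rc;

/// Folds each element with `f`; if nothing changes, the original list is
/// returned as-is (no new allocation, nothing to re-intern).
fn fold_list<T: Clone + PartialEq>(
    list: &Rc<Vec<T>>,
    mut f: impl FnMut(&T) -> T,
) -> Rc<Vec<T>> {
    // Look for the first element that changed.
    if let Some((i, first_new)) = list.iter().enumerate().find_map(|(i, t)| {
        let new_t = f(t);
        if new_t == *t { None } else { Some((i, new_t)) }
    }) {
        // An element changed: copy the unchanged prefix, then fold the rest.
        let mut new_list = Vec::with_capacity(list.len());
        new_list.extend_from_slice(&list[..i]);
        new_list.push(first_new);
        new_list.extend(list[i + 1..].iter().map(|t| f(t)));
        Rc::new(new_list)
    } else {
        Rc::clone(list)
    }
}

fn main() {
    let xs = Rc::new(vec![1, 2, 3]);
    // Identity fold: the very same list comes back.
    assert!(Rc::ptr_eq(&xs, &fold_list(&xs, |x| *x)));
    // A real change forces a rebuild.
    assert_eq!(*fold_list(&xs, |x| x * 2), vec![2, 4, 6]);
}
```

In rustc the payoff goes beyond a skipped allocation: an unchanged list keeps its interned pointer, so later identity-based comparisons stay cheap.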
+ let (&var_id, _) = + self.infcx.tcx.typeck(def_id.expect_local()).closure_captures[&def_id] + .get_index(field.index()) + .unwrap(); self.infcx.tcx.hir().name(var_id).to_string() } @@ -967,9 +965,12 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> { let expr = &self.infcx.tcx.hir().expect_expr(hir_id).kind; debug!("closure_span: hir_id={:?} expr={:?}", hir_id, expr); if let hir::ExprKind::Closure(.., body_id, args_span, _) = expr { - for ((upvar_hir_id, upvar), place) in - self.infcx.tcx.upvars_mentioned(def_id)?.iter().zip(places) + for (upvar_hir_id, place) in + self.infcx.tcx.typeck(def_id.expect_local()).closure_captures[&def_id] + .keys() + .zip(places) { + let span = self.infcx.tcx.upvars_mentioned(local_did)?[upvar_hir_id].span; match place { Operand::Copy(place) | Operand::Move(place) if target_place == place.as_ref() => @@ -991,7 +992,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> { let usage_span = match self.infcx.tcx.typeck(local_did).upvar_capture(upvar_id) { ty::UpvarCapture::ByValue(Some(span)) => span, - _ => upvar.span, + _ => span, }; return Some((*args_span, generator_kind, usage_span)); } diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/region_name.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/region_name.rs index 2e5a231fef0..2a90fb042dd 100644 --- a/compiler/rustc_mir/src/borrow_check/diagnostics/region_name.rs +++ b/compiler/rustc_mir/src/borrow_check/diagnostics/region_name.rs @@ -6,8 +6,8 @@ use rustc_hir::def::{DefKind, Res}; use rustc_middle::ty::print::RegionHighlightMode; use rustc_middle::ty::subst::{GenericArgKind, SubstsRef}; use rustc_middle::ty::{self, RegionVid, Ty}; -use rustc_span::symbol::kw; -use rustc_span::{symbol::Symbol, Span, DUMMY_SP}; +use rustc_span::symbol::{kw, sym, Ident, Symbol}; +use rustc_span::{Span, DUMMY_SP}; use crate::borrow_check::{nll::ToRegionVid, universal_regions::DefiningTy, MirBorrowckCtxt}; @@ -39,7 +39,7 @@ crate enum RegionNameSource { /// The region corresponding to a closure upvar. AnonRegionFromUpvar(Span, String), /// The region corresponding to the return type of a closure. - AnonRegionFromOutput(Span, String, String), + AnonRegionFromOutput(RegionNameHighlight, String), /// The region from a type yielded by a generator. AnonRegionFromYieldTy(Span, String), /// An anonymous region from an async fn. @@ -57,6 +57,10 @@ crate enum RegionNameHighlight { /// The anonymous region corresponds to a region where the type annotation is completely missing /// from the code, e.g. in a closure arguments `|x| { ... }`, where `x` is a reference. CannotMatchHirTy(Span, String), + /// The anonymous region corresponds to a region where the type annotation is completely missing + /// from the code, and *even if* we print out the full name of the type, the region name won't + /// be included. This currently occurs for opaque types like `impl Future`. 
+ Occluded(Span, String), } impl RegionName { @@ -81,13 +85,14 @@ impl RegionName { | RegionNameSource::NamedFreeRegion(span) | RegionNameSource::SynthesizedFreeEnvRegion(span, _) | RegionNameSource::AnonRegionFromUpvar(span, _) - | RegionNameSource::AnonRegionFromOutput(span, _, _) | RegionNameSource::AnonRegionFromYieldTy(span, _) | RegionNameSource::AnonRegionFromAsyncFn(span) => Some(span), - RegionNameSource::AnonRegionFromArgument(ref highlight) => match *highlight { + RegionNameSource::AnonRegionFromArgument(ref highlight) + | RegionNameSource::AnonRegionFromOutput(ref highlight, _) => match *highlight { RegionNameHighlight::MatchedHirTy(span) | RegionNameHighlight::MatchedAdtAndSegment(span) - | RegionNameHighlight::CannotMatchHirTy(span, _) => Some(span), + | RegionNameHighlight::CannotMatchHirTy(span, _) + | RegionNameHighlight::Occluded(span, _) => Some(span), }, } } @@ -112,6 +117,7 @@ impl RegionName { diag.span_label(*span, format!("has type `{}`", type_name)); } RegionNameSource::AnonRegionFromArgument(RegionNameHighlight::MatchedHirTy(span)) + | RegionNameSource::AnonRegionFromOutput(RegionNameHighlight::MatchedHirTy(span), _) | RegionNameSource::AnonRegionFromAsyncFn(span) => { diag.span_label( *span, @@ -120,16 +126,44 @@ impl RegionName { } RegionNameSource::AnonRegionFromArgument( RegionNameHighlight::MatchedAdtAndSegment(span), + ) + | RegionNameSource::AnonRegionFromOutput( + RegionNameHighlight::MatchedAdtAndSegment(span), + _, ) => { diag.span_label(*span, format!("let's call this `{}`", self)); } + RegionNameSource::AnonRegionFromArgument(RegionNameHighlight::Occluded( + span, + type_name, + )) => { + diag.span_label( + *span, + format!("lifetime `{}` appears in the type {}", self, type_name), + ); + } + RegionNameSource::AnonRegionFromOutput( + RegionNameHighlight::Occluded(span, type_name), + mir_description, + ) => { + diag.span_label( + *span, + format!( + "return type{} `{}` contains a lifetime `{}`", + mir_description, type_name, self + ), + ); + } RegionNameSource::AnonRegionFromUpvar(span, upvar_name) => { diag.span_label( *span, format!("lifetime `{}` appears in the type of `{}`", self, upvar_name), ); } - RegionNameSource::AnonRegionFromOutput(span, mir_description, type_name) => { + RegionNameSource::AnonRegionFromOutput( + RegionNameHighlight::CannotMatchHirTy(span, type_name), + mir_description, + ) => { diag.span_label(*span, format!("return type{} is {}", mir_description, type_name)); } RegionNameSource::AnonRegionFromYieldTy(span, type_name) => { @@ -349,19 +383,21 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> { argument_index, ); - self.get_argument_hir_ty_for_highlighting(argument_index) + let highlight = self + .get_argument_hir_ty_for_highlighting(argument_index) .and_then(|arg_hir_ty| self.highlight_if_we_can_match_hir_ty(fr, arg_ty, arg_hir_ty)) - .or_else(|| { + .unwrap_or_else(|| { // `highlight_if_we_cannot_match_hir_ty` needs to know the number we will give to // the anonymous region. If it succeeds, the `synthesize_region_name` call below // will increment the counter, "reserving" the number we just used. 
let counter = *self.next_region_name.try_borrow().unwrap(); self.highlight_if_we_cannot_match_hir_ty(fr, arg_ty, span, counter) - }) - .map(|highlight| RegionName { - name: self.synthesize_region_name(), - source: RegionNameSource::AnonRegionFromArgument(highlight), - }) + }); + + Some(RegionName { + name: self.synthesize_region_name(), + source: RegionNameSource::AnonRegionFromArgument(highlight), + }) } fn get_argument_hir_ty_for_highlighting( @@ -399,7 +435,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> { ty: Ty<'tcx>, span: Span, counter: usize, - ) -> Option { + ) -> RegionNameHighlight { let mut highlight = RegionHighlightMode::default(); highlight.highlighting_region_vid(needle_fr, counter); let type_name = @@ -411,9 +447,9 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> { ); if type_name.find(&format!("'{}", counter)).is_some() { // Only add a label if we can confirm that a region was labelled. - Some(RegionNameHighlight::CannotMatchHirTy(span, type_name)) + RegionNameHighlight::CannotMatchHirTy(span, type_name) } else { - None + RegionNameHighlight::Occluded(span, type_name) } } @@ -643,6 +679,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> { /// or be early bound (named, not in argument). fn give_name_if_anonymous_region_appears_in_output(&self, fr: RegionVid) -> Option { let tcx = self.infcx.tcx; + let hir = tcx.hir(); let return_ty = self.regioncx.universal_regions().unnormalized_output_ty; debug!("give_name_if_anonymous_region_appears_in_output: return_ty = {:?}", return_ty); @@ -650,42 +687,123 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> { return None; } - let mut highlight = RegionHighlightMode::default(); - highlight.highlighting_region_vid(fr, *self.next_region_name.try_borrow().unwrap()); - let type_name = - self.infcx.extract_inference_diagnostics_data(return_ty.into(), Some(highlight)).name; + let mir_hir_id = self.mir_hir_id(); - let (return_span, mir_description) = match tcx.hir().get(self.mir_hir_id()) { + let (return_span, mir_description, hir_ty) = match hir.get(mir_hir_id) { hir::Node::Expr(hir::Expr { - kind: hir::ExprKind::Closure(_, return_ty, _, span, gen_move), - .. - }) => ( - match return_ty.output { - hir::FnRetTy::DefaultReturn(_) => tcx.sess.source_map().end_point(*span), - hir::FnRetTy::Return(_) => return_ty.output.span(), - }, - if gen_move.is_some() { " of generator" } else { " of closure" }, - ), - hir::Node::ImplItem(hir::ImplItem { - kind: hir::ImplItemKind::Fn(method_sig, _), + kind: hir::ExprKind::Closure(_, return_ty, body_id, span, _), .. 
- }) => (method_sig.decl.output.span(), ""), - _ => (self.body.span, ""), + }) => { + let (mut span, mut hir_ty) = match return_ty.output { + hir::FnRetTy::DefaultReturn(_) => { + (tcx.sess.source_map().end_point(*span), None) + } + hir::FnRetTy::Return(hir_ty) => (return_ty.output.span(), Some(hir_ty)), + }; + let mir_description = match hir.body(*body_id).generator_kind { + Some(hir::GeneratorKind::Async(gen)) => match gen { + hir::AsyncGeneratorKind::Block => " of async block", + hir::AsyncGeneratorKind::Closure => " of async closure", + hir::AsyncGeneratorKind::Fn => { + let parent_item = hir.get(hir.get_parent_item(mir_hir_id)); + let output = &parent_item + .fn_decl() + .expect("generator lowered from async fn should be in fn") + .output; + span = output.span(); + if let hir::FnRetTy::Return(ret) = output { + hir_ty = Some(self.get_future_inner_return_ty(*ret)); + } + " of async function" + } + }, + Some(hir::GeneratorKind::Gen) => " of generator", + None => " of closure", + }; + (span, mir_description, hir_ty) + } + node => match node.fn_decl() { + Some(fn_decl) => { + let hir_ty = match fn_decl.output { + hir::FnRetTy::DefaultReturn(_) => None, + hir::FnRetTy::Return(ty) => Some(ty), + }; + (fn_decl.output.span(), "", hir_ty) + } + None => (self.body.span, "", None), + }, }; + let highlight = hir_ty + .and_then(|hir_ty| self.highlight_if_we_can_match_hir_ty(fr, return_ty, hir_ty)) + .unwrap_or_else(|| { + // `highlight_if_we_cannot_match_hir_ty` needs to know the number we will give to + // the anonymous region. If it succeeds, the `synthesize_region_name` call below + // will increment the counter, "reserving" the number we just used. + let counter = *self.next_region_name.try_borrow().unwrap(); + self.highlight_if_we_cannot_match_hir_ty(fr, return_ty, return_span, counter) + }); + Some(RegionName { - // This counter value will already have been used, so this function will increment it - // so the next value will be used next and return the region name that would have been - // used. name: self.synthesize_region_name(), - source: RegionNameSource::AnonRegionFromOutput( - return_span, - mir_description.to_string(), - type_name, - ), + source: RegionNameSource::AnonRegionFromOutput(highlight, mir_description.to_string()), }) } + /// From the [`hir::Ty`] of an async function's lowered return type, + /// retrieve the `hir::Ty` representing the type the user originally wrote. + /// + /// e.g. given the function: + /// + /// ``` + /// async fn foo() -> i32 {} + /// ``` + /// + /// this function, given the lowered return type of `foo`, an [`OpaqueDef`] that implements `Future`, + /// returns the `i32`. + /// + /// [`OpaqueDef`]: hir::TyKind::OpaqueDef + fn get_future_inner_return_ty(&self, hir_ty: &'tcx hir::Ty<'tcx>) -> &'tcx hir::Ty<'tcx> { + let hir = self.infcx.tcx.hir(); + + if let hir::TyKind::OpaqueDef(id, _) = hir_ty.kind { + let opaque_ty = hir.item(id.id); + if let hir::ItemKind::OpaqueTy(hir::OpaqueTy { + bounds: + [hir::GenericBound::LangItemTrait( + hir::LangItem::Future, + _, + _, + hir::GenericArgs { + bindings: + [hir::TypeBinding { + ident: Ident { name: sym::Output, .. }, + kind: hir::TypeBindingKind::Equality { ty }, + .. + }], + .. + }, + )], + .. 
+ }) = opaque_ty.kind + { + ty + } else { + span_bug!( + hir_ty.span, + "bounds from lowered return type of async fn did not match expected format: {:?}", + opaque_ty + ); + } + } else { + span_bug!( + hir_ty.span, + "lowered return type of async fn is not OpaqueDef: {:?}", + hir_ty + ); + } + } + fn give_name_if_anonymous_region_appears_in_yield_ty( &self, fr: RegionVid, diff --git a/compiler/rustc_mir/src/borrow_check/region_infer/mod.rs b/compiler/rustc_mir/src/borrow_check/region_infer/mod.rs index ac8ab71a1dc..3586be2804d 100644 --- a/compiler/rustc_mir/src/borrow_check/region_infer/mod.rs +++ b/compiler/rustc_mir/src/borrow_check/region_infer/mod.rs @@ -582,7 +582,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { self.check_member_constraints(infcx, &mut errors_buffer); } - let outlives_requirements = outlives_requirements.unwrap_or(vec![]); + let outlives_requirements = outlives_requirements.unwrap_or_default(); if outlives_requirements.is_empty() { (None, errors_buffer) @@ -876,7 +876,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { } // Type-test failed. Report the error. - let erased_generic_kind = infcx.tcx.erase_regions(&type_test.generic_kind); + let erased_generic_kind = infcx.tcx.erase_regions(type_test.generic_kind); // Skip duplicate-ish errors. if deduplicate_errors.insert(( @@ -1006,7 +1006,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { debug!("try_promote_type_test_subject(ty = {:?})", ty); - let ty = tcx.fold_regions(&ty, &mut false, |r, _depth| { + let ty = tcx.fold_regions(ty, &mut false, |r, _depth| { let region_vid = self.to_region_vid(r); // The challenge if this. We have some region variable `r` @@ -1248,7 +1248,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { where T: TypeFoldable<'tcx>, { - tcx.fold_regions(&value, &mut false, |r, _db| { + tcx.fold_regions(value, &mut false, |r, _db| { let vid = self.to_region_vid(r); let scc = self.constraint_sccs.scc(vid); let repr = self.scc_representatives[scc]; diff --git a/compiler/rustc_mir/src/borrow_check/region_infer/opaque_types.rs b/compiler/rustc_mir/src/borrow_check/region_infer/opaque_types.rs index 325dca8c8ca..f7c902355cb 100644 --- a/compiler/rustc_mir/src/borrow_check/region_infer/opaque_types.rs +++ b/compiler/rustc_mir/src/borrow_check/region_infer/opaque_types.rs @@ -63,7 +63,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { let mut subst_regions = vec![self.universal_regions.fr_static]; let universal_substs = - infcx.tcx.fold_regions(&substs, &mut false, |region, _| match *region { + infcx.tcx.fold_regions(substs, &mut false, |region, _| match *region { ty::ReVar(vid) => { subst_regions.push(vid); self.definitions[vid].external_name.unwrap_or_else(|| { @@ -94,7 +94,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { subst_regions.dedup(); let universal_concrete_type = - infcx.tcx.fold_regions(&concrete_type, &mut false, |region, _| match *region { + infcx.tcx.fold_regions(concrete_type, &mut false, |region, _| match *region { ty::ReVar(vid) => subst_regions .iter() .find(|ur_vid| self.eval_equal(vid, **ur_vid)) @@ -139,7 +139,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { where T: TypeFoldable<'tcx>, { - tcx.fold_regions(&ty, &mut false, |region, _| match *region { + tcx.fold_regions(ty, &mut false, |region, _| match *region { ty::ReVar(vid) => { // Find something that we can name let upper_bound = self.approx_universal_upper_bound(vid); diff --git a/compiler/rustc_mir/src/borrow_check/renumber.rs b/compiler/rustc_mir/src/borrow_check/renumber.rs index 5df033b48c1..e563e37adc2 100644 --- 
a/compiler/rustc_mir/src/borrow_check/renumber.rs +++ b/compiler/rustc_mir/src/borrow_check/renumber.rs @@ -26,7 +26,7 @@ pub fn renumber_mir<'tcx>( /// Replaces all regions appearing in `value` with fresh inference /// variables. -pub fn renumber_regions<'tcx, T>(infcx: &InferCtxt<'_, 'tcx>, value: &T) -> T +pub fn renumber_regions<'tcx, T>(infcx: &InferCtxt<'_, 'tcx>, value: T) -> T where T: TypeFoldable<'tcx>, { @@ -43,7 +43,7 @@ struct NLLVisitor<'a, 'tcx> { } impl<'a, 'tcx> NLLVisitor<'a, 'tcx> { - fn renumber_regions(&mut self, value: &T) -> T + fn renumber_regions(&mut self, value: T) -> T where T: TypeFoldable<'tcx>, { @@ -70,7 +70,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for NLLVisitor<'a, 'tcx> { _: Location, ) -> Option> { if let PlaceElem::Field(field, ty) = elem { - let new_ty = self.renumber_regions(&ty); + let new_ty = self.renumber_regions(ty); if new_ty != ty { return Some(PlaceElem::Field(field, new_ty)); @@ -83,7 +83,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for NLLVisitor<'a, 'tcx> { fn visit_substs(&mut self, substs: &mut SubstsRef<'tcx>, location: Location) { debug!("visit_substs(substs={:?}, location={:?})", substs, location); - *substs = self.renumber_regions(&{ *substs }); + *substs = self.renumber_regions(*substs); debug!("visit_substs: substs={:?}", substs); } diff --git a/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs b/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs index 444f9fe8d0a..b7d22fab3dd 100644 --- a/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs +++ b/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs @@ -59,7 +59,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { .replace_bound_vars_with_fresh_vars( body.span, LateBoundRegionConversionTime::FnCall, - &poly_sig, + poly_sig, ) .0, ) diff --git a/compiler/rustc_mir/src/borrow_check/type_check/mod.rs b/compiler/rustc_mir/src/borrow_check/type_check/mod.rs index 409399094e8..a5c45452dec 100644 --- a/compiler/rustc_mir/src/borrow_check/type_check/mod.rs +++ b/compiler/rustc_mir/src/borrow_check/type_check/mod.rs @@ -784,7 +784,7 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> { }; if let Some(field) = variant.fields.get(field.index()) { - Ok(self.cx.normalize(&field.ty(tcx, substs), location)) + Ok(self.cx.normalize(field.ty(tcx, substs), location)) } else { Err(FieldAccessError::OutOfRange { field_count: variant.fields.len() }) } @@ -1245,7 +1245,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { anon_owner_def_id, dummy_body_id, param_env, - &anon_ty, + anon_ty, locations.span(body), )); debug!( @@ -1271,7 +1271,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { ); for (&opaque_def_id, opaque_decl) in &opaque_type_map { - let resolved_ty = infcx.resolve_vars_if_possible(&opaque_decl.concrete_ty); + let resolved_ty = infcx.resolve_vars_if_possible(opaque_decl.concrete_ty); let concrete_is_opaque = if let ty::Opaque(def_id, _) = resolved_ty.kind() { *def_id == opaque_def_id } else { @@ -1296,7 +1296,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { let subst_opaque_defn_ty = opaque_defn_ty.concrete_type.subst(tcx, opaque_decl.substs); let renumbered_opaque_defn_ty = - renumber::renumber_regions(infcx, &subst_opaque_defn_ty); + renumber::renumber_regions(infcx, subst_opaque_defn_ty); debug!( "eq_opaque_type_and_type: concrete_ty={:?}={:?} opaque_defn_ty={:?}", @@ -1601,7 +1601,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { let (sig, map) = self.infcx.replace_bound_vars_with_fresh_vars( term.source_info.span, LateBoundRegionConversionTime::FnCall, - &sig, + sig, ); let sig = 
self.normalize(sig, term_location); self.check_call_dest(body, term, &sig, destination, term_location); @@ -1900,7 +1900,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { // Erase the regions from `ty` to get a global type. The // `Sized` bound in no way depends on precise regions, so this // shouldn't affect `is_sized`. - let erased_ty = tcx.erase_regions(&ty); + let erased_ty = tcx.erase_regions(ty); if !erased_ty.is_sized(tcx.at(span), self.param_env) { // in current MIR construction, all non-control-flow rvalue // expressions evaluate through `as_temp` or `into` a return diff --git a/compiler/rustc_mir/src/borrow_check/universal_regions.rs b/compiler/rustc_mir/src/borrow_check/universal_regions.rs index 4742113b1a5..7ad38a1f82c 100644 --- a/compiler/rustc_mir/src/borrow_check/universal_regions.rs +++ b/compiler/rustc_mir/src/borrow_check/universal_regions.rs @@ -438,7 +438,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> { let inputs_and_output = self.infcx.replace_bound_regions_with_nll_infer_vars( FR, self.mir_def.did, - &bound_inputs_and_output, + bound_inputs_and_output, &mut indices, ); // Converse of above, if this is a function then the late-bound regions declared on its @@ -522,7 +522,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> { debug!("defining_ty (pre-replacement): {:?}", defining_ty); let defining_ty = - self.infcx.replace_free_regions_with_nll_infer_vars(FR, &defining_ty); + self.infcx.replace_free_regions_with_nll_infer_vars(FR, defining_ty); match *defining_ty.kind() { ty::Closure(def_id, substs) => DefiningTy::Closure(def_id, substs), @@ -543,7 +543,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> { assert_eq!(self.mir_def.did.to_def_id(), closure_base_def_id); let identity_substs = InternalSubsts::identity_for_item(tcx, closure_base_def_id); let substs = - self.infcx.replace_free_regions_with_nll_infer_vars(FR, &identity_substs); + self.infcx.replace_free_regions_with_nll_infer_vars(FR, identity_substs); DefiningTy::Const(self.mir_def.did.to_def_id(), substs) } } @@ -628,7 +628,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> { DefiningTy::FnDef(def_id, _) => { let sig = tcx.fn_sig(def_id); - let sig = indices.fold_to_region_vids(tcx, &sig); + let sig = indices.fold_to_region_vids(tcx, sig); sig.inputs_and_output() } @@ -637,7 +637,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> { // "output" (the type of the constant). 
assert_eq!(self.mir_def.did.to_def_id(), def_id); let ty = tcx.type_of(self.mir_def.def_id_for_type_of()); - let ty = indices.fold_to_region_vids(tcx, &ty); + let ty = indices.fold_to_region_vids(tcx, ty); ty::Binder::dummy(tcx.intern_type_list(&[ty])) } } @@ -648,7 +648,7 @@ trait InferCtxtExt<'tcx> { fn replace_free_regions_with_nll_infer_vars( &self, origin: NLLRegionVariableOrigin, - value: &T, + value: T, ) -> T where T: TypeFoldable<'tcx>; @@ -657,7 +657,7 @@ trait InferCtxtExt<'tcx> { &self, origin: NLLRegionVariableOrigin, all_outlive_scope: LocalDefId, - value: &ty::Binder, + value: ty::Binder, indices: &mut UniversalRegionIndices<'tcx>, ) -> T where @@ -674,7 +674,7 @@ impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> { fn replace_free_regions_with_nll_infer_vars( &self, origin: NLLRegionVariableOrigin, - value: &T, + value: T, ) -> T where T: TypeFoldable<'tcx>, @@ -686,7 +686,7 @@ impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> { &self, origin: NLLRegionVariableOrigin, all_outlive_scope: LocalDefId, - value: &ty::Binder, + value: ty::Binder, indices: &mut UniversalRegionIndices<'tcx>, ) -> T where @@ -771,7 +771,7 @@ impl<'tcx> UniversalRegionIndices<'tcx> { /// Replaces all free regions in `value` with region vids, as /// returned by `to_region_vid`. - pub fn fold_to_region_vids(&self, tcx: TyCtxt<'tcx>, value: &T) -> T + pub fn fold_to_region_vids(&self, tcx: TyCtxt<'tcx>, value: T) -> T where T: TypeFoldable<'tcx>, { diff --git a/compiler/rustc_mir/src/const_eval/eval_queries.rs b/compiler/rustc_mir/src/const_eval/eval_queries.rs index 0cac7c087d4..6e09ae43406 100644 --- a/compiler/rustc_mir/src/const_eval/eval_queries.rs +++ b/compiler/rustc_mir/src/const_eval/eval_queries.rs @@ -6,6 +6,7 @@ use crate::interpret::{ ScalarMaybeUninit, StackPopCleanup, }; +use rustc_errors::ErrorReported; use rustc_hir::def::DefKind; use rustc_middle::mir; use rustc_middle::mir::interpret::ErrorHandled; @@ -274,6 +275,16 @@ pub fn eval_to_allocation_raw_provider<'tcx>( return Err(ErrorHandled::Reported(error_reported)); } } + if !tcx.is_mir_available(def.did) { + tcx.sess.delay_span_bug( + tcx.def_span(def.did), + &format!("no MIR body is available for {:?}", def.did), + ); + return Err(ErrorHandled::Reported(ErrorReported {})); + } + if let Some(error_reported) = tcx.mir_const_qualif_opt_const_arg(def).error_occured { + return Err(ErrorHandled::Reported(error_reported)); + } } let is_static = tcx.is_static(def.did); diff --git a/compiler/rustc_mir/src/dataflow/drop_flag_effects.rs b/compiler/rustc_mir/src/dataflow/drop_flag_effects.rs index d1d507e54ef..d16366fded9 100644 --- a/compiler/rustc_mir/src/dataflow/drop_flag_effects.rs +++ b/compiler/rustc_mir/src/dataflow/drop_flag_effects.rs @@ -152,7 +152,7 @@ pub(crate) fn on_all_drop_children_bits<'tcx, F>( let ty = place.ty(body, tcx).ty; debug!("on_all_drop_children_bits({:?}, {:?} : {:?})", path, place, ty); - let erased_ty = tcx.erase_regions(&ty); + let erased_ty = tcx.erase_regions(ty); if erased_ty.needs_drop(tcx, ctxt.param_env) { each_child(child); } else { diff --git a/compiler/rustc_mir/src/dataflow/framework/engine.rs b/compiler/rustc_mir/src/dataflow/framework/engine.rs index 1b7264f86a2..3f9f558223b 100644 --- a/compiler/rustc_mir/src/dataflow/framework/engine.rs +++ b/compiler/rustc_mir/src/dataflow/framework/engine.rs @@ -208,12 +208,19 @@ where } } + // `state` is not actually used between iterations; + // this is just an optimization to avoid reallocating + // every iteration. 
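The comments added to the dataflow engine above describe `state` as a scratch value that is refilled with `clone_from` rather than re-cloned each iteration. A small sketch of that allocation-reuse pattern, with a plain `Vec<u8>` standing in for a dataflow domain:

```
fn main() {
    let entry_sets: Vec<Vec<u8>> = vec![vec![0; 1024], vec![1; 1024], vec![2; 1024]];

    // Allocated once, outside the loop...
    let mut state: Vec<u8> = Vec::new();
    for entry in &entry_sets {
        // ...and overwritten in place each iteration. `clone_from` reuses the
        // existing capacity of `state` where it can, whereas
        // `state = entry.clone()` would allocate a fresh buffer every time.
        state.clone_from(entry);

        // (The block transfer function would be applied to `state` here.)
        assert_eq!(state.len(), 1024);
    }
}
```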
let mut state = analysis.bottom_value(body); while let Some(bb) = dirty_queue.pop() { let bb_data = &body[bb]; - // Apply the block transfer function, using the cached one if it exists. + // Set the state to the entry state of the block. + // This is equivalent to `state = entry_sets[bb].clone()`, + // but it saves an allocation, thus improving compile times. state.clone_from(&entry_sets[bb]); + + // Apply the block transfer function, using the cached one if it exists. match &apply_trans_for_block { Some(apply) => apply(bb, &mut state), None => A::Direction::apply_effects_in_block(&analysis, &mut state, bb, bb_data), diff --git a/compiler/rustc_mir/src/interpret/eval_context.rs b/compiler/rustc_mir/src/interpret/eval_context.rs index 0f86a181a55..05b4d1c410d 100644 --- a/compiler/rustc_mir/src/interpret/eval_context.rs +++ b/compiler/rustc_mir/src/interpret/eval_context.rs @@ -505,7 +505,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>, value: T, ) -> T { - frame.instance.subst_mir_and_normalize_erasing_regions(*self.tcx, self.param_env, &value) + frame.instance.subst_mir_and_normalize_erasing_regions(*self.tcx, self.param_env, value) } /// The `substs` are assumed to already be in our interpreter "universe" (param_env). diff --git a/compiler/rustc_mir/src/interpret/intrinsics.rs b/compiler/rustc_mir/src/interpret/intrinsics.rs index d3b6d706337..f666a89ca56 100644 --- a/compiler/rustc_mir/src/interpret/intrinsics.rs +++ b/compiler/rustc_mir/src/interpret/intrinsics.rs @@ -75,13 +75,35 @@ crate fn eval_nullary_intrinsic<'tcx>( ensure_monomorphic_enough(tcx, tp_ty)?; ConstValue::from_u64(tcx.type_id_hash(tp_ty)) } - sym::variant_count => { - if let ty::Adt(ref adt, _) = tp_ty.kind() { - ConstValue::from_machine_usize(adt.variants.len() as u64, &tcx) - } else { - ConstValue::from_machine_usize(0u64, &tcx) - } - } + sym::variant_count => match tp_ty.kind() { + ty::Adt(ref adt, _) => ConstValue::from_machine_usize(adt.variants.len() as u64, &tcx), + ty::Projection(_) + | ty::Opaque(_, _) + | ty::Param(_) + | ty::Bound(_, _) + | ty::Placeholder(_) + | ty::Infer(_) => throw_inval!(TooGeneric), + ty::Bool + | ty::Char + | ty::Int(_) + | ty::Uint(_) + | ty::Float(_) + | ty::Foreign(_) + | ty::Str + | ty::Array(_, _) + | ty::Slice(_) + | ty::RawPtr(_) + | ty::Ref(_, _, _) + | ty::FnDef(_, _) + | ty::FnPtr(_) + | ty::Dynamic(_, _) + | ty::Closure(_, _) + | ty::Generator(_, _, _) + | ty::GeneratorWitness(_) + | ty::Never + | ty::Tuple(_) + | ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx), + }, other => bug!("`{}` is not a zero arg intrinsic", other), }) } diff --git a/compiler/rustc_mir/src/interpret/traits.rs b/compiler/rustc_mir/src/interpret/traits.rs index 77f4593fa16..fa7036f4e5b 100644 --- a/compiler/rustc_mir/src/interpret/traits.rs +++ b/compiler/rustc_mir/src/interpret/traits.rs @@ -21,7 +21,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ) -> InterpResult<'tcx, Pointer> { trace!("get_vtable(trait_ref={:?})", poly_trait_ref); - let (ty, poly_trait_ref) = self.tcx.erase_regions(&(ty, poly_trait_ref)); + let (ty, poly_trait_ref) = self.tcx.erase_regions((ty, poly_trait_ref)); // All vtables must be monomorphic, bail out otherwise. 
ensure_monomorphic_enough(*self.tcx, ty)?; @@ -37,7 +37,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let methods = if let Some(poly_trait_ref) = poly_trait_ref { let trait_ref = poly_trait_ref.with_self_ty(*self.tcx, ty); - let trait_ref = self.tcx.erase_regions(&trait_ref); + let trait_ref = self.tcx.erase_regions(trait_ref); self.tcx.vtable_methods(trait_ref) } else { @@ -143,7 +143,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let drop_instance = self.memory.get_fn(drop_fn)?.as_instance()?; trace!("Found drop fn: {:?}", drop_instance); let fn_sig = drop_instance.ty(*self.tcx, self.param_env).fn_sig(*self.tcx); - let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig); + let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig); // The drop function takes `*mut T` where `T` is the type being dropped, so get that. let args = fn_sig.inputs(); if args.len() != 1 { diff --git a/compiler/rustc_mir/src/interpret/util.rs b/compiler/rustc_mir/src/interpret/util.rs index fce5553c993..e49b1c9f64d 100644 --- a/compiler/rustc_mir/src/interpret/util.rs +++ b/compiler/rustc_mir/src/interpret/util.rs @@ -18,7 +18,9 @@ where }; impl<'tcx> TypeVisitor<'tcx> for UsedParamsNeedSubstVisitor<'tcx> { - fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<()> { + type BreakTy = (); + + fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow { if !c.needs_subst() { return ControlFlow::CONTINUE; } @@ -29,7 +31,7 @@ where } } - fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> { + fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow { if !ty.needs_subst() { return ControlFlow::CONTINUE; } diff --git a/compiler/rustc_mir/src/monomorphize/collector.rs b/compiler/rustc_mir/src/monomorphize/collector.rs index 938181abff2..a6f90992172 100644 --- a/compiler/rustc_mir/src/monomorphize/collector.rs +++ b/compiler/rustc_mir/src/monomorphize/collector.rs @@ -546,7 +546,7 @@ impl<'a, 'tcx> MirNeighborCollector<'a, 'tcx> { self.instance.subst_mir_and_normalize_erasing_regions( self.tcx, ty::ParamEnv::reveal_all(), - &value, + value, ) } } @@ -1118,7 +1118,7 @@ impl RootCollector<'_, 'v> { // late-bound regions, since late-bound // regions must appear in the argument // listing. 
- let main_ret_ty = self.tcx.erase_regions(&main_ret_ty.no_bound_vars().unwrap()); + let main_ret_ty = self.tcx.erase_regions(main_ret_ty.no_bound_vars().unwrap()); let start_instance = Instance::resolve( self.tcx, diff --git a/compiler/rustc_mir/src/monomorphize/partitioning/default.rs b/compiler/rustc_mir/src/monomorphize/partitioning/default.rs index 5083a45b539..d5a845dd76f 100644 --- a/compiler/rustc_mir/src/monomorphize/partitioning/default.rs +++ b/compiler/rustc_mir/src/monomorphize/partitioning/default.rs @@ -304,7 +304,7 @@ fn characteristic_def_id_of_mono_item<'tcx>( let impl_self_ty = tcx.subst_and_normalize_erasing_regions( instance.substs, ty::ParamEnv::reveal_all(), - &tcx.type_of(impl_def_id), + tcx.type_of(impl_def_id), ); if let Some(def_id) = characteristic_def_id_of_type(impl_self_ty) { return Some(def_id); @@ -532,7 +532,7 @@ fn mono_item_visibility( } fn default_visibility(tcx: TyCtxt<'_>, id: DefId, is_generic: bool) -> Visibility { - if !tcx.sess.target.options.default_hidden_visibility { + if !tcx.sess.target.default_hidden_visibility { return Visibility::Default; } diff --git a/compiler/rustc_mir/src/monomorphize/polymorphize.rs b/compiler/rustc_mir/src/monomorphize/polymorphize.rs index c2ebc954a22..0ce1c5a0489 100644 --- a/compiler/rustc_mir/src/monomorphize/polymorphize.rs +++ b/compiler/rustc_mir/src/monomorphize/polymorphize.rs @@ -250,7 +250,7 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> { } impl<'a, 'tcx> TypeVisitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> { - fn visit_const(&mut self, c: &'tcx Const<'tcx>) -> ControlFlow<()> { + fn visit_const(&mut self, c: &'tcx Const<'tcx>) -> ControlFlow { debug!("visit_const: c={:?}", c); if !c.has_param_types_or_consts() { return ControlFlow::CONTINUE; @@ -283,7 +283,7 @@ impl<'a, 'tcx> TypeVisitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> { } } - fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> { + fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow { debug!("visit_ty: ty={:?}", ty); if !ty.has_param_types_or_consts() { return ControlFlow::CONTINUE; @@ -318,7 +318,9 @@ struct HasUsedGenericParams<'a> { } impl<'a, 'tcx> TypeVisitor<'tcx> for HasUsedGenericParams<'a> { - fn visit_const(&mut self, c: &'tcx Const<'tcx>) -> ControlFlow<()> { + type BreakTy = (); + + fn visit_const(&mut self, c: &'tcx Const<'tcx>) -> ControlFlow { debug!("visit_const: c={:?}", c); if !c.has_param_types_or_consts() { return ControlFlow::CONTINUE; @@ -336,7 +338,7 @@ impl<'a, 'tcx> TypeVisitor<'tcx> for HasUsedGenericParams<'a> { } } - fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> { + fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow { debug!("visit_ty: ty={:?}", ty); if !ty.has_param_types_or_consts() { return ControlFlow::CONTINUE; diff --git a/compiler/rustc_mir/src/shim.rs b/compiler/rustc_mir/src/shim.rs index b2fa4b11f36..aa5835686a2 100644 --- a/compiler/rustc_mir/src/shim.rs +++ b/compiler/rustc_mir/src/shim.rs @@ -135,7 +135,7 @@ fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option>) // Check if this is a generator, if so, return the drop glue for it if let Some(&ty::Generator(gen_def_id, substs, _)) = ty.map(|ty| ty.kind()) { let body = &**tcx.optimized_mir(gen_def_id).generator_drop.as_ref().unwrap(); - return body.subst(tcx, substs); + return body.clone().subst(tcx, substs); } let substs = if let Some(ty) = ty { @@ -144,7 +144,7 @@ fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option>) InternalSubsts::identity_for_item(tcx, def_id) }; let sig = 
tcx.fn_sig(def_id).subst(tcx, substs); - let sig = tcx.erase_late_bound_regions(&sig); + let sig = tcx.erase_late_bound_regions(sig); let span = tcx.def_span(def_id); let source_info = SourceInfo::outermost(span); @@ -308,10 +308,7 @@ fn build_clone_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) - match self_ty.kind() { _ if is_copy => builder.copy_shim(), - ty::Array(ty, len) => { - let len = len.eval_usize(tcx, param_env); - builder.array_shim(dest, src, ty, len) - } + ty::Array(ty, len) => builder.array_shim(dest, src, ty, len), ty::Closure(_, substs) => { builder.tuple_like_shim(dest, src, substs.as_closure().upvar_tys()) } @@ -338,7 +335,7 @@ impl CloneShimBuilder<'tcx> { // or access fields of a Place of type TySelf. let substs = tcx.mk_substs_trait(self_ty, &[]); let sig = tcx.fn_sig(def_id).subst(tcx, substs); - let sig = tcx.erase_late_bound_regions(&sig); + let sig = tcx.erase_late_bound_regions(sig); let span = tcx.def_span(def_id); CloneShimBuilder { @@ -485,7 +482,13 @@ impl CloneShimBuilder<'tcx> { } } - fn array_shim(&mut self, dest: Place<'tcx>, src: Place<'tcx>, ty: Ty<'tcx>, len: u64) { + fn array_shim( + &mut self, + dest: Place<'tcx>, + src: Place<'tcx>, + ty: Ty<'tcx>, + len: &'tcx ty::Const<'tcx>, + ) { let tcx = self.tcx; let span = self.span; @@ -503,7 +506,11 @@ impl CloneShimBuilder<'tcx> { ))), self.make_statement(StatementKind::Assign(box ( end, - Rvalue::Use(Operand::Constant(self.make_usize(len))), + Rvalue::Use(Operand::Constant(box Constant { + span: self.span, + user_ty: None, + literal: len, + })), ))), ]; self.block(inits, TerminatorKind::Goto { target: BasicBlock::new(1) }, false); @@ -656,7 +663,7 @@ fn build_call_shim<'tcx>( // to substitute into the signature of the shim. It is not necessary for users of this // MIR body to perform further substitutions (see `InstanceDef::has_polymorphic_mir_body`). 
let (sig_substs, untuple_args) = if let ty::InstanceDef::FnPtrShim(_, ty) = instance { - let sig = tcx.erase_late_bound_regions(&ty.fn_sig(tcx)); + let sig = tcx.erase_late_bound_regions(ty.fn_sig(tcx)); let untuple_args = sig.inputs(); @@ -671,7 +678,7 @@ fn build_call_shim<'tcx>( let def_id = instance.def_id(); let sig = tcx.fn_sig(def_id); - let mut sig = tcx.erase_late_bound_regions(&sig); + let mut sig = tcx.erase_late_bound_regions(sig); assert_eq!(sig_substs.is_some(), !instance.has_polymorphic_mir_body()); if let Some(sig_substs) = sig_substs { diff --git a/compiler/rustc_mir/src/transform/check_consts/ops.rs b/compiler/rustc_mir/src/transform/check_consts/ops.rs index bd51136b8db..d2e65abfbc7 100644 --- a/compiler/rustc_mir/src/transform/check_consts/ops.rs +++ b/compiler/rustc_mir/src/transform/check_consts/ops.rs @@ -4,7 +4,6 @@ use rustc_errors::{struct_span_err, DiagnosticBuilder}; use rustc_hir as hir; use rustc_hir::def_id::DefId; use rustc_middle::mir; -use rustc_session::config::nightly_options; use rustc_session::parse::feature_err; use rustc_span::symbol::sym; use rustc_span::{Span, Symbol}; @@ -104,7 +103,7 @@ impl NonConstOp for FnCallUnstable { if ccx.is_const_stable_const_fn() { err.help("Const-stable functions can only call other const-stable functions"); - } else if nightly_options::is_nightly_build() { + } else if ccx.tcx.sess.is_nightly_build() { if let Some(feature) = feature { err.help(&format!( "add `#![feature({})]` to the crate attributes to enable", diff --git a/compiler/rustc_mir/src/transform/check_consts/qualifs.rs b/compiler/rustc_mir/src/transform/check_consts/qualifs.rs index b3d9beb3742..c66d3ed76df 100644 --- a/compiler/rustc_mir/src/transform/check_consts/qualifs.rs +++ b/compiler/rustc_mir/src/transform/check_consts/qualifs.rs @@ -2,6 +2,7 @@ //! //! See the `Qualif` trait for more info. +use rustc_errors::ErrorReported; use rustc_middle::mir::*; use rustc_middle::ty::{self, subst::SubstsRef, AdtDef, Ty}; use rustc_span::DUMMY_SP; @@ -9,11 +10,16 @@ use rustc_trait_selection::traits; use super::ConstCx; -pub fn in_any_value_of_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> ConstQualifs { +pub fn in_any_value_of_ty( + cx: &ConstCx<'_, 'tcx>, + ty: Ty<'tcx>, + error_occured: Option, +) -> ConstQualifs { ConstQualifs { has_mut_interior: HasMutInterior::in_any_value_of_ty(cx, ty), needs_drop: NeedsDrop::in_any_value_of_ty(cx, ty), custom_eq: CustomEq::in_any_value_of_ty(cx, ty), + error_occured, } } diff --git a/compiler/rustc_mir/src/transform/check_consts/validation.rs b/compiler/rustc_mir/src/transform/check_consts/validation.rs index 4139b544998..e4893044a15 100644 --- a/compiler/rustc_mir/src/transform/check_consts/validation.rs +++ b/compiler/rustc_mir/src/transform/check_consts/validation.rs @@ -1,6 +1,6 @@ //! The `Visitor` responsible for actually checking a `mir::Body` for invalid operations. -use rustc_errors::{struct_span_err, Applicability, Diagnostic}; +use rustc_errors::{struct_span_err, Applicability, Diagnostic, ErrorReported}; use rustc_hir::def_id::DefId; use rustc_hir::{self as hir, HirId, LangItem}; use rustc_infer::infer::TyCtxtInferExt; @@ -123,7 +123,11 @@ impl Qualifs<'mir, 'tcx> { has_mut_interior.get().contains(local) || self.indirectly_mutable(ccx, local, location) } - fn in_return_place(&mut self, ccx: &'mir ConstCx<'mir, 'tcx>) -> ConstQualifs { + fn in_return_place( + &mut self, + ccx: &'mir ConstCx<'mir, 'tcx>, + error_occured: Option, + ) -> ConstQualifs { // Find the `Return` terminator if one exists. 
// // If no `Return` terminator exists, this MIR is divergent. Just return the conservative @@ -139,7 +143,7 @@ impl Qualifs<'mir, 'tcx> { .map(|(bb, _)| bb); let return_block = match return_block { - None => return qualifs::in_any_value_of_ty(ccx, ccx.body.return_ty()), + None => return qualifs::in_any_value_of_ty(ccx, ccx.body.return_ty(), error_occured), Some(bb) => bb, }; @@ -170,6 +174,7 @@ impl Qualifs<'mir, 'tcx> { needs_drop: self.needs_drop(ccx, RETURN_PLACE, return_loc), has_mut_interior: self.has_mut_interior(ccx, RETURN_PLACE, return_loc), custom_eq, + error_occured, } } } @@ -181,7 +186,7 @@ pub struct Validator<'mir, 'tcx> { /// The span of the current statement. span: Span, - error_emitted: bool, + error_emitted: Option, secondary_errors: Vec, } @@ -199,7 +204,7 @@ impl Validator<'mir, 'tcx> { span: ccx.body.span, ccx, qualifs: Default::default(), - error_emitted: false, + error_emitted: None, secondary_errors: Vec::new(), } } @@ -266,7 +271,7 @@ impl Validator<'mir, 'tcx> { // If we got through const-checking without emitting any "primary" errors, emit any // "secondary" errors if they occurred. let secondary_errors = mem::take(&mut self.secondary_errors); - if !self.error_emitted { + if self.error_emitted.is_none() { for error in secondary_errors { self.tcx.sess.diagnostic().emit_diagnostic(&error); } @@ -276,7 +281,7 @@ impl Validator<'mir, 'tcx> { } pub fn qualifs_in_return_place(&mut self) -> ConstQualifs { - self.qualifs.in_return_place(self.ccx) + self.qualifs.in_return_place(self.ccx, self.error_emitted) } /// Emits an error if an expression cannot be evaluated in the current context. @@ -318,7 +323,7 @@ impl Validator<'mir, 'tcx> { match op.importance() { ops::DiagnosticImportance::Primary => { - self.error_emitted = true; + self.error_emitted = Some(ErrorReported); err.emit(); } diff --git a/compiler/rustc_mir/src/transform/const_prop.rs b/compiler/rustc_mir/src/transform/const_prop.rs index aeb9920c0e3..abcf1862fd8 100644 --- a/compiler/rustc_mir/src/transform/const_prop.rs +++ b/compiler/rustc_mir/src/transform/const_prop.rs @@ -800,7 +800,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { } } - trace!("attepting to replace {:?} with {:?}", rval, value); + trace!("attempting to replace {:?} with {:?}", rval, value); if let Err(e) = self.ecx.const_validate_operand( value, vec![], @@ -890,6 +890,10 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { return false; } + if !self.tcx.consider_optimizing(|| format!("ConstantPropagation - OpTy: {:?}", op)) { + return false; + } + match *op { interpret::Operand::Immediate(Immediate::Scalar(ScalarMaybeUninit::Scalar(s))) => { s.is_bits() diff --git a/compiler/rustc_mir/src/transform/coverage/counters.rs b/compiler/rustc_mir/src/transform/coverage/counters.rs index d6c2f7f7aaf..20f6a16e0f7 100644 --- a/compiler/rustc_mir/src/transform/coverage/counters.rs +++ b/compiler/rustc_mir/src/transform/coverage/counters.rs @@ -14,7 +14,7 @@ use rustc_middle::mir::coverage::*; /// Manages the counter and expression indexes/IDs to generate `CoverageKind` components for MIR /// `Coverage` statements. 
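The coverage pass distinguishes physical counters, which instrumented code increments at runtime, from derived counter expressions such as `(bcb3 - (bcb0 + bcb1))`, which are computed from other counts afterwards and cost nothing at runtime. A toy sketch of the two kinds of IDs that the `CoverageCounters` bookkeeping hands out; the names and shapes here are invented, not the real `CoverageKind`:

```
// Toy model only; the real coverage types carry more information.
#[derive(Debug)]
enum Kind {
    /// A physical counter, bumped by instrumented code at runtime.
    Counter { id: u32 },
    /// A count derived from two operands, e.g. "operand 1 minus operand 0".
    Expression { id: u32, lhs: u32, op: char, rhs: u32 },
}

struct Counters {
    next_counter_id: u32,
    next_expression_id: u32,
}

impl Counters {
    fn new() -> Self {
        Self { next_counter_id: 0, next_expression_id: 0 }
    }

    fn make_counter(&mut self) -> Kind {
        let id = self.next_counter_id;
        self.next_counter_id += 1;
        Kind::Counter { id }
    }

    fn make_expression(&mut self, lhs: u32, op: char, rhs: u32) -> Kind {
        let id = self.next_expression_id;
        self.next_expression_id += 1;
        Kind::Expression { id, lhs, op, rhs }
    }
}

fn main() {
    let mut counters = Counters::new();
    let then_branch = counters.make_counter();
    let whole_if = counters.make_counter();
    // The `else` count never needs its own runtime counter.
    let else_branch = counters.make_expression(1, '-', 0);
    println!("{:?}\n{:?}\n{:?}", then_branch, whole_if, else_branch);
}
```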
-pub(crate) struct CoverageCounters { +pub(super) struct CoverageCounters { function_source_hash: u64, next_counter_id: u32, num_expressions: u32, @@ -37,7 +37,7 @@ impl CoverageCounters { self.debug_counters.enable(); } - /// Makes `CoverageKind` `Counter`s and `Expressions` for the `BasicCoverageBlocks` directly or + /// Makes `CoverageKind` `Counter`s and `Expressions` for the `BasicCoverageBlock`s directly or /// indirectly associated with `CoverageSpans`, and returns additional `Expression`s /// representing intermediate values. pub fn make_bcb_counters( @@ -120,7 +120,6 @@ struct BcbCounters<'a> { basic_coverage_blocks: &'a mut CoverageGraph, } -// FIXME(richkadel): Add unit tests for `BcbCounters` functions/algorithms. impl<'a> BcbCounters<'a> { fn new( coverage_counters: &'a mut CoverageCounters, diff --git a/compiler/rustc_mir/src/transform/coverage/debug.rs b/compiler/rustc_mir/src/transform/coverage/debug.rs index ffa795134e2..7f1dc3844b2 100644 --- a/compiler/rustc_mir/src/transform/coverage/debug.rs +++ b/compiler/rustc_mir/src/transform/coverage/debug.rs @@ -127,7 +127,7 @@ pub const NESTED_INDENT: &str = " "; const RUSTC_COVERAGE_DEBUG_OPTIONS: &str = "RUSTC_COVERAGE_DEBUG_OPTIONS"; -pub(crate) fn debug_options<'a>() -> &'a DebugOptions { +pub(super) fn debug_options<'a>() -> &'a DebugOptions { static DEBUG_OPTIONS: SyncOnceCell = SyncOnceCell::new(); &DEBUG_OPTIONS.get_or_init(|| DebugOptions::from_env()) @@ -136,7 +136,7 @@ pub(crate) fn debug_options<'a>() -> &'a DebugOptions { /// Parses and maintains coverage-specific debug options captured from the environment variable /// "RUSTC_COVERAGE_DEBUG_OPTIONS", if set. #[derive(Debug, Clone)] -pub(crate) struct DebugOptions { +pub(super) struct DebugOptions { pub allow_unused_expressions: bool, counter_format: ExpressionFormat, } @@ -250,7 +250,7 @@ impl Default for ExpressionFormat { /// /// `DebugCounters` supports a recursive rendering of `Expression` counters, so they can be /// presented as nested expressions such as `(bcb3 - (bcb0 + bcb1))`. -pub(crate) struct DebugCounters { +pub(super) struct DebugCounters { some_counters: Option>, } @@ -386,7 +386,7 @@ impl DebugCounter { /// If enabled, this data structure captures additional debugging information used when generating /// a Graphviz (.dot file) representation of the `CoverageGraph`, for debugging purposes. -pub(crate) struct GraphvizData { +pub(super) struct GraphvizData { some_bcb_to_coverage_spans_with_counters: Option>>, some_bcb_to_dependency_counters: Option>>, @@ -496,7 +496,7 @@ impl GraphvizData { /// directly or indirectly, to compute the coverage counts for all `CoverageSpan`s, and any that are /// _not_ used are retained in the `unused_expressions` Vec, to be included in debug output (logs /// and/or a `CoverageGraph` graphviz output). -pub(crate) struct UsedExpressions { +pub(super) struct UsedExpressions { some_used_expression_operands: Option>>, some_unused_expressions: @@ -626,7 +626,7 @@ impl UsedExpressions { } /// Generates the MIR pass `CoverageSpan`-specific spanview dump file. -pub(crate) fn dump_coverage_spanview( +pub(super) fn dump_coverage_spanview( tcx: TyCtxt<'tcx>, mir_body: &mir::Body<'tcx>, basic_coverage_blocks: &CoverageGraph, @@ -666,7 +666,7 @@ fn span_viewables( } /// Generates the MIR pass coverage-specific graphviz dump file. 
-pub(crate) fn dump_coverage_graphviz( +pub(super) fn dump_coverage_graphviz( tcx: TyCtxt<'tcx>, mir_body: &mir::Body<'tcx>, pass_name: &str, @@ -815,7 +815,7 @@ fn bcb_to_string_sections( /// Returns a simple string representation of a `TerminatorKind` variant, indenpendent of any /// values it might hold. -pub(crate) fn term_type(kind: &TerminatorKind<'tcx>) -> &'static str { +pub(super) fn term_type(kind: &TerminatorKind<'tcx>) -> &'static str { match kind { TerminatorKind::Goto { .. } => "Goto", TerminatorKind::SwitchInt { .. } => "SwitchInt", diff --git a/compiler/rustc_mir/src/transform/coverage/graph.rs b/compiler/rustc_mir/src/transform/coverage/graph.rs index c2ed2cbb100..9d375633dcf 100644 --- a/compiler/rustc_mir/src/transform/coverage/graph.rs +++ b/compiler/rustc_mir/src/transform/coverage/graph.rs @@ -17,7 +17,8 @@ const ID_SEPARATOR: &str = ","; /// `CoverageKind` counter (to be added by `CoverageCounters::make_bcb_counters`), and an optional /// set of additional counters--if needed--to count incoming edges, if there are more than one. /// (These "edge counters" are eventually converted into new MIR `BasicBlock`s.) -pub(crate) struct CoverageGraph { +#[derive(Debug)] +pub(super) struct CoverageGraph { bcbs: IndexVec, bb_to_bcb: IndexVec>, pub successors: IndexVec>, @@ -275,7 +276,7 @@ impl graph::WithPredecessors for CoverageGraph { rustc_index::newtype_index! { /// A node in the [control-flow graph][CFG] of CoverageGraph. - pub(crate) struct BasicCoverageBlock { + pub(super) struct BasicCoverageBlock { DEBUG_FORMAT = "bcb{}", } } @@ -305,7 +306,7 @@ rustc_index::newtype_index! { /// queries (`is_dominated_by()`, `predecessors`, `successors`, etc.) have branch (control flow) /// significance. #[derive(Debug, Clone)] -pub(crate) struct BasicCoverageBlockData { +pub(super) struct BasicCoverageBlockData { pub basic_blocks: Vec, pub counter_kind: Option, edge_from_bcbs: Option>, @@ -431,7 +432,7 @@ impl BasicCoverageBlockData { /// the specific branching BCB, representing the edge between the two. The latter case /// distinguishes this incoming edge from other incoming edges to the same `target_bcb`. #[derive(Clone, Copy, PartialEq, Eq)] -pub(crate) struct BcbBranch { +pub(super) struct BcbBranch { pub edge_from_bcb: Option, pub target_bcb: BasicCoverageBlock, } @@ -498,9 +499,8 @@ fn bcb_filtered_successors<'a, 'tcx>( /// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the /// CoverageGraph outside all loops. This supports traversing the BCB CFG in a way that /// ensures a loop is completely traversed before processing Blocks after the end of the loop. -// FIXME(richkadel): Add unit tests for TraversalContext. #[derive(Debug)] -pub(crate) struct TraversalContext { +pub(super) struct TraversalContext { /// From one or more backedges returning to a loop header. 
pub loop_backedges: Option<(Vec, BasicCoverageBlock)>, @@ -510,7 +510,7 @@ pub(crate) struct TraversalContext { pub worklist: Vec, } -pub(crate) struct TraverseCoverageGraphWithLoops { +pub(super) struct TraverseCoverageGraphWithLoops { pub backedges: IndexVec>, pub context_stack: Vec, visited: BitSet, @@ -642,7 +642,7 @@ impl TraverseCoverageGraphWithLoops { } } -fn find_loop_backedges( +pub(super) fn find_loop_backedges( basic_coverage_blocks: &CoverageGraph, ) -> IndexVec> { let num_bcbs = basic_coverage_blocks.num_nodes(); diff --git a/compiler/rustc_mir/src/transform/coverage/mod.rs b/compiler/rustc_mir/src/transform/coverage/mod.rs index c55349239b0..192bb6680e4 100644 --- a/compiler/rustc_mir/src/transform/coverage/mod.rs +++ b/compiler/rustc_mir/src/transform/coverage/mod.rs @@ -5,6 +5,9 @@ mod debug; mod graph; mod spans; +#[cfg(test)] +mod tests; + use counters::CoverageCounters; use graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph}; use spans::{CoverageSpan, CoverageSpans}; @@ -31,7 +34,7 @@ use rustc_span::{CharPos, Pos, SourceFile, Span, Symbol}; /// A simple error message wrapper for `coverage::Error`s. #[derive(Debug)] -pub(crate) struct Error { +struct Error { message: String, } diff --git a/compiler/rustc_mir/src/transform/coverage/spans.rs b/compiler/rustc_mir/src/transform/coverage/spans.rs index cda4fc12544..95c49922262 100644 --- a/compiler/rustc_mir/src/transform/coverage/spans.rs +++ b/compiler/rustc_mir/src/transform/coverage/spans.rs @@ -17,7 +17,7 @@ use rustc_span::{BytePos, Span, SyntaxContext}; use std::cmp::Ordering; #[derive(Debug, Copy, Clone)] -pub(crate) enum CoverageStatement { +pub(super) enum CoverageStatement { Statement(BasicBlock, Span, usize), Terminator(BasicBlock, Span), } @@ -66,7 +66,7 @@ impl CoverageStatement { /// or is subsumed by the `Span` associated with this `CoverageSpan`, and it's `BasicBlock` /// `is_dominated_by()` the `BasicBlock`s in this `CoverageSpan`. #[derive(Debug, Clone)] -pub(crate) struct CoverageSpan { +pub(super) struct CoverageSpan { pub span: Span, pub bcb: BasicCoverageBlock, pub coverage_statements: Vec, @@ -214,7 +214,7 @@ pub struct CoverageSpans<'a, 'tcx> { } impl<'a, 'tcx> CoverageSpans<'a, 'tcx> { - pub(crate) fn generate_coverage_spans( + pub(super) fn generate_coverage_spans( mir_body: &'a mir::Body<'tcx>, body_span: Span, basic_coverage_blocks: &'a CoverageGraph, @@ -645,7 +645,10 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> { } } -fn filtered_statement_span(statement: &'a Statement<'tcx>, body_span: Span) -> Option { +pub(super) fn filtered_statement_span( + statement: &'a Statement<'tcx>, + body_span: Span, +) -> Option { match statement.kind { // These statements have spans that are often outside the scope of the executed source code // for their parent `BasicBlock`. @@ -686,7 +689,10 @@ fn filtered_statement_span(statement: &'a Statement<'tcx>, body_span: Span) -> O } } -fn filtered_terminator_span(terminator: &'a Terminator<'tcx>, body_span: Span) -> Option { +pub(super) fn filtered_terminator_span( + terminator: &'a Terminator<'tcx>, + body_span: Span, +) -> Option { match terminator.kind { // These terminators have spans that don't positively contribute to computing a reasonable // span of actually executed source code. 
(For example, SwitchInt terminators extracted from diff --git a/compiler/rustc_mir/src/transform/coverage/test_macros/Cargo.toml b/compiler/rustc_mir/src/transform/coverage/test_macros/Cargo.toml new file mode 100644 index 00000000000..a9d6f0c803d --- /dev/null +++ b/compiler/rustc_mir/src/transform/coverage/test_macros/Cargo.toml @@ -0,0 +1,12 @@ +[package] +authors = ["The Rust Project Developers"] +name = "coverage_test_macros" +version = "0.0.0" +edition = "2018" + +[lib] +proc-macro = true +doctest = false + +[dependencies] +proc-macro2 = "1" diff --git a/compiler/rustc_mir/src/transform/coverage/test_macros/src/lib.rs b/compiler/rustc_mir/src/transform/coverage/test_macros/src/lib.rs new file mode 100644 index 00000000000..3d6095d2738 --- /dev/null +++ b/compiler/rustc_mir/src/transform/coverage/test_macros/src/lib.rs @@ -0,0 +1,6 @@ +use proc_macro::TokenStream; + +#[proc_macro] +pub fn let_bcb(item: TokenStream) -> TokenStream { + format!("let bcb{} = graph::BasicCoverageBlock::from_usize({});", item, item).parse().unwrap() +} diff --git a/compiler/rustc_mir/src/transform/coverage/tests.rs b/compiler/rustc_mir/src/transform/coverage/tests.rs new file mode 100644 index 00000000000..d36f1b8e5f6 --- /dev/null +++ b/compiler/rustc_mir/src/transform/coverage/tests.rs @@ -0,0 +1,714 @@ +//! This crate hosts a selection of "unit tests" for components of the `InstrumentCoverage` MIR +//! pass. +//! +//! The tests construct a few "mock" objects, as needed, to support the `InstrumentCoverage` +//! functions and algorithms. Mocked objects include instances of `mir::Body`; including +//! `Terminator`s of various `kind`s, and `Span` objects. Some functions used by or used on +//! real, runtime versions of these mocked-up objects have constraints (such as cross-thread +//! limitations) and deep dependencies on other elements of the full Rust compiler (which is +//! *not* constructed or mocked for these tests). +//! +//! Of particular note, attempting to simply print elements of the `mir::Body` with default +//! `Debug` formatting can fail because some `Debug` format implementations require the +//! `TyCtxt`, obtained via a static global variable that is *not* set for these tests. +//! Initializing the global type context is prohibitively complex for the scope and scale of these +//! tests (essentially requiring initializing the entire compiler). +//! +//! Also note, some basic features of `Span` also rely on the `Span`s own "session globals", which +//! are unrelated to the `TyCtxt` global. Without initializing the `Span` session globals, some +//! basic, coverage-specific features would be impossible to test, but thankfully initializing these +//! globals is comparitively simpler. The easiest way is to wrap the test in a closure argument +//! to: `rustc_span::with_default_session_globals(|| { test_here(); })`. + +use super::counters; +use super::debug; +use super::graph; +use super::spans; + +use coverage_test_macros::let_bcb; + +use rustc_data_structures::graph::WithNumNodes; +use rustc_data_structures::graph::WithSuccessors; +use rustc_index::vec::{Idx, IndexVec}; +use rustc_middle::mir::coverage::CoverageKind; +use rustc_middle::mir::*; +use rustc_middle::ty::{self, DebruijnIndex, TyS, TypeFlags}; +use rustc_span::{self, BytePos, Pos, Span, DUMMY_SP}; + +// All `TEMP_BLOCK` targets should be replaced before calling `to_body() -> mir::Body`. +const TEMP_BLOCK: BasicBlock = BasicBlock::MAX; + +fn dummy_ty() -> &'static TyS<'static> { + thread_local! 
{ + static DUMMY_TYS: &'static TyS<'static> = Box::leak(box TyS::make_for_test( + ty::Bool, + TypeFlags::empty(), + DebruijnIndex::from_usize(0), + )); + } + + &DUMMY_TYS.with(|tys| *tys) +} + +struct MockBlocks<'tcx> { + blocks: IndexVec>, + dummy_place: Place<'tcx>, + next_local: usize, +} + +impl<'tcx> MockBlocks<'tcx> { + fn new() -> Self { + Self { + blocks: IndexVec::new(), + dummy_place: Place { local: RETURN_PLACE, projection: ty::List::empty() }, + next_local: 0, + } + } + + fn new_temp(&mut self) -> Local { + let index = self.next_local; + self.next_local += 1; + Local::new(index) + } + + fn push(&mut self, kind: TerminatorKind<'tcx>) -> BasicBlock { + let next_lo = if let Some(last) = self.blocks.last() { + self.blocks[last].terminator().source_info.span.hi() + } else { + BytePos(1) + }; + let next_hi = next_lo + BytePos(1); + self.blocks.push(BasicBlockData { + statements: vec![], + terminator: Some(Terminator { + source_info: SourceInfo::outermost(Span::with_root_ctxt(next_lo, next_hi)), + kind, + }), + is_cleanup: false, + }) + } + + fn link(&mut self, from_block: BasicBlock, to_block: BasicBlock) { + match self.blocks[from_block].terminator_mut().kind { + TerminatorKind::Assert { ref mut target, .. } + | TerminatorKind::Call { destination: Some((_, ref mut target)), .. } + | TerminatorKind::Drop { ref mut target, .. } + | TerminatorKind::DropAndReplace { ref mut target, .. } + | TerminatorKind::FalseEdge { real_target: ref mut target, .. } + | TerminatorKind::FalseUnwind { real_target: ref mut target, .. } + | TerminatorKind::Goto { ref mut target } + | TerminatorKind::InlineAsm { destination: Some(ref mut target), .. } + | TerminatorKind::Yield { resume: ref mut target, .. } => *target = to_block, + ref invalid => bug!("Invalid from_block: {:?}", invalid), + } + } + + fn add_block_from( + &mut self, + some_from_block: Option, + to_kind: TerminatorKind<'tcx>, + ) -> BasicBlock { + let new_block = self.push(to_kind); + if let Some(from_block) = some_from_block { + self.link(from_block, new_block); + } + new_block + } + + fn set_branch(&mut self, switchint: BasicBlock, branch_index: usize, to_block: BasicBlock) { + match self.blocks[switchint].terminator_mut().kind { + TerminatorKind::SwitchInt { ref mut targets, .. 
} => { + let mut branches = targets.iter().collect::>(); + let otherwise = if branch_index == branches.len() { + to_block + } else { + let old_otherwise = targets.otherwise(); + if branch_index > branches.len() { + branches.push((branches.len() as u128, old_otherwise)); + while branches.len() < branch_index { + branches.push((branches.len() as u128, TEMP_BLOCK)); + } + to_block + } else { + branches[branch_index] = (branch_index as u128, to_block); + old_otherwise + } + }; + *targets = SwitchTargets::new(branches.into_iter(), otherwise); + } + ref invalid => bug!("Invalid BasicBlock kind or no to_block: {:?}", invalid), + } + } + + fn call(&mut self, some_from_block: Option) -> BasicBlock { + self.add_block_from( + some_from_block, + TerminatorKind::Call { + func: Operand::Copy(self.dummy_place.clone()), + args: vec![], + destination: Some((self.dummy_place.clone(), TEMP_BLOCK)), + cleanup: None, + from_hir_call: false, + fn_span: DUMMY_SP, + }, + ) + } + + fn goto(&mut self, some_from_block: Option) -> BasicBlock { + self.add_block_from(some_from_block, TerminatorKind::Goto { target: TEMP_BLOCK }) + } + + fn switchint(&mut self, some_from_block: Option) -> BasicBlock { + let switchint_kind = TerminatorKind::SwitchInt { + discr: Operand::Move(Place::from(self.new_temp())), + switch_ty: dummy_ty(), + targets: SwitchTargets::static_if(0, TEMP_BLOCK, TEMP_BLOCK), + }; + self.add_block_from(some_from_block, switchint_kind) + } + + fn return_(&mut self, some_from_block: Option) -> BasicBlock { + self.add_block_from(some_from_block, TerminatorKind::Return) + } + + fn to_body(self) -> Body<'tcx> { + Body::new_cfg_only(self.blocks) + } +} + +fn debug_basic_blocks(mir_body: &Body<'tcx>) -> String { + format!( + "{:?}", + mir_body + .basic_blocks() + .iter_enumerated() + .map(|(bb, data)| { + let term = &data.terminator(); + let kind = &term.kind; + let span = term.source_info.span; + let sp = format!("(span:{},{})", span.lo().to_u32(), span.hi().to_u32()); + match kind { + TerminatorKind::Assert { target, .. } + | TerminatorKind::Call { destination: Some((_, target)), .. } + | TerminatorKind::Drop { target, .. } + | TerminatorKind::DropAndReplace { target, .. } + | TerminatorKind::FalseEdge { real_target: target, .. } + | TerminatorKind::FalseUnwind { real_target: target, .. } + | TerminatorKind::Goto { target } + | TerminatorKind::InlineAsm { destination: Some(target), .. } + | TerminatorKind::Yield { resume: target, .. } => { + format!("{}{:?}:{} -> {:?}", sp, bb, debug::term_type(kind), target) + } + TerminatorKind::SwitchInt { targets, .. 
} => { + format!("{}{:?}:{} -> {:?}", sp, bb, debug::term_type(kind), targets) + } + _ => format!("{}{:?}:{}", sp, bb, debug::term_type(kind)), + } + }) + .collect::>() + ) +} + +static PRINT_GRAPHS: bool = false; + +fn print_mir_graphviz(name: &str, mir_body: &Body<'_>) { + if PRINT_GRAPHS { + println!( + "digraph {} {{\n{}\n}}", + name, + mir_body + .basic_blocks() + .iter_enumerated() + .map(|(bb, data)| { + format!( + " {:?} [label=\"{:?}: {}\"];\n{}", + bb, + bb, + debug::term_type(&data.terminator().kind), + mir_body + .successors(bb) + .map(|successor| { format!(" {:?} -> {:?};", bb, successor) }) + .collect::>() + .join("\n") + ) + }) + .collect::>() + .join("\n") + ); + } +} + +fn print_coverage_graphviz( + name: &str, + mir_body: &Body<'_>, + basic_coverage_blocks: &graph::CoverageGraph, +) { + if PRINT_GRAPHS { + println!( + "digraph {} {{\n{}\n}}", + name, + basic_coverage_blocks + .iter_enumerated() + .map(|(bcb, bcb_data)| { + format!( + " {:?} [label=\"{:?}: {}\"];\n{}", + bcb, + bcb, + debug::term_type(&bcb_data.terminator(mir_body).kind), + basic_coverage_blocks + .successors(bcb) + .map(|successor| { format!(" {:?} -> {:?};", bcb, successor) }) + .collect::>() + .join("\n") + ) + }) + .collect::>() + .join("\n") + ); + } +} + +/// Create a mock `Body` with a simple flow. +fn goto_switchint() -> Body<'a> { + let mut blocks = MockBlocks::new(); + let start = blocks.call(None); + let goto = blocks.goto(Some(start)); + let switchint = blocks.switchint(Some(goto)); + let then_call = blocks.call(None); + let else_call = blocks.call(None); + blocks.set_branch(switchint, 0, then_call); + blocks.set_branch(switchint, 1, else_call); + blocks.return_(Some(then_call)); + blocks.return_(Some(else_call)); + + let mir_body = blocks.to_body(); + print_mir_graphviz("mir_goto_switchint", &mir_body); + /* Graphviz character plots created using: `graph-easy --as=boxart`: + ┌────────────────┐ + │ bb0: Call │ + └────────────────┘ + │ + │ + ▼ + ┌────────────────┐ + │ bb1: Goto │ + └────────────────┘ + │ + │ + ▼ + ┌─────────────┐ ┌────────────────┐ + │ bb4: Call │ ◀── │ bb2: SwitchInt │ + └─────────────┘ └────────────────┘ + │ │ + │ │ + ▼ ▼ + ┌─────────────┐ ┌────────────────┐ + │ bb6: Return │ │ bb3: Call │ + └─────────────┘ └────────────────┘ + │ + │ + ▼ + ┌────────────────┐ + │ bb5: Return │ + └────────────────┘ + */ + mir_body +} + +macro_rules! 
assert_successors { + ($basic_coverage_blocks:ident, $i:ident, [$($successor:ident),*]) => { + let mut successors = $basic_coverage_blocks.successors[$i].clone(); + successors.sort_unstable(); + assert_eq!(successors, vec![$($successor),*]); + } +} + +#[test] +fn test_covgraph_goto_switchint() { + let mir_body = goto_switchint(); + if false { + println!("basic_blocks = {}", debug_basic_blocks(&mir_body)); + } + let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body); + print_coverage_graphviz("covgraph_goto_switchint ", &mir_body, &basic_coverage_blocks); + /* + ┌──────────────┐ ┌─────────────────┐ + │ bcb2: Return │ ◀── │ bcb0: SwitchInt │ + └──────────────┘ └─────────────────┘ + │ + │ + ▼ + ┌─────────────────┐ + │ bcb1: Return │ + └─────────────────┘ + */ + assert_eq!( + basic_coverage_blocks.num_nodes(), + 3, + "basic_coverage_blocks: {:?}", + basic_coverage_blocks.iter_enumerated().collect::>() + ); + + let_bcb!(0); + let_bcb!(1); + let_bcb!(2); + + assert_successors!(basic_coverage_blocks, bcb0, [bcb1, bcb2]); + assert_successors!(basic_coverage_blocks, bcb1, []); + assert_successors!(basic_coverage_blocks, bcb2, []); +} + +/// Create a mock `Body` with a loop. +fn switchint_then_loop_else_return() -> Body<'a> { + let mut blocks = MockBlocks::new(); + let start = blocks.call(None); + let switchint = blocks.switchint(Some(start)); + let then_call = blocks.call(None); + blocks.set_branch(switchint, 0, then_call); + let backedge_goto = blocks.goto(Some(then_call)); + blocks.link(backedge_goto, switchint); + let else_return = blocks.return_(None); + blocks.set_branch(switchint, 1, else_return); + + let mir_body = blocks.to_body(); + print_mir_graphviz("mir_switchint_then_loop_else_return", &mir_body); + /* + ┌────────────────┐ + │ bb0: Call │ + └────────────────┘ + │ + │ + ▼ + ┌─────────────┐ ┌────────────────┐ + │ bb4: Return │ ◀── │ bb1: SwitchInt │ ◀┐ + └─────────────┘ └────────────────┘ │ + │ │ + │ │ + ▼ │ + ┌────────────────┐ │ + │ bb2: Call │ │ + └────────────────┘ │ + │ │ + │ │ + ▼ │ + ┌────────────────┐ │ + │ bb3: Goto │ ─┘ + └────────────────┘ + */ + mir_body +} + +#[test] +fn test_covgraph_switchint_then_loop_else_return() { + let mir_body = switchint_then_loop_else_return(); + let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body); + print_coverage_graphviz( + "covgraph_switchint_then_loop_else_return", + &mir_body, + &basic_coverage_blocks, + ); + /* + ┌─────────────────┐ + │ bcb0: Call │ + └─────────────────┘ + │ + │ + ▼ + ┌────────────┐ ┌─────────────────┐ + │ bcb3: Goto │ ◀── │ bcb1: SwitchInt │ ◀┐ + └────────────┘ └─────────────────┘ │ + │ │ │ + │ │ │ + │ ▼ │ + │ ┌─────────────────┐ │ + │ │ bcb2: Return │ │ + │ └─────────────────┘ │ + │ │ + └─────────────────────────────────────┘ + */ + assert_eq!( + basic_coverage_blocks.num_nodes(), + 4, + "basic_coverage_blocks: {:?}", + basic_coverage_blocks.iter_enumerated().collect::>() + ); + + let_bcb!(0); + let_bcb!(1); + let_bcb!(2); + let_bcb!(3); + + assert_successors!(basic_coverage_blocks, bcb0, [bcb1]); + assert_successors!(basic_coverage_blocks, bcb1, [bcb2, bcb3]); + assert_successors!(basic_coverage_blocks, bcb2, []); + assert_successors!(basic_coverage_blocks, bcb3, [bcb1]); +} + +/// Create a mock `Body` with nested loops. 
+fn switchint_loop_then_inner_loop_else_break() -> Body<'a> { + let mut blocks = MockBlocks::new(); + let start = blocks.call(None); + let switchint = blocks.switchint(Some(start)); + let then_call = blocks.call(None); + blocks.set_branch(switchint, 0, then_call); + let else_return = blocks.return_(None); + blocks.set_branch(switchint, 1, else_return); + + let inner_start = blocks.call(Some(then_call)); + let inner_switchint = blocks.switchint(Some(inner_start)); + let inner_then_call = blocks.call(None); + blocks.set_branch(inner_switchint, 0, inner_then_call); + let inner_backedge_goto = blocks.goto(Some(inner_then_call)); + blocks.link(inner_backedge_goto, inner_switchint); + let inner_else_break_goto = blocks.goto(None); + blocks.set_branch(inner_switchint, 1, inner_else_break_goto); + + let backedge_goto = blocks.goto(Some(inner_else_break_goto)); + blocks.link(backedge_goto, switchint); + + let mir_body = blocks.to_body(); + print_mir_graphviz("mir_switchint_loop_then_inner_loop_else_break", &mir_body); + /* + ┌────────────────┐ + │ bb0: Call │ + └────────────────┘ + │ + │ + ▼ + ┌─────────────┐ ┌────────────────┐ + │ bb3: Return │ ◀── │ bb1: SwitchInt │ ◀─────┐ + └─────────────┘ └────────────────┘ │ + │ │ + │ │ + ▼ │ + ┌────────────────┐ │ + │ bb2: Call │ │ + └────────────────┘ │ + │ │ + │ │ + ▼ │ + ┌────────────────┐ │ + │ bb4: Call │ │ + └────────────────┘ │ + │ │ + │ │ + ▼ │ + ┌─────────────┐ ┌────────────────┐ │ + │ bb8: Goto │ ◀── │ bb5: SwitchInt │ ◀┐ │ + └─────────────┘ └────────────────┘ │ │ + │ │ │ │ + │ │ │ │ + ▼ ▼ │ │ + ┌─────────────┐ ┌────────────────┐ │ │ + │ bb9: Goto │ ─┐ │ bb6: Call │ │ │ + └─────────────┘ │ └────────────────┘ │ │ + │ │ │ │ + │ │ │ │ + │ ▼ │ │ + │ ┌────────────────┐ │ │ + │ │ bb7: Goto │ ─┘ │ + │ └────────────────┘ │ + │ │ + └───────────────────────────┘ + */ + mir_body +} + +#[test] +fn test_covgraph_switchint_loop_then_inner_loop_else_break() { + let mir_body = switchint_loop_then_inner_loop_else_break(); + let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body); + print_coverage_graphviz( + "covgraph_switchint_loop_then_inner_loop_else_break", + &mir_body, + &basic_coverage_blocks, + ); + /* + ┌─────────────────┐ + │ bcb0: Call │ + └─────────────────┘ + │ + │ + ▼ + ┌──────────────┐ ┌─────────────────┐ + │ bcb2: Return │ ◀── │ bcb1: SwitchInt │ ◀┐ + └──────────────┘ └─────────────────┘ │ + │ │ + │ │ + ▼ │ + ┌─────────────────┐ │ + │ bcb3: Call │ │ + └─────────────────┘ │ + │ │ + │ │ + ▼ │ + ┌──────────────┐ ┌─────────────────┐ │ + │ bcb6: Goto │ ◀── │ bcb4: SwitchInt │ ◀┼────┐ + └──────────────┘ └─────────────────┘ │ │ + │ │ │ │ + │ │ │ │ + │ ▼ │ │ + │ ┌─────────────────┐ │ │ + │ │ bcb5: Goto │ ─┘ │ + │ └─────────────────┘ │ + │ │ + └────────────────────────────────────────────┘ + */ + assert_eq!( + basic_coverage_blocks.num_nodes(), + 7, + "basic_coverage_blocks: {:?}", + basic_coverage_blocks.iter_enumerated().collect::>() + ); + + let_bcb!(0); + let_bcb!(1); + let_bcb!(2); + let_bcb!(3); + let_bcb!(4); + let_bcb!(5); + let_bcb!(6); + + assert_successors!(basic_coverage_blocks, bcb0, [bcb1]); + assert_successors!(basic_coverage_blocks, bcb1, [bcb2, bcb3]); + assert_successors!(basic_coverage_blocks, bcb2, []); + assert_successors!(basic_coverage_blocks, bcb3, [bcb4]); + assert_successors!(basic_coverage_blocks, bcb4, [bcb5, bcb6]); + assert_successors!(basic_coverage_blocks, bcb5, [bcb1]); + assert_successors!(basic_coverage_blocks, bcb6, [bcb4]); +} + +#[test] +fn test_find_loop_backedges_none() { + let mir_body = goto_switchint(); 
+ let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body); + if false { + println!( + "basic_coverage_blocks = {:?}", + basic_coverage_blocks.iter_enumerated().collect::>() + ); + println!("successors = {:?}", basic_coverage_blocks.successors); + } + let backedges = graph::find_loop_backedges(&basic_coverage_blocks); + assert_eq!( + backedges.iter_enumerated().map(|(_bcb, backedges)| backedges.len()).sum::(), + 0, + "backedges: {:?}", + backedges + ); +} + +#[test] +fn test_find_loop_backedges_one() { + let mir_body = switchint_then_loop_else_return(); + let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body); + let backedges = graph::find_loop_backedges(&basic_coverage_blocks); + assert_eq!( + backedges.iter_enumerated().map(|(_bcb, backedges)| backedges.len()).sum::(), + 1, + "backedges: {:?}", + backedges + ); + + let_bcb!(1); + let_bcb!(3); + + assert_eq!(backedges[bcb1], vec![bcb3]); +} + +#[test] +fn test_find_loop_backedges_two() { + let mir_body = switchint_loop_then_inner_loop_else_break(); + let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body); + let backedges = graph::find_loop_backedges(&basic_coverage_blocks); + assert_eq!( + backedges.iter_enumerated().map(|(_bcb, backedges)| backedges.len()).sum::(), + 2, + "backedges: {:?}", + backedges + ); + + let_bcb!(1); + let_bcb!(4); + let_bcb!(5); + let_bcb!(6); + + assert_eq!(backedges[bcb1], vec![bcb5]); + assert_eq!(backedges[bcb4], vec![bcb6]); +} + +#[test] +fn test_traverse_coverage_with_loops() { + let mir_body = switchint_loop_then_inner_loop_else_break(); + let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body); + let mut traversed_in_order = Vec::new(); + let mut traversal = graph::TraverseCoverageGraphWithLoops::new(&basic_coverage_blocks); + while let Some(bcb) = traversal.next(&basic_coverage_blocks) { + traversed_in_order.push(bcb); + } + + let_bcb!(6); + + // bcb0 is visited first. Then bcb1 starts the first loop, and all remaining nodes, *except* + // bcb6 are inside the first loop. 
+ assert_eq!( + *traversed_in_order.last().expect("should have elements"), + bcb6, + "bcb6 should not be visited until all nodes inside the first loop have been visited" + ); +} + +fn synthesize_body_span_from_terminators(mir_body: &Body<'_>) -> Span { + let mut some_span: Option = None; + for (_, data) in mir_body.basic_blocks().iter_enumerated() { + let term_span = data.terminator().source_info.span; + if let Some(span) = some_span.as_mut() { + *span = span.to(term_span); + } else { + some_span = Some(term_span) + } + } + some_span.expect("body must have at least one BasicBlock") +} + +#[test] +fn test_make_bcb_counters() { + rustc_span::with_default_session_globals(|| { + let mir_body = goto_switchint(); + let body_span = synthesize_body_span_from_terminators(&mir_body); + let mut basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body); + let mut coverage_spans = Vec::new(); + for (bcb, data) in basic_coverage_blocks.iter_enumerated() { + if let Some(span) = + spans::filtered_terminator_span(data.terminator(&mir_body), body_span) + { + coverage_spans.push(spans::CoverageSpan::for_terminator(span, bcb, data.last_bb())); + } + } + let mut coverage_counters = counters::CoverageCounters::new(0); + let intermediate_expressions = coverage_counters + .make_bcb_counters(&mut basic_coverage_blocks, &coverage_spans) + .expect("should be Ok"); + assert_eq!(intermediate_expressions.len(), 0); + + let_bcb!(1); + assert_eq!( + 1, // coincidentally, bcb1 has a `Counter` with id = 1 + match basic_coverage_blocks[bcb1].counter().expect("should have a counter") { + CoverageKind::Counter { id, .. } => id, + _ => panic!("expected a Counter"), + } + .as_u32() + ); + + let_bcb!(2); + assert_eq!( + 2, // coincidentally, bcb2 has a `Counter` with id = 2 + match basic_coverage_blocks[bcb2].counter().expect("should have a counter") { + CoverageKind::Counter { id, .. 
} => id, + _ => panic!("expected a Counter"), + } + .as_u32() + ); + }); +} diff --git a/compiler/rustc_mir/src/transform/early_otherwise_branch.rs b/compiler/rustc_mir/src/transform/early_otherwise_branch.rs index f97dcf4852d..f91477911a4 100644 --- a/compiler/rustc_mir/src/transform/early_otherwise_branch.rs +++ b/compiler/rustc_mir/src/transform/early_otherwise_branch.rs @@ -46,6 +46,10 @@ impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch { let should_cleanup = !opts_to_apply.is_empty(); for opt_to_apply in opts_to_apply { + if !tcx.consider_optimizing(|| format!("EarlyOtherwiseBranch {:?}", &opt_to_apply)) { + break; + } + trace!("SUCCESS: found optimization possibility to apply: {:?}", &opt_to_apply); let statements_before = diff --git a/compiler/rustc_mir/src/transform/generator.rs b/compiler/rustc_mir/src/transform/generator.rs index 039d4753a8c..dc413f8dd2d 100644 --- a/compiler/rustc_mir/src/transform/generator.rs +++ b/compiler/rustc_mir/src/transform/generator.rs @@ -720,13 +720,13 @@ fn sanitize_witness<'tcx>( tcx: TyCtxt<'tcx>, body: &Body<'tcx>, witness: Ty<'tcx>, - upvars: &Vec>, + upvars: Vec>, saved_locals: &GeneratorSavedLocals, ) { let did = body.source.def_id(); let allowed_upvars = tcx.erase_regions(upvars); let allowed = match witness.kind() { - ty::GeneratorWitness(s) => tcx.erase_late_bound_regions(&s), + &ty::GeneratorWitness(s) => tcx.erase_late_bound_regions(s), _ => { tcx.sess.delay_span_bug( body.span, @@ -1303,7 +1303,7 @@ impl<'tcx> MirPass<'tcx> for StateTransform { let liveness_info = locals_live_across_suspend_points(tcx, body, &always_live_locals, movable); - sanitize_witness(tcx, body, interior, &upvars, &liveness_info.saved_locals); + sanitize_witness(tcx, body, interior, upvars, &liveness_info.saved_locals); if tcx.sess.opts.debugging_opts.validate_mir { let mut vis = EnsureGeneratorFieldAssignmentsNeverAlias { diff --git a/compiler/rustc_mir/src/transform/inline.rs b/compiler/rustc_mir/src/transform/inline.rs index a41304236b2..4eeb8969bb1 100644 --- a/compiler/rustc_mir/src/transform/inline.rs +++ b/compiler/rustc_mir/src/transform/inline.rs @@ -1,23 +1,21 @@ //! 
Inlining pass for MIR functions use rustc_attr as attr; +use rustc_hir as hir; use rustc_index::bit_set::BitSet; use rustc_index::vec::Idx; use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; use rustc_middle::mir::visit::*; use rustc_middle::mir::*; +use rustc_middle::ty::subst::Subst; use rustc_middle::ty::{self, ConstKind, Instance, InstanceDef, ParamEnv, Ty, TyCtxt}; use rustc_span::{hygiene::ExpnKind, ExpnData, Span}; use rustc_target::spec::abi::Abi; use super::simplify::{remove_dead_blocks, CfgSimplifier}; use crate::transform::MirPass; -use std::collections::VecDeque; use std::iter; -use std::ops::RangeFrom; - -const DEFAULT_THRESHOLD: usize = 50; -const HINT_THRESHOLD: usize = 100; +use std::ops::{Range, RangeFrom}; const INSTR_COST: usize = 5; const CALL_PENALTY: usize = 25; @@ -31,138 +29,136 @@ pub struct Inline; #[derive(Copy, Clone, Debug)] struct CallSite<'tcx> { callee: Instance<'tcx>, - bb: BasicBlock, + fn_sig: ty::PolyFnSig<'tcx>, + block: BasicBlock, + target: Option, source_info: SourceInfo, } impl<'tcx> MirPass<'tcx> for Inline { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - if tcx.sess.opts.debugging_opts.mir_opt_level >= 2 { - if tcx.sess.opts.debugging_opts.instrument_coverage { - // The current implementation of source code coverage injects code region counters - // into the MIR, and assumes a 1-to-1 correspondence between MIR and source-code- - // based function. - debug!("function inlining is disabled when compiling with `instrument_coverage`"); - } else { - Inliner { - tcx, - param_env: tcx.param_env_reveal_all_normalized(body.source.def_id()), - codegen_fn_attrs: tcx.codegen_fn_attrs(body.source.def_id()), - } - .run_pass(body); - } + if tcx.sess.opts.debugging_opts.mir_opt_level < 2 { + return; } + + if tcx.sess.opts.debugging_opts.instrument_coverage { + // The current implementation of source code coverage injects code region counters + // into the MIR, and assumes a 1-to-1 correspondence between MIR and source-code- + // based function. + debug!("function inlining is disabled when compiling with `instrument_coverage`"); + return; + } + + if inline(tcx, body) { + debug!("running simplify cfg on {:?}", body.source); + CfgSimplifier::new(body).simplify(); + remove_dead_blocks(body); + } + } +} + +fn inline(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool { + let def_id = body.source.def_id(); + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local()); + + // Only do inlining into fn bodies. + if !tcx.hir().body_owner_kind(hir_id).is_fn_or_closure() { + return false; + } + if body.source.promoted.is_some() { + return false; } + + let mut this = Inliner { + tcx, + param_env: tcx.param_env_reveal_all_normalized(body.source.def_id()), + codegen_fn_attrs: tcx.codegen_fn_attrs(body.source.def_id()), + hir_id, + history: Vec::new(), + changed: false, + }; + let blocks = BasicBlock::new(0)..body.basic_blocks().next_index(); + this.process_blocks(body, blocks); + this.changed } struct Inliner<'tcx> { tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, + /// Caller codegen attributes. codegen_fn_attrs: &'tcx CodegenFnAttrs, + /// Caller HirID. + hir_id: hir::HirId, + /// Stack of inlined instances. + history: Vec>, + /// Indicates that the caller body has been modified. + changed: bool, } impl Inliner<'tcx> { - fn run_pass(&self, caller_body: &mut Body<'tcx>) { - // Keep a queue of callsites to try inlining on. 
We take - // advantage of the fact that queries detect cycles here to - // allow us to try and fetch the fully optimized MIR of a - // call; if it succeeds, we can inline it and we know that - // they do not call us. Otherwise, we just don't try to - // inline. - // - // We use a queue so that we inline "broadly" before we inline - // in depth. It is unclear if this is the best heuristic, - // really, but that's true of all the heuristics in this - // file. =) - - let mut callsites = VecDeque::new(); - - let def_id = caller_body.source.def_id(); + fn process_blocks(&mut self, caller_body: &mut Body<'tcx>, blocks: Range) { + for bb in blocks { + let callsite = match self.get_valid_function_call(bb, &caller_body[bb], caller_body) { + None => continue, + Some(it) => it, + }; - // Only do inlining into fn bodies. - let self_hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id.expect_local()); - if self.tcx.hir().body_owner_kind(self_hir_id).is_fn_or_closure() - && caller_body.source.promoted.is_none() - { - for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated() { - if let Some(callsite) = self.get_valid_function_call(bb, bb_data, caller_body) { - callsites.push_back(callsite); - } + if !self.is_mir_available(&callsite.callee, caller_body) { + debug!("MIR unavailable {}", callsite.callee); + continue; } - } else { - return; - } - let mut changed = false; - while let Some(callsite) = callsites.pop_front() { - debug!("checking whether to inline callsite {:?}", callsite); - - if let InstanceDef::Item(_) = callsite.callee.def { - if !self.tcx.is_mir_available(callsite.callee.def_id()) { - debug!("checking whether to inline callsite {:?} - MIR unavailable", callsite,); - continue; - } + let callee_body = self.tcx.instance_mir(callsite.callee.def); + if !self.should_inline(callsite, callee_body) { + continue; } - let callee_body = if let Some(callee_def_id) = callsite.callee.def_id().as_local() { - let callee_hir_id = self.tcx.hir().local_def_id_to_hir_id(callee_def_id); - // Avoid a cycle here by only using `instance_mir` only if we have - // a lower `HirId` than the callee. This ensures that the callee will - // not inline us. This trick only works without incremental compilation. - // So don't do it if that is enabled. Also avoid inlining into generators, - // since their `optimized_mir` is used for layout computation, which can - // create a cycle, even when no attempt is made to inline the function - // in the other direction. - if !self.tcx.dep_graph.is_fully_enabled() - && self_hir_id < callee_hir_id - && caller_body.generator_kind.is_none() - { - self.tcx.instance_mir(callsite.callee.def) - } else { - continue; - } - } else { - // This cannot result in a cycle since the callee MIR is from another crate - // and is already optimized. 
- self.tcx.instance_mir(callsite.callee.def) - }; - - if !self.consider_optimizing(callsite, &callee_body) { - continue; + if !self.tcx.consider_optimizing(|| { + format!("Inline {:?} into {}", callee_body.span, callsite.callee) + }) { + return; } let callee_body = callsite.callee.subst_mir_and_normalize_erasing_regions( self.tcx, self.param_env, - callee_body, + callee_body.clone(), ); - let start = caller_body.basic_blocks().len(); - debug!("attempting to inline callsite {:?} - body={:?}", callsite, callee_body); - if !self.inline_call(callsite, caller_body, callee_body) { - debug!("attempting to inline callsite {:?} - failure", callsite); - continue; - } - debug!("attempting to inline callsite {:?} - success", callsite); - - // Add callsites from inlined function - for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated().skip(start) { - if let Some(new_callsite) = self.get_valid_function_call(bb, bb_data, caller_body) { - // Don't inline the same function multiple times. - if callsite.callee != new_callsite.callee { - callsites.push_back(new_callsite); - } - } - } + let old_blocks = caller_body.basic_blocks().next_index(); + self.inline_call(callsite, caller_body, callee_body); + let new_blocks = old_blocks..caller_body.basic_blocks().next_index(); + self.changed = true; - changed = true; + self.history.push(callsite.callee); + self.process_blocks(caller_body, new_blocks); + self.history.pop(); } + } - // Simplify if we inlined anything. - if changed { - debug!("running simplify cfg on {:?}", caller_body.source); - CfgSimplifier::new(caller_body).simplify(); - remove_dead_blocks(caller_body); + fn is_mir_available(&self, callee: &Instance<'tcx>, caller_body: &Body<'tcx>) -> bool { + if let InstanceDef::Item(_) = callee.def { + if !self.tcx.is_mir_available(callee.def_id()) { + return false; + } + } + + if let Some(callee_def_id) = callee.def_id().as_local() { + let callee_hir_id = self.tcx.hir().local_def_id_to_hir_id(callee_def_id); + // Avoid a cycle here by only using `instance_mir` only if we have + // a lower `HirId` than the callee. This ensures that the callee will + // not inline us. This trick only works without incremental compilation. + // So don't do it if that is enabled. Also avoid inlining into generators, + // since their `optimized_mir` is used for layout computation, which can + // create a cycle, even when no attempt is made to inline the function + // in the other direction. + !self.tcx.dep_graph.is_fully_enabled() + && self.hir_id < callee_hir_id + && caller_body.generator_kind.is_none() + } else { + // This cannot result in a cycle since the callee MIR is from another crate + // and is already optimized. + true } } @@ -179,42 +175,39 @@ impl Inliner<'tcx> { // Only consider direct calls to functions let terminator = bb_data.terminator(); - if let TerminatorKind::Call { func: ref op, .. } = terminator.kind { - if let ty::FnDef(callee_def_id, substs) = *op.ty(caller_body, self.tcx).kind() { - // To resolve an instance its substs have to be fully normalized, so - // we do this here. - let normalized_substs = self.tcx.normalize_erasing_regions(self.param_env, substs); + if let TerminatorKind::Call { ref func, ref destination, .. } = terminator.kind { + let func_ty = func.ty(caller_body, self.tcx); + if let ty::FnDef(def_id, substs) = *func_ty.kind() { + // To resolve an instance its substs have to be fully normalized. 
+ let substs = self.tcx.normalize_erasing_regions(self.param_env, substs); let callee = - Instance::resolve(self.tcx, self.param_env, callee_def_id, normalized_substs) - .ok() - .flatten()?; + Instance::resolve(self.tcx, self.param_env, def_id, substs).ok().flatten()?; if let InstanceDef::Virtual(..) | InstanceDef::Intrinsic(_) = callee.def { return None; } - return Some(CallSite { callee, bb, source_info: terminator.source_info }); + let fn_sig = self.tcx.fn_sig(def_id).subst(self.tcx, substs); + + return Some(CallSite { + callee, + fn_sig, + block: bb, + target: destination.map(|(_, target)| target), + source_info: terminator.source_info, + }); } } None } - fn consider_optimizing(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool { - debug!("consider_optimizing({:?})", callsite); - self.should_inline(callsite, callee_body) - && self.tcx.consider_optimizing(|| { - format!("Inline {:?} into {:?}", callee_body.span, callsite) - }) - } - fn should_inline(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool { debug!("should_inline({:?})", callsite); let tcx = self.tcx; - // Cannot inline generators which haven't been transformed yet - if callee_body.yield_ty.is_some() { - debug!(" yield ty present - not inlining"); + if callsite.fn_sig.c_variadic() { + debug!("callee is variadic - not inlining"); return false; } @@ -227,11 +220,7 @@ impl Inliner<'tcx> { return false; } - let self_no_sanitize = - self.codegen_fn_attrs.no_sanitize & self.tcx.sess.opts.debugging_opts.sanitizer; - let callee_no_sanitize = - codegen_fn_attrs.no_sanitize & self.tcx.sess.opts.debugging_opts.sanitizer; - if self_no_sanitize != callee_no_sanitize { + if self.codegen_fn_attrs.no_sanitize != codegen_fn_attrs.no_sanitize { debug!("`callee has incompatible no_sanitize attribute - not inlining"); return false; } @@ -259,11 +248,20 @@ impl Inliner<'tcx> { } } - let mut threshold = if hinted { HINT_THRESHOLD } else { DEFAULT_THRESHOLD }; + let mut threshold = if hinted { + self.tcx.sess.opts.debugging_opts.inline_mir_hint_threshold + } else { + self.tcx.sess.opts.debugging_opts.inline_mir_threshold + }; + + if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) { + debug!("#[naked] present - not inlining"); + return false; + } - // Significantly lower the threshold for inlining cold functions if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) { - threshold /= 5; + debug!("#[cold] present - not inlining"); + return false; } // Give a bonus functions with a small number of blocks, @@ -327,7 +325,18 @@ impl Inliner<'tcx> { } TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. 
} => { - if let ty::FnDef(def_id, _) = *f.literal.ty.kind() { + if let ty::FnDef(def_id, substs) = + *callsite.callee.subst_mir(self.tcx, &f.literal.ty).kind() + { + let substs = self.tcx.normalize_erasing_regions(self.param_env, substs); + if let Ok(Some(instance)) = + Instance::resolve(self.tcx, self.param_env, def_id, substs) + { + if callsite.callee == instance || self.history.contains(&instance) { + debug!("`callee is recursive - not inlining"); + return false; + } + } // Don't give intrinsics the extra penalty for calls let f = tcx.fn_sig(def_id); if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic { @@ -397,13 +406,10 @@ impl Inliner<'tcx> { callsite: CallSite<'tcx>, caller_body: &mut Body<'tcx>, mut callee_body: Body<'tcx>, - ) -> bool { - let terminator = caller_body[callsite.bb].terminator.take().unwrap(); + ) { + let terminator = caller_body[callsite.block].terminator.take().unwrap(); match terminator.kind { - // FIXME: Handle inlining of diverging calls - TerminatorKind::Call { args, destination: Some(destination), cleanup, .. } => { - debug!("inlined {:?} into {:?}", callsite.callee, caller_body.source); - + TerminatorKind::Call { args, destination, cleanup, .. } => { // If the call is something like `a[*i] = f(i)`, where // `i : &mut usize`, then just duplicating the `a[*i]` // Place could result in two different locations if `f` @@ -420,35 +426,31 @@ impl Inliner<'tcx> { false } - let dest = if dest_needs_borrow(destination.0) { - trace!("creating temp for return destination"); - let dest = Rvalue::Ref( - self.tcx.lifetimes.re_erased, - BorrowKind::Mut { allow_two_phase_borrow: false }, - destination.0, - ); - - let ty = dest.ty(caller_body, self.tcx); - - let temp = LocalDecl::new(ty, callsite.source_info.span); - - let tmp = caller_body.local_decls.push(temp); - let tmp = Place::from(tmp); - - let stmt = Statement { - source_info: callsite.source_info, - kind: StatementKind::Assign(box (tmp, dest)), - }; - caller_body[callsite.bb].statements.push(stmt); - self.tcx.mk_place_deref(tmp) + let dest = if let Some((destination_place, _)) = destination { + if dest_needs_borrow(destination_place) { + trace!("creating temp for return destination"); + let dest = Rvalue::Ref( + self.tcx.lifetimes.re_erased, + BorrowKind::Mut { allow_two_phase_borrow: false }, + destination_place, + ); + let dest_ty = dest.ty(caller_body, self.tcx); + let temp = Place::from(self.new_call_temp(caller_body, &callsite, dest_ty)); + caller_body[callsite.block].statements.push(Statement { + source_info: callsite.source_info, + kind: StatementKind::Assign(box (temp, dest)), + }); + self.tcx.mk_place_deref(temp) + } else { + destination_place + } } else { - destination.0 + trace!("creating temp for return place"); + Place::from(self.new_call_temp(caller_body, &callsite, callee_body.return_ty())) }; - let return_block = destination.1; - // Copy the arguments if needed. 
- let args: Vec<_> = self.make_call_args(args, &callsite, caller_body, return_block); + let args: Vec<_> = self.make_call_args(args, &callsite, caller_body, &callee_body); let mut integrator = Integrator { args: &args, @@ -456,12 +458,13 @@ impl Inliner<'tcx> { new_scopes: SourceScope::new(caller_body.source_scopes.len()).., new_blocks: BasicBlock::new(caller_body.basic_blocks().len()).., destination: dest, - return_block, + return_block: callsite.target, cleanup_block: cleanup, in_cleanup_block: false, tcx: self.tcx, callsite_span: callsite.source_info.span, body_span: callee_body.span, + always_live_locals: BitSet::new_filled(callee_body.local_decls.len()), }; // Map all `Local`s, `SourceScope`s and `BasicBlock`s to new ones @@ -493,6 +496,34 @@ impl Inliner<'tcx> { } } + // If there are any locals without storage markers, give them storage only for the + // duration of the call. + for local in callee_body.vars_and_temps_iter() { + if integrator.always_live_locals.contains(local) { + let new_local = integrator.map_local(local); + caller_body[callsite.block].statements.push(Statement { + source_info: callsite.source_info, + kind: StatementKind::StorageLive(new_local), + }); + } + } + if let Some(block) = callsite.target { + // To avoid repeated O(n) insert, push any new statements to the end and rotate + // the slice once. + let mut n = 0; + for local in callee_body.vars_and_temps_iter().rev() { + if integrator.always_live_locals.contains(local) { + let new_local = integrator.map_local(local); + caller_body[block].statements.push(Statement { + source_info: callsite.source_info, + kind: StatementKind::StorageDead(new_local), + }); + n += 1; + } + } + caller_body[block].statements.rotate_right(n); + } + // Insert all of the (mapped) parts of the callee body into the caller. caller_body.local_decls.extend( // FIXME(eddyb) make `Range` iterable so that we can use @@ -505,7 +536,7 @@ impl Inliner<'tcx> { caller_body.var_debug_info.extend(callee_body.var_debug_info.drain(..)); caller_body.basic_blocks_mut().extend(callee_body.basic_blocks_mut().drain(..)); - caller_body[callsite.bb].terminator = Some(Terminator { + caller_body[callsite.block].terminator = Some(Terminator { source_info: callsite.source_info, kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) }, }); @@ -519,14 +550,8 @@ impl Inliner<'tcx> { matches!(constant.literal.val, ConstKind::Unevaluated(_, _, _)) }), ); - - true - } - kind => { - caller_body[callsite.bb].terminator = - Some(Terminator { source_info: terminator.source_info, kind }); - false } + kind => bug!("unexpected terminator kind {:?}", kind), } } @@ -535,7 +560,7 @@ impl Inliner<'tcx> { args: Vec>, callsite: &CallSite<'tcx>, caller_body: &mut Body<'tcx>, - return_block: BasicBlock, + callee_body: &Body<'tcx>, ) -> Vec { let tcx = self.tcx; @@ -562,22 +587,10 @@ impl Inliner<'tcx> { // tmp2 = tuple_tmp.2 // // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`. - // FIXME(eddyb) make this check for `"rust-call"` ABI combined with - // `callee_body.spread_arg == None`, instead of special-casing closures. 
- if tcx.is_closure(callsite.callee.def_id()) { + if callsite.fn_sig.abi() == Abi::RustCall && callee_body.spread_arg.is_none() { let mut args = args.into_iter(); - let self_ = self.create_temp_if_necessary( - args.next().unwrap(), - callsite, - caller_body, - return_block, - ); - let tuple = self.create_temp_if_necessary( - args.next().unwrap(), - callsite, - caller_body, - return_block, - ); + let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body); + let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body); assert!(args.next().is_none()); let tuple = Place::from(tuple); @@ -597,13 +610,13 @@ impl Inliner<'tcx> { Operand::Move(tcx.mk_place_field(tuple, Field::new(i), ty.expect_ty())); // Spill to a local to make e.g., `tmp0`. - self.create_temp_if_necessary(tuple_field, callsite, caller_body, return_block) + self.create_temp_if_necessary(tuple_field, callsite, caller_body) }); closure_ref_arg.chain(tuple_tmp_args).collect() } else { args.into_iter() - .map(|a| self.create_temp_if_necessary(a, callsite, caller_body, return_block)) + .map(|a| self.create_temp_if_necessary(a, callsite, caller_body)) .collect() } } @@ -615,46 +628,52 @@ impl Inliner<'tcx> { arg: Operand<'tcx>, callsite: &CallSite<'tcx>, caller_body: &mut Body<'tcx>, - return_block: BasicBlock, ) -> Local { - // FIXME: Analysis of the usage of the arguments to avoid - // unnecessary temporaries. - + // Reuse the operand if it is a moved temporary. if let Operand::Move(place) = &arg { if let Some(local) = place.as_local() { if caller_body.local_kind(local) == LocalKind::Temp { - // Reuse the operand if it's a temporary already return local; } } } + // Otherwise, create a temporary for the argument. trace!("creating temp for argument {:?}", arg); - // Otherwise, create a temporary for the arg - let arg = Rvalue::Use(arg); - - let ty = arg.ty(caller_body, self.tcx); - - let arg_tmp = LocalDecl::new(ty, callsite.source_info.span); - let arg_tmp = caller_body.local_decls.push(arg_tmp); - - caller_body[callsite.bb].statements.push(Statement { + let arg_ty = arg.ty(caller_body, self.tcx); + let local = self.new_call_temp(caller_body, callsite, arg_ty); + caller_body[callsite.block].statements.push(Statement { source_info: callsite.source_info, - kind: StatementKind::StorageLive(arg_tmp), + kind: StatementKind::Assign(box (Place::from(local), Rvalue::Use(arg))), }); - caller_body[callsite.bb].statements.push(Statement { + local + } + + /// Introduces a new temporary into the caller body that is live for the duration of the call. 
+ fn new_call_temp( + &self, + caller_body: &mut Body<'tcx>, + callsite: &CallSite<'tcx>, + ty: Ty<'tcx>, + ) -> Local { + let local = caller_body.local_decls.push(LocalDecl::new(ty, callsite.source_info.span)); + + caller_body[callsite.block].statements.push(Statement { source_info: callsite.source_info, - kind: StatementKind::Assign(box (Place::from(arg_tmp), arg)), + kind: StatementKind::StorageLive(local), }); - caller_body[return_block].statements.insert( - 0, - Statement { - source_info: callsite.source_info, - kind: StatementKind::StorageDead(arg_tmp), - }, - ); - - arg_tmp + + if let Some(block) = callsite.target { + caller_body[block].statements.insert( + 0, + Statement { + source_info: callsite.source_info, + kind: StatementKind::StorageDead(local), + }, + ); + } + + local } } @@ -679,12 +698,13 @@ struct Integrator<'a, 'tcx> { new_scopes: RangeFrom, new_blocks: RangeFrom, destination: Place<'tcx>, - return_block: BasicBlock, + return_block: Option, cleanup_block: Option, in_cleanup_block: bool, tcx: TyCtxt<'tcx>, callsite_span: Span, body_span: Span, + always_live_locals: BitSet, } impl<'a, 'tcx> Integrator<'a, 'tcx> { @@ -738,6 +758,12 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { } fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) { + for elem in place.projection { + // FIXME: Make sure that return place is not used in an indexing projection, since it + // won't be rebased as it is supposed to be. + assert_ne!(ProjectionElem::Index(RETURN_PLACE), elem); + } + // If this is the `RETURN_PLACE`, we need to rebase any projections onto it. let dest_proj_len = self.destination.projection.len(); if place.local == RETURN_PLACE && dest_proj_len > 0 { @@ -768,6 +794,15 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { } } + fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) { + if let StatementKind::StorageLive(local) | StatementKind::StorageDead(local) = + statement.kind + { + self.always_live_locals.remove(local); + } + self.super_statement(statement, location); + } + fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, loc: Location) { // Don't try to modify the implicit `_0` access on return (`return` terminators are // replaced down below anyways). 
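// --- Editor's aside: a hedged, standalone sketch --------------------------
// The `Integrator` visitor in the hunks above and below rewrites every callee
// Local, SourceScope, and BasicBlock index so it lands in the index ranges
// freshly appended to the caller body. A minimal model of that offset-based
// remapping is shown here; the real pass additionally special-cases the
// return place, call arguments, and cleanup blocks, all omitted below. Names
// are illustrative, not the rustc API.
struct ToyIndexRemap {
    first_new_local: usize,
    first_new_scope: usize,
    first_new_block: usize,
}

impl ToyIndexRemap {
    fn map_local(&self, callee_local: usize) -> usize {
        self.first_new_local + callee_local
    }
    fn map_scope(&self, callee_scope: usize) -> usize {
        self.first_new_scope + callee_scope
    }
    fn map_block(&self, callee_block: usize) -> usize {
        self.first_new_block + callee_block
    }
}
// E.g. if the caller had 7 basic blocks before inlining, callee block 0 maps
// to caller block 7, callee block 1 to caller block 8, and so on.
// ---------------------------------------------------------------------------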
@@ -819,7 +854,11 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { } } TerminatorKind::Return => { - terminator.kind = TerminatorKind::Goto { target: self.return_block }; + terminator.kind = if let Some(tgt) = self.return_block { + TerminatorKind::Goto { target: tgt } + } else { + TerminatorKind::Unreachable + } } TerminatorKind::Resume => { if let Some(tgt) = self.cleanup_block { diff --git a/compiler/rustc_mir/src/transform/instcombine.rs b/compiler/rustc_mir/src/transform/instcombine.rs index 59b7db24319..3eb2b500d66 100644 --- a/compiler/rustc_mir/src/transform/instcombine.rs +++ b/compiler/rustc_mir/src/transform/instcombine.rs @@ -39,13 +39,21 @@ pub struct InstCombineVisitor<'tcx> { tcx: TyCtxt<'tcx>, } +impl<'tcx> InstCombineVisitor<'tcx> { + fn should_combine(&self, rvalue: &Rvalue<'tcx>, location: Location) -> bool { + self.tcx.consider_optimizing(|| { + format!("InstCombine - Rvalue: {:?} Location: {:?}", rvalue, location) + }) + } +} + impl<'tcx> MutVisitor<'tcx> for InstCombineVisitor<'tcx> { fn tcx(&self) -> TyCtxt<'tcx> { self.tcx } fn visit_rvalue(&mut self, rvalue: &mut Rvalue<'tcx>, location: Location) { - if self.optimizations.and_stars.remove(&location) { + if self.optimizations.and_stars.remove(&location) && self.should_combine(rvalue, location) { debug!("replacing `&*`: {:?}", rvalue); let new_place = match rvalue { Rvalue::Ref(_, _, place) => { @@ -67,18 +75,24 @@ impl<'tcx> MutVisitor<'tcx> for InstCombineVisitor<'tcx> { } if let Some(constant) = self.optimizations.arrays_lengths.remove(&location) { - debug!("replacing `Len([_; N])`: {:?}", rvalue); - *rvalue = Rvalue::Use(Operand::Constant(box constant)); + if self.should_combine(rvalue, location) { + debug!("replacing `Len([_; N])`: {:?}", rvalue); + *rvalue = Rvalue::Use(Operand::Constant(box constant)); + } } if let Some(operand) = self.optimizations.unneeded_equality_comparison.remove(&location) { - debug!("replacing {:?} with {:?}", rvalue, operand); - *rvalue = Rvalue::Use(operand); + if self.should_combine(rvalue, location) { + debug!("replacing {:?} with {:?}", rvalue, operand); + *rvalue = Rvalue::Use(operand); + } } if let Some(place) = self.optimizations.unneeded_deref.remove(&location) { - debug!("unneeded_deref: replacing {:?} with {:?}", rvalue, place); - *rvalue = Rvalue::Use(Operand::Copy(place)); + if self.should_combine(rvalue, location) { + debug!("unneeded_deref: replacing {:?} with {:?}", rvalue, place); + *rvalue = Rvalue::Use(Operand::Copy(place)); + } } self.super_rvalue(rvalue, location) diff --git a/compiler/rustc_mir/src/transform/lower_intrinsics.rs b/compiler/rustc_mir/src/transform/lower_intrinsics.rs new file mode 100644 index 00000000000..543acb74acb --- /dev/null +++ b/compiler/rustc_mir/src/transform/lower_intrinsics.rs @@ -0,0 +1,104 @@ +//! Lowers intrinsic calls + +use crate::transform::MirPass; +use rustc_middle::mir::*; +use rustc_middle::ty::subst::SubstsRef; +use rustc_middle::ty::{self, Ty, TyCtxt}; +use rustc_span::symbol::{sym, Symbol}; +use rustc_target::spec::abi::Abi; + +pub struct LowerIntrinsics; + +impl<'tcx> MirPass<'tcx> for LowerIntrinsics { + fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { + let (basic_blocks, local_decls) = body.basic_blocks_and_local_decls_mut(); + for block in basic_blocks { + let terminator = block.terminator.as_mut().unwrap(); + if let TerminatorKind::Call { func, args, destination, .. 
} = &mut terminator.kind { + let func_ty = func.ty(local_decls, tcx); + let (intrinsic_name, substs) = match resolve_rust_intrinsic(tcx, func_ty) { + None => continue, + Some(it) => it, + }; + match intrinsic_name { + sym::unreachable => { + terminator.kind = TerminatorKind::Unreachable; + } + sym::forget => { + if let Some((destination, target)) = *destination { + block.statements.push(Statement { + source_info: terminator.source_info, + kind: StatementKind::Assign(box ( + destination, + Rvalue::Use(Operand::Constant(box Constant { + span: terminator.source_info.span, + user_ty: None, + literal: ty::Const::zero_sized(tcx, tcx.types.unit), + })), + )), + }); + terminator.kind = TerminatorKind::Goto { target }; + } + } + sym::wrapping_add | sym::wrapping_sub | sym::wrapping_mul => { + if let Some((destination, target)) = *destination { + let lhs; + let rhs; + { + let mut args = args.drain(..); + lhs = args.next().unwrap(); + rhs = args.next().unwrap(); + } + let bin_op = match intrinsic_name { + sym::wrapping_add => BinOp::Add, + sym::wrapping_sub => BinOp::Sub, + sym::wrapping_mul => BinOp::Mul, + _ => bug!("unexpected intrinsic"), + }; + block.statements.push(Statement { + source_info: terminator.source_info, + kind: StatementKind::Assign(box ( + destination, + Rvalue::BinaryOp(bin_op, lhs, rhs), + )), + }); + terminator.kind = TerminatorKind::Goto { target }; + } + } + sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => { + // The checked binary operations are not suitable target for lowering here, + // since their semantics depend on the value of overflow-checks flag used + // during codegen. Issue #35310. + } + sym::size_of => { + if let Some((destination, target)) = *destination { + let tp_ty = substs.type_at(0); + block.statements.push(Statement { + source_info: terminator.source_info, + kind: StatementKind::Assign(box ( + destination, + Rvalue::NullaryOp(NullOp::SizeOf, tp_ty), + )), + }); + terminator.kind = TerminatorKind::Goto { target }; + } + } + _ => {} + } + } + } + } +} + +fn resolve_rust_intrinsic( + tcx: TyCtxt<'tcx>, + func_ty: Ty<'tcx>, +) -> Option<(Symbol, SubstsRef<'tcx>)> { + if let ty::FnDef(def_id, substs) = *func_ty.kind() { + let fn_sig = func_ty.fn_sig(tcx); + if fn_sig.abi() == Abi::RustIntrinsic { + return Some((tcx.item_name(def_id), substs)); + } + } + None +} diff --git a/compiler/rustc_mir/src/transform/match_branches.rs b/compiler/rustc_mir/src/transform/match_branches.rs index 82c0b924f28..53eeecc780f 100644 --- a/compiler/rustc_mir/src/transform/match_branches.rs +++ b/compiler/rustc_mir/src/transform/match_branches.rs @@ -43,8 +43,13 @@ impl<'tcx> MirPass<'tcx> for MatchBranchSimplification { } let param_env = tcx.param_env(body.source.def_id()); + let def_id = body.source.def_id(); let (bbs, local_decls) = body.basic_blocks_and_local_decls_mut(); 'outer: for bb_idx in bbs.indices() { + if !tcx.consider_optimizing(|| format!("MatchBranchSimplification {:?} ", def_id)) { + continue; + } + let (discr, val, switch_ty, first, second) = match bbs[bb_idx].terminator().kind { TerminatorKind::SwitchInt { discr: ref discr @ (Operand::Copy(_) | Operand::Move(_)), diff --git a/compiler/rustc_mir/src/transform/mod.rs b/compiler/rustc_mir/src/transform/mod.rs index e3fea2d2701..e86d11e248f 100644 --- a/compiler/rustc_mir/src/transform/mod.rs +++ b/compiler/rustc_mir/src/transform/mod.rs @@ -1,6 +1,7 @@ use crate::{shim, util}; use required_consts::RequiredConstsVisitor; use rustc_data_structures::fx::FxHashSet; +use 
rustc_data_structures::steal::Steal; use rustc_hir as hir; use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE}; use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor}; @@ -8,7 +9,6 @@ use rustc_index::vec::IndexVec; use rustc_middle::mir::visit::Visitor as _; use rustc_middle::mir::{traversal, Body, ConstQualifs, MirPhase, Promoted}; use rustc_middle::ty::query::Providers; -use rustc_middle::ty::steal::Steal; use rustc_middle::ty::{self, TyCtxt, TypeFoldable}; use rustc_span::{Span, Symbol}; use std::borrow::Cow; @@ -32,6 +32,7 @@ pub mod function_item_references; pub mod generator; pub mod inline; pub mod instcombine; +pub mod lower_intrinsics; pub mod match_branches; pub mod multiple_return_terminators; pub mod no_landing_pads; @@ -390,6 +391,7 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // The main optimizations that we do on MIR. let optimizations: &[&dyn MirPass<'tcx>] = &[ + &lower_intrinsics::LowerIntrinsics, &remove_unneeded_drops::RemoveUnneededDrops, &match_branches::MatchBranchSimplification, // inst combine is after MatchBranchSimplification to clean up Ne(_1, false) diff --git a/compiler/rustc_mir/src/transform/multiple_return_terminators.rs b/compiler/rustc_mir/src/transform/multiple_return_terminators.rs index c37b54a3190..617086622cc 100644 --- a/compiler/rustc_mir/src/transform/multiple_return_terminators.rs +++ b/compiler/rustc_mir/src/transform/multiple_return_terminators.rs @@ -16,6 +16,7 @@ impl<'tcx> MirPass<'tcx> for MultipleReturnTerminators { // find basic blocks with no statement and a return terminator let mut bbs_simple_returns = BitSet::new_empty(body.basic_blocks().len()); + let def_id = body.source.def_id(); let bbs = body.basic_blocks_mut(); for idx in bbs.indices() { if bbs[idx].statements.is_empty() @@ -26,6 +27,10 @@ impl<'tcx> MirPass<'tcx> for MultipleReturnTerminators { } for bb in bbs { + if !tcx.consider_optimizing(|| format!("MultipleReturnTerminators {:?} ", def_id)) { + break; + } + if let TerminatorKind::Goto { target } = bb.terminator().kind { if bbs_simple_returns.contains(target) { bb.terminator_mut().kind = TerminatorKind::Return; diff --git a/compiler/rustc_mir/src/transform/nrvo.rs b/compiler/rustc_mir/src/transform/nrvo.rs index 45b906bf542..ce02fb261df 100644 --- a/compiler/rustc_mir/src/transform/nrvo.rs +++ b/compiler/rustc_mir/src/transform/nrvo.rs @@ -38,18 +38,22 @@ impl<'tcx> MirPass<'tcx> for RenameReturnPlace { return; } + let def_id = body.source.def_id(); let returned_local = match local_eligible_for_nrvo(body) { Some(l) => l, None => { - debug!("`{:?}` was ineligible for NRVO", body.source.def_id()); + debug!("`{:?}` was ineligible for NRVO", def_id); return; } }; + if !tcx.consider_optimizing(|| format!("RenameReturnPlace {:?}", def_id)) { + return; + } + debug!( "`{:?}` was eligible for NRVO, making {:?} the return place", - body.source.def_id(), - returned_local + def_id, returned_local ); RenameToReturnPlace { tcx, to_rename: returned_local }.visit_body(body); diff --git a/compiler/rustc_mir/src/transform/remove_unneeded_drops.rs b/compiler/rustc_mir/src/transform/remove_unneeded_drops.rs index aaf3ecab4dc..221114eebaa 100644 --- a/compiler/rustc_mir/src/transform/remove_unneeded_drops.rs +++ b/compiler/rustc_mir/src/transform/remove_unneeded_drops.rs @@ -21,6 +21,12 @@ impl<'tcx> MirPass<'tcx> for RemoveUnneededDrops { opt_finder.visit_body(body); let should_simplify = !opt_finder.optimizations.is_empty(); for (loc, target) in opt_finder.optimizations { + if !tcx + 
.consider_optimizing(|| format!("RemoveUnneededDrops {:?} ", body.source.def_id())) + { + break; + } + let terminator = body.basic_blocks_mut()[loc.block].terminator_mut(); debug!("SUCCESS: replacing `drop` with goto({:?})", target); terminator.kind = TerminatorKind::Goto { target }; diff --git a/compiler/rustc_mir/src/transform/unreachable_prop.rs b/compiler/rustc_mir/src/transform/unreachable_prop.rs index f6d39dae342..e39c8656021 100644 --- a/compiler/rustc_mir/src/transform/unreachable_prop.rs +++ b/compiler/rustc_mir/src/transform/unreachable_prop.rs @@ -50,6 +50,12 @@ impl MirPass<'_> for UnreachablePropagation { let replaced = !replacements.is_empty(); for (bb, terminator_kind) in replacements { + if !tcx.consider_optimizing(|| { + format!("UnreachablePropagation {:?} ", body.source.def_id()) + }) { + break; + } + body.basic_blocks_mut()[bb].terminator_mut().kind = terminator_kind; } diff --git a/compiler/rustc_mir/src/transform/validate.rs b/compiler/rustc_mir/src/transform/validate.rs index e1e6e71acb5..919e4a90a17 100644 --- a/compiler/rustc_mir/src/transform/validate.rs +++ b/compiler/rustc_mir/src/transform/validate.rs @@ -183,6 +183,13 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { fn visit_local(&mut self, local: &Local, context: PlaceContext, location: Location) { + if self.body.local_decls.get(*local).is_none() { + self.fail( + location, + format!("local {:?} has no corresponding declaration in `body.local_decls`", local), + ); + } + if self.reachable_blocks.contains(location.block) && context.is_use() { // Uses of locals must occur while the local's storage is allocated. self.storage_liveness.seek_after_primary_effect(location); diff --git a/compiler/rustc_mir/src/util/pretty.rs b/compiler/rustc_mir/src/util/pretty.rs index 8bee8417c51..cd60602b088 100644 --- a/compiler/rustc_mir/src/util/pretty.rs +++ b/compiler/rustc_mir/src/util/pretty.rs @@ -640,7 +640,7 @@ pub fn write_allocations<'tcx>( } struct CollectAllocIds(BTreeSet); impl<'tcx> TypeVisitor<'tcx> for CollectAllocIds { - fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<()> { + fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow { if let ty::ConstKind::Value(val) = c.val { self.0.extend(alloc_ids_from_const(val)); } diff --git a/compiler/rustc_mir_build/src/build/expr/as_place.rs b/compiler/rustc_mir_build/src/build/expr/as_place.rs index b94346fa439..e6263e5d6cf 100644 --- a/compiler/rustc_mir_build/src/build/expr/as_place.rs +++ b/compiler/rustc_mir_build/src/build/expr/as_place.rs @@ -160,7 +160,30 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { expr_span, source_info, ), - ExprKind::SelfRef => block.and(PlaceBuilder::from(Local::new(1))), + ExprKind::UpvarRef { closure_def_id, var_hir_id } => { + let capture = this + .hir + .typeck_results + .closure_captures + .get(&closure_def_id) + .and_then(|captures| captures.get_full(&var_hir_id)); + + if capture.is_none() { + if !this.hir.tcx().features().capture_disjoint_fields { + bug!( + "No associated capture found for {:?} even though \ + capture_disjoint_fields isn't enabled", + expr.kind + ) + } + // FIXME(project-rfc-2229#24): Handle this case properly + } + + // Unwrap until the FIXME has been resolved + let (capture_index, _, upvar_id) = capture.unwrap(); + this.lower_closure_capture(block, capture_index, *upvar_id) + } + ExprKind::VarRef { id } => { let place_builder = if this.is_bound_var_in_guard(id) { let index = this.var_local_id(id, RefWithinGuard); @@ -270,6 +293,61 @@ impl<'a, 
'tcx> Builder<'a, 'tcx> { } } + /// Lower a closure/generator capture by representing it as a field + /// access within the desugared closure/generator. + /// + /// `capture_index` is the index of the capture within the desugared + /// closure/generator. + fn lower_closure_capture( + &mut self, + block: BasicBlock, + capture_index: usize, + upvar_id: ty::UpvarId, + ) -> BlockAnd> { + let closure_ty = self + .hir + .typeck_results() + .node_type(self.hir.tcx().hir().local_def_id_to_hir_id(upvar_id.closure_expr_id)); + + // Captures are represented using fields inside a structure. + // This represents accessing self in the closure structure + let mut place_builder = PlaceBuilder::from(Local::new(1)); + + // In case of Fn/FnMut closures we must deref to access the fields + // Generators are considered FnOnce, so we ignore this step for them. + if let ty::Closure(_, closure_substs) = closure_ty.kind() { + match self.hir.infcx().closure_kind(closure_substs).unwrap() { + ty::ClosureKind::Fn | ty::ClosureKind::FnMut => { + place_builder = place_builder.deref(); + } + ty::ClosureKind::FnOnce => {} + } + } + + let substs = match closure_ty.kind() { + ty::Closure(_, substs) => ty::UpvarSubsts::Closure(substs), + ty::Generator(_, substs, _) => ty::UpvarSubsts::Generator(substs), + _ => bug!("Lowering capture for non-closure type {:?}", closure_ty) + }; + + // Access the capture by accessing the field within the Closure struct. + // + // We must have inferred the capture types since we are building MIR, therefore + // it's safe to call `upvar_tys` and we can unwrap here because + // we know that the capture exists and is the `capture_index`-th capture. + let var_ty = substs.upvar_tys().nth(capture_index).unwrap(); + place_builder = place_builder.field(Field::new(capture_index), var_ty); + + // If the variable is captured via ByRef(Immutable/Mutable) Borrow, + // we need to deref it + match self.hir.typeck_results.upvar_capture(upvar_id) { + ty::UpvarCapture::ByRef(_) => { + block.and(place_builder.deref()) + } + ty::UpvarCapture::ByValue(_) => block.and(place_builder), + } + } + /// Lower an index expression /// /// This has two complications; diff --git a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs index 2853bf887fa..b6728c6b2ce 100644 --- a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs +++ b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs @@ -250,7 +250,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { | ExprKind::Deref { .. } | ExprKind::Index { .. } | ExprKind::VarRef { .. } - | ExprKind::SelfRef + | ExprKind::UpvarRef { .. } | ExprKind::Break { .. } | ExprKind::Continue { .. } | ExprKind::Return { .. } diff --git a/compiler/rustc_mir_build/src/build/expr/category.rs b/compiler/rustc_mir_build/src/build/expr/category.rs index ac5cf187aa0..8561170856f 100644 --- a/compiler/rustc_mir_build/src/build/expr/category.rs +++ b/compiler/rustc_mir_build/src/build/expr/category.rs @@ -38,7 +38,7 @@ impl Category { ExprKind::Field { .. } | ExprKind::Deref { .. } | ExprKind::Index { .. } - | ExprKind::SelfRef + | ExprKind::UpvarRef { .. } | ExprKind::VarRef { .. } | ExprKind::PlaceTypeAscription { .. } | ExprKind::ValueTypeAscription { .. 
} => Some(Category::Place), diff --git a/compiler/rustc_mir_build/src/build/expr/into.rs b/compiler/rustc_mir_build/src/build/expr/into.rs index 9dc596a345f..50001c38dc7 100644 --- a/compiler/rustc_mir_build/src/build/expr/into.rs +++ b/compiler/rustc_mir_build/src/build/expr/into.rs @@ -400,7 +400,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // Avoid creating a temporary ExprKind::VarRef { .. } - | ExprKind::SelfRef + | ExprKind::UpvarRef { .. } | ExprKind::PlaceTypeAscription { .. } | ExprKind::ValueTypeAscription { .. } => { debug_assert!(Category::of(&expr.kind) == Some(Category::Place)); diff --git a/compiler/rustc_mir_build/src/build/matches/test.rs b/compiler/rustc_mir_build/src/build/matches/test.rs index 7bea8220ada..07173f41cd6 100644 --- a/compiler/rustc_mir_build/src/build/matches/test.rs +++ b/compiler/rustc_mir_build/src/build/matches/test.rs @@ -671,6 +671,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { (&TestKind::Range { .. }, _) => None, (&TestKind::Eq { .. } | &TestKind::Len { .. }, _) => { + // The call to `self.test(&match_pair)` below is not actually used to generate any + // MIR. Instead, we just want to compare with `test` (the parameter of the method) + // to see if it is the same. + // + // However, at this point we can still encounter or-patterns that were extracted + // from previous calls to `sort_candidate`, so we need to manually address that + // case to avoid panicking in `self.test()`. + if let PatKind::Or { .. } = &*match_pair.pattern.kind { + return None; + } + // These are all binary tests. // // FIXME(#29623) we can be more clever here diff --git a/compiler/rustc_mir_build/src/build/mod.rs b/compiler/rustc_mir_build/src/build/mod.rs index f9995f43f5a..c50389a850e 100644 --- a/compiler/rustc_mir_build/src/build/mod.rs +++ b/compiler/rustc_mir_build/src/build/mod.rs @@ -24,7 +24,7 @@ use super::lints; crate fn mir_built<'tcx>( tcx: TyCtxt<'tcx>, def: ty::WithOptConstParam, -) -> &'tcx ty::steal::Steal> { +) -> &'tcx rustc_data_structures::steal::Steal> { if let Some(def) = def.try_upgrade(tcx) { return tcx.mir_built(def); } @@ -240,7 +240,7 @@ fn liberated_closure_env_ty( }; let closure_env_ty = tcx.closure_env_ty(closure_def_id, closure_substs).unwrap(); - tcx.erase_late_bound_regions(&closure_env_ty) + tcx.erase_late_bound_regions(closure_env_ty) } #[derive(Debug, PartialEq, Eq)] diff --git a/compiler/rustc_mir_build/src/thir/cx/expr.rs b/compiler/rustc_mir_build/src/thir/cx/expr.rs index 6ed7ed575fc..e404afeb698 100644 --- a/compiler/rustc_mir_build/src/thir/cx/expr.rs +++ b/compiler/rustc_mir_build/src/thir/cx/expr.rs @@ -387,8 +387,9 @@ fn make_mirror_unadjusted<'a, 'tcx>( } }; let upvars = cx - .tcx - .upvars_mentioned(def_id) + .typeck_results() + .closure_captures + .get(&def_id) .iter() .flat_map(|upvars| upvars.iter()) .zip(substs.upvar_tys()) @@ -879,130 +880,26 @@ fn convert_path_expr<'a, 'tcx>( ExprKind::Deref { arg: Expr { ty, temp_lifetime, span: expr.span, kind }.to_ref() } } - Res::Local(var_hir_id) => convert_var(cx, expr, var_hir_id), + Res::Local(var_hir_id) => convert_var(cx, var_hir_id), _ => span_bug!(expr.span, "res `{:?}` not yet implemented", res), } } -fn convert_var<'tcx>( - cx: &mut Cx<'_, 'tcx>, - expr: &'tcx hir::Expr<'tcx>, - var_hir_id: hir::HirId, -) -> ExprKind<'tcx> { - let upvar_index = cx - .typeck_results() - .closure_captures - .get(&cx.body_owner) - .and_then(|upvars| upvars.get_full(&var_hir_id).map(|(i, _, _)| i)); - - debug!( - "convert_var({:?}): upvar_index={:?}, body_owner={:?}", - var_hir_id, upvar_index, cx.body_owner 
- ); - - let temp_lifetime = cx.region_scope_tree.temporary_scope(expr.hir_id.local_id); - - match upvar_index { - None => ExprKind::VarRef { id: var_hir_id }, - - Some(upvar_index) => { - let closure_def_id = cx.body_owner; - let upvar_id = ty::UpvarId { - var_path: ty::UpvarPath { hir_id: var_hir_id }, - closure_expr_id: closure_def_id.expect_local(), - }; - let var_ty = cx.typeck_results().node_type(var_hir_id); +fn convert_var<'tcx>(cx: &mut Cx<'_, 'tcx>, var_hir_id: hir::HirId) -> ExprKind<'tcx> { + // We want upvars here not captures. + // Captures will be handled in MIR. + let is_upvar = cx + .tcx + .upvars_mentioned(cx.body_owner) + .map_or(false, |upvars| upvars.contains_key(&var_hir_id)); - // FIXME free regions in closures are not right - let closure_ty = cx - .typeck_results() - .node_type(cx.tcx.hir().local_def_id_to_hir_id(upvar_id.closure_expr_id)); - - // FIXME we're just hard-coding the idea that the - // signature will be &self or &mut self and hence will - // have a bound region with number 0 - let region = ty::ReFree(ty::FreeRegion { - scope: closure_def_id, - bound_region: ty::BoundRegion::BrAnon(0), - }); - let region = cx.tcx.mk_region(region); - - let self_expr = if let ty::Closure(_, closure_substs) = closure_ty.kind() { - match cx.infcx.closure_kind(closure_substs).unwrap() { - ty::ClosureKind::Fn => { - let ref_closure_ty = cx.tcx.mk_ref( - region, - ty::TypeAndMut { ty: closure_ty, mutbl: hir::Mutability::Not }, - ); - Expr { - ty: closure_ty, - temp_lifetime, - span: expr.span, - kind: ExprKind::Deref { - arg: Expr { - ty: ref_closure_ty, - temp_lifetime, - span: expr.span, - kind: ExprKind::SelfRef, - } - .to_ref(), - }, - } - } - ty::ClosureKind::FnMut => { - let ref_closure_ty = cx.tcx.mk_ref( - region, - ty::TypeAndMut { ty: closure_ty, mutbl: hir::Mutability::Mut }, - ); - Expr { - ty: closure_ty, - temp_lifetime, - span: expr.span, - kind: ExprKind::Deref { - arg: Expr { - ty: ref_closure_ty, - temp_lifetime, - span: expr.span, - kind: ExprKind::SelfRef, - } - .to_ref(), - }, - } - } - ty::ClosureKind::FnOnce => Expr { - ty: closure_ty, - temp_lifetime, - span: expr.span, - kind: ExprKind::SelfRef, - }, - } - } else { - Expr { ty: closure_ty, temp_lifetime, span: expr.span, kind: ExprKind::SelfRef } - }; + debug!("convert_var({:?}): is_upvar={}, body_owner={:?}", var_hir_id, is_upvar, cx.body_owner); - // at this point we have `self.n`, which loads up the upvar - let field_kind = - ExprKind::Field { lhs: self_expr.to_ref(), name: Field::new(upvar_index) }; - - // ...but the upvar might be an `&T` or `&mut T` capture, at which - // point we need an implicit deref - match cx.typeck_results().upvar_capture(upvar_id) { - ty::UpvarCapture::ByValue(_) => field_kind, - ty::UpvarCapture::ByRef(borrow) => ExprKind::Deref { - arg: Expr { - temp_lifetime, - ty: cx.tcx.mk_ref( - borrow.region, - ty::TypeAndMut { ty: var_ty, mutbl: borrow.kind.to_mutbl_lossy() }, - ), - span: expr.span, - kind: field_kind, - } - .to_ref(), - }, - } - } + if is_upvar { + ExprKind::UpvarRef { closure_def_id: cx.body_owner, var_hir_id } + } else { + ExprKind::VarRef { id: var_hir_id } } } @@ -1101,7 +998,7 @@ fn capture_upvar<'tcx>( temp_lifetime, ty: var_ty, span: closure_expr.span, - kind: convert_var(cx, closure_expr, var_hir_id), + kind: convert_var(cx, var_hir_id), }; match upvar_capture { ty::UpvarCapture::ByValue(_) => captured_var.to_ref(), diff --git a/compiler/rustc_mir_build/src/thir/cx/mod.rs b/compiler/rustc_mir_build/src/thir/cx/mod.rs index cf42fee873e..465808cea9d 100644 
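With `ExprKind::SelfRef` replaced by `ExprKind::UpvarRef`, the THIR no longer hard-codes the `&self`/`&mut self` deref chain; instead `lower_closure_capture` above lowers each captured variable to a field projection on the closure environment, with an extra deref for `Fn`/`FnMut` closures and for by-ref captures. As a rough mental model only (not the compiler's internal representation; `Env` and `call` are illustrative names):

```rust
// Conceptually, `let c = move || x + 1;` becomes a struct holding its
// captures, and every use of `x` inside the closure is a field access
// on that environment.
struct Env {
    x: i32, // capture #0: `x`, captured by value
}

impl Env {
    // An `Fn` closure is called through `&self`, which is why the MIR
    // lowering inserts a deref before the field projection: (*env).x
    fn call(&self) -> i32 {
        self.x + 1
    }
}

fn main() {
    let x = 41;
    let env = Env { x };
    assert_eq!(env.call(), 42);

    // The real closure behaves the same way.
    let c = move || x + 1;
    assert_eq!(c(), 42);
}
```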
--- a/compiler/rustc_mir_build/src/thir/cx/mod.rs +++ b/compiler/rustc_mir_build/src/thir/cx/mod.rs @@ -186,6 +186,10 @@ impl<'a, 'tcx> Cx<'a, 'tcx> { ty.needs_drop(self.tcx, self.param_env) } + crate fn infcx(&self) -> &'a InferCtxt<'a, 'tcx> { + self.infcx + } + crate fn tcx(&self) -> TyCtxt<'tcx> { self.tcx } diff --git a/compiler/rustc_mir_build/src/thir/mod.rs b/compiler/rustc_mir_build/src/thir/mod.rs index f2a2ef0d8f2..1a901746d50 100644 --- a/compiler/rustc_mir_build/src/thir/mod.rs +++ b/compiler/rustc_mir_build/src/thir/mod.rs @@ -211,8 +211,14 @@ crate enum ExprKind<'tcx> { VarRef { id: hir::HirId, }, - /// first argument, used for self in a closure - SelfRef, + /// Used to represent upvars mentioned in a closure/generator + UpvarRef { + /// DefId of the closure/generator + closure_def_id: DefId, + + /// HirId of the root variable + var_hir_id: hir::HirId, + }, Borrow { borrow_kind: BorrowKind, arg: ExprRef<'tcx>, diff --git a/compiler/rustc_mir_build/src/thir/pattern/_match.rs b/compiler/rustc_mir_build/src/thir/pattern/_match.rs index 5e7e81eba62..79a74e38743 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/_match.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/_match.rs @@ -3,6 +3,7 @@ //! - ui/or-patterns //! - ui/consts/const_in_pattern //! - ui/rfc-2008-non-exhaustive +//! - ui/half-open-range-patterns //! - probably many others //! I (Nadrieril) prefer to put new tests in `ui/pattern/usefulness` unless there's a specific //! reason not to, for example if they depend on a particular feature like or_patterns. @@ -364,14 +365,14 @@ impl<'tcx> Pat<'tcx> { /// A row of a matrix. Rows of len 1 are very common, which is why `SmallVec[_; 2]` /// works well. #[derive(Debug, Clone)] -crate struct PatStack<'p, 'tcx> { +struct PatStack<'p, 'tcx> { pats: SmallVec<[&'p Pat<'tcx>; 2]>, /// Cache for the constructor of the head head_ctor: OnceCell>, } impl<'p, 'tcx> PatStack<'p, 'tcx> { - crate fn from_pattern(pat: &'p Pat<'tcx>) -> Self { + fn from_pattern(pat: &'p Pat<'tcx>) -> Self { Self::from_vec(smallvec![pat]) } @@ -455,17 +456,17 @@ impl<'p, 'tcx> FromIterator<&'p Pat<'tcx>> for PatStack<'p, 'tcx> { /// A 2D matrix. #[derive(Clone, PartialEq)] -crate struct Matrix<'p, 'tcx> { +struct Matrix<'p, 'tcx> { patterns: Vec>, } impl<'p, 'tcx> Matrix<'p, 'tcx> { - crate fn empty() -> Self { + fn empty() -> Self { Matrix { patterns: vec![] } } /// Pushes a new row to the matrix. If the row starts with an or-pattern, this expands it. - crate fn push(&mut self, row: PatStack<'p, 'tcx>) { + fn push(&mut self, row: PatStack<'p, 'tcx>) { if let Some(rows) = row.expand_or_pat() { for row in rows { // We recursively expand the or-patterns of the new rows. @@ -588,7 +589,7 @@ impl<'a, 'tcx> MatchCheckCtxt<'a, 'tcx> { } /// Returns whether the given type is an enum from another crate declared `#[non_exhaustive]`. - crate fn is_foreign_non_exhaustive_enum(&self, ty: Ty<'tcx>) -> bool { + fn is_foreign_non_exhaustive_enum(&self, ty: Ty<'tcx>) -> bool { match ty.kind() { ty::Adt(def, ..) 
=> { def.is_enum() && def.is_variant_list_non_exhaustive() && !def.did.is_local() @@ -1392,13 +1393,12 @@ impl<'tcx> Usefulness<'tcx> { pcx: PatCtxt<'_, 'p, 'tcx>, ctor: &Constructor<'tcx>, ctor_wild_subpatterns: &Fields<'p, 'tcx>, - is_top_level: bool, ) -> Self { match self { UsefulWithWitness(witnesses) => { let new_witnesses = if ctor.is_wildcard() { let missing_ctors = MissingConstructors::new(pcx); - let new_patterns = missing_ctors.report_patterns(pcx, is_top_level); + let new_patterns = missing_ctors.report_patterns(pcx); witnesses .into_iter() .flat_map(|witness| { @@ -1440,7 +1440,7 @@ impl<'tcx> Usefulness<'tcx> { } #[derive(Copy, Clone, Debug)] -crate enum WitnessPreference { +enum WitnessPreference { ConstructWitness, LeaveOutWitness, } @@ -1454,6 +1454,9 @@ struct PatCtxt<'a, 'p, 'tcx> { ty: Ty<'tcx>, /// Span of the current pattern under investigation. span: Span, + /// Whether the current pattern is the whole pattern as found in a match arm, or if it's a + /// subpattern. + is_top_level: bool, } /// A witness of non-exhaustiveness for error reporting, represented @@ -1493,7 +1496,8 @@ struct PatCtxt<'a, 'p, 'tcx> { crate struct Witness<'tcx>(Vec>); impl<'tcx> Witness<'tcx> { - crate fn single_pattern(self) -> Pat<'tcx> { + /// Asserts that the witness contains a single pattern, and returns it. + fn single_pattern(self) -> Pat<'tcx> { assert_eq!(self.0.len(), 1); self.0.into_iter().next().unwrap() } @@ -1585,11 +1589,12 @@ fn all_constructors<'p, 'tcx>(pcx: PatCtxt<'_, 'p, 'tcx>) -> Vec(pcx: PatCtxt<'_, 'p, 'tcx>) -> Vec { + vec![NonExhaustive] + } + ty::Never => vec![], _ if cx.is_uninhabited(pcx.ty) => vec![], ty::Adt(..) | ty::Tuple(..) | ty::Ref(..) => vec![Single], // This type is one for which we cannot list constructors, like `str` or `f64`. @@ -2012,11 +2024,7 @@ impl<'tcx> MissingConstructors<'tcx> { /// List the patterns corresponding to the missing constructors. In some cases, instead of /// listing all constructors of a given type, we prefer to simply report a wildcard. - fn report_patterns<'p>( - &self, - pcx: PatCtxt<'_, 'p, 'tcx>, - is_top_level: bool, - ) -> SmallVec<[Pat<'tcx>; 1]> { + fn report_patterns<'p>(&self, pcx: PatCtxt<'_, 'p, 'tcx>) -> SmallVec<[Pat<'tcx>; 1]> { // There are 2 ways we can report a witness here. // Commonly, we can report all the "free" // constructors as witnesses, e.g., if we have: @@ -2044,7 +2052,7 @@ impl<'tcx> MissingConstructors<'tcx> { // `used_ctors` is empty. // The exception is: if we are at the top-level, for example in an empty match, we // sometimes prefer reporting the list of constructors instead of just `_`. - let report_when_all_missing = is_top_level && !IntRange::is_integral(pcx.ty); + let report_when_all_missing = pcx.is_top_level && !IntRange::is_integral(pcx.ty); if self.used_ctors.is_empty() && !report_when_all_missing { // All constructors are unused. Report only a wildcard // rather than each individual constructor. @@ -2086,7 +2094,7 @@ impl<'tcx> MissingConstructors<'tcx> { /// `is_under_guard` is used to inform if the pattern has a guard. If it /// has one it must not be inserted into the matrix. This shouldn't be /// relied on for soundness. -crate fn is_useful<'p, 'tcx>( +fn is_useful<'p, 'tcx>( cx: &MatchCheckCtxt<'p, 'tcx>, matrix: &Matrix<'p, 'tcx>, v: &PatStack<'p, 'tcx>, @@ -2200,7 +2208,7 @@ crate fn is_useful<'p, 'tcx>( // FIXME(Nadrieril): Hack to work around type normalization issues (see #72476). 
let ty = matrix.heads().next().map(|r| r.ty).unwrap_or(v.head().ty); - let pcx = PatCtxt { cx, matrix, ty, span: v.head().span }; + let pcx = PatCtxt { cx, matrix, ty, span: v.head().span, is_top_level }; debug!("is_useful_expand_first_col: ty={:#?}, expanding {:#?}", pcx.ty, v.head()); @@ -2215,7 +2223,7 @@ crate fn is_useful<'p, 'tcx>( let v = v.pop_head_constructor(&ctor_wild_subpatterns); let usefulness = is_useful(pcx.cx, &matrix, &v, witness_preference, hir_id, is_under_guard, false); - usefulness.apply_constructor(pcx, &ctor, &ctor_wild_subpatterns, is_top_level) + usefulness.apply_constructor(pcx, &ctor, &ctor_wild_subpatterns) }) .find(|result| result.is_useful()) .unwrap_or(NotUseful); @@ -2283,3 +2291,63 @@ fn pat_constructor<'p, 'tcx>( PatKind::Or { .. } => bug!("Or-pattern should have been expanded earlier on."), } } + +/// The arm of a match expression. +#[derive(Clone, Copy)] +crate struct MatchArm<'p, 'tcx> { + /// The pattern must have been lowered through `MatchVisitor::lower_pattern`. + crate pat: &'p super::Pat<'tcx>, + crate hir_id: HirId, + crate has_guard: bool, +} + +/// The output of checking a match for exhaustiveness and arm reachability. +crate struct UsefulnessReport<'p, 'tcx> { + /// For each arm of the input, whether that arm is reachable after the arms above it. + crate arm_usefulness: Vec<(MatchArm<'p, 'tcx>, Usefulness<'tcx>)>, + /// If the match is exhaustive, this is empty. If not, this contains witnesses for the lack of + /// exhaustiveness. + crate non_exhaustiveness_witnesses: Vec>, +} + +/// The entrypoint for the usefulness algorithm. Computes whether a match is exhaustive and which +/// of its arms are reachable. +/// +/// Note: the input patterns must have been lowered through `MatchVisitor::lower_pattern`. +crate fn compute_match_usefulness<'p, 'tcx>( + cx: &MatchCheckCtxt<'p, 'tcx>, + arms: &[MatchArm<'p, 'tcx>], + scrut_hir_id: HirId, + scrut_ty: Ty<'tcx>, +) -> UsefulnessReport<'p, 'tcx> { + let mut matrix = Matrix::empty(); + let arm_usefulness: Vec<_> = arms + .iter() + .copied() + .map(|arm| { + let v = PatStack::from_pattern(arm.pat); + let usefulness = + is_useful(cx, &matrix, &v, LeaveOutWitness, arm.hir_id, arm.has_guard, true); + if !arm.has_guard { + matrix.push(v); + } + (arm, usefulness) + }) + .collect(); + + let wild_pattern = cx.pattern_arena.alloc(super::Pat::wildcard_from_ty(scrut_ty)); + let v = PatStack::from_pattern(wild_pattern); + let usefulness = is_useful(cx, &matrix, &v, ConstructWitness, scrut_hir_id, false, true); + let non_exhaustiveness_witnesses = match usefulness { + NotUseful => vec![], // Wildcard pattern isn't useful, so the match is exhaustive. 
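`compute_match_usefulness` folds the two old passes into one report: an arm is reachable iff its pattern is useful with respect to the arms above it, and the match is exhaustive iff a trailing wildcard is *not* useful. Below is a heavily simplified standalone model over boolean patterns that ignores guards, or-patterns, and witness construction; the names `Pat`, `is_useful`, and `Report` are illustrative, not the rustc API.

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Pat {
    Wild,      // `_`
    Lit(bool), // `true` / `false`
}

fn pat_matches(pat: Pat, value: bool) -> bool {
    match pat {
        Pat::Wild => true,
        Pat::Lit(b) => b == value,
    }
}

/// A pattern is "useful" w.r.t. the rows above it if some value it matches
/// is not already covered by an earlier row.
fn is_useful(rows: &[Pat], pat: Pat) -> bool {
    [true, false]
        .iter()
        .any(|&v| pat_matches(pat, v) && !rows.iter().any(|&r| pat_matches(r, v)))
}

#[derive(Debug)]
struct Report {
    /// For each arm, whether it is reachable given the arms above it.
    arm_reachable: Vec<bool>,
    /// Values not covered by any arm (witnesses of non-exhaustiveness).
    missing: Vec<bool>,
}

fn compute_report(arms: &[Pat]) -> Report {
    let mut matrix: Vec<Pat> = Vec::new();
    let arm_reachable = arms
        .iter()
        .map(|&p| {
            let useful = is_useful(&matrix, p);
            matrix.push(p);
            useful
        })
        .collect();
    // Exhaustive iff a wildcard appended at the end would be unreachable,
    // i.e. no value slips past every arm.
    let missing = [true, false]
        .iter()
        .copied()
        .filter(|&v| !arms.iter().any(|&p| pat_matches(p, v)))
        .collect();
    Report { arm_reachable, missing }
}

fn main() {
    // match x { true => .., _ => .., false => .. }
    let report = compute_report(&[Pat::Lit(true), Pat::Wild, Pat::Lit(false)]);
    assert_eq!(report.arm_reachable, vec![true, true, false]); // last arm unreachable
    assert!(report.missing.is_empty()); // the match is exhaustive
}
```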
+ UsefulWithWitness(pats) => { + if pats.is_empty() { + bug!("Exhaustiveness check returned no witnesses") + } else { + pats.into_iter().map(|w| w.single_pattern()).collect() + } + } + Useful(_) => bug!(), + }; + UsefulnessReport { arm_usefulness, non_exhaustiveness_witnesses } +} diff --git a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs index 14ed93f1127..f9fe261bcee 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs @@ -1,6 +1,7 @@ use super::_match::Usefulness::*; -use super::_match::WitnessPreference::*; -use super::_match::{expand_pattern, is_useful, MatchCheckCtxt, Matrix, PatStack}; +use super::_match::{ + compute_match_usefulness, expand_pattern, MatchArm, MatchCheckCtxt, UsefulnessReport, +}; use super::{PatCtxt, PatKind, PatternError}; use rustc_arena::TypedArena; @@ -12,7 +13,6 @@ use rustc_hir::def_id::DefId; use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor}; use rustc_hir::{HirId, Pat}; use rustc_middle::ty::{self, Ty, TyCtxt}; -use rustc_session::config::nightly_options; use rustc_session::lint::builtin::BINDINGS_WITH_VARIANT_NAME; use rustc_session::lint::builtin::{IRREFUTABLE_LET_PATTERNS, UNREACHABLE_PATTERNS}; use rustc_session::parse::feature_err; @@ -170,39 +170,50 @@ impl<'tcx> MatchVisitor<'_, 'tcx> { let mut have_errors = false; - let inlined_arms: Vec<_> = arms + let arms: Vec<_> = arms .iter() - .map(|hir::Arm { pat, guard, .. }| { - (self.lower_pattern(&mut cx, pat, &mut have_errors).0, pat.hir_id, guard.is_some()) + .map(|hir::Arm { pat, guard, .. }| MatchArm { + pat: self.lower_pattern(&mut cx, pat, &mut have_errors).0, + hir_id: pat.hir_id, + has_guard: guard.is_some(), }) .collect(); - // Bail out early if inlining failed. + // Bail out early if lowering failed. if have_errors { return; } - // Fourth, check for unreachable arms. - let matrix = check_arms(&mut cx, &inlined_arms, source); + let scrut_ty = self.typeck_results.expr_ty_adjusted(scrut); + let report = compute_match_usefulness(&cx, &arms, scrut.hir_id, scrut_ty); + + // Report unreachable arms. + report_arm_reachability(&cx, &report, source); - // Fifth, check if the match is exhaustive. + // Check if the match is exhaustive. // Note: An empty match isn't the same as an empty matrix for diagnostics purposes, // since an empty matrix can occur when there are arms, if those arms all have guards. - let scrut_ty = self.typeck_results.expr_ty_adjusted(scrut); - let is_empty_match = inlined_arms.is_empty(); - check_exhaustive(&mut cx, scrut_ty, scrut.span, &matrix, scrut.hir_id, is_empty_match); + let is_empty_match = arms.is_empty(); + let witnesses = report.non_exhaustiveness_witnesses; + if !witnesses.is_empty() { + non_exhaustive_match(&cx, scrut_ty, scrut.span, witnesses, is_empty_match); + } } fn check_irrefutable(&self, pat: &'tcx Pat<'tcx>, origin: &str, sp: Option) { let mut cx = self.new_cx(pat.hir_id); let (pattern, pattern_ty) = self.lower_pattern(&mut cx, pat, &mut false); - let pats: Matrix<'_, '_> = vec![PatStack::from_pattern(pattern)].into_iter().collect(); - - let witnesses = match check_not_useful(&mut cx, pattern_ty, &pats, pat.hir_id) { - Ok(_) => return, - Err(err) => err, - }; + let arms = vec![MatchArm { pat: pattern, hir_id: pat.hir_id, has_guard: false }]; + let report = compute_match_usefulness(&cx, &arms, pat.hir_id, pattern_ty); + + // Note: we ignore whether the pattern is unreachable (i.e. 
whether the type is empty). We + // only care about exhaustiveness here. + let witnesses = report.non_exhaustiveness_witnesses; + if witnesses.is_empty() { + // The pattern is irrefutable. + return; + } let joined_patterns = joined_uncovered_patterns(&witnesses); let mut err = struct_span_err!( @@ -355,17 +366,15 @@ fn irrefutable_let_pattern(tcx: TyCtxt<'_>, span: Span, id: HirId, source: hir:: }); } -/// Check for unreachable patterns. -fn check_arms<'p, 'tcx>( - cx: &mut MatchCheckCtxt<'p, 'tcx>, - arms: &[(&'p super::Pat<'tcx>, HirId, bool)], +/// Report unreachable arms, if any. +fn report_arm_reachability<'p, 'tcx>( + cx: &MatchCheckCtxt<'p, 'tcx>, + report: &UsefulnessReport<'p, 'tcx>, source: hir::MatchSource, -) -> Matrix<'p, 'tcx> { - let mut seen = Matrix::empty(); +) { let mut catchall = None; - for (arm_index, (pat, id, has_guard)) in arms.iter().copied().enumerate() { - let v = PatStack::from_pattern(pat); - match is_useful(cx, &seen, &v, LeaveOutWitness, id, has_guard, true) { + for (arm_index, (arm, is_useful)) in report.arm_usefulness.iter().enumerate() { + match is_useful { NotUseful => { match source { hir::MatchSource::IfDesugar { .. } | hir::MatchSource::WhileDesugar => bug!(), @@ -374,15 +383,15 @@ fn check_arms<'p, 'tcx>( // Check which arm we're on. match arm_index { // The arm with the user-specified pattern. - 0 => unreachable_pattern(cx.tcx, pat.span, id, None), + 0 => unreachable_pattern(cx.tcx, arm.pat.span, arm.hir_id, None), // The arm with the wildcard pattern. - 1 => irrefutable_let_pattern(cx.tcx, pat.span, id, source), + 1 => irrefutable_let_pattern(cx.tcx, arm.pat.span, arm.hir_id, source), _ => bug!(), } } hir::MatchSource::ForLoopDesugar | hir::MatchSource::Normal => { - unreachable_pattern(cx.tcx, pat.span, id, catchall); + unreachable_pattern(cx.tcx, arm.pat.span, arm.hir_id, catchall); } // Unreachable patterns in try and await expressions occur when one of @@ -390,79 +399,32 @@ fn check_arms<'p, 'tcx>( hir::MatchSource::AwaitDesugar | hir::MatchSource::TryDesugar => {} } } + Useful(unreachables) if unreachables.is_empty() => {} + // The arm is reachable, but contains unreachable subpatterns (from or-patterns). Useful(unreachables) => { - let mut unreachables: Vec<_> = unreachables.into_iter().flatten().collect(); + let mut unreachables: Vec<_> = unreachables.iter().flatten().copied().collect(); // Emit lints in the order in which they occur in the file. unreachables.sort_unstable(); for span in unreachables { - unreachable_pattern(cx.tcx, span, id, None); + unreachable_pattern(cx.tcx, span, arm.hir_id, None); } } UsefulWithWitness(_) => bug!(), } - if !has_guard { - seen.push(v); - if catchall.is_none() && pat_is_catchall(pat) { - catchall = Some(pat.span); - } + if !arm.has_guard && catchall.is_none() && pat_is_catchall(arm.pat) { + catchall = Some(arm.pat.span); } } - seen } -fn check_not_useful<'p, 'tcx>( - cx: &mut MatchCheckCtxt<'p, 'tcx>, - ty: Ty<'tcx>, - matrix: &Matrix<'p, 'tcx>, - hir_id: HirId, -) -> Result<(), Vec>> { - let wild_pattern = cx.pattern_arena.alloc(super::Pat::wildcard_from_ty(ty)); - let v = PatStack::from_pattern(wild_pattern); - - // false is given for `is_under_guard` argument due to the wildcard - // pattern not having a guard - match is_useful(cx, matrix, &v, ConstructWitness, hir_id, false, true) { - NotUseful => Ok(()), // This is good, wildcard pattern isn't reachable. 
- UsefulWithWitness(pats) => Err(if pats.is_empty() { - bug!("Exhaustiveness check returned no witnesses") - } else { - pats.into_iter().map(|w| w.single_pattern()).collect() - }), - Useful(_) => bug!(), - } -} - -fn check_exhaustive<'p, 'tcx>( - cx: &mut MatchCheckCtxt<'p, 'tcx>, +/// Report that a match is not exhaustive. +fn non_exhaustive_match<'p, 'tcx>( + cx: &MatchCheckCtxt<'p, 'tcx>, scrut_ty: Ty<'tcx>, sp: Span, - matrix: &Matrix<'p, 'tcx>, - hir_id: HirId, + witnesses: Vec>, is_empty_match: bool, ) { - // In the absence of the `exhaustive_patterns` feature, empty matches are not detected by - // `is_useful` to exhaustively match uninhabited types, so we manually check here. - if is_empty_match && !cx.tcx.features().exhaustive_patterns { - let scrutinee_is_visibly_uninhabited = match scrut_ty.kind() { - ty::Never => true, - ty::Adt(def, _) => { - def.is_enum() - && def.variants.is_empty() - && !cx.is_foreign_non_exhaustive_enum(scrut_ty) - } - _ => false, - }; - if scrutinee_is_visibly_uninhabited { - // If the type *is* uninhabited, an empty match is vacuously exhaustive. - return; - } - } - - let witnesses = match check_not_useful(cx, scrut_ty, matrix, hir_id) { - Ok(_) => return, - Err(err) => err, - }; - let non_empty_enum = match scrut_ty.kind() { ty::Adt(def, _) => def.is_enum() && !def.variants.is_empty(), _ => false, @@ -502,7 +464,7 @@ fn check_exhaustive<'p, 'tcx>( so a wildcard `_` is necessary to match exhaustively", scrut_ty, )); - if nightly_options::is_nightly_build() { + if cx.tcx.sess.is_nightly_build() { err.help(&format!( "add `#![feature(precise_pointer_size_matching)]` \ to the crate attributes to enable precise `{}` matching", diff --git a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs index 6370f8c375b..32fc0f008e9 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs @@ -18,6 +18,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> { /// Converts an evaluated constant to a pattern (if possible). /// This means aggregate values (like structs and enums) are converted /// to a pattern that matches the value (as if you'd compared via structural equality). 
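For context, `const_to_pat` is what makes a named constant usable in a pattern position: the evaluated value is expanded into an equivalent structural pattern, which is also why such constants are expected to be structurally matchable (in practice, their types derive `PartialEq` and `Eq`). A small surface-level illustration in ordinary Rust (not compiler code):

```rust
#[derive(PartialEq, Eq)]
struct Point {
    x: i32,
    y: i32,
}

const ORIGIN: Point = Point { x: 0, y: 0 };

fn describe(p: Point) -> &'static str {
    match p {
        // Behind the scenes the constant is converted into the structural
        // pattern `Point { x: 0, y: 0 }`.
        ORIGIN => "origin",
        Point { x: 0, .. } => "on the y axis",
        _ => "somewhere else",
    }
}

fn main() {
    assert_eq!(describe(Point { x: 0, y: 0 }), "origin");
    assert_eq!(describe(Point { x: 0, y: 3 }), "on the y axis");
    assert_eq!(describe(Point { x: 1, y: 1 }), "somewhere else");
}
```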
+ #[instrument(skip(self))] pub(super) fn const_to_pat( &self, cv: &'tcx ty::Const<'tcx>, @@ -25,15 +26,12 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> { span: Span, mir_structural_match_violation: bool, ) -> Pat<'tcx> { - debug!("const_to_pat: cv={:#?} id={:?}", cv, id); - debug!("const_to_pat: cv.ty={:?} span={:?}", cv.ty, span); - let pat = self.tcx.infer_ctxt().enter(|infcx| { let mut convert = ConstToPat::new(self, id, span, infcx); convert.to_pat(cv, mir_structural_match_violation) }); - debug!("const_to_pat: pat={:?}", pat); + debug!(?pat); pat } } @@ -61,6 +59,8 @@ struct ConstToPat<'a, 'tcx> { infcx: InferCtxt<'a, 'tcx>, include_lint_checks: bool, + + treat_byte_string_as_slice: bool, } mod fallback_to_const_ref { @@ -88,6 +88,7 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> { span: Span, infcx: InferCtxt<'a, 'tcx>, ) -> Self { + trace!(?pat_ctxt.typeck_results.hir_owner); ConstToPat { id, span, @@ -97,6 +98,10 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> { saw_const_match_error: Cell::new(false), saw_const_match_lint: Cell::new(false), behind_reference: Cell::new(false), + treat_byte_string_as_slice: pat_ctxt + .typeck_results + .treat_byte_string_as_slice + .contains(&id.local_id), } } @@ -153,6 +158,7 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> { cv: &'tcx ty::Const<'tcx>, mir_structural_match_violation: bool, ) -> Pat<'tcx> { + trace!(self.treat_byte_string_as_slice); // This method is just a wrapper handling a validity check; the heavy lifting is // performed by the recursive `recur` method, which is not meant to be // invoked except by this method. @@ -384,7 +390,7 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> { } PatKind::Wild } - // `&str` and `&[u8]` are represented as `ConstValue::Slice`, let's keep using this + // `&str` is represented as `ConstValue::Slice`, let's keep using this // optimization for now. ty::Str => PatKind::Constant { value: cv }, // `b"foo"` produces a `&[u8; 3]`, but you can't use constants of array type when @@ -393,11 +399,33 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> { // as slices. This means we turn `&[T; N]` constants into slice patterns, which // has no negative effects on pattern matching, even if we're actually matching on // arrays. - ty::Array(..) | + ty::Array(..) if !self.treat_byte_string_as_slice => { + let old = self.behind_reference.replace(true); + let array = tcx.deref_const(self.param_env.and(cv)); + let val = PatKind::Deref { + subpattern: Pat { + kind: Box::new(PatKind::Array { + prefix: tcx + .destructure_const(param_env.and(array)) + .fields + .iter() + .map(|val| self.recur(val, false)) + .collect::>()?, + slice: None, + suffix: vec![], + }), + span, + ty: pointee_ty, + }, + }; + self.behind_reference.set(old); + val + } + ty::Array(elem_ty, _) | // Cannot merge this with the catch all branch below, because the `const_deref` // changes the type from slice to array, we need to keep the original type in the // pattern. - ty::Slice(..) 
=> { + ty::Slice(elem_ty) => { let old = self.behind_reference.replace(true); let array = tcx.deref_const(self.param_env.and(cv)); let val = PatKind::Deref { @@ -413,7 +441,7 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> { suffix: vec![], }), span, - ty: pointee_ty, + ty: tcx.mk_slice(elem_ty), }, }; self.behind_reference.set(old); diff --git a/compiler/rustc_parse/src/lexer/mod.rs b/compiler/rustc_parse/src/lexer/mod.rs index 0dfacd78908..b5b34c7338d 100644 --- a/compiler/rustc_parse/src/lexer/mod.rs +++ b/compiler/rustc_parse/src/lexer/mod.rs @@ -510,7 +510,7 @@ impl<'a> StringReader<'a> { FatalError.raise() } - /// Note: It was decided to not add a test case, because it would be to big. + /// Note: It was decided to not add a test case, because it would be too big. /// fn report_too_many_hashes(&self, start: BytePos, found: usize) -> ! { self.fatal_span_( diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs index bad43cd5350..8a6b0230023 100644 --- a/compiler/rustc_parse/src/lib.rs +++ b/compiler/rustc_parse/src/lib.rs @@ -1,6 +1,5 @@ //! The main parser interface. -#![feature(bool_to_option)] #![feature(crate_visibility_modifier)] #![feature(bindings_after_at)] #![feature(iter_order_by)] @@ -616,12 +615,7 @@ fn prepend_attrs( if attr.style == ast::AttrStyle::Inner { return None; } - builder.push( - attr.tokens - .as_ref() - .unwrap_or_else(|| panic!("Attribute {:?} is missing tokens!", attr)) - .create_token_stream(), - ); + builder.push(attr.tokens()); } builder.push(tokens); Some(builder.build()) diff --git a/compiler/rustc_parse/src/parser/attr.rs b/compiler/rustc_parse/src/parser/attr.rs index 053b7e0b75f..41985757b57 100644 --- a/compiler/rustc_parse/src/parser/attr.rs +++ b/compiler/rustc_parse/src/parser/attr.rs @@ -30,52 +30,44 @@ impl<'a> Parser<'a> { let mut just_parsed_doc_comment = false; loop { debug!("parse_outer_attributes: self.token={:?}", self.token); - let (attr, tokens) = if self.check(&token::Pound) { - self.collect_tokens(|this| { - let inner_error_reason = if just_parsed_doc_comment { - "an inner attribute is not permitted following an outer doc comment" - } else if !attrs.is_empty() { - "an inner attribute is not permitted following an outer attribute" - } else { - DEFAULT_UNEXPECTED_INNER_ATTR_ERR_MSG - }; - let inner_parse_policy = InnerAttrPolicy::Forbidden { - reason: inner_error_reason, - saw_doc_comment: just_parsed_doc_comment, - prev_attr_sp: attrs.last().map(|a| a.span), - }; - let attr = this.parse_attribute_with_inner_parse_policy(inner_parse_policy)?; - just_parsed_doc_comment = false; - Ok(Some(attr)) - })? + let attr = if self.check(&token::Pound) { + let inner_error_reason = if just_parsed_doc_comment { + "an inner attribute is not permitted following an outer doc comment" + } else if !attrs.is_empty() { + "an inner attribute is not permitted following an outer attribute" + } else { + DEFAULT_UNEXPECTED_INNER_ATTR_ERR_MSG + }; + let inner_parse_policy = InnerAttrPolicy::Forbidden { + reason: inner_error_reason, + saw_doc_comment: just_parsed_doc_comment, + prev_attr_sp: attrs.last().map(|a| a.span), + }; + just_parsed_doc_comment = false; + Some(self.parse_attribute(inner_parse_policy)?) 
} else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind { - self.collect_tokens(|this| { - let attr = - attr::mk_doc_comment(comment_kind, attr_style, data, this.token.span); - if attr.style != ast::AttrStyle::Outer { - this.sess - .span_diagnostic - .struct_span_err_with_code( - this.token.span, - "expected outer doc comment", - error_code!(E0753), - ) - .note( - "inner doc comments like this (starting with \ - `//!` or `/*!`) can only appear before items", - ) - .emit(); - } - this.bump(); - just_parsed_doc_comment = true; - Ok(Some(attr)) - })? + if attr_style != ast::AttrStyle::Outer { + self.sess + .span_diagnostic + .struct_span_err_with_code( + self.token.span, + "expected outer doc comment", + error_code!(E0753), + ) + .note( + "inner doc comments like this (starting with \ + `//!` or `/*!`) can only appear before items", + ) + .emit(); + } + self.bump(); + just_parsed_doc_comment = true; + Some(attr::mk_doc_comment(comment_kind, attr_style, data, self.prev_token.span)) } else { - (None, None) + None }; - if let Some(mut attr) = attr { - attr.tokens = tokens; + if let Some(attr) = attr { attrs.push(attr); } else { break; @@ -85,49 +77,43 @@ impl<'a> Parser<'a> { } /// Matches `attribute = # ! [ meta_item ]`. - /// - /// If `permit_inner` is `true`, then a leading `!` indicates an inner - /// attribute. - pub fn parse_attribute(&mut self, permit_inner: bool) -> PResult<'a, ast::Attribute> { - debug!("parse_attribute: permit_inner={:?} self.token={:?}", permit_inner, self.token); - let inner_parse_policy = - if permit_inner { InnerAttrPolicy::Permitted } else { DEFAULT_INNER_ATTR_FORBIDDEN }; - self.parse_attribute_with_inner_parse_policy(inner_parse_policy) - } - - /// The same as `parse_attribute`, except it takes in an `InnerAttrPolicy` - /// that prescribes how to handle inner attributes. - fn parse_attribute_with_inner_parse_policy( + /// `inner_parse_policy` prescribes how to handle inner attributes. + fn parse_attribute( &mut self, inner_parse_policy: InnerAttrPolicy<'_>, ) -> PResult<'a, ast::Attribute> { debug!( - "parse_attribute_with_inner_parse_policy: inner_parse_policy={:?} self.token={:?}", + "parse_attribute: inner_parse_policy={:?} self.token={:?}", inner_parse_policy, self.token ); let lo = self.token.span; - let (span, item, style) = if self.eat(&token::Pound) { - let style = - if self.eat(&token::Not) { ast::AttrStyle::Inner } else { ast::AttrStyle::Outer }; - - self.expect(&token::OpenDelim(token::Bracket))?; - let item = self.parse_attr_item(false)?; - self.expect(&token::CloseDelim(token::Bracket))?; - let attr_sp = lo.to(self.prev_token.span); - - // Emit error if inner attribute is encountered and forbidden. - if style == ast::AttrStyle::Inner { - self.error_on_forbidden_inner_attr(attr_sp, inner_parse_policy); - } + let ((item, style, span), tokens) = self.collect_tokens(|this| { + if this.eat(&token::Pound) { + let style = if this.eat(&token::Not) { + ast::AttrStyle::Inner + } else { + ast::AttrStyle::Outer + }; - (attr_sp, item, style) - } else { - let token_str = pprust::token_to_string(&self.token); - let msg = &format!("expected `#`, found `{}`", token_str); - return Err(self.struct_span_err(self.token.span, msg)); - }; + this.expect(&token::OpenDelim(token::Bracket))?; + let item = this.parse_attr_item(false)?; + this.expect(&token::CloseDelim(token::Bracket))?; + let attr_sp = lo.to(this.prev_token.span); + + // Emit error if inner attribute is encountered and forbidden. 
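The reworked attribute parsing keeps enforcing `InnerAttrPolicy`: an inner attribute or inner doc comment is rejected once an outer attribute, outer doc comment, or item has already been seen. The distinction the policy guards is plain surface syntax (the item names below are arbitrary):

```rust
// Inner attribute: written `#![...]`, must come before any items or outer
// attributes, and applies to the enclosing item (here, the whole file).
#![allow(dead_code)]

// Outer attribute: written `#[...]`, annotates the item that follows it.
#[derive(Debug, Clone)]
struct Config {
    verbose: bool,
}

fn main() {
    let c = Config { verbose: true };
    println!("{:?}", c.clone());
}
```

Writing the `#![...]` form after the `#[derive]` or after the struct is exactly the case the `Forbidden` policy reports.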
+ if style == ast::AttrStyle::Inner { + this.error_on_forbidden_inner_attr(attr_sp, inner_parse_policy); + } + + Ok((item, style, attr_sp)) + } else { + let token_str = pprust::token_to_string(&this.token); + let msg = &format!("expected `#`, found `{}`", token_str); + Err(this.struct_span_err(this.token.span, msg)) + } + })?; - Ok(attr::mk_attr_from_item(style, item, span)) + Ok(attr::mk_attr_from_item(item, tokens, style, span)) } pub(super) fn error_on_forbidden_inner_attr(&self, attr_sp: Span, policy: InnerAttrPolicy<'_>) { @@ -196,30 +182,19 @@ impl<'a> Parser<'a> { let mut attrs: Vec = vec![]; loop { // Only try to parse if it is an inner attribute (has `!`). - let (attr, tokens) = - if self.check(&token::Pound) && self.look_ahead(1, |t| t == &token::Not) { - self.collect_tokens(|this| { - let attr = this.parse_attribute(true)?; - assert_eq!(attr.style, ast::AttrStyle::Inner); - Ok(Some(attr)) - })? - } else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind { - self.collect_tokens(|this| { - // We need to get the position of this token before we bump. - let attr = - attr::mk_doc_comment(comment_kind, attr_style, data, this.token.span); - if attr.style == ast::AttrStyle::Inner { - this.bump(); - Ok(Some(attr)) - } else { - Ok(None) - } - })? + let attr = if self.check(&token::Pound) && self.look_ahead(1, |t| t == &token::Not) { + Some(self.parse_attribute(InnerAttrPolicy::Permitted)?) + } else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind { + if attr_style == ast::AttrStyle::Inner { + self.bump(); + Some(attr::mk_doc_comment(comment_kind, attr_style, data, self.prev_token.span)) } else { - (None, None) - }; - if let Some(mut attr) = attr { - attr.tokens = tokens; + None + } + } else { + None + }; + if let Some(attr) = attr { attrs.push(attr); } else { break; @@ -337,14 +312,13 @@ impl<'a> Parser<'a> { } pub fn maybe_needs_tokens(attrs: &[ast::Attribute]) -> bool { + // One of the attributes may either itself be a macro, or apply derive macros (`derive`), + // or expand to macro attributes (`cfg_attr`). attrs.iter().any(|attr| { - if let Some(ident) = attr.ident() { + attr.ident().map_or(true, |ident| { ident.name == sym::derive - // This might apply a custom attribute/derive - || ident.name == sym::cfg_attr - || !rustc_feature::is_builtin_attr_name(ident.name) - } else { - true - } + || ident.name == sym::cfg_attr + || !rustc_feature::is_builtin_attr_name(ident.name) + }) }) } diff --git a/compiler/rustc_parse/src/parser/diagnostics.rs b/compiler/rustc_parse/src/parser/diagnostics.rs index cd3b8db2303..350a372a684 100644 --- a/compiler/rustc_parse/src/parser/diagnostics.rs +++ b/compiler/rustc_parse/src/parser/diagnostics.rs @@ -1808,9 +1808,13 @@ impl<'a> Parser<'a> { return Ok(false); // Don't continue. } - /// Handle a generic const argument that had not been enclosed in braces, and suggest enclosing - /// it braces. In this situation, unlike in `handle_ambiguous_unbraced_const_arg`, this is - /// almost certainly a const argument, so we always offer a suggestion. + /// Attempt to parse a generic const argument that has not been enclosed in braces. + /// There are a limited number of expressions that are permitted without being encoded + /// in braces: + /// - Literals. + /// - Single-segment paths (i.e. standalone generic const parameters). + /// All other expressions that can be parsed will emit an error suggesting the expression be + /// wrapped in braces. 
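Concretely, on the caller side this means only a literal or a bare single-segment path may appear as a const argument without braces; any other expression gets the "wrap it in braces" suggestion. A quick illustration using const generics (function and constant names are arbitrary):

```rust
fn first_n<const N: usize>(xs: &[u8]) -> &[u8] {
    &xs[..N]
}

const LEN: usize = 3;

fn main() {
    let data = [1u8, 2, 3, 4, 5];

    // Allowed without braces: a literal ...
    assert_eq!(first_n::<2>(&data), &[1, 2]);
    // ... and a single-segment path.
    assert_eq!(first_n::<LEN>(&data), &[1, 2, 3]);

    // Any more complex expression must be enclosed in braces; leaving them
    // off is what triggers the parser's suggestion.
    assert_eq!(first_n::<{ LEN + 1 }>(&data), &[1, 2, 3, 4]);
}
```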
pub fn handle_unambiguous_unbraced_const_arg(&mut self) -> PResult<'a, P> { let start = self.token.span; let expr = self.parse_expr_res(Restrictions::CONST_EXPR, None).map_err(|mut err| { diff --git a/compiler/rustc_parse/src/parser/expr.rs b/compiler/rustc_parse/src/parser/expr.rs index c2a13d4b0de..ffbf786491d 100644 --- a/compiler/rustc_parse/src/parser/expr.rs +++ b/compiler/rustc_parse/src/parser/expr.rs @@ -1089,6 +1089,9 @@ impl<'a> Parser<'a> { self.parse_yield_expr(attrs) } else if self.eat_keyword(kw::Let) { self.parse_let_expr(attrs) + } else if self.eat_keyword(kw::Underscore) { + self.sess.gated_spans.gate(sym::destructuring_assignment, self.prev_token.span); + Ok(self.mk_expr(self.prev_token.span, ExprKind::Underscore, attrs)) } else if !self.unclosed_delims.is_empty() && self.check(&token::Semi) { // Don't complain about bare semicolons after unclosed braces // recovery in order to keep the error count down. Fixing the @@ -2087,7 +2090,7 @@ impl<'a> Parser<'a> { recover: bool, ) -> PResult<'a, P> { let mut fields = Vec::new(); - let mut base = None; + let mut base = ast::StructRest::None; let mut recover_async = false; attrs.extend(self.parse_inner_attributes()?); @@ -2102,8 +2105,14 @@ impl<'a> Parser<'a> { while self.token != token::CloseDelim(token::Brace) { if self.eat(&token::DotDot) { let exp_span = self.prev_token.span; + // We permit `.. }` on the left-hand side of a destructuring assignment. + if self.check(&token::CloseDelim(token::Brace)) { + self.sess.gated_spans.gate(sym::destructuring_assignment, self.prev_token.span); + base = ast::StructRest::Rest(self.prev_token.span.shrink_to_hi()); + break; + } match self.parse_expr() { - Ok(e) => base = Some(e), + Ok(e) => base = ast::StructRest::Base(e), Err(mut e) if recover => { e.emit(); self.recover_stmt(); diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs index da1c54e88b5..40aa2db58c7 100644 --- a/compiler/rustc_parse/src/parser/mod.rs +++ b/compiler/rustc_parse/src/parser/mod.rs @@ -1180,8 +1180,7 @@ impl<'a> Parser<'a> { /// Records all tokens consumed by the provided callback, /// including the current token. These tokens are collected /// into a `LazyTokenStream`, and returned along with the result - /// of the callback. The returned `LazyTokenStream` will be `None` - /// if not tokens were captured. + /// of the callback. /// /// Note: If your callback consumes an opening delimiter /// (including the case where you call `collect_tokens` @@ -1203,17 +1202,14 @@ impl<'a> Parser<'a> { let ret = f(self)?; - // We didn't capture any tokens - let num_calls = self.token_cursor.num_next_calls - cursor_snapshot.num_next_calls; - if num_calls == 0 { - return Ok((ret, None)); - } - // Produces a `TokenStream` on-demand. Using `cursor_snapshot` // and `num_calls`, we can reconstruct the `TokenStream` seen // by the callback. This allows us to avoid producing a `TokenStream` // if it is never needed - for example, a captured `macro_rules!` // argument that is never passed to a proc macro. + // In practice token stream creation happens rarely compared to + // calls to `collect_tokens` (see some statistics in #78736), + // so we are doing as little up-front work as possible. // // This also makes `Parser` very cheap to clone, since // there is no intermediate collection buffer to clone. 
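The pattern behind `collect_tokens` is to defer building the token stream: record where the callback started and how many tokens it consumed, and replay that range only if a stream is ever requested. A standalone sketch of the idea, with a plain vector standing in for the token cursor (`LazyTokens` and `Parser` below are illustrative types, not the real parser's):

```rust
use std::rc::Rc;

#[derive(Clone, Debug, PartialEq)]
struct Token(String);

/// Replays a recorded range of tokens on demand instead of storing an
/// eagerly built copy of them.
#[derive(Clone)]
struct LazyTokens {
    source: Rc<Vec<Token>>, // shared "cursor snapshot"
    start: usize,           // where the callback started
    num_calls: usize,       // how many tokens the callback consumed
}

impl LazyTokens {
    fn create_token_stream(&self) -> Vec<Token> {
        self.source[self.start..self.start + self.num_calls].to_vec()
    }
}

struct Parser {
    tokens: Rc<Vec<Token>>,
    pos: usize,
}

impl Parser {
    fn bump(&mut self) -> Option<&Token> {
        let t = self.tokens.get(self.pos);
        self.pos += 1;
        t
    }

    /// Run `f`, returning its result plus a lazy handle to the tokens it consumed.
    fn collect_tokens<R>(&mut self, f: impl FnOnce(&mut Self) -> R) -> (R, LazyTokens) {
        let start = self.pos;
        let ret = f(self);
        let lazy = LazyTokens {
            source: Rc::clone(&self.tokens),
            start,
            num_calls: self.pos - start,
        };
        (ret, lazy)
    }
}

fn main() {
    let tokens: Vec<Token> =
        ["#", "[", "inline", "]"].iter().map(|s| Token(s.to_string())).collect();
    let mut parser = Parser { tokens: Rc::new(tokens), pos: 0 };

    // "Parse" an attribute by consuming three tokens.
    let (_ret, lazy) = parser.collect_tokens(|p| {
        p.bump();
        p.bump();
        p.bump();
    });

    // The token stream is only materialised when asked for.
    let stream = lazy.create_token_stream();
    assert_eq!(stream.len(), 3);
    assert_eq!(stream[2], Token("inline".to_string()));
}
```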
@@ -1247,8 +1243,8 @@ impl<'a> Parser<'a> { let lazy_impl = LazyTokenStreamImpl { start_token, + num_calls: self.token_cursor.num_next_calls - cursor_snapshot.num_next_calls, cursor_snapshot, - num_calls, desugar_doc_comments: self.desugar_doc_comments, }; Ok((ret, Some(LazyTokenStream::new(lazy_impl)))) diff --git a/compiler/rustc_parse/src/parser/pat.rs b/compiler/rustc_parse/src/parser/pat.rs index 196790a0ab3..ee9a6dca5ad 100644 --- a/compiler/rustc_parse/src/parser/pat.rs +++ b/compiler/rustc_parse/src/parser/pat.rs @@ -1,6 +1,6 @@ use super::{Parser, PathStyle}; use crate::{maybe_recover_from_interpolated_ty_qpath, maybe_whole}; -use rustc_ast::mut_visit::{noop_visit_mac, noop_visit_pat, MutVisitor}; +use rustc_ast::mut_visit::{noop_visit_pat, MutVisitor}; use rustc_ast::ptr::P; use rustc_ast::token; use rustc_ast::{self as ast, AttrVec, Attribute, FieldPat, MacCall, Pat, PatKind, RangeEnd}; @@ -570,10 +570,6 @@ impl<'a> Parser<'a> { fn make_all_value_bindings_mutable(pat: &mut P) -> bool { struct AddMut(bool); impl MutVisitor for AddMut { - fn visit_mac(&mut self, mac: &mut MacCall) { - noop_visit_mac(mac, self); - } - fn visit_pat(&mut self, pat: &mut P) { if let PatKind::Ident(BindingMode::ByValue(m @ Mutability::Not), ..) = &mut pat.kind { diff --git a/compiler/rustc_parse/src/parser/path.rs b/compiler/rustc_parse/src/parser/path.rs index 79e73749038..d64fd59b0a6 100644 --- a/compiler/rustc_parse/src/parser/path.rs +++ b/compiler/rustc_parse/src/parser/path.rs @@ -489,6 +489,7 @@ impl<'a> Parser<'a> { /// - An expression surrounded in `{}`. /// - A literal. /// - A numeric literal prefixed by `-`. + /// - A single-segment path. pub(super) fn expr_is_valid_const_arg(&self, expr: &P) -> bool { match &expr.kind { ast::ExprKind::Block(_, _) | ast::ExprKind::Lit(_) => true, @@ -496,6 +497,13 @@ impl<'a> Parser<'a> { ast::ExprKind::Lit(_) => true, _ => false, }, + // We can only resolve single-segment paths at the moment, because multi-segment paths + // require type-checking: see `visit_generic_arg` in `src/librustc_resolve/late.rs`. + ast::ExprKind::Path(None, path) + if path.segments.len() == 1 && path.segments[0].args.is_none() => + { + true + } _ => false, } } diff --git a/compiler/rustc_passes/src/check_const.rs b/compiler/rustc_passes/src/check_const.rs index b24c62b971a..e37c6418eb8 100644 --- a/compiler/rustc_passes/src/check_const.rs +++ b/compiler/rustc_passes/src/check_const.rs @@ -15,7 +15,6 @@ use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor}; use rustc_middle::hir::map::Map; use rustc_middle::ty::query::Providers; use rustc_middle::ty::TyCtxt; -use rustc_session::config::nightly_options; use rustc_session::parse::feature_err; use rustc_span::{sym, Span, Symbol}; @@ -145,7 +144,7 @@ impl<'tcx> CheckConstVisitor<'tcx> { // // FIXME(ecstaticmorse): Maybe this could be incorporated into `feature_err`? This // is a pretty narrow case, however. 
- if nightly_options::is_nightly_build() { + if tcx.sess.is_nightly_build() { for gate in missing_secondary { let note = format!( "add `#![feature({})]` to the crate attributes to enable", diff --git a/compiler/rustc_passes/src/diagnostic_items.rs b/compiler/rustc_passes/src/diagnostic_items.rs index 0f4aa72d5c4..5a087c41f58 100644 --- a/compiler/rustc_passes/src/diagnostic_items.rs +++ b/compiler/rustc_passes/src/diagnostic_items.rs @@ -113,6 +113,10 @@ fn collect<'tcx>(tcx: TyCtxt<'tcx>) -> FxHashMap { } } + for m in tcx.hir().krate().exported_macros { + collector.observe_item(m.attrs, m.hir_id); + } + collector.items } diff --git a/compiler/rustc_passes/src/hir_stats.rs b/compiler/rustc_passes/src/hir_stats.rs index 9537321026e..1d02c9aa637 100644 --- a/compiler/rustc_passes/src/hir_stats.rs +++ b/compiler/rustc_passes/src/hir_stats.rs @@ -336,8 +336,9 @@ impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> { ast_visit::walk_lifetime(self, lifetime) } - fn visit_mac(&mut self, mac: &'v ast::MacCall) { + fn visit_mac_call(&mut self, mac: &'v ast::MacCall) { self.record("MacCall", Id::None, mac); + ast_visit::walk_mac(self, mac) } fn visit_path_segment(&mut self, path_span: Span, path_segment: &'v ast::PathSegment) { diff --git a/compiler/rustc_passes/src/liveness.rs b/compiler/rustc_passes/src/liveness.rs index 7288015e170..debb873beb9 100644 --- a/compiler/rustc_passes/src/liveness.rs +++ b/compiler/rustc_passes/src/liveness.rs @@ -317,10 +317,11 @@ impl<'tcx> Visitor<'tcx> for IrMaps<'tcx> { // swap in a new set of IR maps for this body let mut maps = IrMaps::new(self.tcx); let hir_id = maps.tcx.hir().body_owner(body.id()); - let def_id = maps.tcx.hir().local_def_id(hir_id); + let local_def_id = maps.tcx.hir().local_def_id(hir_id); + let def_id = local_def_id.to_def_id(); // Don't run unused pass for #[derive()] - if let Some(parent) = self.tcx.parent(def_id.to_def_id()) { + if let Some(parent) = self.tcx.parent(def_id) { if let DefKind::Impl = self.tcx.def_kind(parent.expect_local()) { if self.tcx.has_attr(parent, sym::automatically_derived) { return; @@ -328,8 +329,8 @@ impl<'tcx> Visitor<'tcx> for IrMaps<'tcx> { } } - if let Some(upvars) = maps.tcx.upvars_mentioned(def_id) { - for (&var_hir_id, _upvar) in upvars { + if let Some(captures) = maps.tcx.typeck(local_def_id).closure_captures.get(&def_id) { + for &var_hir_id in captures.keys() { let var_name = maps.tcx.hir().name(var_hir_id); maps.add_variable(Upvar(var_hir_id, var_name)); } @@ -340,7 +341,7 @@ impl<'tcx> Visitor<'tcx> for IrMaps<'tcx> { intravisit::walk_body(&mut maps, body); // compute liveness - let mut lsets = Liveness::new(&mut maps, def_id); + let mut lsets = Liveness::new(&mut maps, local_def_id); let entry_ln = lsets.compute(&body, hir_id); lsets.log_liveness(entry_ln, body.id().hir_id); @@ -397,10 +398,18 @@ impl<'tcx> Visitor<'tcx> for IrMaps<'tcx> { // construction site. 
let mut call_caps = Vec::new(); let closure_def_id = self.tcx.hir().local_def_id(expr.hir_id); - if let Some(upvars) = self.tcx.upvars_mentioned(closure_def_id) { - call_caps.extend(upvars.iter().map(|(&var_id, upvar)| { + if let Some(captures) = self + .tcx + .typeck(closure_def_id) + .closure_captures + .get(&closure_def_id.to_def_id()) + { + // If closure captures is Some, upvars_mentioned must also be Some + let upvars = self.tcx.upvars_mentioned(closure_def_id).unwrap(); + call_caps.extend(captures.keys().map(|var_id| { + let upvar = upvars[var_id]; let upvar_ln = self.add_live_node(UpvarNode(upvar.span)); - CaptureInfo { ln: upvar_ln, var_hid: var_id } + CaptureInfo { ln: upvar_ln, var_hid: *var_id } })); } self.set_captures(expr.hir_id, call_caps); @@ -564,6 +573,7 @@ struct Liveness<'a, 'tcx> { typeck_results: &'a ty::TypeckResults<'tcx>, param_env: ty::ParamEnv<'tcx>, upvars: Option<&'tcx FxIndexMap>, + closure_captures: Option<&'tcx FxIndexMap>, successors: IndexVec>, rwu_table: RWUTable, @@ -587,6 +597,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { let typeck_results = ir.tcx.typeck(body_owner); let param_env = ir.tcx.param_env(body_owner); let upvars = ir.tcx.upvars_mentioned(body_owner); + let closure_captures = typeck_results.closure_captures.get(&body_owner.to_def_id()); let closure_ln = ir.add_live_node(ClosureNode); let exit_ln = ir.add_live_node(ExitNode); @@ -600,6 +611,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { typeck_results, param_env, upvars, + closure_captures, successors: IndexVec::from_elem_n(None, num_live_nodes), rwu_table: RWUTable::new(num_live_nodes * num_vars), closure_ln, @@ -850,14 +862,13 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { // if they are live on the entry to the closure, since only the closure // itself can access them on subsequent calls. - if let Some(upvars) = self.upvars { + if let Some(closure_captures) = self.closure_captures { // Mark upvars captured by reference as used after closure exits. - for (&var_hir_id, upvar) in upvars.iter().rev() { - let upvar_id = ty::UpvarId { - var_path: ty::UpvarPath { hir_id: var_hir_id }, - closure_expr_id: self.body_owner, - }; - match self.typeck_results.upvar_capture(upvar_id) { + // Since closure_captures is Some, upvars must exists too. + let upvars = self.upvars.unwrap(); + for (&var_hir_id, upvar_id) in closure_captures { + let upvar = upvars[&var_hir_id]; + match self.typeck_results.upvar_capture(*upvar_id) { ty::UpvarCapture::ByRef(_) => { let var = self.variable(var_hir_id, upvar.span); self.acc(self.exit_ln, var, ACC_READ | ACC_USE); @@ -869,7 +880,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { let succ = self.propagate_through_expr(&body.value, self.exit_ln); - if self.upvars.is_none() { + if self.closure_captures.is_none() { // Either not a closure, or closure without any captured variables. // No need to determine liveness of captured variables, since there // are none. @@ -1341,7 +1352,21 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { acc: u32, ) -> LiveNode { match path.res { - Res::Local(hid) => self.access_var(hir_id, hid, succ, acc, path.span), + Res::Local(hid) => { + let in_upvars = self.upvars.map_or(false, |u| u.contains_key(&hid)); + let in_captures = self.closure_captures.map_or(false, |c| c.contains_key(&hid)); + + match (in_upvars, in_captures) { + (false, _) | (true, true) => self.access_var(hir_id, hid, succ, acc, path.span), + (true, false) => { + // This case is possible when with RFC-2229, a wild pattern + // is used within a closure. + // eg: `let _ = x`. 
The closure doesn't capture x here, + // even though it's mentioned in the closure. + succ + } + } + } _ => succ, } } @@ -1531,11 +1556,15 @@ impl<'tcx> Liveness<'_, 'tcx> { } fn warn_about_unused_upvars(&self, entry_ln: LiveNode) { - let upvars = match self.upvars { + let closure_captures = match self.closure_captures { None => return, - Some(upvars) => upvars, + Some(closure_captures) => closure_captures, }; - for (&var_hir_id, upvar) in upvars.iter() { + + // If closure_captures is Some(), upvars must be Some() too. + let upvars = self.upvars.unwrap(); + for &var_hir_id in closure_captures.keys() { + let upvar = upvars[&var_hir_id]; let var = self.variable(var_hir_id, upvar.span); let upvar_id = ty::UpvarId { var_path: ty::UpvarPath { hir_id: var_hir_id }, diff --git a/compiler/rustc_passes/src/weak_lang_items.rs b/compiler/rustc_passes/src/weak_lang_items.rs index 8650ee05d37..4273d600004 100644 --- a/compiler/rustc_passes/src/weak_lang_items.rs +++ b/compiler/rustc_passes/src/weak_lang_items.rs @@ -26,7 +26,7 @@ pub fn check_crate<'tcx>(tcx: TyCtxt<'tcx>, items: &mut lang_items::LanguageItem if items.eh_personality().is_none() { items.missing.push(LangItem::EhPersonality); } - if tcx.sess.target.options.is_like_emscripten && items.eh_catch_typeinfo().is_none() { + if tcx.sess.target.is_like_emscripten && items.eh_catch_typeinfo().is_none() { items.missing.push(LangItem::EhCatchTypeinfo); } diff --git a/compiler/rustc_privacy/src/lib.rs b/compiler/rustc_privacy/src/lib.rs index 75d75433f1b..4a0d356d337 100644 --- a/compiler/rustc_privacy/src/lib.rs +++ b/compiler/rustc_privacy/src/lib.rs @@ -4,6 +4,7 @@ #![feature(or_patterns)] #![feature(control_flow_enum)] #![feature(try_blocks)] +#![feature(associated_type_defaults)] #![recursion_limit = "256"] use rustc_attr as attr; @@ -44,6 +45,8 @@ use std::{cmp, fmt, mem}; /// manually. Second, it doesn't visit some type components like signatures of fn types, or traits /// in `impl Trait`, see individual comments in `DefIdVisitorSkeleton::visit_ty`. trait DefIdVisitor<'tcx> { + type BreakTy = (); + fn tcx(&self) -> TyCtxt<'tcx>; fn shallow(&self) -> bool { false @@ -56,7 +59,7 @@ trait DefIdVisitor<'tcx> { def_id: DefId, kind: &str, descr: &dyn fmt::Display, - ) -> ControlFlow<()>; + ) -> ControlFlow; /// Not overridden, but used to actually visit types and traits. 
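The new `(true, false)` arm in the liveness pass above handles the RFC 2229 (`capture_disjoint_fields`) rule that a variable mentioned only under a wildcard pattern is not captured at all. A small illustration of the behaviour being modelled, assuming the disjoint-capture semantics that later shipped with edition 2021 (not part of the patch):

fn wildcard_does_not_capture() {
    let x = String::from("hello");

    // `x` appears in the closure body, but `let _ = x` binds nothing, so under
    // disjoint capture the closure captures nothing from `x`.
    let c = || {
        let _ = x;
    };

    // ... which is why `x` can still be moved away before the closure runs.
    drop(x);
    c();
}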
fn skeleton(&mut self) -> DefIdVisitorSkeleton<'_, 'tcx, Self> { @@ -66,13 +69,16 @@ trait DefIdVisitor<'tcx> { dummy: Default::default(), } } - fn visit(&mut self, ty_fragment: impl TypeFoldable<'tcx>) -> ControlFlow<()> { + fn visit(&mut self, ty_fragment: impl TypeFoldable<'tcx>) -> ControlFlow { ty_fragment.visit_with(&mut self.skeleton()) } - fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> ControlFlow<()> { + fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> ControlFlow { self.skeleton().visit_trait(trait_ref) } - fn visit_predicates(&mut self, predicates: ty::GenericPredicates<'tcx>) -> ControlFlow<()> { + fn visit_predicates( + &mut self, + predicates: ty::GenericPredicates<'tcx>, + ) -> ControlFlow { self.skeleton().visit_predicates(predicates) } } @@ -87,13 +93,13 @@ impl<'tcx, V> DefIdVisitorSkeleton<'_, 'tcx, V> where V: DefIdVisitor<'tcx> + ?Sized, { - fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> ControlFlow<()> { + fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> ControlFlow { let TraitRef { def_id, substs } = trait_ref; self.def_id_visitor.visit_def_id(def_id, "trait", &trait_ref.print_only_trait_path())?; if self.def_id_visitor.shallow() { ControlFlow::CONTINUE } else { substs.visit_with(self) } } - fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<()> { + fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow { match predicate.skip_binders() { ty::PredicateAtom::Trait(ty::TraitPredicate { trait_ref }, _) => { self.visit_trait(trait_ref) @@ -119,7 +125,10 @@ where } } - fn visit_predicates(&mut self, predicates: ty::GenericPredicates<'tcx>) -> ControlFlow<()> { + fn visit_predicates( + &mut self, + predicates: ty::GenericPredicates<'tcx>, + ) -> ControlFlow { let ty::GenericPredicates { parent: _, predicates } = predicates; predicates.iter().try_for_each(|&(predicate, _span)| self.visit_predicate(predicate)) } @@ -129,7 +138,9 @@ impl<'tcx, V> TypeVisitor<'tcx> for DefIdVisitorSkeleton<'_, 'tcx, V> where V: DefIdVisitor<'tcx> + ?Sized, { - fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> { + type BreakTy = V::BreakTy; + + fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow { let tcx = self.def_id_visitor.tcx(); // InternalSubsts are not visited here because they are visited below in `super_visit_with`. 
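The `BreakTy` associated type threaded through `DefIdVisitor` above lets each visitor decide what a short-circuiting `ControlFlow::Break` carries; the compiler additionally defaults it to `()` via the unstable `associated_type_defaults` feature enabled at the top of the crate. A stand-alone sketch of the same pattern with invented types (not the compiler's API):

use std::ops::ControlFlow;

trait Walker {
    // Each implementation chooses what a `Break` carries out of the walk.
    type BreakTy;

    fn visit(&mut self, item: &str) -> ControlFlow<Self::BreakTy>;
}

struct FindLong;

impl Walker for FindLong {
    type BreakTy = usize; // break out with the offending length

    fn visit(&mut self, item: &str) -> ControlFlow<usize> {
        if item.len() > 8 { ControlFlow::Break(item.len()) } else { ControlFlow::Continue(()) }
    }
}

// Mirrors `visit_predicates` above: `try_for_each` stops at and returns the first `Break`.
fn walk_all<W: Walker>(walker: &mut W, items: &[&str]) -> ControlFlow<W::BreakTy> {
    items.iter().try_for_each(|item| walker.visit(item))
}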
match *ty.kind() { @@ -283,7 +294,7 @@ impl<'a, 'tcx, VL: VisibilityLike> DefIdVisitor<'tcx> for FindMin<'a, 'tcx, VL> def_id: DefId, _kind: &str, _descr: &dyn fmt::Display, - ) -> ControlFlow<()> { + ) -> ControlFlow { self.min = VL::new_min(self, def_id); ControlFlow::CONTINUE } @@ -902,7 +913,7 @@ impl DefIdVisitor<'tcx> for ReachEverythingInTheInterfaceVisitor<'_, 'tcx> { def_id: DefId, _kind: &str, _descr: &dyn fmt::Display, - ) -> ControlFlow<()> { + ) -> ControlFlow { if let Some(def_id) = def_id.as_local() { if let (ty::Visibility::Public, _) | (_, Some(AccessLevel::ReachableFromImplTrait)) = (self.tcx().visibility(def_id.to_def_id()), self.access_level) @@ -1299,7 +1310,7 @@ impl DefIdVisitor<'tcx> for TypePrivacyVisitor<'tcx> { def_id: DefId, kind: &str, descr: &dyn fmt::Display, - ) -> ControlFlow<()> { + ) -> ControlFlow { if self.check_def_id(def_id, kind, descr) { ControlFlow::BREAK } else { @@ -1799,7 +1810,7 @@ impl DefIdVisitor<'tcx> for SearchInterfaceForPrivateItemsVisitor<'tcx> { def_id: DefId, kind: &str, descr: &dyn fmt::Display, - ) -> ControlFlow<()> { + ) -> ControlFlow { if self.check_def_id(def_id, kind, descr) { ControlFlow::BREAK } else { diff --git a/compiler/rustc_query_system/src/dep_graph/dep_node.rs b/compiler/rustc_query_system/src/dep_graph/dep_node.rs index 7808a28dff0..3d9e739cd28 100644 --- a/compiler/rustc_query_system/src/dep_graph/dep_node.rs +++ b/compiler/rustc_query_system/src/dep_graph/dep_node.rs @@ -44,7 +44,7 @@ use super::{DepContext, DepKind}; -use rustc_data_structures::fingerprint::Fingerprint; +use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use std::fmt; @@ -53,7 +53,7 @@ use std::hash::Hash; #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)] pub struct DepNode { pub kind: K, - pub hash: Fingerprint, + pub hash: PackedFingerprint, } impl DepNode { @@ -62,7 +62,7 @@ impl DepNode { /// does not require any parameters. pub fn new_no_params(kind: K) -> DepNode { debug_assert!(!kind.has_params()); - DepNode { kind, hash: Fingerprint::ZERO } + DepNode { kind, hash: Fingerprint::ZERO.into() } } pub fn construct(tcx: Ctxt, kind: K, arg: &Key) -> DepNode @@ -71,7 +71,7 @@ impl DepNode { Key: DepNodeParams, { let hash = arg.to_fingerprint(tcx); - let dep_node = DepNode { kind, hash }; + let dep_node = DepNode { kind, hash: hash.into() }; #[cfg(debug_assertions)] { diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index d9b687c48af..617ec84ae71 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -976,7 +976,7 @@ impl CurrentDepGraph { // Fingerprint::combine() is faster than sending Fingerprint // through the StableHasher (at least as long as StableHasher // is so slow). - hash: self.anon_id_seed.combine(hasher.finish()), + hash: self.anon_id_seed.combine(hasher.finish()).into(), }; self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO) diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs index c1d3210b617..5fed500390b 100644 --- a/compiler/rustc_query_system/src/query/job.rs +++ b/compiler/rustc_query_system/src/query/job.rs @@ -33,11 +33,11 @@ pub struct QueryInfo { pub(crate) type QueryMap = FxHashMap, QueryJobInfo>; -/// A value uniquely identifiying an active query job within a shard in the query cache. 
+/// A value uniquely identifying an active query job within a shard in the query cache. #[derive(Copy, Clone, Eq, PartialEq, Hash)] pub struct QueryShardJobId(pub NonZeroU32); -/// A value uniquely identifiying an active query job. +/// A value uniquely identifying an active query job. #[derive(Copy, Clone, Eq, PartialEq, Hash)] pub struct QueryJobId { /// Which job within a shard is this @@ -536,7 +536,7 @@ fn remove_cycle( }; // We unwrap `waiter` here since there must always be one - // edge which is resumeable / waited using a query latch + // edge which is resumable / waited using a query latch let (waitee_query, waiter_idx) = waiter.unwrap(); // Extract the waiter we want to resume diff --git a/compiler/rustc_resolve/src/build_reduced_graph.rs b/compiler/rustc_resolve/src/build_reduced_graph.rs index 83016ed36a7..493b9f15271 100644 --- a/compiler/rustc_resolve/src/build_reduced_graph.rs +++ b/compiler/rustc_resolve/src/build_reduced_graph.rs @@ -7,7 +7,7 @@ use crate::def_collector::collect_definitions; use crate::imports::{Import, ImportKind}; -use crate::macros::{MacroRulesBinding, MacroRulesScope}; +use crate::macros::{MacroRulesBinding, MacroRulesScope, MacroRulesScopeRef}; use crate::Namespace::{self, MacroNS, TypeNS, ValueNS}; use crate::{CrateLint, Determinacy, PathResult, ResolutionError, VisResolutionError}; use crate::{ @@ -209,7 +209,7 @@ impl<'a> Resolver<'a> { &mut self, fragment: &AstFragment, parent_scope: ParentScope<'a>, - ) -> MacroRulesScope<'a> { + ) -> MacroRulesScopeRef<'a> { collect_definitions(self, fragment, parent_scope.expansion); let mut visitor = BuildReducedGraphVisitor { r: self, parent_scope }; fragment.visit_with(&mut visitor); @@ -220,7 +220,8 @@ impl<'a> Resolver<'a> { let def_id = module.def_id().expect("unpopulated module without a def-id"); for child in self.cstore().item_children_untracked(def_id, self.session) { let child = child.map_id(|_| panic!("unexpected id")); - BuildReducedGraphVisitor { r: self, parent_scope: ParentScope::module(module) } + let parent_scope = ParentScope::module(module, self); + BuildReducedGraphVisitor { r: self, parent_scope } .build_reduced_graph_for_external_crate_res(child); } } @@ -1154,7 +1155,7 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> { false } - fn visit_invoc(&mut self, id: NodeId) -> MacroRulesScope<'a> { + fn visit_invoc(&mut self, id: NodeId) -> MacroRulesScopeRef<'a> { let invoc_id = id.placeholder_to_expn_id(); self.parent_scope.module.unexpanded_invocations.borrow_mut().insert(invoc_id); @@ -1162,7 +1163,7 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> { let old_parent_scope = self.r.invocation_parent_scopes.insert(invoc_id, self.parent_scope); assert!(old_parent_scope.is_none(), "invocation data is reset for an invocation"); - MacroRulesScope::Invocation(invoc_id) + self.r.arenas.alloc_macro_rules_scope(MacroRulesScope::Invocation(invoc_id)) } fn proc_macro_stub(&self, item: &ast::Item) -> Option<(MacroKind, Ident, Span)> { @@ -1196,7 +1197,7 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> { } } - fn define_macro(&mut self, item: &ast::Item) -> MacroRulesScope<'a> { + fn define_macro(&mut self, item: &ast::Item) -> MacroRulesScopeRef<'a> { let parent_scope = self.parent_scope; let expansion = parent_scope.expansion; let def_id = self.r.local_def_id(item.id); @@ -1239,11 +1240,13 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> { self.insert_unused_macro(ident, def_id, item.id, span); } self.r.visibilities.insert(def_id, vis); - 
MacroRulesScope::Binding(self.r.arenas.alloc_macro_rules_binding(MacroRulesBinding { - parent_macro_rules_scope: parent_scope.macro_rules, - binding, - ident, - })) + self.r.arenas.alloc_macro_rules_scope(MacroRulesScope::Binding( + self.r.arenas.alloc_macro_rules_binding(MacroRulesBinding { + parent_macro_rules_scope: parent_scope.macro_rules, + binding, + ident, + }), + )) } else { let module = parent_scope.module; let vis = match item.kind { diff --git a/compiler/rustc_resolve/src/diagnostics.rs b/compiler/rustc_resolve/src/diagnostics.rs index 5c7a7c1d0ae..2cca1a6ee59 100644 --- a/compiler/rustc_resolve/src/diagnostics.rs +++ b/compiler/rustc_resolve/src/diagnostics.rs @@ -609,7 +609,7 @@ impl<'a> Resolver<'a> { } } Scope::DeriveHelpersCompat => { - let res = Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper); + let res = Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat); if filter_fn(res) { for derive in parent_scope.derives { let parent_scope = &ParentScope { derives: &[], ..*parent_scope }; @@ -630,7 +630,7 @@ impl<'a> Resolver<'a> { } } Scope::MacroRules(macro_rules_scope) => { - if let MacroRulesScope::Binding(macro_rules_binding) = macro_rules_scope { + if let MacroRulesScope::Binding(macro_rules_binding) = macro_rules_scope.get() { let res = macro_rules_binding.binding.res(); if filter_fn(res) { suggestions diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs index 2337f0d09ab..f156caf23ba 100644 --- a/compiler/rustc_resolve/src/late.rs +++ b/compiler/rustc_resolve/src/late.rs @@ -298,9 +298,7 @@ impl<'a> PathSource<'a> { _, ) | Res::SelfCtor(..)), - PathSource::TupleStruct(..) => { - matches!(res, Res::Def(DefKind::Ctor(_, CtorKind::Fn), _) | Res::SelfCtor(..)) - } + PathSource::TupleStruct(..) => res.expected_in_tuple_struct_pat(), PathSource::Struct => matches!(res, Res::Def( DefKind::Struct | DefKind::Union @@ -677,7 +675,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> { // During late resolution we only track the module component of the parent scope, // although it may be useful to track other components as well for diagnostics. 
let graph_root = resolver.graph_root; - let parent_scope = ParentScope::module(graph_root); + let parent_scope = ParentScope::module(graph_root, resolver); let start_rib_kind = ModuleRibKind(graph_root); LateResolutionVisitor { r: resolver, diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs index 00e6d5ca381..2473436a916 100644 --- a/compiler/rustc_resolve/src/late/diagnostics.rs +++ b/compiler/rustc_resolve/src/late/diagnostics.rs @@ -16,7 +16,6 @@ use rustc_hir::def::Namespace::{self, *}; use rustc_hir::def::{self, CtorKind, CtorOf, DefKind}; use rustc_hir::def_id::{DefId, CRATE_DEF_INDEX, LOCAL_CRATE}; use rustc_hir::PrimTy; -use rustc_session::config::nightly_options; use rustc_session::parse::feature_err; use rustc_span::hygiene::MacroKind; use rustc_span::symbol::{kw, sym, Ident, Symbol}; @@ -890,7 +889,7 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> { } (Res::Def(DefKind::TyAlias, def_id), PathSource::Trait(_)) => { err.span_label(span, "type aliases cannot be used as traits"); - if nightly_options::is_nightly_build() { + if self.r.session.is_nightly_build() { let msg = "you might have meant to use `#![feature(trait_alias)]` instead of a \ `type` alias"; if let Some(span) = self.def_span(def_id) { @@ -1675,7 +1674,7 @@ impl<'tcx> LifetimeContext<'_, 'tcx> { _ => {} } } - if nightly_options::is_nightly_build() + if self.tcx.sess.is_nightly_build() && !self.tcx.features().in_band_lifetimes && suggests_in_band { diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs index f1e30470f8e..d18335ef2e6 100644 --- a/compiler/rustc_resolve/src/lib.rs +++ b/compiler/rustc_resolve/src/lib.rs @@ -65,7 +65,7 @@ use diagnostics::{extend_span_to_previous_binding, find_span_of_binding_until_ne use diagnostics::{ImportSuggestion, LabelSuggestion, Suggestion}; use imports::{Import, ImportKind, ImportResolver, NameResolution}; use late::{HasGenericParams, PathSource, Rib, RibKind::*}; -use macros::{MacroRulesBinding, MacroRulesScope}; +use macros::{MacroRulesBinding, MacroRulesScope, MacroRulesScopeRef}; type Res = def::Res; @@ -101,7 +101,7 @@ impl Determinacy { enum Scope<'a> { DeriveHelpers(ExpnId), DeriveHelpersCompat, - MacroRules(MacroRulesScope<'a>), + MacroRules(MacroRulesScopeRef<'a>), CrateRoot, Module(Module<'a>), RegisteredAttrs, @@ -134,18 +134,18 @@ enum ScopeSet { pub struct ParentScope<'a> { module: Module<'a>, expansion: ExpnId, - macro_rules: MacroRulesScope<'a>, + macro_rules: MacroRulesScopeRef<'a>, derives: &'a [ast::Path], } impl<'a> ParentScope<'a> { /// Creates a parent scope with the passed argument used as the module scope component, /// and other scope components set to default empty values. - pub fn module(module: Module<'a>) -> ParentScope<'a> { + pub fn module(module: Module<'a>, resolver: &Resolver<'a>) -> ParentScope<'a> { ParentScope { module, expansion: ExpnId::root(), - macro_rules: MacroRulesScope::Empty, + macro_rules: resolver.arenas.alloc_macro_rules_scope(MacroRulesScope::Empty), derives: &[], } } @@ -975,7 +975,7 @@ pub struct Resolver<'a> { invocation_parent_scopes: FxHashMap>, /// `macro_rules` scopes *produced* by expanding the macro invocations, /// include all the `macro_rules` items and other invocations generated by them. - output_macro_rules_scopes: FxHashMap>, + output_macro_rules_scopes: FxHashMap>, /// Helper attributes that are in scope for the given expansion. 
helper_attrs: FxHashMap>, @@ -1044,6 +1044,9 @@ impl<'a> ResolverArenas<'a> { fn alloc_name_resolution(&'a self) -> &'a RefCell> { self.name_resolutions.alloc(Default::default()) } + fn alloc_macro_rules_scope(&'a self, scope: MacroRulesScope<'a>) -> MacroRulesScopeRef<'a> { + PtrKey(self.dropless.alloc(Cell::new(scope))) + } fn alloc_macro_rules_binding( &'a self, binding: MacroRulesBinding<'a>, @@ -1231,14 +1234,11 @@ impl<'a> Resolver<'a> { let (registered_attrs, registered_tools) = macros::registered_attrs_and_tools(session, &krate.attrs); - let mut invocation_parent_scopes = FxHashMap::default(); - invocation_parent_scopes.insert(ExpnId::root(), ParentScope::module(graph_root)); - let features = session.features_untracked(); let non_macro_attr = |mark_used| Lrc::new(SyntaxExtension::non_macro_attr(mark_used, session.edition())); - Resolver { + let mut resolver = Resolver { session, definitions, @@ -1305,7 +1305,7 @@ impl<'a> Resolver<'a> { dummy_ext_bang: Lrc::new(SyntaxExtension::dummy_bang(session.edition())), dummy_ext_derive: Lrc::new(SyntaxExtension::dummy_derive(session.edition())), non_macro_attrs: [non_macro_attr(false), non_macro_attr(true)], - invocation_parent_scopes, + invocation_parent_scopes: Default::default(), output_macro_rules_scopes: Default::default(), helper_attrs: Default::default(), local_macro_def_scopes: FxHashMap::default(), @@ -1333,7 +1333,12 @@ impl<'a> Resolver<'a> { invocation_parents, next_disambiguator: Default::default(), trait_impl_items: Default::default(), - } + }; + + let root_parent_scope = ParentScope::module(graph_root, &resolver); + resolver.invocation_parent_scopes.insert(ExpnId::root(), root_parent_scope); + + resolver } pub fn next_node_id(&mut self) -> NodeId { @@ -1671,7 +1676,20 @@ impl<'a> Resolver<'a> { !(expn_id == parent_scope.expansion && macro_kind == Some(MacroKind::Derive)) } Scope::DeriveHelpersCompat => true, - Scope::MacroRules(..) => true, + Scope::MacroRules(macro_rules_scope) => { + // Use "path compression" on `macro_rules` scope chains. This is an optimization + // used to avoid long scope chains, see the comments on `MacroRulesScopeRef`. + // As another consequence of this optimization visitors never observe invocation + // scopes for macros that were already expanded. + while let MacroRulesScope::Invocation(invoc_id) = macro_rules_scope.get() { + if let Some(next_scope) = self.output_macro_rules_scopes.get(&invoc_id) { + macro_rules_scope.set(next_scope.get()); + } else { + break; + } + } + true + } Scope::CrateRoot => true, Scope::Module(..) => true, Scope::RegisteredAttrs => use_prelude, @@ -1703,15 +1721,13 @@ impl<'a> Resolver<'a> { } Scope::DeriveHelpers(..) 
=> Scope::DeriveHelpersCompat, Scope::DeriveHelpersCompat => Scope::MacroRules(parent_scope.macro_rules), - Scope::MacroRules(macro_rules_scope) => match macro_rules_scope { + Scope::MacroRules(macro_rules_scope) => match macro_rules_scope.get() { MacroRulesScope::Binding(binding) => { Scope::MacroRules(binding.parent_macro_rules_scope) } - MacroRulesScope::Invocation(invoc_id) => Scope::MacroRules( - self.output_macro_rules_scopes.get(&invoc_id).cloned().unwrap_or_else( - || self.invocation_parent_scopes[&invoc_id].macro_rules, - ), - ), + MacroRulesScope::Invocation(invoc_id) => { + Scope::MacroRules(self.invocation_parent_scopes[&invoc_id].macro_rules) + } MacroRulesScope::Empty => Scope::Module(module), }, Scope::CrateRoot => match ns { @@ -3200,7 +3216,7 @@ impl<'a> Resolver<'a> { } }; let module = self.get_module(module_id); - let parent_scope = &ParentScope::module(module); + let parent_scope = &ParentScope::module(module, self); let res = self.resolve_ast_path(&path, ns, parent_scope).map_err(|_| ())?; Ok((path, res)) } diff --git a/compiler/rustc_resolve/src/macros.rs b/compiler/rustc_resolve/src/macros.rs index b5b281b93bc..c8dbe685128 100644 --- a/compiler/rustc_resolve/src/macros.rs +++ b/compiler/rustc_resolve/src/macros.rs @@ -11,24 +11,26 @@ use rustc_ast_lowering::ResolverAstLowering; use rustc_ast_pretty::pprust; use rustc_attr::StabilityLevel; use rustc_data_structures::fx::FxHashSet; +use rustc_data_structures::ptr_key::PtrKey; +use rustc_data_structures::sync::Lrc; use rustc_errors::struct_span_err; use rustc_expand::base::{Indeterminate, InvocationRes, ResolverExpand, SyntaxExtension}; use rustc_expand::compile_declarative_macro; -use rustc_expand::expand::{AstFragment, AstFragmentKind, Invocation, InvocationKind}; +use rustc_expand::expand::{AstFragment, Invocation, InvocationKind}; use rustc_feature::is_builtin_attr_name; use rustc_hir::def::{self, DefKind, NonMacroAttrKind}; use rustc_hir::def_id; use rustc_middle::middle::stability; use rustc_middle::ty; -use rustc_session::lint::builtin::UNUSED_MACROS; +use rustc_session::lint::builtin::{SOFT_UNSTABLE, UNUSED_MACROS}; +use rustc_session::parse::feature_err; use rustc_session::Session; use rustc_span::edition::Edition; use rustc_span::hygiene::{self, ExpnData, ExpnId, ExpnKind}; +use rustc_span::hygiene::{AstPass, MacroKind}; use rustc_span::symbol::{kw, sym, Ident, Symbol}; use rustc_span::{Span, DUMMY_SP}; - -use rustc_data_structures::sync::Lrc; -use rustc_span::hygiene::{AstPass, MacroKind}; +use std::cell::Cell; use std::{mem, ptr}; type Res = def::Res; @@ -39,7 +41,7 @@ type Res = def::Res; pub struct MacroRulesBinding<'a> { crate binding: &'a NameBinding<'a>, /// `macro_rules` scope into which the `macro_rules` item was planted. - crate parent_macro_rules_scope: MacroRulesScope<'a>, + crate parent_macro_rules_scope: MacroRulesScopeRef<'a>, crate ident: Ident, } @@ -59,6 +61,14 @@ pub enum MacroRulesScope<'a> { Invocation(ExpnId), } +/// `macro_rules!` scopes are always kept by reference and inside a cell. +/// The reason is that we update scopes with value `MacroRulesScope::Invocation(invoc_id)` +/// in-place after `invoc_id` gets expanded. +/// This helps to avoid uncontrollable growth of `macro_rules!` scope chains, +/// which usually grow lineraly with the number of macro invocations +/// in a module (including derives) and hurt performance. 
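The `MacroRulesScopeRef` comment above describes keeping each scope behind a `Cell` so that `Invocation` links can be rewritten in place once the invocation has been expanded, and then compressing chains of such links much like path compression in a union-find structure. A self-contained toy version of that idea (invented types, not the resolver's):

use std::cell::Cell;

#[derive(Copy, Clone)]
enum Scope<'a> {
    Empty,
    // Stand-in for "invocation whose output scope is already known":
    // a link to another scope cell that can be followed and collapsed.
    Link(&'a Cell<Scope<'a>>),
}

// Walk to the end of a chain of `Link`s, then rewrite every visited cell to
// point directly at the final scope so later walks are O(1).
fn compress<'a>(start: &'a Cell<Scope<'a>>) -> Scope<'a> {
    let mut cur = start;
    while let Scope::Link(next) = cur.get() {
        cur = next;
    }
    let root = cur.get();

    let mut walk = start;
    while let Scope::Link(next) = walk.get() {
        walk.set(root);
        walk = next;
    }
    root
}

Building a chain a -> b -> Empty and calling `compress(&a)` leaves both `a` and `b` pointing straight at `Empty`, which is the effect the resolver is after for long macro-expansion chains.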
+pub(crate) type MacroRulesScopeRef<'a> = PtrKey<'a, Cell>>; + // Macro namespace is separated into two sub-namespaces, one for bang macros and // one for attribute-like macros (attributes, derives). // We ignore resolutions from one sub-namespace when searching names in scope for another. @@ -231,15 +241,20 @@ impl<'a> ResolverExpand for Resolver<'a> { } }; - let (path, kind, derives, after_derive) = match invoc.kind { + let (path, kind, inner_attr, derives, after_derive) = match invoc.kind { InvocationKind::Attr { ref attr, ref derives, after_derive, .. } => ( &attr.get_normal_item().path, MacroKind::Attr, + attr.style == ast::AttrStyle::Inner, self.arenas.alloc_ast_paths(derives), after_derive, ), - InvocationKind::Bang { ref mac, .. } => (&mac.path, MacroKind::Bang, &[][..], false), - InvocationKind::Derive { ref path, .. } => (path, MacroKind::Derive, &[][..], false), + InvocationKind::Bang { ref mac, .. } => { + (&mac.path, MacroKind::Bang, false, &[][..], false) + } + InvocationKind::Derive { ref path, .. } => { + (path, MacroKind::Derive, false, &[][..], false) + } InvocationKind::DeriveContainer { ref derives, .. } => { // Block expansion of the container until we resolve all derives in it. // This is required for two reasons: @@ -271,7 +286,7 @@ impl<'a> ResolverExpand for Resolver<'a> { ext.helper_attrs.iter().map(|name| Ident::new(*name, span)), ); if ext.is_derive_copy { - self.add_derive_copy(invoc_id); + self.containers_deriving_copy.insert(invoc_id); } ext } @@ -289,8 +304,17 @@ impl<'a> ResolverExpand for Resolver<'a> { // Derives are not included when `invocations` are collected, so we have to add them here. let parent_scope = &ParentScope { derives, ..parent_scope }; + let require_inert = !invoc.fragment_kind.supports_macro_expansion(); let node_id = self.lint_node_id(eager_expansion_root); - let (ext, res) = self.smart_resolve_macro_path(path, kind, parent_scope, node_id, force)?; + let (ext, res) = self.smart_resolve_macro_path( + path, + kind, + require_inert, + inner_attr, + parent_scope, + node_id, + force, + )?; let span = invoc.span(); invoc_id.set_expn_data(ext.expn_data( @@ -308,29 +332,6 @@ impl<'a> ResolverExpand for Resolver<'a> { self.definitions.add_parent_module_of_macro_def(invoc_id, normal_module_def_id); } - match invoc.fragment_kind { - AstFragmentKind::Arms - | AstFragmentKind::Fields - | AstFragmentKind::FieldPats - | AstFragmentKind::GenericParams - | AstFragmentKind::Params - | AstFragmentKind::StructFields - | AstFragmentKind::Variants => { - if let Res::Def(..) = res { - self.session.span_err( - span, - &format!( - "expected an inert attribute, found {} {}", - res.article(), - res.descr() - ), - ); - return Ok(InvocationRes::Single(self.dummy_ext(kind))); - } - } - _ => {} - } - Ok(InvocationRes::Single(ext)) } @@ -350,10 +351,6 @@ impl<'a> ResolverExpand for Resolver<'a> { self.containers_deriving_copy.contains(&expn_id) } - fn add_derive_copy(&mut self, expn_id: ExpnId) { - self.containers_deriving_copy.insert(expn_id); - } - // The function that implements the resolution logic of `#[cfg_accessible(path)]`. // Returns true if the path can certainly be resolved in one of three namespaces, // returns false if the path certainly cannot be resolved in any of the three namespaces. @@ -393,10 +390,14 @@ impl<'a> ResolverExpand for Resolver<'a> { impl<'a> Resolver<'a> { /// Resolve macro path with error reporting and recovery. 
+ /// Uses dummy syntax extensions for unresolved macros or macros with unexpected resolutions + /// for better error recovery. fn smart_resolve_macro_path( &mut self, path: &ast::Path, kind: MacroKind, + require_inert: bool, + inner_attr: bool, parent_scope: &ParentScope<'a>, node_id: NodeId, force: bool, @@ -404,7 +405,6 @@ impl<'a> Resolver<'a> { let (ext, res) = match self.resolve_macro_path(path, Some(kind), parent_scope, true, force) { Ok((Some(ext), res)) => (ext, res), - // Use dummy syntax extensions for unresolved macros for better recovery. Ok((None, res)) => (self.dummy_ext(kind), res), Err(Determinacy::Determined) => (self.dummy_ext(kind), Res::Err), Err(Determinacy::Undetermined) => return Err(Indeterminate), @@ -441,19 +441,42 @@ impl<'a> Resolver<'a> { self.check_stability_and_deprecation(&ext, path, node_id); - Ok(if ext.macro_kind() != kind { - let expected = kind.descr_expected(); + let unexpected_res = if ext.macro_kind() != kind { + Some((kind.article(), kind.descr_expected())) + } else if require_inert && matches!(res, Res::Def(..)) { + Some(("a", "non-macro attribute")) + } else { + None + }; + if let Some((article, expected)) = unexpected_res { let path_str = pprust::path_to_string(path); let msg = format!("expected {}, found {} `{}`", expected, res.descr(), path_str); self.session .struct_span_err(path.span, &msg) - .span_label(path.span, format!("not {} {}", kind.article(), expected)) + .span_label(path.span, format!("not {} {}", article, expected)) .emit(); - // Use dummy syntax extensions for unexpected macro kinds for better recovery. - (self.dummy_ext(kind), Res::Err) - } else { - (ext, res) - }) + return Ok((self.dummy_ext(kind), Res::Err)); + } + + // We are trying to avoid reporting this error if other related errors were reported. + if res != Res::Err + && inner_attr + && !self.session.features_untracked().custom_inner_attributes + { + let msg = match res { + Res::Def(..) => "inner macro attributes are unstable", + Res::NonMacroAttr(..) 
=> "custom inner attributes are unstable", + _ => unreachable!(), + }; + if path == &sym::test { + self.session.parse_sess.buffer_lint(SOFT_UNSTABLE, path.span, node_id, msg); + } else { + feature_err(&self.session.parse_sess, sym::custom_inner_attributes, path.span, msg) + .emit(); + } + } + + Ok((ext, res)) } pub fn resolve_macro_path( @@ -558,10 +581,9 @@ impl<'a> Resolver<'a> { struct Flags: u8 { const MACRO_RULES = 1 << 0; const MODULE = 1 << 1; - const DERIVE_HELPER_COMPAT = 1 << 2; - const MISC_SUGGEST_CRATE = 1 << 3; - const MISC_SUGGEST_SELF = 1 << 4; - const MISC_FROM_PRELUDE = 1 << 5; + const MISC_SUGGEST_CRATE = 1 << 2; + const MISC_SUGGEST_SELF = 1 << 3; + const MISC_FROM_PRELUDE = 1 << 4; } } @@ -636,14 +658,11 @@ impl<'a> Resolver<'a> { ) { Ok((Some(ext), _)) => { if ext.helper_attrs.contains(&ident.name) { - let binding = ( - Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper), - ty::Visibility::Public, + result = ok( + Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat), derive.span, - ExpnId::root(), - ) - .to_name_binding(this.arenas); - result = Ok((binding, Flags::DERIVE_HELPER_COMPAT)); + this.arenas, + ); break; } } @@ -655,17 +674,13 @@ impl<'a> Resolver<'a> { } result } - Scope::MacroRules(macro_rules_scope) => match macro_rules_scope { + Scope::MacroRules(macro_rules_scope) => match macro_rules_scope.get() { MacroRulesScope::Binding(macro_rules_binding) if ident == macro_rules_binding.ident => { Ok((macro_rules_binding.binding, Flags::MACRO_RULES)) } - MacroRulesScope::Invocation(invoc_id) - if !this.output_macro_rules_scopes.contains_key(&invoc_id) => - { - Err(Determinacy::Undetermined) - } + MacroRulesScope::Invocation(_) => Err(Determinacy::Undetermined), _ => Err(Determinacy::Determined), }, Scope::CrateRoot => { @@ -793,17 +808,15 @@ impl<'a> Resolver<'a> { let (res, innermost_res) = (binding.res(), innermost_binding.res()); if res != innermost_res { let builtin = Res::NonMacroAttr(NonMacroAttrKind::Builtin); - let is_derive_helper_compat = |res, flags: Flags| { - res == Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper) - && flags.contains(Flags::DERIVE_HELPER_COMPAT) - }; + let derive_helper_compat = + Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat); let ambiguity_error_kind = if is_import { Some(AmbiguityKind::Import) } else if innermost_res == builtin || res == builtin { Some(AmbiguityKind::BuiltinAttr) - } else if is_derive_helper_compat(innermost_res, innermost_flags) - || is_derive_helper_compat(res, flags) + } else if innermost_res == derive_helper_compat + || res == derive_helper_compat { Some(AmbiguityKind::DeriveHelper) } else if innermost_flags.contains(Flags::MACRO_RULES) @@ -1028,6 +1041,7 @@ impl<'a> Resolver<'a> { depr.suggestion, lint, span, + node_id, ); } } diff --git a/compiler/rustc_save_analysis/src/dump_visitor.rs b/compiler/rustc_save_analysis/src/dump_visitor.rs index dbb5e3cc9f0..40d60a8394b 100644 --- a/compiler/rustc_save_analysis/src/dump_visitor.rs +++ b/compiler/rustc_save_analysis/src/dump_visitor.rs @@ -816,7 +816,7 @@ impl<'tcx> DumpVisitor<'tcx> { path: &'tcx hir::QPath<'tcx>, fields: &'tcx [hir::Field<'tcx>], variant: &'tcx ty::VariantDef, - base: Option<&'tcx hir::Expr<'tcx>>, + rest: Option<&'tcx hir::Expr<'tcx>>, ) { if let Some(struct_lit_data) = self.save_ctxt.get_expr_data(ex) { if let hir::QPath::Resolved(_, path) = path { @@ -836,7 +836,9 @@ impl<'tcx> DumpVisitor<'tcx> { } } - walk_list!(self, visit_expr, base); + if let Some(base) = rest { + self.visit_expr(&base); + } } fn process_method_call( @@ 
-1399,7 +1401,7 @@ impl<'tcx> Visitor<'tcx> for DumpVisitor<'tcx> { debug!("visit_expr {:?}", ex.kind); self.process_macro_use(ex.span); match ex.kind { - hir::ExprKind::Struct(ref path, ref fields, ref base) => { + hir::ExprKind::Struct(ref path, ref fields, ref rest) => { let hir_expr = self.save_ctxt.tcx.hir().expect_expr(ex.hir_id); let adt = match self.save_ctxt.typeck_results().expr_ty_opt(&hir_expr) { Some(ty) if ty.ty_adt_def().is_some() => ty.ty_adt_def().unwrap(), @@ -1409,7 +1411,7 @@ impl<'tcx> Visitor<'tcx> for DumpVisitor<'tcx> { } }; let res = self.save_ctxt.get_path_res(hir_expr.hir_id); - self.process_struct_lit(ex, path, fields, adt.variant_of_res(res), *base) + self.process_struct_lit(ex, path, fields, adt.variant_of_res(res), *rest) } hir::ExprKind::MethodCall(ref seg, _, args, _) => { self.process_method_call(ex, seg, args) diff --git a/compiler/rustc_save_analysis/src/sig.rs b/compiler/rustc_save_analysis/src/sig.rs index 1bf8160e4c3..2f82d0546ba 100644 --- a/compiler/rustc_save_analysis/src/sig.rs +++ b/compiler/rustc_save_analysis/src/sig.rs @@ -21,7 +21,7 @@ // references. // // Signatures do not include visibility info. I'm not sure if this is a feature -// or an ommission (FIXME). +// or an omission (FIXME). // // FIXME where clauses need implementing, defs/refs in generics are mostly missing. @@ -677,7 +677,7 @@ impl<'hir> Sig for hir::Variant<'hir> { let mut text = self.ident.to_string(); match self.data { hir::VariantData::Struct(fields, r) => { - let id = parent_id.unwrap(); + let id = parent_id.ok_or("Missing id for Variant's parent")?; let name_def = SigElement { id: id_from_hir_id(id, scx), start: offset, diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs index f9ce5847dc4..9d943819e05 100644 --- a/compiler/rustc_session/src/config.rs +++ b/compiler/rustc_session/src/config.rs @@ -771,15 +771,15 @@ pub const fn default_lib_output() -> CrateType { } pub fn default_configuration(sess: &Session) -> CrateConfig { - let end = &sess.target.target_endian; + let end = &sess.target.endian; let arch = &sess.target.arch; let wordsz = sess.target.pointer_width.to_string(); - let os = &sess.target.target_os; - let env = &sess.target.target_env; - let vendor = &sess.target.target_vendor; + let os = &sess.target.os; + let env = &sess.target.env; + let vendor = &sess.target.vendor; let min_atomic_width = sess.target.min_atomic_width(); let max_atomic_width = sess.target.max_atomic_width(); - let atomic_cas = sess.target.options.atomic_cas; + let atomic_cas = sess.target.atomic_cas; let layout = TargetDataLayout::parse(&sess.target).unwrap_or_else(|err| { sess.fatal(&err); }); @@ -788,7 +788,7 @@ pub fn default_configuration(sess: &Session) -> CrateConfig { ret.reserve(6); // the minimum number of insertions // Target bindings. 
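Here and in the `rustc_session` hunks that follow, field access moves from `sess.target.options.*` (and `target_`-prefixed names such as `target_os`) to a flat `sess.target.*`. One way to get that ergonomics without duplicating every field is to forward access through `Deref`; a sketch of the pattern with simplified stand-in types (the real `Target`/`TargetOptions` live in `rustc_target::spec`, and the exact mechanism is not shown in this diff):

use std::ops::Deref;

struct TargetOptions {
    os: String,
    env: String,
    vendor: String,
}

struct Target {
    arch: String,
    pointer_width: u32,
    options: TargetOptions,
}

impl Deref for Target {
    type Target = TargetOptions;

    fn deref(&self) -> &TargetOptions {
        &self.options
    }
}

// Callers can now write `target.os` and friends directly.
fn is_musl(target: &Target) -> bool {
    target.env == "musl"
}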
ret.insert((sym::target_os, Some(Symbol::intern(os)))); - if let Some(ref fam) = sess.target.options.target_family { + if let Some(ref fam) = sess.target.os_family { ret.insert((sym::target_family, Some(Symbol::intern(fam)))); if fam == "windows" { ret.insert((sym::windows, None)); @@ -801,7 +801,7 @@ pub fn default_configuration(sess: &Session) -> CrateConfig { ret.insert((sym::target_pointer_width, Some(Symbol::intern(&wordsz)))); ret.insert((sym::target_env, Some(Symbol::intern(env)))); ret.insert((sym::target_vendor, Some(Symbol::intern(vendor)))); - if sess.target.options.has_elf_tls { + if sess.target.has_elf_tls { ret.insert((sym::target_thread_local, None)); } for &(i, align) in &[ @@ -829,6 +829,9 @@ pub fn default_configuration(sess: &Session) -> CrateConfig { } } + let panic_strategy = sess.panic_strategy(); + ret.insert((sym::panic, Some(panic_strategy.desc_symbol()))); + for s in sess.opts.debugging_opts.sanitizer { let symbol = Symbol::intern(&s.to_string()); ret.insert((sym::sanitize, Some(symbol))); @@ -1294,7 +1297,7 @@ fn parse_crate_edition(matches: &getopts::Matches) -> Edition { None => DEFAULT_EDITION, }; - if !edition.is_stable() && !nightly_options::is_nightly_build() { + if !edition.is_stable() && !nightly_options::match_is_nightly_build(matches) { early_error( ErrorOutputType::default(), &format!( @@ -1591,7 +1594,9 @@ fn parse_libs( ); } }; - if kind == NativeLibKind::StaticNoBundle && !nightly_options::is_nightly_build() { + if kind == NativeLibKind::StaticNoBundle + && !nightly_options::match_is_nightly_build(matches) + { early_error( error_format, "the library kind 'static-nobundle' is only \ @@ -1887,10 +1892,10 @@ pub fn build_session_options(matches: &getopts::Matches) -> Options { cg, error_format, externs, + unstable_features: UnstableFeatures::from_environment(crate_name.as_deref()), crate_name, alt_std_name: None, libs, - unstable_features: UnstableFeatures::from_environment(), debug_assertions, actually_rustdoc: false, trimmed_def_paths: TrimmedDefPaths::default(), @@ -2011,17 +2016,21 @@ pub mod nightly_options { use rustc_feature::UnstableFeatures; pub fn is_unstable_enabled(matches: &getopts::Matches) -> bool { - is_nightly_build() && matches.opt_strs("Z").iter().any(|x| *x == "unstable-options") + match_is_nightly_build(matches) + && matches.opt_strs("Z").iter().any(|x| *x == "unstable-options") + } + + pub fn match_is_nightly_build(matches: &getopts::Matches) -> bool { + is_nightly_build(matches.opt_str("crate-name").as_deref()) } - pub fn is_nightly_build() -> bool { - UnstableFeatures::from_environment().is_nightly_build() + pub fn is_nightly_build(krate: Option<&str>) -> bool { + UnstableFeatures::from_environment(krate).is_nightly_build() } pub fn check_nightly_options(matches: &getopts::Matches, flags: &[RustcOptGroup]) { let has_z_unstable_option = matches.opt_strs("Z").iter().any(|x| *x == "unstable-options"); - let really_allows_unstable_options = - UnstableFeatures::from_environment().is_nightly_build(); + let really_allows_unstable_options = match_is_nightly_build(matches); for opt in flags.iter() { if opt.stability == OptionStability::Stable { diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs index e5d9728aca8..be5024972a3 100644 --- a/compiler/rustc_session/src/options.rs +++ b/compiler/rustc_session/src/options.rs @@ -942,6 +942,10 @@ options! 
{DebuggingOptions, DebuggingSetter, basic_debugging_options, (default: no)"), incremental_verify_ich: bool = (false, parse_bool, [UNTRACKED], "verify incr. comp. hashes of green query instances (default: no)"), + inline_mir_threshold: usize = (50, parse_uint, [TRACKED], + "a default MIR inlining threshold (default: 50)"), + inline_mir_hint_threshold: usize = (100, parse_uint, [TRACKED], + "inlining threshold for functions with inline hint (default: 100)"), inline_in_all_cgus: Option = (None, parse_opt_bool, [TRACKED], "control whether `#[inline]` functions are in all CGUs"), input_stats: bool = (false, parse_bool, [UNTRACKED], diff --git a/compiler/rustc_session/src/output.rs b/compiler/rustc_session/src/output.rs index 130c3a06122..777eea3f68d 100644 --- a/compiler/rustc_session/src/output.rs +++ b/compiler/rustc_session/src/output.rs @@ -150,17 +150,15 @@ pub fn filename_for_input( match crate_type { CrateType::Rlib => outputs.out_directory.join(&format!("lib{}.rlib", libname)), CrateType::Cdylib | CrateType::ProcMacro | CrateType::Dylib => { - let (prefix, suffix) = - (&sess.target.options.dll_prefix, &sess.target.options.dll_suffix); + let (prefix, suffix) = (&sess.target.dll_prefix, &sess.target.dll_suffix); outputs.out_directory.join(&format!("{}{}{}", prefix, libname, suffix)) } CrateType::Staticlib => { - let (prefix, suffix) = - (&sess.target.options.staticlib_prefix, &sess.target.options.staticlib_suffix); + let (prefix, suffix) = (&sess.target.staticlib_prefix, &sess.target.staticlib_suffix); outputs.out_directory.join(&format!("{}{}{}", prefix, libname, suffix)) } CrateType::Executable => { - let suffix = &sess.target.options.exe_suffix; + let suffix = &sess.target.exe_suffix; let out_filename = outputs.path(OutputType::Exe); if suffix.is_empty() { out_filename } else { out_filename.with_extension(&suffix[1..]) } } @@ -177,29 +175,29 @@ pub fn filename_for_input( /// interaction with Rust code through static library is the only /// option for now pub fn default_output_for_target(sess: &Session) -> CrateType { - if !sess.target.options.executables { CrateType::Staticlib } else { CrateType::Executable } + if !sess.target.executables { CrateType::Staticlib } else { CrateType::Executable } } /// Checks if target supports crate_type as output pub fn invalid_output_for_target(sess: &Session, crate_type: CrateType) -> bool { match crate_type { CrateType::Cdylib | CrateType::Dylib | CrateType::ProcMacro => { - if !sess.target.options.dynamic_linking { + if !sess.target.dynamic_linking { return true; } - if sess.crt_static(Some(crate_type)) && !sess.target.options.crt_static_allows_dylibs { + if sess.crt_static(Some(crate_type)) && !sess.target.crt_static_allows_dylibs { return true; } } _ => {} } - if sess.target.options.only_cdylib { + if sess.target.only_cdylib { match crate_type { CrateType::ProcMacro | CrateType::Dylib => return true, _ => {} } } - if !sess.target.options.executables && crate_type == CrateType::Executable { + if !sess.target.executables && crate_type == CrateType::Executable { return true; } diff --git a/compiler/rustc_session/src/parse.rs b/compiler/rustc_session/src/parse.rs index 6f10d0c4b89..66c3738fb5b 100644 --- a/compiler/rustc_session/src/parse.rs +++ b/compiler/rustc_session/src/parse.rs @@ -4,7 +4,7 @@ use crate::lint::{BufferedEarlyLint, BuiltinLintDiagnostics, Lint, LintId}; use rustc_ast::node_id::NodeId; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; -use rustc_data_structures::sync::{Lock, Lrc, OnceCell}; +use 
rustc_data_structures::sync::{Lock, Lrc}; use rustc_errors::{emitter::SilentEmitter, ColorConfig, Handler}; use rustc_errors::{error_code, Applicability, DiagnosticBuilder}; use rustc_feature::{find_feature_issue, GateIssue, UnstableFeatures}; @@ -129,7 +129,6 @@ pub struct ParseSess { /// operation token that followed it, but that the parser cannot identify without further /// analysis. pub ambiguous_block_expr_parse: Lock>, - pub injected_crate_name: OnceCell, pub gated_spans: GatedSpans, pub symbol_gallery: SymbolGallery, /// The parser has reached `Eof` due to an unclosed brace. Used to silence unnecessary errors. @@ -150,7 +149,7 @@ impl ParseSess { pub fn with_span_handler(handler: Handler, source_map: Lrc) -> Self { Self { span_diagnostic: handler, - unstable_features: UnstableFeatures::from_environment(), + unstable_features: UnstableFeatures::from_environment(None), config: FxHashSet::default(), edition: ExpnId::root().expn_data().edition, raw_identifier_spans: Lock::new(Vec::new()), @@ -158,7 +157,6 @@ impl ParseSess { source_map, buffered_lints: Lock::new(vec![]), ambiguous_block_expr_parse: Lock::new(FxHashMap::default()), - injected_crate_name: OnceCell::new(), gated_spans: GatedSpans::default(), symbol_gallery: SymbolGallery::default(), reached_eof: Lock::new(false), diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs index 0b7c35a8afd..419d1447764 100644 --- a/compiler/rustc_session/src/session.rs +++ b/compiler/rustc_session/src/session.rs @@ -663,7 +663,7 @@ impl Session { /// Calculates the flavor of LTO to use for this compilation. pub fn lto(&self) -> config::Lto { // If our target has codegen requirements ignore the command line - if self.target.options.requires_lto { + if self.target.requires_lto { return config::Lto::Fat; } @@ -731,7 +731,7 @@ impl Session { /// Returns the panic strategy for this compile session. If the user explicitly selected one /// using '-C panic', use that, otherwise use the panic strategy defined by the target. pub fn panic_strategy(&self) -> PanicStrategy { - self.opts.cg.panic.unwrap_or(self.target.options.panic_strategy) + self.opts.cg.panic.unwrap_or(self.target.panic_strategy) } pub fn fewer_names(&self) -> bool { let more_names = self.opts.output_types.contains_key(&OutputType::LlvmAssembly) @@ -745,6 +745,9 @@ impl Session { pub fn unstable_options(&self) -> bool { self.opts.debugging_opts.unstable_options } + pub fn is_nightly_build(&self) -> bool { + self.opts.unstable_features.is_nightly_build() + } pub fn overflow_checks(&self) -> bool { self.opts .cg @@ -755,9 +758,9 @@ impl Session { /// Check whether this compile session and crate type use static crt. pub fn crt_static(&self, crate_type: Option) -> bool { - if !self.target.options.crt_static_respected { + if !self.target.crt_static_respected { // If the target does not opt in to crt-static support, use its default. - return self.target.options.crt_static_default; + return self.target.crt_static_default; } let requested_features = self.opts.cg.target_feature.split(','); @@ -774,20 +777,20 @@ impl Session { // We can't check `#![crate_type = "proc-macro"]` here. 
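Among the `default_configuration` changes above, a `panic` cfg key carrying the active panic strategy is now injected alongside the target cfgs. From the user's side that enables conditions like the following sketch (gated behind `#![feature(cfg_panic)]` when introduced and only stabilized in a later release):

// Pick a code path depending on how the crate is compiled (-C panic=abort / -C panic=unwind).
fn panic_strategy() -> &'static str {
    if cfg!(panic = "abort") { "abort" } else { "unwind" }
}

#[cfg(panic = "unwind")]
fn catch_and_log<F: FnOnce() + std::panic::UnwindSafe>(f: F) {
    if std::panic::catch_unwind(f).is_err() {
        eprintln!("caught a panic");
    }
}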
false } else { - self.target.options.crt_static_default + self.target.crt_static_default } } pub fn relocation_model(&self) -> RelocModel { - self.opts.cg.relocation_model.unwrap_or(self.target.options.relocation_model) + self.opts.cg.relocation_model.unwrap_or(self.target.relocation_model) } pub fn code_model(&self) -> Option { - self.opts.cg.code_model.or(self.target.options.code_model) + self.opts.cg.code_model.or(self.target.code_model) } pub fn tls_model(&self) -> TlsModel { - self.opts.debugging_opts.tls_model.unwrap_or(self.target.options.tls_model) + self.opts.debugging_opts.tls_model.unwrap_or(self.target.tls_model) } pub fn must_not_eliminate_frame_pointers(&self) -> bool { @@ -798,7 +801,7 @@ impl Session { } else if let Some(x) = self.opts.cg.force_frame_pointers { x } else { - !self.target.options.eliminate_frame_pointer + !self.target.eliminate_frame_pointer } } @@ -822,7 +825,7 @@ impl Session { // value, if it is provided, or disable them, if not. if self.panic_strategy() == PanicStrategy::Unwind { true - } else if self.target.options.requires_uwtable { + } else if self.target.requires_uwtable { true } else { self.opts.cg.force_unwind_tables.unwrap_or(false) @@ -993,7 +996,7 @@ impl Session { if let Some(n) = self.opts.cli_forced_codegen_units { return n; } - if let Some(n) = self.target.options.default_codegen_units { + if let Some(n) = self.target.default_codegen_units { return n as usize; } @@ -1078,11 +1081,11 @@ impl Session { pub fn needs_plt(&self) -> bool { // Check if the current target usually needs PLT to be enabled. // The user can use the command line flag to override it. - let needs_plt = self.target.options.needs_plt; + let needs_plt = self.target.needs_plt; let dbg_opts = &self.opts.debugging_opts; - let relro_level = dbg_opts.relro_level.unwrap_or(self.target.options.relro_level); + let relro_level = dbg_opts.relro_level.unwrap_or(self.target.relro_level); // Only enable this optimization by default if full relro is also enabled. // In this case, lazy binding was already unavailable, so nothing is lost. @@ -1106,7 +1109,7 @@ impl Session { match self.opts.cg.link_dead_code { Some(explicitly_set) => explicitly_set, None => { - self.opts.debugging_opts.instrument_coverage && !self.target.options.is_like_msvc + self.opts.debugging_opts.instrument_coverage && !self.target.is_like_msvc // Issue #76038: (rustc `-Clink-dead-code` causes MSVC linker to produce invalid // binaries when LLVM InstrProf counters are enabled). 
As described by this issue, // the "link dead code" option produces incorrect binaries when compiled and linked @@ -1305,9 +1308,9 @@ pub fn build_session( early_error(sopts.error_format, &format!("Error loading host specification: {}", e)) }); - let loader = file_loader.unwrap_or(Box::new(RealFileLoader)); + let loader = file_loader.unwrap_or_else(|| Box::new(RealFileLoader)); let hash_kind = sopts.debugging_opts.src_hash_algorithm.unwrap_or_else(|| { - if target_cfg.options.is_like_msvc { + if target_cfg.is_like_msvc { SourceFileHashAlgorithm::Sha1 } else { SourceFileHashAlgorithm::Md5 @@ -1417,11 +1420,8 @@ pub fn build_session( if candidate.join("library/std/src/lib.rs").is_file() { Some(candidate) } else { None } }; - let asm_arch = if target_cfg.options.allow_asm { - InlineAsmArch::from_str(&target_cfg.arch).ok() - } else { - None - }; + let asm_arch = + if target_cfg.allow_asm { InlineAsmArch::from_str(&target_cfg.arch).ok() } else { None }; let sess = Session { target: target_cfg, @@ -1487,7 +1487,7 @@ fn validate_commandline_args_with_session_available(sess: &Session) { // the `dllimport` attributes and `__imp_` symbols in that case. if sess.opts.cg.linker_plugin_lto.enabled() && sess.opts.cg.prefer_dynamic - && sess.target.options.is_like_windows + && sess.target.is_like_windows { sess.err( "Linker plugin based LTO is not supported together with \ @@ -1515,7 +1515,7 @@ fn validate_commandline_args_with_session_available(sess: &Session) { ); } - if sess.target.options.requires_uwtable && !include_uwtables { + if sess.target.requires_uwtable && !include_uwtables { sess.err( "target requires unwind tables, they cannot be disabled with \ `-C force-unwind-tables=no`.", @@ -1530,7 +1530,7 @@ fn validate_commandline_args_with_session_available(sess: &Session) { // We should only display this error if we're actually going to run PGO. // If we're just supposed to print out some data, don't show the error (#61002). if sess.opts.cg.profile_generate.enabled() - && sess.target.options.is_like_msvc + && sess.target.is_like_msvc && sess.panic_strategy() == PanicStrategy::Unwind && sess.opts.prints.iter().all(|&p| p == PrintRequest::NativeStaticLibs) { diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs index f52b64f4883..0926561f4c5 100644 --- a/compiler/rustc_span/src/lib.rs +++ b/compiler/rustc_span/src/lib.rs @@ -1894,16 +1894,37 @@ where return; } + let (_, line_hi, col_hi) = match ctx.byte_pos_to_line_and_col(span.hi) { + Some(pos) => pos, + None => { + Hash::hash(&TAG_INVALID_SPAN, hasher); + span.ctxt.hash_stable(ctx, hasher); + return; + } + }; + Hash::hash(&TAG_VALID_SPAN, hasher); // We truncate the stable ID hash and line and column numbers. The chances // of causing a collision this way should be minimal. Hash::hash(&(file_lo.name_hash as u64), hasher); - let col = (col_lo.0 as u64) & 0xFF; - let line = ((line_lo as u64) & 0xFF_FF_FF) << 8; - let len = ((span.hi - span.lo).0 as u64) << 32; - let line_col_len = col | line | len; - Hash::hash(&line_col_len, hasher); + // Hash both the length and the end location (line/column) of a span. If we + // hash only the length, for example, then two otherwise equal spans with + // different end locations will have the same hash. This can cause a problem + // during incremental compilation wherein a previous result for a query that + // depends on the end location of a span will be incorrectly reused when the + // end location of the span it depends on has changed (see issue #74890). 
A + // similar analysis applies if some query depends specifically on the length + // of the span, but we only hash the end location. So hash both. + + let col_lo_trunc = (col_lo.0 as u64) & 0xFF; + let line_lo_trunc = ((line_lo as u64) & 0xFF_FF_FF) << 8; + let col_hi_trunc = (col_hi.0 as u64) & 0xFF << 32; + let line_hi_trunc = ((line_hi as u64) & 0xFF_FF_FF) << 40; + let col_line = col_lo_trunc | line_lo_trunc | col_hi_trunc | line_hi_trunc; + let len = (span.hi - span.lo).0; + Hash::hash(&col_line, hasher); + Hash::hash(&len, hasher); span.ctxt.hash_stable(ctx, hasher); } } diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index 1de062ad5fa..ff5cc5aac42 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -267,6 +267,7 @@ symbols! { asm, assert, assert_inhabited, + assert_macro, assert_receiver_is_total_eq, assert_uninit_valid, assert_zero_valid, @@ -318,6 +319,7 @@ symbols! { call_mut, call_once, caller_location, + capture_disjoint_fields, cdylib, ceilf32, ceilf64, @@ -326,6 +328,7 @@ symbols! { cfg_attr, cfg_attr_multi, cfg_doctest, + cfg_panic, cfg_sanitize, cfg_target_feature, cfg_target_has_atomic, @@ -391,6 +394,7 @@ symbols! { copysignf64, core, core_intrinsics, + core_panic_macro, cosf32, cosf64, crate_id, @@ -414,6 +418,7 @@ symbols! { dead_code, dealloc, debug, + debug_assert_macro, debug_assertions, debug_struct, debug_trait, @@ -789,6 +794,7 @@ symbols! { panic_runtime, panic_str, panic_unwind, + panicking, param_attrs, parent_trait, partial_cmp, @@ -910,6 +916,7 @@ symbols! { rustc_args_required_const, rustc_attrs, rustc_builtin_macro, + rustc_capture_analysis, rustc_clean, rustc_const_stable, rustc_const_unstable, @@ -1063,6 +1070,7 @@ symbols! { staticlib, std, std_inject, + std_panic_macro, stmt, stmt_expr_attributes, stop_after_dataflow, diff --git a/compiler/rustc_symbol_mangling/src/legacy.rs b/compiler/rustc_symbol_mangling/src/legacy.rs index ac91fcf6293..eba8e1a0613 100644 --- a/compiler/rustc_symbol_mangling/src/legacy.rs +++ b/compiler/rustc_symbol_mangling/src/legacy.rs @@ -51,7 +51,7 @@ pub(super) fn mangle( // Erase regions because they may not be deterministic when hashed // and should not matter anyhow. 
- let instance_ty = tcx.erase_regions(&instance_ty); + let instance_ty = tcx.erase_regions(instance_ty); let hash = get_symbol_hash(tcx, instance, instance_ty, instantiating_crate); diff --git a/compiler/rustc_symbol_mangling/src/test.rs b/compiler/rustc_symbol_mangling/src/test.rs index 822a8352934..a28c8cac728 100644 --- a/compiler/rustc_symbol_mangling/src/test.rs +++ b/compiler/rustc_symbol_mangling/src/test.rs @@ -39,7 +39,7 @@ impl SymbolNamesTest<'tcx> { let def_id = def_id.to_def_id(); let instance = Instance::new( def_id, - tcx.erase_regions(&InternalSubsts::identity_for_item(tcx, def_id)), + tcx.erase_regions(InternalSubsts::identity_for_item(tcx, def_id)), ); let mangled = tcx.symbol_name(instance); tcx.sess.span_err(attr.span, &format!("symbol-name({})", mangled)); diff --git a/compiler/rustc_target/src/abi/call/mips64.rs b/compiler/rustc_target/src/abi/call/mips64.rs index 917dd104d14..a630c84142b 100644 --- a/compiler/rustc_target/src/abi/call/mips64.rs +++ b/compiler/rustc_target/src/abi/call/mips64.rs @@ -1,4 +1,4 @@ -use crate::abi::call::{ArgAbi, ArgAttribute, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform}; +use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform}; use crate::abi::{self, HasDataLayout, LayoutOf, Size, TyAndLayout, TyAndLayoutMethods}; fn extend_integer_width_mips(arg: &mut ArgAbi<'_, Ty>, bits: u64) { @@ -7,7 +7,7 @@ fn extend_integer_width_mips(arg: &mut ArgAbi<'_, Ty>, bits: u64) { if let abi::Int(i, signed) = scalar.value { if !signed && i.size().bits() == 32 { if let PassMode::Direct(ref mut attrs) = arg.mode { - attrs.set(ArgAttribute::SExt); + attrs.ext(ArgExtension::Sext); return; } } @@ -137,7 +137,7 @@ where let rest_size = size - Size::from_bytes(8) * prefix_index as u64; arg.cast_to(CastTarget { prefix, - prefix_chunk: Size::from_bytes(8), + prefix_chunk_size: Size::from_bytes(8), rest: Uniform { unit: Reg::i64(), total: rest_size }, }); } diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs index 507ae10877b..5de9a8dfa7a 100644 --- a/compiler/rustc_target/src/abi/call/mod.rs +++ b/compiler/rustc_target/src/abi/call/mod.rs @@ -36,9 +36,12 @@ pub enum PassMode { /// a single uniform or a pair of registers. Cast(CastTarget), /// Pass the argument indirectly via a hidden pointer. - /// The second value, if any, is for the extra data (vtable or length) + /// The `extra_attrs` value, if any, is for the extra data (vtable or length) /// which indicates that it refers to an unsized rvalue. - Indirect(ArgAttributes, Option), + /// `on_stack` defines that the value should be passed at a fixed + /// stack offset in accordance with the ABI rather than passed using a + /// pointer. This corresponds to the `byval` LLVM argument attribute. + Indirect { attrs: ArgAttributes, extra_attrs: Option, on_stack: bool }, } // Hack to disable non_upper_case_globals only for the bitflags! and not for the rest @@ -52,24 +55,31 @@ mod attr_impl { bitflags::bitflags! { #[derive(Default)] pub struct ArgAttribute: u16 { - const ByVal = 1 << 0; const NoAlias = 1 << 1; const NoCapture = 1 << 2; const NonNull = 1 << 3; const ReadOnly = 1 << 4; - const SExt = 1 << 5; - const StructRet = 1 << 6; - const ZExt = 1 << 7; const InReg = 1 << 8; } } } +/// Sometimes an ABI requires small integers to be extended to a full or partial register. This enum +/// defines if this extension should be zero-extension or sign-extension when necessary.
When it is +/// not necessary to extend the argument, this enum is ignored. #[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum ArgExtension { + None, + Zext, + Sext, +} + /// A compact representation of LLVM attributes (at least those relevant for this module) /// that can be manipulated without interacting with LLVM's Attribute machinery. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub struct ArgAttributes { pub regular: ArgAttribute, + pub arg_ext: ArgExtension, /// The minimum size of the pointee, guaranteed to be valid for the duration of the whole call /// (corresponding to LLVM's dereferenceable and dereferenceable_or_null attributes). pub pointee_size: Size, @@ -80,11 +90,18 @@ impl ArgAttributes { pub fn new() -> Self { ArgAttributes { regular: ArgAttribute::default(), + arg_ext: ArgExtension::None, pointee_size: Size::ZERO, pointee_align: None, } } + pub fn ext(&mut self, ext: ArgExtension) -> &mut Self { + assert!(self.arg_ext == ArgExtension::None || self.arg_ext == ext); + self.arg_ext = ext; + self + } + pub fn set(&mut self, attr: ArgAttribute) -> &mut Self { self.regular |= attr; self @@ -180,7 +197,7 @@ impl Uniform { #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct CastTarget { pub prefix: [Option; 8], - pub prefix_chunk: Size, + pub prefix_chunk_size: Size, pub rest: Uniform, } @@ -192,7 +209,7 @@ impl From for CastTarget { impl From for CastTarget { fn from(uniform: Uniform) -> CastTarget { - CastTarget { prefix: [None; 8], prefix_chunk: Size::ZERO, rest: uniform } + CastTarget { prefix: [None; 8], prefix_chunk_size: Size::ZERO, rest: uniform } } } @@ -200,13 +217,13 @@ impl CastTarget { pub fn pair(a: Reg, b: Reg) -> CastTarget { CastTarget { prefix: [Some(a.kind), None, None, None, None, None, None, None], - prefix_chunk: a.size, + prefix_chunk_size: a.size, rest: Uniform::from(b), } } pub fn size(&self, cx: &C) -> Size { - (self.prefix_chunk * self.prefix.iter().filter(|x| x.is_some()).count() as u64) + (self.prefix_chunk_size * self.prefix.iter().filter(|x| x.is_some()).count() as u64) .align_to(self.rest.align(cx)) + self.rest.total } @@ -214,7 +231,7 @@ impl CastTarget { pub fn align(&self, cx: &C) -> Align { self.prefix .iter() - .filter_map(|x| x.map(|kind| Reg { kind, size: self.prefix_chunk }.align(cx))) + .filter_map(|x| x.map(|kind| Reg { kind, size: self.prefix_chunk_size }.align(cx))) .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)), |acc, align| { acc.max(align) }) @@ -438,14 +455,14 @@ impl<'a, Ty> ArgAbi<'a, Ty> { let extra_attrs = self.layout.is_unsized().then_some(ArgAttributes::new()); - self.mode = PassMode::Indirect(attrs, extra_attrs); + self.mode = PassMode::Indirect { attrs, extra_attrs, on_stack: false }; } pub fn make_indirect_byval(&mut self) { self.make_indirect(); match self.mode { - PassMode::Indirect(ref mut attrs, _) => { - attrs.set(ArgAttribute::ByVal); + PassMode::Indirect { attrs: _, extra_attrs: _, ref mut on_stack } => { + *on_stack = true; } _ => unreachable!(), } @@ -457,7 +474,11 @@ impl<'a, Ty> ArgAbi<'a, Ty> { if let abi::Int(i, signed) = scalar.value { if i.size().bits() < bits { if let PassMode::Direct(ref mut attrs) = self.mode { - attrs.set(if signed { ArgAttribute::SExt } else { ArgAttribute::ZExt }); + if signed { + attrs.ext(ArgExtension::Sext) + } else { + attrs.ext(ArgExtension::Zext) + }; } } } @@ -474,15 +495,15 @@ impl<'a, Ty> ArgAbi<'a, Ty> { } pub fn is_indirect(&self) -> bool { - matches!(self.mode, PassMode::Indirect(..)) + matches!(self.mode, PassMode::Indirect {..}) } pub fn
is_sized_indirect(&self) -> bool { - matches!(self.mode, PassMode::Indirect(_, None)) + matches!(self.mode, PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }) } pub fn is_unsized_indirect(&self) -> bool { - matches!(self.mode, PassMode::Indirect(_, Some(_))) + matches!(self.mode, PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ }) } pub fn is_ignore(&self) -> bool { @@ -562,7 +583,7 @@ impl<'a, Ty> FnAbi<'a, Ty> { "x86_64" => { if abi == spec::abi::Abi::SysV64 { x86_64::compute_abi_info(cx, self); - } else if abi == spec::abi::Abi::Win64 || cx.target_spec().options.is_like_windows { + } else if abi == spec::abi::Abi::Win64 || cx.target_spec().is_like_windows { x86_win64::compute_abi_info(self); } else { x86_64::compute_abi_info(cx, self); @@ -584,17 +605,13 @@ impl<'a, Ty> FnAbi<'a, Ty> { "nvptx64" => nvptx64::compute_abi_info(self), "hexagon" => hexagon::compute_abi_info(self), "riscv32" | "riscv64" => riscv::compute_abi_info(cx, self), - "wasm32" if cx.target_spec().target_os != "emscripten" => { + "wasm32" if cx.target_spec().os != "emscripten" => { wasm32_bindgen_compat::compute_abi_info(self) } "wasm32" | "asmjs" => wasm32::compute_abi_info(cx, self), a => return Err(format!("unrecognized arch \"{}\" in target specification", a)), } - if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode { - attrs.set(ArgAttribute::StructRet); - } - Ok(()) } } diff --git a/compiler/rustc_target/src/abi/call/powerpc64.rs b/compiler/rustc_target/src/abi/call/powerpc64.rs index b740707320f..8c2a9d09a3d 100644 --- a/compiler/rustc_target/src/abi/call/powerpc64.rs +++ b/compiler/rustc_target/src/abi/call/powerpc64.rs @@ -119,7 +119,7 @@ where Ty: TyAndLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout + HasTargetSpec, { - let abi = if cx.target_spec().target_env == "musl" { + let abi = if cx.target_spec().env == "musl" { ELFv2 } else { match cx.data_layout().endian { diff --git a/compiler/rustc_target/src/abi/call/riscv.rs b/compiler/rustc_target/src/abi/call/riscv.rs index 47530eeacd0..1ab881dd13d 100644 --- a/compiler/rustc_target/src/abi/call/riscv.rs +++ b/compiler/rustc_target/src/abi/call/riscv.rs @@ -4,7 +4,7 @@ // Reference: Clang RISC-V ELF psABI lowering code // https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773 -use crate::abi::call::{ArgAbi, ArgAttribute, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform}; +use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform}; use crate::abi::{ self, Abi, FieldsShape, HasDataLayout, LayoutOf, Size, TyAndLayout, TyAndLayoutMethods, }; @@ -308,7 +308,7 @@ fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, xlen: u64) { // 32-bit integers are always sign-extended if i.size().bits() == 32 && xlen > 32 { if let PassMode::Direct(ref mut attrs) = arg.mode { - attrs.set(ArgAttribute::SExt); + attrs.ext(ArgExtension::Sext); return; } } @@ -323,7 +323,7 @@ where Ty: TyAndLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout + HasTargetSpec, { - let flen = match &cx.target_spec().options.llvm_abiname[..] { + let flen = match &cx.target_spec().llvm_abiname[..] 
{ "ilp32f" | "lp64f" => 32, "ilp32d" | "lp64d" => 64, _ => 0, diff --git a/compiler/rustc_target/src/abi/call/x86.rs b/compiler/rustc_target/src/abi/call/x86.rs index df3dd5d9208..713b4100a33 100644 --- a/compiler/rustc_target/src/abi/call/x86.rs +++ b/compiler/rustc_target/src/abi/call/x86.rs @@ -41,10 +41,10 @@ where // http://www.angelcode.com/dev/callconv/callconv.html // Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp let t = cx.target_spec(); - if t.options.abi_return_struct_as_int { + if t.abi_return_struct_as_int { // According to Clang, everyone but MSVC returns single-element // float aggregates directly in a floating-point register. - if !t.options.is_like_msvc && is_single_fp_element(cx, fn_abi.ret.layout) { + if !t.is_like_msvc && is_single_fp_element(cx, fn_abi.ret.layout) { match fn_abi.ret.layout.size.bytes() { 4 => fn_abi.ret.cast_to(Reg::f32()), 8 => fn_abi.ret.cast_to(Reg::f64()), @@ -92,9 +92,14 @@ where for arg in &mut fn_abi.args { let attrs = match arg.mode { - PassMode::Ignore | PassMode::Indirect(_, None) => continue, + PassMode::Ignore + | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => { + continue; + } PassMode::Direct(ref mut attrs) => attrs, - PassMode::Pair(..) | PassMode::Indirect(_, Some(_)) | PassMode::Cast(_) => { + PassMode::Pair(..) + | PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } + | PassMode::Cast(_) => { unreachable!("x86 shouldn't be passing arguments by {:?}", arg.mode) } }; diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs index d3c31773c1e..a43080b09e9 100644 --- a/compiler/rustc_target/src/abi/mod.rs +++ b/compiler/rustc_target/src/abi/mod.rs @@ -156,11 +156,11 @@ impl TargetDataLayout { Endian::Little => "little", Endian::Big => "big", }; - if endian_str != target.target_endian { + if endian_str != target.endian { return Err(format!( "inconsistent target specification: \"data-layout\" claims \ architecture is {}-endian, while \"target-endian\" is `{}`", - endian_str, target.target_endian + endian_str, target.endian )); } diff --git a/compiler/rustc_target/src/asm/arm.rs b/compiler/rustc_target/src/asm/arm.rs index 85a136b94aa..28000916e0c 100644 --- a/compiler/rustc_target/src/asm/arm.rs +++ b/compiler/rustc_target/src/asm/arm.rs @@ -61,7 +61,7 @@ impl ArmInlineAsmRegClass { // This uses the same logic as useR7AsFramePointer in LLVM fn frame_pointer_is_r7(mut has_feature: impl FnMut(&str) -> bool, target: &Target) -> bool { - target.options.is_like_osx || (!target.options.is_like_windows && has_feature("thumb-mode")) + target.is_like_osx || (!target.is_like_windows && has_feature("thumb-mode")) } fn frame_pointer_r11( diff --git a/compiler/rustc_target/src/asm/mod.rs b/compiler/rustc_target/src/asm/mod.rs index 8e60085262a..5ebd6c4a234 100644 --- a/compiler/rustc_target/src/asm/mod.rs +++ b/compiler/rustc_target/src/asm/mod.rs @@ -155,6 +155,7 @@ mod hexagon; mod mips; mod nvptx; mod riscv; +mod spirv; mod x86; pub use aarch64::{AArch64InlineAsmReg, AArch64InlineAsmRegClass}; @@ -163,6 +164,7 @@ pub use hexagon::{HexagonInlineAsmReg, HexagonInlineAsmRegClass}; pub use mips::{MipsInlineAsmReg, MipsInlineAsmRegClass}; pub use nvptx::{NvptxInlineAsmReg, NvptxInlineAsmRegClass}; pub use riscv::{RiscVInlineAsmReg, RiscVInlineAsmRegClass}; +pub use spirv::{SpirVInlineAsmReg, SpirVInlineAsmRegClass}; pub use x86::{X86InlineAsmReg, X86InlineAsmRegClass}; #[derive(Copy, Clone, Encodable, Decodable, Debug, Eq, PartialEq, Hash)] @@ -177,6 +179,7 @@ pub enum InlineAsmArch 
{ Hexagon, Mips, Mips64, + SpirV, } impl FromStr for InlineAsmArch { @@ -194,6 +197,7 @@ impl FromStr for InlineAsmArch { "hexagon" => Ok(Self::Hexagon), "mips" => Ok(Self::Mips), "mips64" => Ok(Self::Mips64), + "spirv" => Ok(Self::SpirV), _ => Err(()), } } @@ -208,6 +212,7 @@ pub enum InlineAsmReg { Nvptx(NvptxInlineAsmReg), Hexagon(HexagonInlineAsmReg), Mips(MipsInlineAsmReg), + SpirV(SpirVInlineAsmReg), } impl InlineAsmReg { @@ -264,6 +269,9 @@ impl InlineAsmReg { InlineAsmArch::Mips | InlineAsmArch::Mips64 => { Self::Mips(MipsInlineAsmReg::parse(arch, has_feature, target, &name)?) } + InlineAsmArch::SpirV => { + Self::SpirV(SpirVInlineAsmReg::parse(arch, has_feature, target, &name)?) + } }) } @@ -306,6 +314,7 @@ pub enum InlineAsmRegClass { Nvptx(NvptxInlineAsmRegClass), Hexagon(HexagonInlineAsmRegClass), Mips(MipsInlineAsmRegClass), + SpirV(SpirVInlineAsmRegClass), } impl InlineAsmRegClass { @@ -318,6 +327,7 @@ impl InlineAsmRegClass { Self::Nvptx(r) => r.name(), Self::Hexagon(r) => r.name(), Self::Mips(r) => r.name(), + Self::SpirV(r) => r.name(), } } @@ -333,6 +343,7 @@ impl InlineAsmRegClass { Self::Nvptx(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Nvptx), Self::Hexagon(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Hexagon), Self::Mips(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Mips), + Self::SpirV(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::SpirV), } } @@ -355,6 +366,7 @@ impl InlineAsmRegClass { Self::Nvptx(r) => r.suggest_modifier(arch, ty), Self::Hexagon(r) => r.suggest_modifier(arch, ty), Self::Mips(r) => r.suggest_modifier(arch, ty), + Self::SpirV(r) => r.suggest_modifier(arch, ty), } } @@ -373,6 +385,7 @@ impl InlineAsmRegClass { Self::Nvptx(r) => r.default_modifier(arch), Self::Hexagon(r) => r.default_modifier(arch), Self::Mips(r) => r.default_modifier(arch), + Self::SpirV(r) => r.default_modifier(arch), } } @@ -390,6 +403,7 @@ impl InlineAsmRegClass { Self::Nvptx(r) => r.supported_types(arch), Self::Hexagon(r) => r.supported_types(arch), Self::Mips(r) => r.supported_types(arch), + Self::SpirV(r) => r.supported_types(arch), } } @@ -414,6 +428,7 @@ impl InlineAsmRegClass { InlineAsmArch::Mips | InlineAsmArch::Mips64 => { Self::Mips(MipsInlineAsmRegClass::parse(arch, name)?) } + InlineAsmArch::SpirV => Self::SpirV(SpirVInlineAsmRegClass::parse(arch, name)?), }) }) } @@ -429,6 +444,7 @@ impl InlineAsmRegClass { Self::Nvptx(r) => r.valid_modifiers(arch), Self::Hexagon(r) => r.valid_modifiers(arch), Self::Mips(r) => r.valid_modifiers(arch), + Self::SpirV(r) => r.valid_modifiers(arch), } } } @@ -571,5 +587,10 @@ pub fn allocatable_registers( mips::fill_reg_map(arch, has_feature, target, &mut map); map } + InlineAsmArch::SpirV => { + let mut map = spirv::regclass_map(); + spirv::fill_reg_map(arch, has_feature, target, &mut map); + map + } } } diff --git a/compiler/rustc_target/src/asm/spirv.rs b/compiler/rustc_target/src/asm/spirv.rs new file mode 100644 index 00000000000..da82749e96a --- /dev/null +++ b/compiler/rustc_target/src/asm/spirv.rs @@ -0,0 +1,46 @@ +use super::{InlineAsmArch, InlineAsmType}; +use rustc_macros::HashStable_Generic; + +def_reg_class! 
{ + SpirV SpirVInlineAsmRegClass { + reg, + } +} + +impl SpirVInlineAsmRegClass { + pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] { + &[] + } + + pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option { + None + } + + pub fn suggest_modifier( + self, + _arch: InlineAsmArch, + _ty: InlineAsmType, + ) -> Option<(char, &'static str)> { + None + } + + pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> { + None + } + + pub fn supported_types( + self, + _arch: InlineAsmArch, + ) -> &'static [(InlineAsmType, Option<&'static str>)] { + match self { + Self::reg => { + types! { _: I8, I16, I32, I64, F32, F64; } + } + } + } +} + +def_regs! { + // SPIR-V is SSA-based, it does not have registers. + SpirV SpirVInlineAsmReg SpirVInlineAsmRegClass {} +} diff --git a/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs b/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs index 098651614d0..7de809f7622 100644 --- a/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs +++ b/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs @@ -19,6 +19,6 @@ pub fn target() -> Target { pointer_width: 64, data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".to_string(), arch: arch.to_string(), - options: TargetOptions { target_mcount: "\u{1}mcount".to_string(), ..base }, + options: TargetOptions { mcount: "\u{1}mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/aarch64_unknown_linux_gnu.rs index 9d9698a440d..58c72af4e76 100644 --- a/compiler/rustc_target/src/spec/aarch64_unknown_linux_gnu.rs +++ b/compiler/rustc_target/src/spec/aarch64_unknown_linux_gnu.rs @@ -1,7 +1,7 @@ use crate::spec::{Target, TargetOptions}; pub fn target() -> Target { - let mut base = super::linux_base::opts(); + let mut base = super::linux_gnu_base::opts(); base.max_atomic_width = Some(128); Target { @@ -11,7 +11,7 @@ pub fn target() -> Target { arch: "aarch64".to_string(), options: TargetOptions { unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "\u{1}_mcount".to_string(), + mcount: "\u{1}_mcount".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/aarch64_unknown_linux_musl.rs index 2dd703b66ff..7bbfc8ec0f7 100644 --- a/compiler/rustc_target/src/spec/aarch64_unknown_linux_musl.rs +++ b/compiler/rustc_target/src/spec/aarch64_unknown_linux_musl.rs @@ -11,7 +11,7 @@ pub fn target() -> Target { arch: "aarch64".to_string(), options: TargetOptions { unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "\u{1}_mcount".to_string(), + mcount: "\u{1}_mcount".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_netbsd.rs b/compiler/rustc_target/src/spec/aarch64_unknown_netbsd.rs index 81e383ca5f1..09efbdbb293 100644 --- a/compiler/rustc_target/src/spec/aarch64_unknown_netbsd.rs +++ b/compiler/rustc_target/src/spec/aarch64_unknown_netbsd.rs @@ -10,6 +10,6 @@ pub fn target() -> Target { pointer_width: 64, data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(), arch: "aarch64".to_string(), - options: TargetOptions { target_mcount: "__mcount".to_string(), ..base }, + options: TargetOptions { mcount: "__mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_none.rs b/compiler/rustc_target/src/spec/aarch64_unknown_none.rs index 1088807f2c2..c9f622820de 100644 --- 
a/compiler/rustc_target/src/spec/aarch64_unknown_none.rs +++ b/compiler/rustc_target/src/spec/aarch64_unknown_none.rs @@ -10,7 +10,6 @@ use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, Target, TargetOp pub fn target() -> Target { let opts = TargetOptions { - target_vendor: String::new(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), linker: Some("rust-lld".to_owned()), features: "+strict-align,+neon,+fp-armv8".to_string(), diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs b/compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs index 044c9fa1de8..0811871c993 100644 --- a/compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs +++ b/compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs @@ -10,7 +10,6 @@ use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, Target, TargetOp pub fn target() -> Target { let opts = TargetOptions { - target_vendor: String::new(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), linker: Some("rust-lld".to_owned()), features: "+strict-align,-neon,-fp-armv8".to_string(), diff --git a/compiler/rustc_target/src/spec/android_base.rs b/compiler/rustc_target/src/spec/android_base.rs index 1bd5eb6988c..f6fbe7cd5f6 100644 --- a/compiler/rustc_target/src/spec/android_base.rs +++ b/compiler/rustc_target/src/spec/android_base.rs @@ -1,15 +1,14 @@ use crate::spec::{LinkerFlavor, TargetOptions}; pub fn opts() -> TargetOptions { - let mut base = super::linux_base::opts(); - base.target_os = "android".to_string(); + let mut base = super::linux_gnu_base::opts(); + base.os = "android".to_string(); // Many of the symbols defined in compiler-rt are also defined in libgcc. // Android's linker doesn't like that by default. base.pre_link_args .get_mut(&LinkerFlavor::Gcc) .unwrap() .push("-Wl,--allow-multiple-definition".to_string()); - base.is_like_android = true; base.dwarf_version = Some(2); base.position_independent_executables = true; base.has_elf_tls = false; diff --git a/compiler/rustc_target/src/spec/apple_base.rs b/compiler/rustc_target/src/spec/apple_base.rs index 045d9967f30..e271a6dec40 100644 --- a/compiler/rustc_target/src/spec/apple_base.rs +++ b/compiler/rustc_target/src/spec/apple_base.rs @@ -17,13 +17,13 @@ pub fn opts(os: &str) -> TargetOptions { let version = macos_deployment_target(); TargetOptions { - target_os: os.to_string(), - target_vendor: "apple".to_string(), + os: os.to_string(), + vendor: "apple".to_string(), // macOS has -dead_strip, which doesn't rely on function_sections function_sections: false, dynamic_linking: true, executables: true, - target_family: Some("unix".to_string()), + os_family: Some("unix".to_string()), is_like_osx: true, dwarf_version: Some(2), has_rpath: true, diff --git a/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs index dca0b1ec2e4..c41cf6e521a 100644 --- a/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs +++ b/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs @@ -1,7 +1,7 @@ use crate::spec::{Target, TargetOptions}; pub fn target() -> Target { - let mut base = super::linux_base::opts(); + let mut base = super::linux_gnu_base::opts(); base.max_atomic_width = Some(64); Target { llvm_target: "arm-unknown-linux-gnueabi".to_string(), @@ -12,7 +12,7 @@ pub fn target() -> Target { options: TargetOptions { features: "+strict-align,+v6".to_string(), unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "\u{1}__gnu_mcount_nc".to_string(), + 
mcount: "\u{1}__gnu_mcount_nc".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs b/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs index ee71ae61972..f2143966c1d 100644 --- a/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs +++ b/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs @@ -1,7 +1,7 @@ use crate::spec::{Target, TargetOptions}; pub fn target() -> Target { - let mut base = super::linux_base::opts(); + let mut base = super::linux_gnu_base::opts(); base.max_atomic_width = Some(64); Target { llvm_target: "arm-unknown-linux-gnueabihf".to_string(), @@ -12,7 +12,7 @@ pub fn target() -> Target { options: TargetOptions { features: "+strict-align,+v6,+vfp2,-d32".to_string(), unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "\u{1}__gnu_mcount_nc".to_string(), + mcount: "\u{1}__gnu_mcount_nc".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/arm_unknown_linux_musleabi.rs b/compiler/rustc_target/src/spec/arm_unknown_linux_musleabi.rs index 6938a043602..53ff1001c20 100644 --- a/compiler/rustc_target/src/spec/arm_unknown_linux_musleabi.rs +++ b/compiler/rustc_target/src/spec/arm_unknown_linux_musleabi.rs @@ -17,7 +17,7 @@ pub fn target() -> Target { arch: "arm".to_string(), options: TargetOptions { unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "\u{1}mcount".to_string(), + mcount: "\u{1}mcount".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/arm_unknown_linux_musleabihf.rs b/compiler/rustc_target/src/spec/arm_unknown_linux_musleabihf.rs index 4adf3a33893..6d8a5f9f88b 100644 --- a/compiler/rustc_target/src/spec/arm_unknown_linux_musleabihf.rs +++ b/compiler/rustc_target/src/spec/arm_unknown_linux_musleabihf.rs @@ -17,7 +17,7 @@ pub fn target() -> Target { arch: "arm".to_string(), options: TargetOptions { unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "\u{1}mcount".to_string(), + mcount: "\u{1}mcount".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs b/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs index 7bfa5baecb5..c6586b79b87 100644 --- a/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs +++ b/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs @@ -11,8 +11,7 @@ pub fn target() -> Target { arch: "arm".to_string(), options: TargetOptions { - target_endian: "big".to_string(), - target_vendor: String::new(), + endian: "big".to_string(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), executables: true, linker: Some("rust-lld".to_owned()), diff --git a/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs b/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs index 7afc933a28f..e3d4397f612 100644 --- a/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs +++ b/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs @@ -11,8 +11,7 @@ pub fn target() -> Target { arch: "arm".to_string(), options: TargetOptions { - target_endian: "big".to_string(), - target_vendor: String::new(), + endian: "big".to_string(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), executables: true, linker: Some("rust-lld".to_owned()), diff --git a/compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs index c58fa7407b4..e1ba72bf83b 100644 --- a/compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs +++ b/compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs @@ -1,7 +1,7 @@ use 
crate::spec::{Target, TargetOptions}; pub fn target() -> Target { - let base = super::linux_base::opts(); + let base = super::linux_gnu_base::opts(); Target { llvm_target: "armv4t-unknown-linux-gnueabi".to_string(), pointer_width: 32, @@ -13,7 +13,7 @@ pub fn target() -> Target { // Atomic operations provided by compiler-builtins max_atomic_width: Some(32), unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "\u{1}__gnu_mcount_nc".to_string(), + mcount: "\u{1}__gnu_mcount_nc".to_string(), has_thumb_interworking: true, ..base }, diff --git a/compiler/rustc_target/src/spec/armv5te_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/armv5te_unknown_linux_gnueabi.rs index 049a031398a..3ac8d53564d 100644 --- a/compiler/rustc_target/src/spec/armv5te_unknown_linux_gnueabi.rs +++ b/compiler/rustc_target/src/spec/armv5te_unknown_linux_gnueabi.rs @@ -1,7 +1,7 @@ use crate::spec::{Target, TargetOptions}; pub fn target() -> Target { - let base = super::linux_base::opts(); + let base = super::linux_gnu_base::opts(); Target { llvm_target: "armv5te-unknown-linux-gnueabi".to_string(), pointer_width: 32, @@ -13,7 +13,7 @@ pub fn target() -> Target { // Atomic operations provided by compiler-builtins max_atomic_width: Some(32), unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "\u{1}__gnu_mcount_nc".to_string(), + mcount: "\u{1}__gnu_mcount_nc".to_string(), has_thumb_interworking: true, ..base }, diff --git a/compiler/rustc_target/src/spec/armv5te_unknown_linux_musleabi.rs b/compiler/rustc_target/src/spec/armv5te_unknown_linux_musleabi.rs index 77cf8bb76d3..40d405c30a2 100644 --- a/compiler/rustc_target/src/spec/armv5te_unknown_linux_musleabi.rs +++ b/compiler/rustc_target/src/spec/armv5te_unknown_linux_musleabi.rs @@ -16,7 +16,7 @@ pub fn target() -> Target { // Atomic operations provided by compiler-builtins max_atomic_width: Some(32), unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "\u{1}mcount".to_string(), + mcount: "\u{1}mcount".to_string(), has_thumb_interworking: true, ..base }, diff --git a/compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs b/compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs index 981d615f684..a149bd983b7 100644 --- a/compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs +++ b/compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs @@ -9,11 +9,11 @@ pub fn target() -> Target { arch: "arm".to_string(), options: TargetOptions { - target_env: "gnueabihf".to_string(), + env: "gnueabihf".to_string(), features: "+v6,+vfp2,-d32".to_string(), max_atomic_width: Some(64), unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "\u{1}__gnu_mcount_nc".to_string(), + mcount: "\u{1}__gnu_mcount_nc".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/armv6_unknown_netbsd_eabihf.rs b/compiler/rustc_target/src/spec/armv6_unknown_netbsd_eabihf.rs index 8417a8f2801..6c81a458b9b 100644 --- a/compiler/rustc_target/src/spec/armv6_unknown_netbsd_eabihf.rs +++ b/compiler/rustc_target/src/spec/armv6_unknown_netbsd_eabihf.rs @@ -10,10 +10,10 @@ pub fn target() -> Target { arch: "arm".to_string(), options: TargetOptions { - target_env: "eabihf".to_string(), + env: "eabihf".to_string(), features: "+v6,+vfp2,-d32".to_string(), unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "__mcount".to_string(), + mcount: "__mcount".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/armv7_unknown_cloudabi_eabihf.rs 
b/compiler/rustc_target/src/spec/armv7_unknown_cloudabi_eabihf.rs index 921640d0aa6..d47ee541b25 100644 --- a/compiler/rustc_target/src/spec/armv7_unknown_cloudabi_eabihf.rs +++ b/compiler/rustc_target/src/spec/armv7_unknown_cloudabi_eabihf.rs @@ -13,6 +13,6 @@ pub fn target() -> Target { pointer_width: 32, data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), - options: TargetOptions { target_mcount: "\u{1}mcount".to_string(), ..base }, + options: TargetOptions { mcount: "\u{1}mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/armv7_unknown_freebsd.rs b/compiler/rustc_target/src/spec/armv7_unknown_freebsd.rs index 88d5c86cfab..6f24c6818fc 100644 --- a/compiler/rustc_target/src/spec/armv7_unknown_freebsd.rs +++ b/compiler/rustc_target/src/spec/armv7_unknown_freebsd.rs @@ -9,11 +9,11 @@ pub fn target() -> Target { arch: "arm".to_string(), options: TargetOptions { - target_env: "gnueabihf".to_string(), + env: "gnueabihf".to_string(), features: "+v7,+vfp3,-d32,+thumb2,-neon".to_string(), max_atomic_width: Some(64), unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "\u{1}__gnu_mcount_nc".to_string(), + mcount: "\u{1}__gnu_mcount_nc".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs index 2a31bf4e332..ae6b8286f08 100644 --- a/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs +++ b/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs @@ -4,7 +4,7 @@ use crate::spec::{Target, TargetOptions}; // hardfloat. pub fn target() -> Target { - let base = super::linux_base::opts(); + let base = super::linux_gnu_base::opts(); Target { llvm_target: "armv7-unknown-linux-gnueabi".to_string(), pointer_width: 32, @@ -16,7 +16,7 @@ pub fn target() -> Target { cpu: "generic".to_string(), max_atomic_width: Some(64), unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "\u{1}__gnu_mcount_nc".to_string(), + mcount: "\u{1}__gnu_mcount_nc".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabihf.rs b/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabihf.rs index d04400b79df..48c16b620fd 100644 --- a/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabihf.rs +++ b/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabihf.rs @@ -4,7 +4,7 @@ use crate::spec::{Target, TargetOptions}; // thumb-mode. See the thumbv7neon variant for enabling both. 
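The spec files in this stretch of the diff all follow the same mechanical pattern: start from a shared base option set (now `linux_gnu_base::opts()` for glibc targets) and override a handful of fields with struct-update syntax, with `target_mcount` renamed to `mcount`. A condensed, hypothetical example of the resulting shape (not a target that exists in tree; the field values are borrowed from the ARM specs shown nearby):

    // Hypothetical target definition illustrating the pattern above; not part of this patch.
    use crate::spec::{Target, TargetOptions};

    pub fn target() -> Target {
        // Begin with the glibc Linux defaults, then override per-target details.
        let base = super::linux_gnu_base::opts();
        Target {
            llvm_target: "armv7-example-linux-gnueabihf".to_string(),
            pointer_width: 32,
            data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
            arch: "arm".to_string(),
            options: TargetOptions {
                features: "+v7,+vfp3,-d32,+thumb2,-neon".to_string(),
                max_atomic_width: Some(64),
                unsupported_abis: super::arm_base::unsupported_abis(),
                mcount: "\u{1}__gnu_mcount_nc".to_string(),
                ..base
            },
        }
    }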
pub fn target() -> Target { - let base = super::linux_base::opts(); + let base = super::linux_gnu_base::opts(); Target { llvm_target: "armv7-unknown-linux-gnueabihf".to_string(), pointer_width: 32, @@ -17,7 +17,7 @@ pub fn target() -> Target { cpu: "generic".to_string(), max_atomic_width: Some(64), unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "\u{1}__gnu_mcount_nc".to_string(), + mcount: "\u{1}__gnu_mcount_nc".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabi.rs b/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabi.rs index ebbbd61fc11..9f9f1bd79b0 100644 --- a/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabi.rs +++ b/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabi.rs @@ -21,7 +21,7 @@ pub fn target() -> Target { cpu: "generic".to_string(), max_atomic_width: Some(64), unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "\u{1}mcount".to_string(), + mcount: "\u{1}mcount".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabihf.rs b/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabihf.rs index ee603aa0684..59deee30ef2 100644 --- a/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabihf.rs +++ b/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabihf.rs @@ -20,7 +20,7 @@ pub fn target() -> Target { cpu: "generic".to_string(), max_atomic_width: Some(64), unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "\u{1}mcount".to_string(), + mcount: "\u{1}mcount".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/armv7_unknown_netbsd_eabihf.rs b/compiler/rustc_target/src/spec/armv7_unknown_netbsd_eabihf.rs index 09c531ebc8a..660525704c1 100644 --- a/compiler/rustc_target/src/spec/armv7_unknown_netbsd_eabihf.rs +++ b/compiler/rustc_target/src/spec/armv7_unknown_netbsd_eabihf.rs @@ -9,12 +9,12 @@ pub fn target() -> Target { arch: "arm".to_string(), options: TargetOptions { - target_env: "eabihf".to_string(), + env: "eabihf".to_string(), features: "+v7,+vfp3,-d32,+thumb2,-neon".to_string(), cpu: "generic".to_string(), max_atomic_width: Some(64), unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "__mcount".to_string(), + mcount: "__mcount".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/armv7a_none_eabi.rs b/compiler/rustc_target/src/spec/armv7a_none_eabi.rs index b6b34e27562..74deab01916 100644 --- a/compiler/rustc_target/src/spec/armv7a_none_eabi.rs +++ b/compiler/rustc_target/src/spec/armv7a_none_eabi.rs @@ -10,9 +10,6 @@ // bare-metal binaries (the `gcc` linker has the advantage that it knows where C // libraries and crt*.o are but it's not much of an advantage here); LLD is also // faster -// - `target_os` set to `none`. rationale: matches `thumb` targets -// - `target_{env,vendor}` set to an empty string. rationale: matches `thumb` -// targets // - `panic_strategy` set to `abort`. rationale: matches `thumb` targets // - `relocation-model` set to `static`; also no PIE, no relro and no dynamic // linking. 
rationale: matches `thumb` targets @@ -21,7 +18,6 @@ use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, Target, TargetOp pub fn target() -> Target { let opts = TargetOptions { - target_vendor: String::new(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), linker: Some("rust-lld".to_owned()), features: "+v7,+thumb2,+soft-float,-neon,+strict-align".to_string(), diff --git a/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs b/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs index 8b9df361844..c5c720f5fbd 100644 --- a/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs +++ b/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs @@ -9,7 +9,6 @@ use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, Target, TargetOp pub fn target() -> Target { let opts = TargetOptions { - target_vendor: String::new(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), linker: Some("rust-lld".to_owned()), features: "+v7,+vfp3,-d32,+thumb2,-neon,+strict-align".to_string(), diff --git a/compiler/rustc_target/src/spec/armv7r_none_eabi.rs b/compiler/rustc_target/src/spec/armv7r_none_eabi.rs index fdd74d27619..3f49bd87869 100644 --- a/compiler/rustc_target/src/spec/armv7r_none_eabi.rs +++ b/compiler/rustc_target/src/spec/armv7r_none_eabi.rs @@ -11,7 +11,6 @@ pub fn target() -> Target { arch: "arm".to_string(), options: TargetOptions { - target_vendor: String::new(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), executables: true, linker: Some("rust-lld".to_owned()), diff --git a/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs b/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs index 7baafea90b9..9b2e8a8058f 100644 --- a/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs +++ b/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs @@ -11,7 +11,6 @@ pub fn target() -> Target { arch: "arm".to_string(), options: TargetOptions { - target_vendor: String::new(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), executables: true, linker: Some("rust-lld".to_owned()), diff --git a/compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs b/compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs index 1c3f5c4f9e8..b1adefe1a51 100644 --- a/compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs +++ b/compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs @@ -3,7 +3,6 @@ use super::{wasm32_unknown_emscripten, LinkerFlavor, Target}; pub fn target() -> Target { let mut target = wasm32_unknown_emscripten::target(); target - .options .post_link_args .entry(LinkerFlavor::Em) .or_default() diff --git a/compiler/rustc_target/src/spec/avr_gnu_base.rs b/compiler/rustc_target/src/spec/avr_gnu_base.rs index 268af87cfe9..67a7684da2c 100644 --- a/compiler/rustc_target/src/spec/avr_gnu_base.rs +++ b/compiler/rustc_target/src/spec/avr_gnu_base.rs @@ -10,8 +10,7 @@ pub fn target(target_cpu: String) -> Target { llvm_target: "avr-unknown-unknown".to_string(), pointer_width: 16, options: TargetOptions { - target_c_int_width: "16".to_string(), - target_os: "unknown".to_string(), + c_int_width: "16".to_string(), cpu: target_cpu.clone(), exe_suffix: ".elf".to_string(), diff --git a/compiler/rustc_target/src/spec/cloudabi_base.rs b/compiler/rustc_target/src/spec/cloudabi_base.rs index 0053adb8552..20a095742ec 100644 --- a/compiler/rustc_target/src/spec/cloudabi_base.rs +++ b/compiler/rustc_target/src/spec/cloudabi_base.rs @@ -12,9 +12,9 @@ pub fn opts() -> TargetOptions { ); TargetOptions { - target_os: "cloudabi".to_string(), + os: "cloudabi".to_string(), executables: true, - target_family: None, + 
os_family: None, linker_is_gnu: true, pre_link_args: args, position_independent_executables: true, diff --git a/compiler/rustc_target/src/spec/dragonfly_base.rs b/compiler/rustc_target/src/spec/dragonfly_base.rs index a182e37dd80..b96de7ab1ed 100644 --- a/compiler/rustc_target/src/spec/dragonfly_base.rs +++ b/compiler/rustc_target/src/spec/dragonfly_base.rs @@ -16,10 +16,10 @@ pub fn opts() -> TargetOptions { ); TargetOptions { - target_os: "dragonfly".to_string(), + os: "dragonfly".to_string(), dynamic_linking: true, executables: true, - target_family: Some("unix".to_string()), + os_family: Some("unix".to_string()), linker_is_gnu: true, has_rpath: true, pre_link_args: args, diff --git a/compiler/rustc_target/src/spec/freebsd_base.rs b/compiler/rustc_target/src/spec/freebsd_base.rs index 25535117743..c70c492716b 100644 --- a/compiler/rustc_target/src/spec/freebsd_base.rs +++ b/compiler/rustc_target/src/spec/freebsd_base.rs @@ -16,10 +16,10 @@ pub fn opts() -> TargetOptions { ); TargetOptions { - target_os: "freebsd".to_string(), + os: "freebsd".to_string(), dynamic_linking: true, executables: true, - target_family: Some("unix".to_string()), + os_family: Some("unix".to_string()), linker_is_gnu: true, has_rpath: true, pre_link_args: args, diff --git a/compiler/rustc_target/src/spec/fuchsia_base.rs b/compiler/rustc_target/src/spec/fuchsia_base.rs index 97998eed886..5c39773cbe3 100644 --- a/compiler/rustc_target/src/spec/fuchsia_base.rs +++ b/compiler/rustc_target/src/spec/fuchsia_base.rs @@ -20,14 +20,13 @@ pub fn opts() -> TargetOptions { ); TargetOptions { - target_os: "fuchsia".to_string(), - target_vendor: String::new(), + os: "fuchsia".to_string(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), linker: Some("rust-lld".to_owned()), lld_flavor: LldFlavor::Ld, dynamic_linking: true, executables: true, - target_family: Some("unix".to_string()), + os_family: Some("unix".to_string()), is_like_fuchsia: true, linker_is_gnu: true, has_rpath: false, diff --git a/compiler/rustc_target/src/spec/haiku_base.rs b/compiler/rustc_target/src/spec/haiku_base.rs index 3d9dd44e786..ec87645c4fa 100644 --- a/compiler/rustc_target/src/spec/haiku_base.rs +++ b/compiler/rustc_target/src/spec/haiku_base.rs @@ -2,11 +2,11 @@ use crate::spec::{RelroLevel, TargetOptions}; pub fn opts() -> TargetOptions { TargetOptions { - target_os: "haiku".to_string(), + os: "haiku".to_string(), dynamic_linking: true, executables: true, has_rpath: false, - target_family: Some("unix".to_string()), + os_family: Some("unix".to_string()), relro_level: RelroLevel::Full, linker_is_gnu: true, ..Default::default() diff --git a/compiler/rustc_target/src/spec/hermit_base.rs b/compiler/rustc_target/src/spec/hermit_base.rs index 2953646afd0..a75158a0ea0 100644 --- a/compiler/rustc_target/src/spec/hermit_base.rs +++ b/compiler/rustc_target/src/spec/hermit_base.rs @@ -9,7 +9,7 @@ pub fn opts() -> TargetOptions { ); TargetOptions { - target_os: "hermit".to_string(), + os: "hermit".to_string(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), linker: Some("rust-lld".to_owned()), executables: true, @@ -20,7 +20,7 @@ pub fn opts() -> TargetOptions { position_independent_executables: true, static_position_independent_executables: true, relocation_model: RelocModel::Pic, - target_family: None, + os_family: None, tls_model: TlsModel::InitialExec, ..Default::default() } diff --git a/compiler/rustc_target/src/spec/hermit_kernel_base.rs b/compiler/rustc_target/src/spec/hermit_kernel_base.rs index 7d06cbd62f5..622f0d9a471 100644 --- 
a/compiler/rustc_target/src/spec/hermit_kernel_base.rs +++ b/compiler/rustc_target/src/spec/hermit_kernel_base.rs @@ -9,7 +9,7 @@ pub fn opts() -> TargetOptions { ); TargetOptions { - target_os: "hermit".to_string(), + os: "hermit".to_string(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), disable_redzone: true, linker: Some("rust-lld".to_owned()), @@ -21,7 +21,7 @@ pub fn opts() -> TargetOptions { position_independent_executables: true, static_position_independent_executables: true, relocation_model: RelocModel::Pic, - target_family: None, + os_family: None, tls_model: TlsModel::InitialExec, ..Default::default() } diff --git a/compiler/rustc_target/src/spec/i586_pc_windows_msvc.rs b/compiler/rustc_target/src/spec/i586_pc_windows_msvc.rs index 664b9d5d515..4a7779a6df0 100644 --- a/compiler/rustc_target/src/spec/i586_pc_windows_msvc.rs +++ b/compiler/rustc_target/src/spec/i586_pc_windows_msvc.rs @@ -2,7 +2,7 @@ use crate::spec::Target; pub fn target() -> Target { let mut base = super::i686_pc_windows_msvc::target(); - base.options.cpu = "pentium".to_string(); + base.cpu = "pentium".to_string(); base.llvm_target = "i586-pc-windows-msvc".to_string(); base } diff --git a/compiler/rustc_target/src/spec/i586_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/i586_unknown_linux_gnu.rs index 3276f1d0094..7c92dda8a9d 100644 --- a/compiler/rustc_target/src/spec/i586_unknown_linux_gnu.rs +++ b/compiler/rustc_target/src/spec/i586_unknown_linux_gnu.rs @@ -2,7 +2,7 @@ use crate::spec::Target; pub fn target() -> Target { let mut base = super::i686_unknown_linux_gnu::target(); - base.options.cpu = "pentium".to_string(); + base.cpu = "pentium".to_string(); base.llvm_target = "i586-unknown-linux-gnu".to_string(); base } diff --git a/compiler/rustc_target/src/spec/i586_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/i586_unknown_linux_musl.rs index 5fbf0487226..1fea02bbee8 100644 --- a/compiler/rustc_target/src/spec/i586_unknown_linux_musl.rs +++ b/compiler/rustc_target/src/spec/i586_unknown_linux_musl.rs @@ -2,7 +2,7 @@ use crate::spec::Target; pub fn target() -> Target { let mut base = super::i686_unknown_linux_musl::target(); - base.options.cpu = "pentium".to_string(); + base.cpu = "pentium".to_string(); base.llvm_target = "i586-unknown-linux-musl".to_string(); base } diff --git a/compiler/rustc_target/src/spec/i686_apple_darwin.rs b/compiler/rustc_target/src/spec/i686_apple_darwin.rs index ac295aa3587..0ab40340928 100644 --- a/compiler/rustc_target/src/spec/i686_apple_darwin.rs +++ b/compiler/rustc_target/src/spec/i686_apple_darwin.rs @@ -22,6 +22,6 @@ pub fn target() -> Target { f64:32:64-f80:128-n8:16:32-S128" .to_string(), arch: "x86".to_string(), - options: TargetOptions { target_mcount: "\u{1}mcount".to_string(), ..base }, + options: TargetOptions { mcount: "\u{1}mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs index 62b02d841c2..083c115d084 100644 --- a/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs +++ b/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs @@ -1,7 +1,7 @@ use crate::spec::{LinkerFlavor, Target}; pub fn target() -> Target { - let mut base = super::linux_base::opts(); + let mut base = super::linux_gnu_base::opts(); base.cpu = "pentium4".to_string(); base.max_atomic_width = Some(64); base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string()); diff --git a/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs 
b/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs index 2568fabfb05..c22139b5875 100644 --- a/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs +++ b/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs @@ -14,6 +14,6 @@ pub fn target() -> Target { f64:32:64-f80:32-n8:16:32-S128" .to_string(), arch: "x86".to_string(), - options: TargetOptions { target_mcount: "__mcount".to_string(), ..base }, + options: TargetOptions { mcount: "__mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/illumos_base.rs b/compiler/rustc_target/src/spec/illumos_base.rs index 625f7b18b25..d9b5716c041 100644 --- a/compiler/rustc_target/src/spec/illumos_base.rs +++ b/compiler/rustc_target/src/spec/illumos_base.rs @@ -16,11 +16,11 @@ pub fn opts() -> TargetOptions { ); TargetOptions { - target_os: "illumos".to_string(), + os: "illumos".to_string(), dynamic_linking: true, executables: true, has_rpath: true, - target_family: Some("unix".to_string()), + os_family: Some("unix".to_string()), is_like_solaris: true, limit_rdylib_exports: false, // Linker doesn't support this eliminate_frame_pointer: false, diff --git a/compiler/rustc_target/src/spec/l4re_base.rs b/compiler/rustc_target/src/spec/l4re_base.rs index 6d1e610d0e9..660fae5f5c7 100644 --- a/compiler/rustc_target/src/spec/l4re_base.rs +++ b/compiler/rustc_target/src/spec/l4re_base.rs @@ -17,15 +17,15 @@ pub fn opts() -> TargetOptions { args.insert(LinkerFlavor::Gcc, vec![]); TargetOptions { - target_os: "l4re".to_string(), - target_env: "uclibc".to_string(), + os: "l4re".to_string(), + env: "uclibc".to_string(), linker_flavor: LinkerFlavor::Ld, executables: true, has_elf_tls: false, panic_strategy: PanicStrategy::Abort, linker: Some("ld".to_string()), pre_link_args: args, - target_family: Some("unix".to_string()), + os_family: Some("unix".to_string()), ..Default::default() } } diff --git a/compiler/rustc_target/src/spec/linux_base.rs b/compiler/rustc_target/src/spec/linux_base.rs index b3a850591fd..0631644ad63 100644 --- a/compiler/rustc_target/src/spec/linux_base.rs +++ b/compiler/rustc_target/src/spec/linux_base.rs @@ -19,11 +19,10 @@ pub fn opts() -> TargetOptions { ); TargetOptions { - target_os: "linux".to_string(), - target_env: "gnu".to_string(), + os: "linux".to_string(), dynamic_linking: true, executables: true, - target_family: Some("unix".to_string()), + os_family: Some("unix".to_string()), linker_is_gnu: true, has_rpath: true, pre_link_args: args, diff --git a/compiler/rustc_target/src/spec/linux_gnu_base.rs b/compiler/rustc_target/src/spec/linux_gnu_base.rs new file mode 100644 index 00000000000..3d940ceaf02 --- /dev/null +++ b/compiler/rustc_target/src/spec/linux_gnu_base.rs @@ -0,0 +1,5 @@ +use crate::spec::TargetOptions; + +pub fn opts() -> TargetOptions { + TargetOptions { env: "gnu".to_string(), ..super::linux_base::opts() } +} diff --git a/compiler/rustc_target/src/spec/linux_kernel_base.rs b/compiler/rustc_target/src/spec/linux_kernel_base.rs index 9c883f9a188..a5fc1649e7f 100644 --- a/compiler/rustc_target/src/spec/linux_kernel_base.rs +++ b/compiler/rustc_target/src/spec/linux_kernel_base.rs @@ -8,7 +8,7 @@ pub fn opts() -> TargetOptions { ); TargetOptions { - target_env: "gnu".to_string(), + env: "gnu".to_string(), disable_redzone: true, panic_strategy: PanicStrategy::Abort, stack_probes: true, diff --git a/compiler/rustc_target/src/spec/linux_musl_base.rs b/compiler/rustc_target/src/spec/linux_musl_base.rs index 3a44d3326eb..5038a967d0a 100644 --- a/compiler/rustc_target/src/spec/linux_musl_base.rs +++ 
b/compiler/rustc_target/src/spec/linux_musl_base.rs @@ -4,7 +4,7 @@ use crate::spec::TargetOptions; pub fn opts() -> TargetOptions { let mut base = super::linux_base::opts(); - base.target_env = "musl".to_string(); + base.env = "musl".to_string(); base.pre_link_objects_fallback = crt_objects::pre_musl_fallback(); base.post_link_objects_fallback = crt_objects::post_musl_fallback(); base.crt_objects_fallback = Some(CrtObjectsFallback::Musl); diff --git a/compiler/rustc_target/src/spec/linux_uclibc_base.rs b/compiler/rustc_target/src/spec/linux_uclibc_base.rs index ce7c79c1644..ef6d50656e4 100644 --- a/compiler/rustc_target/src/spec/linux_uclibc_base.rs +++ b/compiler/rustc_target/src/spec/linux_uclibc_base.rs @@ -1,5 +1,5 @@ use crate::spec::TargetOptions; pub fn opts() -> TargetOptions { - TargetOptions { target_env: "uclibc".to_string(), ..super::linux_base::opts() } + TargetOptions { env: "uclibc".to_string(), ..super::linux_base::opts() } } diff --git a/compiler/rustc_target/src/spec/mips64_unknown_linux_gnuabi64.rs b/compiler/rustc_target/src/spec/mips64_unknown_linux_gnuabi64.rs index f0a266a63af..daa0d9da172 100644 --- a/compiler/rustc_target/src/spec/mips64_unknown_linux_gnuabi64.rs +++ b/compiler/rustc_target/src/spec/mips64_unknown_linux_gnuabi64.rs @@ -7,14 +7,14 @@ pub fn target() -> Target { data_layout: "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(), arch: "mips64".to_string(), options: TargetOptions { - target_endian: "big".to_string(), + endian: "big".to_string(), // NOTE(mips64r2) matches C toolchain cpu: "mips64r2".to_string(), features: "+mips64r2".to_string(), max_atomic_width: Some(64), - target_mcount: "_mcount".to_string(), + mcount: "_mcount".to_string(), - ..super::linux_base::opts() + ..super::linux_gnu_base::opts() }, } } diff --git a/compiler/rustc_target/src/spec/mips64_unknown_linux_muslabi64.rs b/compiler/rustc_target/src/spec/mips64_unknown_linux_muslabi64.rs index 805a965bc0f..db8d0c04e6f 100644 --- a/compiler/rustc_target/src/spec/mips64_unknown_linux_muslabi64.rs +++ b/compiler/rustc_target/src/spec/mips64_unknown_linux_muslabi64.rs @@ -11,10 +11,6 @@ pub fn target() -> Target { pointer_width: 64, data_layout: "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(), arch: "mips64".to_string(), - options: TargetOptions { - target_endian: "big".to_string(), - target_mcount: "_mcount".to_string(), - ..base - }, + options: TargetOptions { endian: "big".to_string(), mcount: "_mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/mips64el_unknown_linux_gnuabi64.rs b/compiler/rustc_target/src/spec/mips64el_unknown_linux_gnuabi64.rs index f47b058bd08..d767705b045 100644 --- a/compiler/rustc_target/src/spec/mips64el_unknown_linux_gnuabi64.rs +++ b/compiler/rustc_target/src/spec/mips64el_unknown_linux_gnuabi64.rs @@ -11,9 +11,9 @@ pub fn target() -> Target { cpu: "mips64r2".to_string(), features: "+mips64r2".to_string(), max_atomic_width: Some(64), - target_mcount: "_mcount".to_string(), + mcount: "_mcount".to_string(), - ..super::linux_base::opts() + ..super::linux_gnu_base::opts() }, } } diff --git a/compiler/rustc_target/src/spec/mips64el_unknown_linux_muslabi64.rs b/compiler/rustc_target/src/spec/mips64el_unknown_linux_muslabi64.rs index 5c985eb842c..766ed69df4b 100644 --- a/compiler/rustc_target/src/spec/mips64el_unknown_linux_muslabi64.rs +++ b/compiler/rustc_target/src/spec/mips64el_unknown_linux_muslabi64.rs @@ -11,6 +11,6 @@ pub fn target() -> Target { pointer_width: 64, data_layout: 
"e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(), arch: "mips64".to_string(), - options: TargetOptions { target_mcount: "_mcount".to_string(), ..base }, + options: TargetOptions { mcount: "_mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/mips_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/mips_unknown_linux_gnu.rs index 1fc66861364..a7ec1f19c9d 100644 --- a/compiler/rustc_target/src/spec/mips_unknown_linux_gnu.rs +++ b/compiler/rustc_target/src/spec/mips_unknown_linux_gnu.rs @@ -7,13 +7,13 @@ pub fn target() -> Target { data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), arch: "mips".to_string(), options: TargetOptions { - target_endian: "big".to_string(), + endian: "big".to_string(), cpu: "mips32r2".to_string(), features: "+mips32r2,+fpxx,+nooddspreg".to_string(), max_atomic_width: Some(32), - target_mcount: "_mcount".to_string(), + mcount: "_mcount".to_string(), - ..super::linux_base::opts() + ..super::linux_gnu_base::opts() }, } } diff --git a/compiler/rustc_target/src/spec/mips_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/mips_unknown_linux_musl.rs index ed03f5d990e..1ebe577bc1c 100644 --- a/compiler/rustc_target/src/spec/mips_unknown_linux_musl.rs +++ b/compiler/rustc_target/src/spec/mips_unknown_linux_musl.rs @@ -11,10 +11,6 @@ pub fn target() -> Target { pointer_width: 32, data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), arch: "mips".to_string(), - options: TargetOptions { - target_endian: "big".to_string(), - target_mcount: "_mcount".to_string(), - ..base - }, + options: TargetOptions { endian: "big".to_string(), mcount: "_mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/mips_unknown_linux_uclibc.rs b/compiler/rustc_target/src/spec/mips_unknown_linux_uclibc.rs index fa1d789bfa8..2123d5e1a0f 100644 --- a/compiler/rustc_target/src/spec/mips_unknown_linux_uclibc.rs +++ b/compiler/rustc_target/src/spec/mips_unknown_linux_uclibc.rs @@ -7,11 +7,11 @@ pub fn target() -> Target { data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), arch: "mips".to_string(), options: TargetOptions { - target_endian: "big".to_string(), + endian: "big".to_string(), cpu: "mips32r2".to_string(), features: "+mips32r2,+soft-float".to_string(), max_atomic_width: Some(32), - target_mcount: "_mcount".to_string(), + mcount: "_mcount".to_string(), ..super::linux_uclibc_base::opts() }, diff --git a/compiler/rustc_target/src/spec/mipsel_sony_psp.rs b/compiler/rustc_target/src/spec/mipsel_sony_psp.rs index 3f426e2e5fe..08c290e6ff1 100644 --- a/compiler/rustc_target/src/spec/mipsel_sony_psp.rs +++ b/compiler/rustc_target/src/spec/mipsel_sony_psp.rs @@ -15,8 +15,8 @@ pub fn target() -> Target { arch: "mips".to_string(), options: TargetOptions { - target_os: "psp".to_string(), - target_vendor: "sony".to_string(), + os: "psp".to_string(), + vendor: "sony".to_string(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), cpu: "mips2".to_string(), executables: true, diff --git a/compiler/rustc_target/src/spec/mipsel_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/mipsel_unknown_linux_gnu.rs index 16fbab58140..9cb2a13c7d4 100644 --- a/compiler/rustc_target/src/spec/mipsel_unknown_linux_gnu.rs +++ b/compiler/rustc_target/src/spec/mipsel_unknown_linux_gnu.rs @@ -11,9 +11,9 @@ pub fn target() -> Target { cpu: "mips32r2".to_string(), features: "+mips32r2,+fpxx,+nooddspreg".to_string(), max_atomic_width: Some(32), - target_mcount: "_mcount".to_string(), + mcount: 
"_mcount".to_string(), - ..super::linux_base::opts() + ..super::linux_gnu_base::opts() }, } } diff --git a/compiler/rustc_target/src/spec/mipsel_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/mipsel_unknown_linux_musl.rs index d1b603cd9de..3374cdd4485 100644 --- a/compiler/rustc_target/src/spec/mipsel_unknown_linux_musl.rs +++ b/compiler/rustc_target/src/spec/mipsel_unknown_linux_musl.rs @@ -11,6 +11,6 @@ pub fn target() -> Target { pointer_width: 32, data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), arch: "mips".to_string(), - options: TargetOptions { target_mcount: "_mcount".to_string(), ..base }, + options: TargetOptions { mcount: "_mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/mipsel_unknown_linux_uclibc.rs b/compiler/rustc_target/src/spec/mipsel_unknown_linux_uclibc.rs index a09f7ad0121..0831eb7a0a7 100644 --- a/compiler/rustc_target/src/spec/mipsel_unknown_linux_uclibc.rs +++ b/compiler/rustc_target/src/spec/mipsel_unknown_linux_uclibc.rs @@ -11,7 +11,7 @@ pub fn target() -> Target { cpu: "mips32r2".to_string(), features: "+mips32r2,+soft-float".to_string(), max_atomic_width: Some(32), - target_mcount: "_mcount".to_string(), + mcount: "_mcount".to_string(), ..super::linux_uclibc_base::opts() }, diff --git a/compiler/rustc_target/src/spec/mipsel_unknown_none.rs b/compiler/rustc_target/src/spec/mipsel_unknown_none.rs index 60c4c3bb051..0f9d3c3de15 100644 --- a/compiler/rustc_target/src/spec/mipsel_unknown_none.rs +++ b/compiler/rustc_target/src/spec/mipsel_unknown_none.rs @@ -14,7 +14,6 @@ pub fn target() -> Target { arch: "mips".to_string(), options: TargetOptions { - target_vendor: String::new(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), cpu: "mips32r2".to_string(), features: "+mips32r2,+soft-float,+noabicalls".to_string(), diff --git a/compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs index 417ee6e043b..11b3734a105 100644 --- a/compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs +++ b/compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs @@ -7,13 +7,13 @@ pub fn target() -> Target { data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), arch: "mips".to_string(), options: TargetOptions { - target_endian: "big".to_string(), + endian: "big".to_string(), cpu: "mips32r6".to_string(), features: "+mips32r6".to_string(), max_atomic_width: Some(32), - target_mcount: "_mcount".to_string(), + mcount: "_mcount".to_string(), - ..super::linux_base::opts() + ..super::linux_gnu_base::opts() }, } } diff --git a/compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs index cf273c6ab2b..06a5f40d69b 100644 --- a/compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs +++ b/compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs @@ -11,9 +11,9 @@ pub fn target() -> Target { cpu: "mips32r6".to_string(), features: "+mips32r6".to_string(), max_atomic_width: Some(32), - target_mcount: "_mcount".to_string(), + mcount: "_mcount".to_string(), - ..super::linux_base::opts() + ..super::linux_gnu_base::opts() }, } } diff --git a/compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs b/compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs index 1d82395f536..6282c9e1d54 100644 --- a/compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs +++ 
b/compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs @@ -7,14 +7,14 @@ pub fn target() -> Target { data_layout: "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(), arch: "mips64".to_string(), options: TargetOptions { - target_endian: "big".to_string(), + endian: "big".to_string(), // NOTE(mips64r6) matches C toolchain cpu: "mips64r6".to_string(), features: "+mips64r6".to_string(), max_atomic_width: Some(64), - target_mcount: "_mcount".to_string(), + mcount: "_mcount".to_string(), - ..super::linux_base::opts() + ..super::linux_gnu_base::opts() }, } } diff --git a/compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs b/compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs index aadd36235bf..589d7acba68 100644 --- a/compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs +++ b/compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs @@ -11,9 +11,9 @@ pub fn target() -> Target { cpu: "mips64r6".to_string(), features: "+mips64r6".to_string(), max_atomic_width: Some(64), - target_mcount: "_mcount".to_string(), + mcount: "_mcount".to_string(), - ..super::linux_base::opts() + ..super::linux_gnu_base::opts() }, } } diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs index 895114b026e..045da7963c5 100644 --- a/compiler/rustc_target/src/spec/mod.rs +++ b/compiler/rustc_target/src/spec/mod.rs @@ -37,8 +37,9 @@ use crate::spec::abi::{lookup as lookup_abi, Abi}; use crate::spec::crt_objects::{CrtObjects, CrtObjectsFallback}; use rustc_serialize::json::{Json, ToJson}; +use rustc_span::symbol::{sym, Symbol}; use std::collections::BTreeMap; -use std::ops::Deref; +use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::{fmt, io}; @@ -63,6 +64,7 @@ mod hermit_kernel_base; mod illumos_base; mod l4re_base; mod linux_base; +mod linux_gnu_base; mod linux_kernel_base; mod linux_musl_base; mod linux_uclibc_base; @@ -176,6 +178,13 @@ impl PanicStrategy { PanicStrategy::Abort => "abort", } } + + pub fn desc_symbol(&self) -> Symbol { + match *self { + PanicStrategy::Unwind => sym::unwind, + PanicStrategy::Abort => sym::abort, + } + } } impl ToJson for PanicStrategy { @@ -446,7 +455,7 @@ macro_rules! supported_targets { $( $($triple)|+ => $module::target(), )+ _ => return None, }; - t.options.is_builtin = true; + t.is_builtin = true; debug!("got builtin target: {:?}", t); Some(t) } @@ -691,21 +700,28 @@ impl HasTargetSpec for Target { /// /// This has an implementation of `Default`, see each field for what the default is. In general, /// these try to take "minimal defaults" that don't assume anything about the runtime they run in. +/// +/// `TargetOptions` as a separate structure is mostly an implementation detail of `Target` +/// construction, all its fields logically belong to `Target` and available from `Target` +/// through `Deref` impls. #[derive(PartialEq, Clone, Debug)] pub struct TargetOptions { /// Whether the target is built-in or loaded from a custom target specification. pub is_builtin: bool, /// String to use as the `target_endian` `cfg` variable. Defaults to "little". - pub target_endian: String, + pub endian: String, /// Width of c_int type. Defaults to "32". - pub target_c_int_width: String, - /// OS name to use for conditional compilation. Defaults to "none". - pub target_os: String, - /// Environment name to use for conditional compilation. Defaults to "". - pub target_env: String, - /// Vendor name to use for conditional compilation. 
Defaults to "unknown". - pub target_vendor: String, + pub c_int_width: String, + /// OS name to use for conditional compilation (`target_os`). Defaults to "none". + /// "none" implies a bare metal target without `std` library. + /// A couple of targets having `std` also use "unknown" as an `os` value, + /// but they are exceptions. + pub os: String, + /// Environment name to use for conditional compilation (`target_env`). Defaults to "". + pub env: String, + /// Vendor name to use for conditional compilation (`target_vendor`). Defaults to "unknown". + pub vendor: String, /// Default linker flavor used if `-C linker-flavor` or `-C linker` are not passed /// on the command line. Defaults to `LinkerFlavor::Gcc`. pub linker_flavor: LinkerFlavor, @@ -796,7 +812,7 @@ pub struct TargetOptions { /// String to append to the name of every static library. Defaults to ".a". pub staticlib_suffix: String, /// OS family to use for conditional compilation. Valid options: "unix", "windows". - pub target_family: Option, + pub os_family: Option, /// Whether the target toolchain's ABI supports returning small structs as an integer. pub abi_return_struct_as_int: bool, /// Whether the target toolchain is like macOS's. Only useful for compiling against iOS/macOS, @@ -806,14 +822,24 @@ pub struct TargetOptions { /// Only useful for compiling against Illumos/Solaris, /// as they have a different set of linker flags. Defaults to false. pub is_like_solaris: bool, - /// Whether the target toolchain is like Windows'. Only useful for compiling against Windows, - /// only really used for figuring out how to find libraries, since Windows uses its own - /// library naming convention. Defaults to false. + /// Whether the target is like Windows. + /// This is a combination of several more specific properties represented as a single flag: + /// - The target uses a Windows ABI, + /// - uses PE/COFF as a format for object code, + /// - uses Windows-style dllexport/dllimport for shared libraries, + /// - uses import libraries and .def files for symbol exports, + /// - executables support setting a subsystem. pub is_like_windows: bool, + /// Whether the target is like MSVC. + /// This is a combination of several more specific properties represented as a single flag: + /// - The target has all the properties from `is_like_windows` + /// (for in-tree targets "is_like_msvc ⇒ is_like_windows" is ensured by a unit test), + /// - has some MSVC-specific Windows ABI properties, + /// - uses a link.exe-like linker, + /// - uses CodeView/PDB for debuginfo and natvis for its visualization, + /// - uses SEH-based unwinding, + /// - supports control flow guard mechanism. pub is_like_msvc: bool, - /// Whether the target toolchain is like Android's. Only useful for compiling against Android. - /// Defaults to false. - pub is_like_android: bool, /// Whether the target toolchain is like Emscripten's. Only useful for compiling with /// Emscripten toolchain. /// Defaults to false. 
@@ -957,7 +983,7 @@ pub struct TargetOptions { pub merge_functions: MergeFunctions, /// Use platform dependent mcount function - pub target_mcount: String, + pub mcount: String, /// LLVM ABI name, corresponds to the '-mabi' parameter available in multilib C compilers pub llvm_abiname: String, @@ -988,11 +1014,11 @@ impl Default for TargetOptions { fn default() -> TargetOptions { TargetOptions { is_builtin: false, - target_endian: "little".to_string(), - target_c_int_width: "32".to_string(), - target_os: "none".to_string(), - target_env: String::new(), - target_vendor: "unknown".to_string(), + endian: "little".to_string(), + c_int_width: "32".to_string(), + os: "none".to_string(), + env: String::new(), + vendor: "unknown".to_string(), linker_flavor: LinkerFlavor::Gcc, linker: option_env!("CFG_DEFAULT_LINKER").map(|s| s.to_string()), lld_flavor: LldFlavor::Ld, @@ -1016,12 +1042,11 @@ impl Default for TargetOptions { exe_suffix: String::new(), staticlib_prefix: "lib".to_string(), staticlib_suffix: ".a".to_string(), - target_family: None, + os_family: None, abi_return_struct_as_int: false, is_like_osx: false, is_like_solaris: false, is_like_windows: false, - is_like_android: false, is_like_emscripten: false, is_like_msvc: false, is_like_fuchsia: false, @@ -1073,7 +1098,7 @@ impl Default for TargetOptions { limit_rdylib_exports: true, override_export_symbols: None, merge_functions: MergeFunctions::Aliases, - target_mcount: "mcount".to_string(), + mcount: "mcount".to_string(), llvm_abiname: "".to_string(), relax_elf_relocations: false, llvm_args: vec![], @@ -1094,13 +1119,18 @@ impl Deref for Target { &self.options } } +impl DerefMut for Target { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.options + } +} impl Target { /// Given a function ABI, turn it into the correct ABI for this target. pub fn adjust_abi(&self, abi: Abi) -> Abi { match abi { Abi::System => { - if self.options.is_like_windows && self.arch == "x86" { + if self.is_like_windows && self.arch == "x86" { Abi::Stdcall } else { Abi::C @@ -1110,7 +1140,7 @@ impl Target { // See https://docs.microsoft.com/en-us/cpp/cpp/argument-passing-and-naming-conventions // and the individual pages for __stdcall et al. Abi::Stdcall | Abi::Fastcall | Abi::Vectorcall | Abi::Thiscall => { - if self.options.is_like_windows && self.arch != "x86" { Abi::C } else { abi } + if self.is_like_windows && self.arch != "x86" { Abi::C } else { abi } } Abi::EfiApi => { if self.arch == "x86_64" { @@ -1126,17 +1156,17 @@ impl Target { /// Minimum integer size in bits that this target can perform atomic /// operations on. pub fn min_atomic_width(&self) -> u64 { - self.options.min_atomic_width.unwrap_or(8) + self.min_atomic_width.unwrap_or(8) } /// Maximum integer size in bits that this target can perform atomic /// operations on. pub fn max_atomic_width(&self) -> u64 { - self.options.max_atomic_width.unwrap_or_else(|| self.pointer_width.into()) + self.max_atomic_width.unwrap_or_else(|| self.pointer_width.into()) } pub fn is_abi_supported(&self, abi: Abi) -> bool { - abi.generic() || !self.options.unsupported_abis.contains(&abi) + abi.generic() || !self.unsupported_abis.contains(&abi) } /// Loads a target descriptor from a JSON object. 
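
The new `DerefMut` impl, together with the existing `Deref`, is what lets the rest of this diff replace `self.options.field` and `base.options.field` with plain `self.field` and `base.field`. A minimal standalone sketch of the pattern, using simplified stand-in types rather than the real `Target`/`TargetOptions`:

```rust
use std::ops::{Deref, DerefMut};

// Simplified stand-ins for Target / TargetOptions, for illustration only.
struct Options {
    endian: String,
    is_like_windows: bool,
}

struct Target {
    llvm_target: String,
    options: Options,
}

impl Deref for Target {
    type Target = Options;
    fn deref(&self) -> &Options {
        &self.options
    }
}

impl DerefMut for Target {
    fn deref_mut(&mut self) -> &mut Options {
        &mut self.options
    }
}

fn main() {
    let mut t = Target {
        llvm_target: "x86_64-unknown-linux-gnu".to_string(),
        options: Options { endian: "little".to_string(), is_like_windows: false },
    };
    // Field reads and writes go through Deref/DerefMut, so `.options` can be omitted.
    t.endian = "big".to_string();
    assert!(!t.is_like_windows);
    println!("{} is {}-endian", t.llvm_target, t.endian);
}
```
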
@@ -1169,19 +1199,19 @@ impl Target { ($key_name:ident) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(s) = obj.find(&name).and_then(Json::as_string) { - base.options.$key_name = s.to_string(); + base.$key_name = s.to_string(); } } ); ($key_name:ident = $json_name:expr) => ( { let name = $json_name; if let Some(s) = obj.find(&name).and_then(Json::as_string) { - base.options.$key_name = s.to_string(); + base.$key_name = s.to_string(); } } ); ($key_name:ident, bool) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(s) = obj.find(&name).and_then(Json::as_boolean) { - base.options.$key_name = s; + base.$key_name = s; } } ); ($key_name:ident, Option) => ( { @@ -1190,20 +1220,20 @@ impl Target { if s < 1 || s > 5 { return Err("Not a valid DWARF version number".to_string()); } - base.options.$key_name = Some(s as u32); + base.$key_name = Some(s as u32); } } ); ($key_name:ident, Option) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(s) = obj.find(&name).and_then(Json::as_u64) { - base.options.$key_name = Some(s); + base.$key_name = Some(s); } } ); ($key_name:ident, MergeFunctions) => ( { let name = (stringify!($key_name)).replace("_", "-"); obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| { match s.parse::() { - Ok(mergefunc) => base.options.$key_name = mergefunc, + Ok(mergefunc) => base.$key_name = mergefunc, _ => return Some(Err(format!("'{}' is not a valid value for \ merge-functions. Use 'disabled', \ 'trampolines', or 'aliases'.", @@ -1216,7 +1246,7 @@ impl Target { let name = (stringify!($key_name)).replace("_", "-"); obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| { match s.parse::() { - Ok(relocation_model) => base.options.$key_name = relocation_model, + Ok(relocation_model) => base.$key_name = relocation_model, _ => return Some(Err(format!("'{}' is not a valid relocation model. \ Run `rustc --print relocation-models` to \ see the list of supported values.", s))), @@ -1228,7 +1258,7 @@ impl Target { let name = (stringify!($key_name)).replace("_", "-"); obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| { match s.parse::() { - Ok(code_model) => base.options.$key_name = Some(code_model), + Ok(code_model) => base.$key_name = Some(code_model), _ => return Some(Err(format!("'{}' is not a valid code model. \ Run `rustc --print code-models` to \ see the list of supported values.", s))), @@ -1240,7 +1270,7 @@ impl Target { let name = (stringify!($key_name)).replace("_", "-"); obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| { match s.parse::() { - Ok(tls_model) => base.options.$key_name = tls_model, + Ok(tls_model) => base.$key_name = tls_model, _ => return Some(Err(format!("'{}' is not a valid TLS model. \ Run `rustc --print tls-models` to \ see the list of supported values.", s))), @@ -1252,8 +1282,8 @@ impl Target { let name = (stringify!($key_name)).replace("_", "-"); obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| { match s { - "unwind" => base.options.$key_name = PanicStrategy::Unwind, - "abort" => base.options.$key_name = PanicStrategy::Abort, + "unwind" => base.$key_name = PanicStrategy::Unwind, + "abort" => base.$key_name = PanicStrategy::Abort, _ => return Some(Err(format!("'{}' is not a valid value for \ panic-strategy. 
Use 'unwind' or 'abort'.", s))), @@ -1265,7 +1295,7 @@ impl Target { let name = (stringify!($key_name)).replace("_", "-"); obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| { match s.parse::() { - Ok(level) => base.options.$key_name = level, + Ok(level) => base.$key_name = level, _ => return Some(Err(format!("'{}' is not a valid value for \ relro-level. Use 'full', 'partial, or 'off'.", s))), @@ -1276,7 +1306,7 @@ impl Target { ($key_name:ident, list) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(v) = obj.find(&name).and_then(Json::as_array) { - base.options.$key_name = v.iter() + base.$key_name = v.iter() .map(|a| a.as_string().unwrap().to_string()) .collect(); } @@ -1284,7 +1314,7 @@ impl Target { ($key_name:ident, opt_list) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(v) = obj.find(&name).and_then(Json::as_array) { - base.options.$key_name = Some(v.iter() + base.$key_name = Some(v.iter() .map(|a| a.as_string().unwrap().to_string()) .collect()); } @@ -1292,7 +1322,15 @@ impl Target { ($key_name:ident, optional) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(o) = obj.find(&name[..]) { - base.options.$key_name = o + base.$key_name = o + .as_string() + .map(|s| s.to_string() ); + } + } ); + ($key_name:ident = $json_name:expr, optional) => ( { + let name = $json_name; + if let Some(o) = obj.find(&name[..]) { + base.$key_name = o .as_string() .map(|s| s.to_string() ); } @@ -1301,7 +1339,7 @@ impl Target { let name = (stringify!($key_name)).replace("_", "-"); obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| { if let Some(flavor) = LldFlavor::from_str(&s) { - base.options.$key_name = flavor; + base.$key_name = flavor; } else { return Some(Err(format!( "'{}' is not a valid value for lld-flavor. \ @@ -1315,7 +1353,7 @@ impl Target { let name = (stringify!($key_name)).replace("_", "-"); obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| { match LinkerFlavor::from_str(s) { - Some(linker_flavor) => base.options.$key_name = linker_flavor, + Some(linker_flavor) => base.$key_name = linker_flavor, _ => return Some(Err(format!("'{}' is not a valid value for linker-flavor. \ Use {}", s, LinkerFlavor::one_of()))), } @@ -1326,7 +1364,7 @@ impl Target { let name = (stringify!($key_name)).replace("_", "-"); obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| { match s.parse::() { - Ok(fallback) => base.options.$key_name = Some(fallback), + Ok(fallback) => base.$key_name = Some(fallback), _ => return Some(Err(format!("'{}' is not a valid CRT objects fallback. 
\ Use 'musl', 'mingw' or 'wasm'", s))), } @@ -1358,7 +1396,7 @@ impl Target { args.insert(kind, v); } - base.options.$key_name = args; + base.$key_name = args; } } ); ($key_name:ident, link_args) => ( { @@ -1385,7 +1423,7 @@ impl Target { args.insert(flavor, v); } - base.options.$key_name = args; + base.$key_name = args; } } ); ($key_name:ident, env) => ( { @@ -1397,7 +1435,7 @@ impl Target { if p.len() == 2 { let k = p[0].to_string(); let v = p[1].to_string(); - base.options.$key_name.push((k, v)); + base.$key_name.push((k, v)); } } } @@ -1406,11 +1444,11 @@ impl Target { } key!(is_builtin, bool); - key!(target_endian); - key!(target_c_int_width); - key!(target_os = "os"); - key!(target_env = "env"); - key!(target_vendor = "vendor"); + key!(endian = "target-endian"); + key!(c_int_width = "target-c-int-width"); + key!(os); + key!(env); + key!(vendor); key!(linker_flavor, LinkerFlavor)?; key!(linker, optional); key!(lld_flavor, LldFlavor)?; @@ -1444,14 +1482,13 @@ impl Target { key!(exe_suffix); key!(staticlib_prefix); key!(staticlib_suffix); - key!(target_family, optional); + key!(os_family = "target-family", optional); key!(abi_return_struct_as_int, bool); key!(is_like_osx, bool); key!(is_like_solaris, bool); key!(is_like_windows, bool); key!(is_like_msvc, bool); key!(is_like_emscripten, bool); - key!(is_like_android, bool); key!(is_like_fuchsia, bool); key!(dwarf_version, Option); key!(linker_is_gnu, bool); @@ -1490,7 +1527,7 @@ impl Target { key!(limit_rdylib_exports, bool); key!(override_export_symbols, opt_list); key!(merge_functions, MergeFunctions)?; - key!(target_mcount); + key!(mcount = "target-mcount"); key!(llvm_abiname); key!(relax_elf_relocations, bool); key!(llvm_args, list); @@ -1513,7 +1550,7 @@ impl Target { )); } - base.options.unsupported_abis.push(abi) + base.unsupported_abis.push(abi) } None => { return Err(format!( @@ -1602,21 +1639,20 @@ impl ToJson for Target { macro_rules! 
target_option_val { ($attr:ident) => {{ let name = (stringify!($attr)).replace("_", "-"); - if default.$attr != self.options.$attr { - d.insert(name, self.options.$attr.to_json()); + if default.$attr != self.$attr { + d.insert(name, self.$attr.to_json()); } }}; ($attr:ident, $key_name:expr) => {{ let name = $key_name; - if default.$attr != self.options.$attr { - d.insert(name.to_string(), self.options.$attr.to_json()); + if default.$attr != self.$attr { + d.insert(name.to_string(), self.$attr.to_json()); } }}; (link_args - $attr:ident) => {{ let name = (stringify!($attr)).replace("_", "-"); - if default.$attr != self.options.$attr { + if default.$attr != self.$attr { let obj = self - .options .$attr .iter() .map(|(k, v)| (k.desc().to_owned(), v.clone())) @@ -1626,9 +1662,8 @@ impl ToJson for Target { }}; (env - $attr:ident) => {{ let name = (stringify!($attr)).replace("_", "-"); - if default.$attr != self.options.$attr { + if default.$attr != self.$attr { let obj = self - .options .$attr .iter() .map(|&(ref k, ref v)| k.clone() + "=" + &v) @@ -1644,11 +1679,11 @@ impl ToJson for Target { target_val!(data_layout); target_option_val!(is_builtin); - target_option_val!(target_endian); - target_option_val!(target_c_int_width); - target_option_val!(target_os, "os"); - target_option_val!(target_env, "env"); - target_option_val!(target_vendor, "vendor"); + target_option_val!(endian, "target-endian"); + target_option_val!(c_int_width, "target-c-int-width"); + target_option_val!(os); + target_option_val!(env); + target_option_val!(vendor); target_option_val!(linker_flavor); target_option_val!(linker); target_option_val!(lld_flavor); @@ -1682,14 +1717,13 @@ impl ToJson for Target { target_option_val!(exe_suffix); target_option_val!(staticlib_prefix); target_option_val!(staticlib_suffix); - target_option_val!(target_family); + target_option_val!(os_family, "target-family"); target_option_val!(abi_return_struct_as_int); target_option_val!(is_like_osx); target_option_val!(is_like_solaris); target_option_val!(is_like_windows); target_option_val!(is_like_msvc); target_option_val!(is_like_emscripten); - target_option_val!(is_like_android); target_option_val!(is_like_fuchsia); target_option_val!(dwarf_version); target_option_val!(linker_is_gnu); @@ -1728,7 +1762,7 @@ impl ToJson for Target { target_option_val!(limit_rdylib_exports); target_option_val!(override_export_symbols); target_option_val!(merge_functions); - target_option_val!(target_mcount); + target_option_val!(mcount, "target-mcount"); target_option_val!(llvm_abiname); target_option_val!(relax_elf_relocations); target_option_val!(llvm_args); @@ -1736,11 +1770,10 @@ impl ToJson for Target { target_option_val!(eh_frame_header); target_option_val!(has_thumb_interworking); - if default.unsupported_abis != self.options.unsupported_abis { + if default.unsupported_abis != self.unsupported_abis { d.insert( "unsupported-abis".to_string(), - self.options - .unsupported_abis + self.unsupported_abis .iter() .map(|&name| Abi::name(name).to_json()) .collect::>() diff --git a/compiler/rustc_target/src/spec/msp430_none_elf.rs b/compiler/rustc_target/src/spec/msp430_none_elf.rs index 48b6d1be9ce..cc2578aa578 100644 --- a/compiler/rustc_target/src/spec/msp430_none_elf.rs +++ b/compiler/rustc_target/src/spec/msp430_none_elf.rs @@ -8,8 +8,7 @@ pub fn target() -> Target { arch: "msp430".to_string(), options: TargetOptions { - target_c_int_width: "16".to_string(), - target_vendor: String::new(), + c_int_width: "16".to_string(), executables: true, // The LLVM backend 
currently can't generate object files. To diff --git a/compiler/rustc_target/src/spec/netbsd_base.rs b/compiler/rustc_target/src/spec/netbsd_base.rs index 437b50b6f11..a77d60bd9d7 100644 --- a/compiler/rustc_target/src/spec/netbsd_base.rs +++ b/compiler/rustc_target/src/spec/netbsd_base.rs @@ -14,10 +14,10 @@ pub fn opts() -> TargetOptions { ); TargetOptions { - target_os: "netbsd".to_string(), + os: "netbsd".to_string(), dynamic_linking: true, executables: true, - target_family: Some("unix".to_string()), + os_family: Some("unix".to_string()), linker_is_gnu: true, no_default_libraries: false, has_rpath: true, diff --git a/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs b/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs index f759724445e..3c9c7d578fb 100644 --- a/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs +++ b/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs @@ -9,8 +9,8 @@ pub fn target() -> Target { pointer_width: 64, options: TargetOptions { - target_os: "cuda".to_string(), - target_vendor: "nvidia".to_string(), + os: "cuda".to_string(), + vendor: "nvidia".to_string(), linker_flavor: LinkerFlavor::PtxLinker, // The linker can be installed from `crates.io`. linker: Some("rust-ptx-linker".to_string()), diff --git a/compiler/rustc_target/src/spec/openbsd_base.rs b/compiler/rustc_target/src/spec/openbsd_base.rs index 5e83e79d9ed..2b40a1ed945 100644 --- a/compiler/rustc_target/src/spec/openbsd_base.rs +++ b/compiler/rustc_target/src/spec/openbsd_base.rs @@ -16,10 +16,10 @@ pub fn opts() -> TargetOptions { ); TargetOptions { - target_os: "openbsd".to_string(), + os: "openbsd".to_string(), dynamic_linking: true, executables: true, - target_family: Some("unix".to_string()), + os_family: Some("unix".to_string()), linker_is_gnu: true, has_rpath: true, abi_return_struct_as_int: true, diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs index 3d20f15b391..626865aa242 100644 --- a/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs +++ b/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs @@ -11,10 +11,6 @@ pub fn target() -> Target { pointer_width: 64, data_layout: "E-m:e-i64:64-n32:64".to_string(), arch: "powerpc64".to_string(), - options: TargetOptions { - target_endian: "big".to_string(), - target_mcount: "_mcount".to_string(), - ..base - }, + options: TargetOptions { endian: "big".to_string(), mcount: "_mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs index e52643eb893..03322818d33 100644 --- a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs +++ b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs @@ -1,7 +1,7 @@ use crate::spec::{LinkerFlavor, RelroLevel, Target, TargetOptions}; pub fn target() -> Target { - let mut base = super::linux_base::opts(); + let mut base = super::linux_gnu_base::opts(); base.cpu = "ppc64".to_string(); base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string()); base.max_atomic_width = Some(64); @@ -15,10 +15,6 @@ pub fn target() -> Target { pointer_width: 64, data_layout: "E-m:e-i64:64-n32:64".to_string(), arch: "powerpc64".to_string(), - options: TargetOptions { - target_endian: "big".to_string(), - target_mcount: "_mcount".to_string(), - ..base - }, + options: TargetOptions { endian: "big".to_string(), mcount: "_mcount".to_string(), ..base }, } } diff --git 
a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs index 315192929ac..231539756f3 100644 --- a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs +++ b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs @@ -11,10 +11,6 @@ pub fn target() -> Target { pointer_width: 64, data_layout: "E-m:e-i64:64-n32:64".to_string(), arch: "powerpc64".to_string(), - options: TargetOptions { - target_endian: "big".to_string(), - target_mcount: "_mcount".to_string(), - ..base - }, + options: TargetOptions { endian: "big".to_string(), mcount: "_mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs b/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs index a31256761a4..1c83e3e64d4 100644 --- a/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs +++ b/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs @@ -11,6 +11,6 @@ pub fn target() -> Target { pointer_width: 64, data_layout: "E-m:e-i64:64-n32:64".to_string(), arch: "powerpc64".to_string(), - options: TargetOptions { target_endian: "big".to_string(), ..base }, + options: TargetOptions { endian: "big".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs index 4cf296c3fa7..07e0bf81bc7 100644 --- a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs +++ b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs @@ -1,7 +1,7 @@ use crate::spec::{LinkerFlavor, Target, TargetOptions}; pub fn target() -> Target { - let mut base = super::linux_base::opts(); + let mut base = super::linux_gnu_base::opts(); base.cpu = "ppc64le".to_string(); base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string()); base.max_atomic_width = Some(64); @@ -11,6 +11,6 @@ pub fn target() -> Target { pointer_width: 64, data_layout: "e-m:e-i64:64-n32:64".to_string(), arch: "powerpc64".to_string(), - options: TargetOptions { target_mcount: "_mcount".to_string(), ..base }, + options: TargetOptions { mcount: "_mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs index 41756028cbe..41c78a5f276 100644 --- a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs +++ b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs @@ -11,6 +11,6 @@ pub fn target() -> Target { pointer_width: 64, data_layout: "e-m:e-i64:64-n32:64".to_string(), arch: "powerpc64".to_string(), - options: TargetOptions { target_mcount: "_mcount".to_string(), ..base }, + options: TargetOptions { mcount: "_mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs index f3ec02c10d2..3a9271247b0 100644 --- a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs +++ b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs @@ -1,7 +1,7 @@ use crate::spec::{LinkerFlavor, Target, TargetOptions}; pub fn target() -> Target { - let mut base = super::linux_base::opts(); + let mut base = super::linux_gnu_base::opts(); base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string()); base.max_atomic_width = Some(32); @@ -10,10 +10,6 @@ pub fn target() -> Target { pointer_width: 32, data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(), arch: 
"powerpc".to_string(), - options: TargetOptions { - target_endian: "big".to_string(), - target_mcount: "_mcount".to_string(), - ..base - }, + options: TargetOptions { endian: "big".to_string(), mcount: "_mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs index 4e3ffca0a08..105a0b21aaf 100644 --- a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs +++ b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs @@ -1,7 +1,7 @@ use crate::spec::{LinkerFlavor, Target, TargetOptions}; pub fn target() -> Target { - let mut base = super::linux_base::opts(); + let mut base = super::linux_gnu_base::opts(); base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-mspe".to_string()); base.max_atomic_width = Some(32); @@ -10,10 +10,6 @@ pub fn target() -> Target { pointer_width: 32, data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(), arch: "powerpc".to_string(), - options: TargetOptions { - target_endian: "big".to_string(), - target_mcount: "_mcount".to_string(), - ..base - }, + options: TargetOptions { endian: "big".to_string(), mcount: "_mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs index 1d5c19b5420..49d32944789 100644 --- a/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs +++ b/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs @@ -10,10 +10,6 @@ pub fn target() -> Target { pointer_width: 32, data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(), arch: "powerpc".to_string(), - options: TargetOptions { - target_endian: "big".to_string(), - target_mcount: "_mcount".to_string(), - ..base - }, + options: TargetOptions { endian: "big".to_string(), mcount: "_mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs b/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs index 4d7eb8d0100..387d6cdc456 100644 --- a/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs +++ b/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs @@ -11,8 +11,8 @@ pub fn target() -> Target { data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(), arch: "powerpc".to_string(), options: TargetOptions { - target_endian: "big".to_string(), - target_mcount: "__mcount".to_string(), + endian: "big".to_string(), + mcount: "__mcount".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs index dc6a4e28a3d..20ffa07b997 100644 --- a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs +++ b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs @@ -12,7 +12,7 @@ pub fn target() -> Target { data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(), arch: "powerpc".to_string(), options: TargetOptions { - target_endian: "big".to_string(), + endian: "big".to_string(), features: "+secure-plt".to_string(), ..base }, diff --git a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs index 1ce3fa21918..0e713fccd23 100644 --- a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs +++ b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs @@ -12,7 +12,7 @@ pub fn target() -> Target { data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(), arch: "powerpc".to_string(), options: TargetOptions { - target_endian: "big".to_string(), + endian: 
"big".to_string(), // feature msync would disable instruction 'fsync' which is not supported by fsl_p1p2 features: "+secure-plt,+msync".to_string(), ..base diff --git a/compiler/rustc_target/src/spec/redox_base.rs b/compiler/rustc_target/src/spec/redox_base.rs index 04409a1cd04..5ef705878a8 100644 --- a/compiler/rustc_target/src/spec/redox_base.rs +++ b/compiler/rustc_target/src/spec/redox_base.rs @@ -19,11 +19,11 @@ pub fn opts() -> TargetOptions { ); TargetOptions { - target_os: "redox".to_string(), - target_env: "relibc".to_string(), + os: "redox".to_string(), + env: "relibc".to_string(), dynamic_linking: true, executables: true, - target_family: Some("unix".to_string()), + os_family: Some("unix".to_string()), linker_is_gnu: true, has_rpath: true, pre_link_args: args, diff --git a/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_gnu.rs index f9405d9dfb6..cf5e0201d08 100644 --- a/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_gnu.rs +++ b/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_gnu.rs @@ -13,7 +13,7 @@ pub fn target() -> Target { features: "+m,+a,+f,+d,+c".to_string(), llvm_abiname: "ilp32d".to_string(), max_atomic_width: Some(32), - ..super::linux_base::opts() + ..super::linux_gnu_base::opts() }, } } diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs index 3b7ff47a540..84f28413fcb 100644 --- a/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs +++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs @@ -13,7 +13,7 @@ pub fn target() -> Target { features: "+m,+a,+f,+d,+c".to_string(), llvm_abiname: "lp64d".to_string(), max_atomic_width: Some(64), - ..super::linux_base::opts() + ..super::linux_gnu_base::opts() }, } } diff --git a/compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs index 69b880cdb81..d6e8e6ee220 100644 --- a/compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs +++ b/compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs @@ -1,8 +1,8 @@ use crate::spec::Target; pub fn target() -> Target { - let mut base = super::linux_base::opts(); - base.target_endian = "big".to_string(); + let mut base = super::linux_gnu_base::opts(); + base.endian = "big".to_string(); // z10 is the oldest CPU supported by LLVM base.cpu = "z10".to_string(); // FIXME: The data_layout string below and the ABI implementation in diff --git a/compiler/rustc_target/src/spec/solaris_base.rs b/compiler/rustc_target/src/spec/solaris_base.rs index 1454d83e936..33e0cf8e967 100644 --- a/compiler/rustc_target/src/spec/solaris_base.rs +++ b/compiler/rustc_target/src/spec/solaris_base.rs @@ -2,12 +2,12 @@ use crate::spec::TargetOptions; pub fn opts() -> TargetOptions { TargetOptions { - target_os: "solaris".to_string(), - target_vendor: "sun".to_string(), + os: "solaris".to_string(), + vendor: "sun".to_string(), dynamic_linking: true, executables: true, has_rpath: true, - target_family: Some("unix".to_string()), + os_family: Some("unix".to_string()), is_like_solaris: true, limit_rdylib_exports: false, // Linker doesn't support this eh_frame_header: false, diff --git a/compiler/rustc_target/src/spec/sparc64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/sparc64_unknown_linux_gnu.rs index f02b01a514b..e9b5520ac3d 100644 --- a/compiler/rustc_target/src/spec/sparc64_unknown_linux_gnu.rs +++ 
b/compiler/rustc_target/src/spec/sparc64_unknown_linux_gnu.rs @@ -1,8 +1,8 @@ use crate::spec::Target; pub fn target() -> Target { - let mut base = super::linux_base::opts(); - base.target_endian = "big".to_string(); + let mut base = super::linux_gnu_base::opts(); + base.endian = "big".to_string(); base.cpu = "v9".to_string(); base.max_atomic_width = Some(64); diff --git a/compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs b/compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs index de35bb8fe14..c8e90f832d0 100644 --- a/compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs +++ b/compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs @@ -12,8 +12,8 @@ pub fn target() -> Target { data_layout: "E-m:e-i64:64-n32:64-S128".to_string(), arch: "sparc64".to_string(), options: TargetOptions { - target_endian: "big".to_string(), - target_mcount: "__mcount".to_string(), + endian: "big".to_string(), + mcount: "__mcount".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs b/compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs index 301c91e432c..630ce6123f9 100644 --- a/compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs +++ b/compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs @@ -2,7 +2,7 @@ use crate::spec::{LinkerFlavor, Target}; pub fn target() -> Target { let mut base = super::openbsd_base::opts(); - base.target_endian = "big".to_string(); + base.endian = "big".to_string(); base.cpu = "v9".to_string(); base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string()); base.max_atomic_width = Some(64); diff --git a/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs index 071175819f4..aae186b2293 100644 --- a/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs +++ b/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs @@ -1,8 +1,8 @@ use crate::spec::{LinkerFlavor, Target}; pub fn target() -> Target { - let mut base = super::linux_base::opts(); - base.target_endian = "big".to_string(); + let mut base = super::linux_gnu_base::opts(); + base.endian = "big".to_string(); base.cpu = "v9".to_string(); base.max_atomic_width = Some(64); base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-mv8plus".to_string()); diff --git a/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs b/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs index e8c30dcbf85..5f99e0b14f9 100644 --- a/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs +++ b/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs @@ -2,7 +2,7 @@ use crate::spec::{LinkerFlavor, Target}; pub fn target() -> Target { let mut base = super::solaris_base::opts(); - base.target_endian = "big".to_string(); + base.endian = "big".to_string(); base.pre_link_args.insert(LinkerFlavor::Gcc, vec!["-m64".to_string()]); // llvm calls this "v9" base.cpu = "v9".to_string(); diff --git a/compiler/rustc_target/src/spec/tests/tests_impl.rs b/compiler/rustc_target/src/spec/tests/tests_impl.rs index d06ab368e1c..9ec8467e0ac 100644 --- a/compiler/rustc_target/src/spec/tests/tests_impl.rs +++ b/compiler/rustc_target/src/spec/tests/tests_impl.rs @@ -8,33 +8,48 @@ pub(super) fn test_target(target: Target) { impl Target { fn check_consistency(&self) { + assert!(self.is_like_windows || !self.is_like_msvc); // Check that LLD with the given flavor is treated identically to the linker it emulates. // If your target really needs to deviate from the rules below, except it and document the // reasons. 
assert_eq!( self.linker_flavor == LinkerFlavor::Msvc || self.linker_flavor == LinkerFlavor::Lld(LldFlavor::Link), - self.options.lld_flavor == LldFlavor::Link, + self.lld_flavor == LldFlavor::Link, ); + assert_eq!(self.is_like_msvc, self.lld_flavor == LldFlavor::Link); for args in &[ - &self.options.pre_link_args, - &self.options.late_link_args, - &self.options.late_link_args_dynamic, - &self.options.late_link_args_static, - &self.options.post_link_args, + &self.pre_link_args, + &self.late_link_args, + &self.late_link_args_dynamic, + &self.late_link_args_static, + &self.post_link_args, ] { assert_eq!( args.get(&LinkerFlavor::Msvc), args.get(&LinkerFlavor::Lld(LldFlavor::Link)), ); if args.contains_key(&LinkerFlavor::Msvc) { - assert_eq!(self.options.lld_flavor, LldFlavor::Link); + assert_eq!(self.lld_flavor, LldFlavor::Link); } } assert!( - (self.options.pre_link_objects_fallback.is_empty() - && self.options.post_link_objects_fallback.is_empty()) - || self.options.crt_objects_fallback.is_some() + (self.pre_link_objects_fallback.is_empty() + && self.post_link_objects_fallback.is_empty()) + || self.crt_objects_fallback.is_some() ); + // Keep the default "unknown" vendor instead. + assert_ne!(self.vendor, ""); + if !self.can_use_os_unknown() { + // Keep the default "none" for bare metal targets instead. + assert_ne!(self.os, "unknown"); + } + } + + // Add your target to the whitelist if it has `std` library + // and you certainly want "unknown" for the OS name. + fn can_use_os_unknown(&self) -> bool { + self.llvm_target == "wasm32-unknown-unknown" + || (self.env == "sgx" && self.vendor == "fortanix") } } diff --git a/compiler/rustc_target/src/spec/thumb_base.rs b/compiler/rustc_target/src/spec/thumb_base.rs index cc955799d2f..ec24807fec4 100644 --- a/compiler/rustc_target/src/spec/thumb_base.rs +++ b/compiler/rustc_target/src/spec/thumb_base.rs @@ -32,7 +32,6 @@ use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, TargetOpti pub fn opts() -> TargetOptions { // See rust-lang/rfcs#1645 for a discussion about these defaults TargetOptions { - target_vendor: String::new(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), executables: true, // In most cases, LLD is good enough diff --git a/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_gnueabihf.rs b/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_gnueabihf.rs index 561da4d15cd..352d2468743 100644 --- a/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_gnueabihf.rs +++ b/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_gnueabihf.rs @@ -7,7 +7,7 @@ use crate::spec::{Target, TargetOptions}; // https://static.docs.arm.com/ddi0406/cd/DDI0406C_d_armv7ar_arm.pdf pub fn target() -> Target { - let base = super::linux_base::opts(); + let base = super::linux_gnu_base::opts(); Target { llvm_target: "armv7-unknown-linux-gnueabihf".to_string(), pointer_width: 32, diff --git a/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_musleabihf.rs b/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_musleabihf.rs index 5b1fc74bdd0..a788167aede 100644 --- a/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_musleabihf.rs +++ b/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_musleabihf.rs @@ -24,7 +24,7 @@ pub fn target() -> Target { cpu: "generic".to_string(), max_atomic_width: Some(64), unsupported_abis: super::arm_base::unsupported_abis(), - target_mcount: "\u{1}mcount".to_string(), + mcount: "\u{1}mcount".to_string(), ..base }, } diff --git a/compiler/rustc_target/src/spec/uefi_msvc_base.rs 
b/compiler/rustc_target/src/spec/uefi_msvc_base.rs index 91a39f7b9b4..322b6f530e9 100644 --- a/compiler/rustc_target/src/spec/uefi_msvc_base.rs +++ b/compiler/rustc_target/src/spec/uefi_msvc_base.rs @@ -37,7 +37,7 @@ pub fn opts() -> TargetOptions { .extend(pre_link_args_msvc); TargetOptions { - target_os: "uefi".to_string(), + os: "uefi".to_string(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Link), disable_redzone: true, exe_suffix: ".efi".to_string(), @@ -46,15 +46,6 @@ pub fn opts() -> TargetOptions { stack_probes: true, singlethread: true, linker: Some("rust-lld".to_string()), - // FIXME: This should likely be `true` inherited from `msvc_base` - // because UEFI follows Windows ABI and uses PE/COFF. - // The `false` is probably causing ABI bugs right now. - is_like_windows: false, - // FIXME: This should likely be `true` inherited from `msvc_base` - // because UEFI follows Windows ABI and uses PE/COFF. - // The `false` is probably causing ABI bugs right now. - is_like_msvc: false, - ..base } } diff --git a/compiler/rustc_target/src/spec/vxworks_base.rs b/compiler/rustc_target/src/spec/vxworks_base.rs index e8044e4dc1a..70bc9ce3e0e 100644 --- a/compiler/rustc_target/src/spec/vxworks_base.rs +++ b/compiler/rustc_target/src/spec/vxworks_base.rs @@ -17,14 +17,14 @@ pub fn opts() -> TargetOptions { ); TargetOptions { - target_os: "vxworks".to_string(), - target_env: "gnu".to_string(), - target_vendor: "wrs".to_string(), + os: "vxworks".to_string(), + env: "gnu".to_string(), + vendor: "wrs".to_string(), linker: Some("wr-c++".to_string()), exe_suffix: ".vxe".to_string(), dynamic_linking: true, executables: true, - target_family: Some("unix".to_string()), + os_family: Some("unix".to_string()), linker_is_gnu: true, has_rpath: true, pre_link_args: args, @@ -34,7 +34,7 @@ pub fn opts() -> TargetOptions { crt_static_respected: true, crt_static_allows_dylibs: true, // VxWorks needs to implement this to support profiling - target_mcount: "_mcount".to_string(), + mcount: "_mcount".to_string(), ..Default::default() } } diff --git a/compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs b/compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs index dbafe362f2a..c12757b8f98 100644 --- a/compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs +++ b/compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs @@ -17,7 +17,7 @@ pub fn target() -> Target { ); let opts = TargetOptions { - target_os: "emscripten".to_string(), + os: "emscripten".to_string(), linker_flavor: LinkerFlavor::Em, // emcc emits two files - a .js file to instantiate the wasm and supply platform // functionality, and a .wasm file. 
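
Most of the per-target files touched in this diff follow the same construction pattern: start from a shared base (`linux_gnu_base::opts()`, `windows_msvc_base::opts()`, `wasm32_base::options()`, and so on) and override only the handful of renamed fields that differ, using struct-update syntax. A minimal sketch of that layering with simplified stand-in types (not the real spec API):

```rust
// Simplified stand-ins for TargetOptions and a base-opts function, for illustration only.
#[derive(Debug)]
struct Options {
    os: String,
    env: String,
    vendor: String,
    endian: String,
    mcount: String,
}

// Plays the role of something like linux_gnu_base::opts(): shared defaults for a target family.
fn linux_gnu_base_opts() -> Options {
    Options {
        os: "linux".to_string(),
        env: "gnu".to_string(),
        vendor: "unknown".to_string(),
        endian: "little".to_string(),
        mcount: "mcount".to_string(),
    }
}

// Plays the role of a per-target spec file: override only what differs from the base.
fn big_endian_target_opts() -> Options {
    Options {
        endian: "big".to_string(),
        mcount: "_mcount".to_string(),
        ..linux_gnu_base_opts()
    }
}

fn main() {
    let opts = big_endian_target_opts();
    // os/env/vendor come from the base; endian/mcount are the per-target overrides.
    println!("{:?}", opts);
}
```
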
@@ -27,7 +27,7 @@ pub fn target() -> Target { is_like_emscripten: true, panic_strategy: PanicStrategy::Unwind, post_link_args, - target_family: Some("unix".to_string()), + os_family: Some("unix".to_string()), ..wasm32_base::options() }; Target { diff --git a/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs b/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs index 4401772788b..6037aa5b430 100644 --- a/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs +++ b/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs @@ -15,7 +15,7 @@ use super::{LinkerFlavor, LldFlavor, Target}; pub fn target() -> Target { let mut options = wasm32_base::options(); - options.target_os = "unknown".to_string(); + options.os = "unknown".to_string(); options.linker_flavor = LinkerFlavor::Lld(LldFlavor::Wasm); let clang_args = options.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap(); diff --git a/compiler/rustc_target/src/spec/wasm32_wasi.rs b/compiler/rustc_target/src/spec/wasm32_wasi.rs index 6f5316e30f6..3f44acdc36b 100644 --- a/compiler/rustc_target/src/spec/wasm32_wasi.rs +++ b/compiler/rustc_target/src/spec/wasm32_wasi.rs @@ -78,8 +78,7 @@ use super::{crt_objects, LinkerFlavor, LldFlavor, Target}; pub fn target() -> Target { let mut options = wasm32_base::options(); - options.target_os = "wasi".to_string(); - options.target_vendor = String::new(); + options.os = "wasi".to_string(); options.linker_flavor = LinkerFlavor::Lld(LldFlavor::Wasm); options .pre_link_args diff --git a/compiler/rustc_target/src/spec/windows_gnu_base.rs b/compiler/rustc_target/src/spec/windows_gnu_base.rs index 37188a59eb5..f556a13a519 100644 --- a/compiler/rustc_target/src/spec/windows_gnu_base.rs +++ b/compiler/rustc_target/src/spec/windows_gnu_base.rs @@ -62,9 +62,9 @@ pub fn opts() -> TargetOptions { late_link_args_static.insert(LinkerFlavor::Lld(LldFlavor::Ld), static_unwind_libs); TargetOptions { - target_os: "windows".to_string(), - target_env: "gnu".to_string(), - target_vendor: "pc".to_string(), + os: "windows".to_string(), + env: "gnu".to_string(), + vendor: "pc".to_string(), // FIXME(#13846) this should be enabled for windows function_sections: false, linker: Some("gcc".to_string()), @@ -75,7 +75,7 @@ pub fn opts() -> TargetOptions { exe_suffix: ".exe".to_string(), staticlib_prefix: "lib".to_string(), staticlib_suffix: ".a".to_string(), - target_family: Some("windows".to_string()), + os_family: Some("windows".to_string()), is_like_windows: true, allows_weak_linkage: false, pre_link_args, diff --git a/compiler/rustc_target/src/spec/windows_msvc_base.rs b/compiler/rustc_target/src/spec/windows_msvc_base.rs index c1101623867..c041245e328 100644 --- a/compiler/rustc_target/src/spec/windows_msvc_base.rs +++ b/compiler/rustc_target/src/spec/windows_msvc_base.rs @@ -4,16 +4,16 @@ pub fn opts() -> TargetOptions { let base = super::msvc_base::opts(); TargetOptions { - target_os: "windows".to_string(), - target_env: "msvc".to_string(), - target_vendor: "pc".to_string(), + os: "windows".to_string(), + env: "msvc".to_string(), + vendor: "pc".to_string(), dynamic_linking: true, dll_prefix: String::new(), dll_suffix: ".dll".to_string(), exe_suffix: ".exe".to_string(), staticlib_prefix: String::new(), staticlib_suffix: ".lib".to_string(), - target_family: Some("windows".to_string()), + os_family: Some("windows".to_string()), crt_static_allows_dylibs: true, crt_static_respected: true, requires_uwtable: true, diff --git a/compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs 
b/compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs index 225b94c3755..67d1be399b3 100644 --- a/compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs +++ b/compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs @@ -25,7 +25,7 @@ pub fn opts() -> TargetOptions { late_link_args.insert(LinkerFlavor::Lld(LldFlavor::Ld), mingw_libs); TargetOptions { - target_vendor: "uwp".to_string(), + vendor: "uwp".to_string(), executables: false, limit_rdylib_exports: false, late_link_args, diff --git a/compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs b/compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs index 380d685dacf..700ee5ec646 100644 --- a/compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs +++ b/compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs @@ -3,7 +3,7 @@ use crate::spec::{LinkerFlavor, LldFlavor, TargetOptions}; pub fn opts() -> TargetOptions { let mut opts = super::windows_msvc_base::opts(); - opts.target_vendor = "uwp".to_string(); + opts.vendor = "uwp".to_string(); let pre_link_args_msvc = vec!["/APPCONTAINER".to_string(), "mincore.lib".to_string()]; opts.pre_link_args.get_mut(&LinkerFlavor::Msvc).unwrap().extend(pre_link_args_msvc.clone()); opts.pre_link_args diff --git a/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs b/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs index 6cd4daa7a74..edb33fe6e2b 100644 --- a/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs +++ b/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs @@ -24,6 +24,6 @@ pub fn target() -> Target { data_layout: "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" .to_string(), arch: arch.to_string(), - options: TargetOptions { target_mcount: "\u{1}mcount".to_string(), ..base }, + options: TargetOptions { mcount: "\u{1}mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs b/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs index 550d308ed8f..74fb6f0a834 100644 --- a/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs +++ b/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs @@ -55,9 +55,9 @@ pub fn target() -> Target { "TEXT_SIZE", ]; let opts = TargetOptions { - target_os: "unknown".into(), - target_env: "sgx".into(), - target_vendor: "fortanix".into(), + os: "unknown".into(), + env: "sgx".into(), + vendor: "fortanix".into(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), dynamic_linking: false, executables: true, diff --git a/compiler/rustc_target/src/spec/x86_64_rumprun_netbsd.rs b/compiler/rustc_target/src/spec/x86_64_rumprun_netbsd.rs index 511a4559935..095c6f15c77 100644 --- a/compiler/rustc_target/src/spec/x86_64_rumprun_netbsd.rs +++ b/compiler/rustc_target/src/spec/x86_64_rumprun_netbsd.rs @@ -2,7 +2,7 @@ use crate::spec::{LinkerFlavor, Target, TargetOptions}; pub fn target() -> Target { let mut base = super::netbsd_base::opts(); - base.target_vendor = "rumprun".to_string(); + base.vendor = "rumprun".to_string(); base.cpu = "x86-64".to_string(); base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string()); base.linker = Some("x86_64-rumprun-netbsd-gcc".to_string()); @@ -20,6 +20,6 @@ pub fn target() -> Target { data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" .to_string(), arch: "x86_64".to_string(), - options: TargetOptions { target_mcount: "__mcount".to_string(), ..base }, + options: TargetOptions { mcount: "__mcount".to_string(), ..base }, } } diff --git 
a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs index 1f368ff1611..f127dd49bc4 100644 --- a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs +++ b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs @@ -1,7 +1,7 @@ use crate::spec::{LinkerFlavor, Target}; pub fn target() -> Target { - let mut base = super::linux_base::opts(); + let mut base = super::linux_gnu_base::opts(); base.cpu = "x86-64".to_string(); base.max_atomic_width = Some(64); base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string()); diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs index 375b22fd92b..0cae5752848 100644 --- a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs +++ b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs @@ -1,7 +1,7 @@ use crate::spec::{LinkerFlavor, Target}; pub fn target() -> Target { - let mut base = super::linux_base::opts(); + let mut base = super::linux_gnu_base::opts(); base.cpu = "x86-64".to_string(); base.max_atomic_width = Some(64); base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-mx32".to_string()); diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs b/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs index 656ef90892c..7e91a6ddbe2 100644 --- a/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs +++ b/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs @@ -13,6 +13,6 @@ pub fn target() -> Target { data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" .to_string(), arch: "x86_64".to_string(), - options: TargetOptions { target_mcount: "__mcount".to_string(), ..base }, + options: TargetOptions { mcount: "__mcount".to_string(), ..base }, } } diff --git a/compiler/rustc_trait_selection/src/autoderef.rs b/compiler/rustc_trait_selection/src/autoderef.rs index b9c5123e49a..05b6c4a48de 100644 --- a/compiler/rustc_trait_selection/src/autoderef.rs +++ b/compiler/rustc_trait_selection/src/autoderef.rs @@ -109,7 +109,7 @@ impl<'a, 'tcx> Autoderef<'a, 'tcx> { param_env, state: AutoderefSnapshot { steps: vec![], - cur_ty: infcx.resolve_vars_if_possible(&base_ty), + cur_ty: infcx.resolve_vars_if_possible(base_ty), obligations: vec![], at_start: true, reached_recursion_limit: false, @@ -164,14 +164,14 @@ impl<'a, 'tcx> Autoderef<'a, 'tcx> { debug!("overloaded_deref_ty({:?}) = ({:?}, {:?})", ty, normalized_ty, obligations); self.state.obligations.extend(obligations); - Some(self.infcx.resolve_vars_if_possible(&normalized_ty)) + Some(self.infcx.resolve_vars_if_possible(normalized_ty)) } /// Returns the final type we ended up with, which may be an inference /// variable (we will resolve it first, if we want). 
pub fn final_ty(&self, resolve: bool) -> Ty<'tcx> { if resolve { - self.infcx.resolve_vars_if_possible(&self.state.cur_ty) + self.infcx.resolve_vars_if_possible(self.state.cur_ty) } else { self.state.cur_ty } diff --git a/compiler/rustc_trait_selection/src/infer.rs b/compiler/rustc_trait_selection/src/infer.rs index 4ec1b29bca4..41184ce2116 100644 --- a/compiler/rustc_trait_selection/src/infer.rs +++ b/compiler/rustc_trait_selection/src/infer.rs @@ -28,7 +28,7 @@ pub trait InferCtxtExt<'tcx> { span: Span, body_id: hir::HirId, param_env: ty::ParamEnv<'tcx>, - value: &T, + value: T, ) -> InferOk<'tcx, T> where T: TypeFoldable<'tcx>; @@ -41,7 +41,7 @@ impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> { ty: Ty<'tcx>, span: Span, ) -> bool { - let ty = self.resolve_vars_if_possible(&ty); + let ty = self.resolve_vars_if_possible(ty); if !(param_env, ty).needs_infer() { return ty.is_copy_modulo_regions(self.tcx.at(span), param_env); @@ -63,7 +63,7 @@ impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> { span: Span, body_id: hir::HirId, param_env: ty::ParamEnv<'tcx>, - value: &T, + value: T, ) -> InferOk<'tcx, T> where T: TypeFoldable<'tcx>, @@ -173,7 +173,7 @@ impl<'tcx> OutlivesEnvironmentExt<'tcx> for OutlivesEnvironment<'tcx> { debug!("add_implied_bounds()"); for &ty in fn_sig_tys { - let ty = infcx.resolve_vars_if_possible(&ty); + let ty = infcx.resolve_vars_if_possible(ty); debug!("add_implied_bounds: ty = {}", ty); let implied_bounds = infcx.implied_outlives_bounds(self.param_env, body_id, ty, span); self.add_outlives_bounds(Some(infcx), implied_bounds) diff --git a/compiler/rustc_trait_selection/src/opaque_types.rs b/compiler/rustc_trait_selection/src/opaque_types.rs index 914fa1e52c2..ca547bf88b5 100644 --- a/compiler/rustc_trait_selection/src/opaque_types.rs +++ b/compiler/rustc_trait_selection/src/opaque_types.rs @@ -12,7 +12,6 @@ use rustc_infer::infer::{self, InferCtxt, InferOk}; use rustc_middle::ty::fold::{BottomUpFolder, TypeFoldable, TypeFolder, TypeVisitor}; use rustc_middle::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst, SubstsRef}; use rustc_middle::ty::{self, Ty, TyCtxt}; -use rustc_session::config::nightly_options; use rustc_span::Span; use std::ops::ControlFlow; @@ -113,7 +112,7 @@ pub trait InferCtxtExt<'tcx> { parent_def_id: LocalDefId, body_id: hir::HirId, param_env: ty::ParamEnv<'tcx>, - value: &T, + value: T, value_span: Span, ) -> InferOk<'tcx, (T, OpaqueTypeMap<'tcx>)>; @@ -189,7 +188,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { parent_def_id: LocalDefId, body_id: hir::HirId, param_env: ty::ParamEnv<'tcx>, - value: &T, + value: T, value_span: Span, ) -> InferOk<'tcx, (T, OpaqueTypeMap<'tcx>)> { debug!( @@ -403,7 +402,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { let tcx = self.tcx; - let concrete_ty = self.resolve_vars_if_possible(&opaque_defn.concrete_ty); + let concrete_ty = self.resolve_vars_if_possible(opaque_defn.concrete_ty); debug!("constrain_opaque_type: concrete_ty={:?}", concrete_ty); @@ -602,7 +601,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { }; err.span_label(span, label); - if nightly_options::is_nightly_build() { + if self.tcx.sess.is_nightly_build() { err.help("add #![feature(member_constraints)] to the crate attributes to enable"); } @@ -693,12 +692,15 @@ impl<'tcx, OP> TypeVisitor<'tcx> for ConstrainOpaqueTypeRegionVisitor where OP: FnMut(ty::Region<'tcx>), { - fn visit_binder>(&mut self, t: &ty::Binder) -> ControlFlow<()> { + fn visit_binder>( + &mut self, + 
t: &ty::Binder<T>, + ) -> ControlFlow<Self::BreakTy> { t.as_ref().skip_binder().visit_with(self); ControlFlow::CONTINUE } - fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> { + fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> { match *r { // ignore bound regions, keep visiting ty::ReLateBound(_, _) => ControlFlow::CONTINUE, @@ -709,7 +711,7 @@ where } } - fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> { + fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> { // We're only interested in types involving regions if !ty.flags().intersects(ty::TypeFlags::HAS_FREE_REGIONS) { return ControlFlow::CONTINUE; @@ -1002,7 +1004,7 @@ struct Instantiator<'a, 'tcx> { } impl<'a, 'tcx> Instantiator<'a, 'tcx> { - fn instantiate_opaque_types_in_map<T: TypeFoldable<'tcx>>(&mut self, value: &T) -> T { + fn instantiate_opaque_types_in_map<T: TypeFoldable<'tcx>>(&mut self, value: T) -> T { debug!("instantiate_opaque_types_in_map(value={:?})", value); let tcx = self.infcx.tcx; value.fold_with(&mut BottomUpFolder { @@ -1126,7 +1128,7 @@ impl<'a, 'tcx> Instantiator<'a, 'tcx> { let param_env = tcx.param_env(def_id); let InferOk { value: bounds, obligations } = - infcx.partially_normalize_associated_types_in(span, self.body_id, param_env, &bounds); + infcx.partially_normalize_associated_types_in(span, self.body_id, param_env, bounds); self.obligations.extend(obligations); debug!("instantiate_opaque_types: bounds={:?}", bounds); @@ -1174,7 +1176,7 @@ impl<'a, 'tcx> Instantiator<'a, 'tcx> { // Change the predicate to refer to the type variable, // which will be the concrete type instead of the opaque type. // This also instantiates nested instances of `impl Trait`. - let predicate = self.instantiate_opaque_types_in_map(&predicate); + let predicate = self.instantiate_opaque_types_in_map(predicate); let cause = traits::ObligationCause::new(span, self.body_id, traits::MiscObligation); diff --git a/compiler/rustc_trait_selection/src/traits/auto_trait.rs b/compiler/rustc_trait_selection/src/traits/auto_trait.rs index 93a0073588e..6ab16886ed2 100644 --- a/compiler/rustc_trait_selection/src/traits/auto_trait.rs +++ b/compiler/rustc_trait_selection/src/traits/auto_trait.rs @@ -304,11 +304,8 @@ impl AutoTraitFinder<'tcx> { // Call `infcx.resolve_vars_if_possible` to see if we can // get rid of any inference variables.
- let obligation = infcx.resolve_vars_if_possible(&Obligation::new( - dummy_cause.clone(), - new_env, - pred, - )); + let obligation = + infcx.resolve_vars_if_possible(Obligation::new(dummy_cause.clone(), new_env, pred)); let result = select.select(&obligation); match &result { @@ -627,7 +624,7 @@ impl AutoTraitFinder<'tcx> { fresh_preds.insert(self.clean_pred(select.infcx(), obligation.predicate)); // Resolve any inference variables that we can, to help selection succeed - let predicate = select.infcx().resolve_vars_if_possible(&obligation.predicate); + let predicate = select.infcx().resolve_vars_if_possible(obligation.predicate); // We only add a predicate as a user-displayable bound if // it involves a generic parameter, and doesn't contain diff --git a/compiler/rustc_trait_selection/src/traits/chalk_fulfill.rs b/compiler/rustc_trait_selection/src/traits/chalk_fulfill.rs index adc8ae59086..026ab414443 100644 --- a/compiler/rustc_trait_selection/src/traits/chalk_fulfill.rs +++ b/compiler/rustc_trait_selection/src/traits/chalk_fulfill.rs @@ -37,7 +37,7 @@ impl TraitEngine<'tcx> for FulfillmentContext<'tcx> { obligation: PredicateObligation<'tcx>, ) { assert!(!infcx.is_in_snapshot()); - let obligation = infcx.resolve_vars_if_possible(&obligation); + let obligation = infcx.resolve_vars_if_possible(obligation); self.obligations.insert(obligation); } @@ -80,11 +80,11 @@ impl TraitEngine<'tcx> for FulfillmentContext<'tcx> { // We iterate over all obligations, and record if we are able // to unambiguously prove at least one obligation. for obligation in self.obligations.drain(..) { - let obligation = infcx.resolve_vars_if_possible(&obligation); + let obligation = infcx.resolve_vars_if_possible(obligation); let environment = obligation.param_env.caller_bounds(); let goal = ChalkEnvironmentAndGoal { environment, goal: obligation.predicate }; let mut orig_values = OriginalQueryValues::default(); - let canonical_goal = infcx.canonicalize_query(&goal, &mut orig_values); + let canonical_goal = infcx.canonicalize_query(goal, &mut orig_values); match infcx.tcx.evaluate_goal(canonical_goal) { Ok(response) => { @@ -100,7 +100,7 @@ impl TraitEngine<'tcx> for FulfillmentContext<'tcx> { Ok(infer_ok) => next_round.extend( infer_ok.obligations.into_iter().map(|obligation| { assert!(!infcx.is_in_snapshot()); - infcx.resolve_vars_if_possible(&obligation) + infcx.resolve_vars_if_possible(obligation) }), ), diff --git a/compiler/rustc_trait_selection/src/traits/codegen.rs b/compiler/rustc_trait_selection/src/traits/codegen.rs index 3cb6ec86261..657d5c123e8 100644 --- a/compiler/rustc_trait_selection/src/traits/codegen.rs +++ b/compiler/rustc_trait_selection/src/traits/codegen.rs @@ -25,7 +25,7 @@ pub fn codegen_fulfill_obligation<'tcx>( (param_env, trait_ref): (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>), ) -> Result, ErrorReported> { // Remove any references to regions; this helps improve caching. - let trait_ref = tcx.erase_regions(&trait_ref); + let trait_ref = tcx.erase_regions(trait_ref); // We expect the input to be fully normalized. 
debug_assert_eq!(trait_ref, tcx.normalize_erasing_regions(param_env, trait_ref)); debug!( @@ -89,7 +89,7 @@ pub fn codegen_fulfill_obligation<'tcx>( debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate); fulfill_cx.register_predicate_obligation(&infcx, predicate); }); - let impl_source = drain_fulfillment_cx_or_panic(&infcx, &mut fulfill_cx, &impl_source); + let impl_source = drain_fulfillment_cx_or_panic(&infcx, &mut fulfill_cx, impl_source); info!("Cache miss: {:?} => {:?}", trait_ref, impl_source); Ok(impl_source) @@ -110,7 +110,7 @@ pub fn codegen_fulfill_obligation<'tcx>( fn drain_fulfillment_cx_or_panic( infcx: &InferCtxt<'_, 'tcx>, fulfill_cx: &mut FulfillmentContext<'tcx>, - result: &T, + result: T, ) -> T where T: TypeFoldable<'tcx>, @@ -128,5 +128,5 @@ where } let result = infcx.resolve_vars_if_possible(result); - infcx.tcx.erase_regions(&result) + infcx.tcx.erase_regions(result) } diff --git a/compiler/rustc_trait_selection/src/traits/coherence.rs b/compiler/rustc_trait_selection/src/traits/coherence.rs index c53c65c00b7..9324d55ac1b 100644 --- a/compiler/rustc_trait_selection/src/traits/coherence.rs +++ b/compiler/rustc_trait_selection/src/traits/coherence.rs @@ -103,7 +103,7 @@ fn with_fresh_ty_vars<'cx, 'tcx>( }; let Normalized { value: mut header, obligations } = - traits::normalize(selcx, param_env, ObligationCause::dummy(), &header); + traits::normalize(selcx, param_env, ObligationCause::dummy(), header); header.predicates.extend(obligations.into_iter().map(|o| o.predicate)); header @@ -162,7 +162,8 @@ fn overlap_within_probe( let opt_failing_obligation = a_impl_header .predicates .iter() - .chain(&b_impl_header.predicates) + .copied() + .chain(b_impl_header.predicates) .map(|p| infcx.resolve_vars_if_possible(p)) .map(|p| Obligation { cause: ObligationCause::dummy(), @@ -188,7 +189,7 @@ fn overlap_within_probe( } } - let impl_header = selcx.infcx().resolve_vars_if_possible(&a_impl_header); + let impl_header = selcx.infcx().resolve_vars_if_possible(a_impl_header); let intercrate_ambiguity_causes = selcx.take_intercrate_ambiguity_causes(); debug!("overlap: intercrate_ambiguity_causes={:#?}", intercrate_ambiguity_causes); diff --git a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs index 638a8253e7e..fdb2361ba03 100644 --- a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs +++ b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs @@ -78,7 +78,7 @@ pub fn is_const_evaluatable<'cx, 'tcx>( Concrete, } let mut failure_kind = FailureKind::Concrete; - walk_abstract_const(tcx, ct, |node| match node { + walk_abstract_const::(tcx, ct, |node| match node { Node::Leaf(leaf) => { let leaf = leaf.subst(tcx, ct.substs); if leaf.has_infer_types_or_consts() { @@ -512,6 +512,13 @@ impl<'a, 'tcx> AbstractConstBuilder<'a, 'tcx> { block = &self.body.basic_blocks()[next]; } else { assert_eq!(self.locals[mir::RETURN_PLACE], self.nodes.last().unwrap()); + // `AbstractConst`s should not contain any promoteds as they require references which + // are not allowed. + assert!(!self.nodes.iter().any(|n| matches!( + n.node, + Node::Leaf(ty::Const { val: ty::ConstKind::Unevaluated(_, _, Some(_)), ty: _ }) + ))); + self.nodes[self.locals[mir::RETURN_PLACE]].used = true; if let Some(&unused) = self.nodes.iter().find(|n| !n.used) { self.error(Some(unused.span), "dead code")?; @@ -567,19 +574,19 @@ pub(super) fn try_unify_abstract_consts<'tcx>( // on `ErrorReported`. 
} -pub fn walk_abstract_const<'tcx, F>( +pub fn walk_abstract_const<'tcx, R, F>( tcx: TyCtxt<'tcx>, ct: AbstractConst<'tcx>, mut f: F, -) -> ControlFlow<()> +) -> ControlFlow<R> where - F: FnMut(Node<'tcx>) -> ControlFlow<()>, + F: FnMut(Node<'tcx>) -> ControlFlow<R>, { - fn recurse<'tcx>( + fn recurse<'tcx, R>( tcx: TyCtxt<'tcx>, ct: AbstractConst<'tcx>, - f: &mut dyn FnMut(Node<'tcx>) -> ControlFlow<()>, - ) -> ControlFlow<()> { + f: &mut dyn FnMut(Node<'tcx>) -> ControlFlow<R>, + ) -> ControlFlow<R> { let root = ct.root(); f(root)?; match root { @@ -609,6 +616,10 @@ pub(super) fn try_unify<'tcx>( (Node::Leaf(a_ct), Node::Leaf(b_ct)) => { let a_ct = a_ct.subst(tcx, a.substs); let b_ct = b_ct.subst(tcx, b.substs); + if a_ct.ty != b_ct.ty { + return false; + } + match (a_ct.val, b_ct.val) { // We can just unify errors with everything to reduce the amount of // emitted errors here. @@ -621,6 +632,12 @@ pub(super) fn try_unify<'tcx>( // we do not want to use `assert_eq!(a(), b())` to infer that `N` and `M` have to be `1`. This // means that we only allow inference variables if they are equal. (ty::ConstKind::Infer(a_val), ty::ConstKind::Infer(b_val)) => a_val == b_val, + // We may want to instead recurse into unevaluated constants here. That may require some + // care to prevent infinite recursion, so let's just ignore this for now. + ( + ty::ConstKind::Unevaluated(a_def, a_substs, None), + ty::ConstKind::Unevaluated(b_def, b_substs, None), + ) => a_def == b_def && a_substs == b_substs, // FIXME(const_evaluatable_checked): We may want to either actually try // to evaluate `a_ct` and `b_ct` if they are are fully concrete or something like // this, for now we just return false here. diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs index 2d57c39f7c7..fe4127fd4d8 100644 --- a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs @@ -182,7 +182,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { where T: fmt::Display + TypeFoldable<'tcx>, { - let predicate = self.resolve_vars_if_possible(&obligation.predicate); + let predicate = self.resolve_vars_if_possible(obligation.predicate.clone()); let mut err = struct_span_err!( self.tcx.sess, obligation.cause.span, @@ -200,6 +200,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &obligation.predicate, &obligation.cause.code, &mut vec![], + &mut Default::default(), ); err.emit(); @@ -213,7 +214,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { /// we do not suggest increasing the overflow limit, which is not /// going to help). fn report_overflow_error_cycle(&self, cycle: &[PredicateObligation<'tcx>]) -> !
{ - let cycle = self.resolve_vars_if_possible(&cycle.to_owned()); + let cycle = self.resolve_vars_if_possible(cycle.to_owned()); assert!(!cycle.is_empty()); debug!("report_overflow_error_cycle: cycle={:?}", cycle); @@ -259,7 +260,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { match bound_predicate.skip_binder() { ty::PredicateAtom::Trait(trait_predicate, _) => { let trait_predicate = bound_predicate.rebind(trait_predicate); - let trait_predicate = self.resolve_vars_if_possible(&trait_predicate); + let trait_predicate = self.resolve_vars_if_possible(trait_predicate); if self.tcx.sess.has_errors() && trait_predicate.references_error() { return; @@ -414,17 +415,17 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { err.span_label(enclosing_scope_span, s.as_str()); } - self.suggest_dereferences(&obligation, &mut err, &trait_ref, points_at_arg); - self.suggest_fn_call(&obligation, &mut err, &trait_ref, points_at_arg); - self.suggest_remove_reference(&obligation, &mut err, &trait_ref); - self.suggest_semicolon_removal(&obligation, &mut err, span, &trait_ref); + self.suggest_dereferences(&obligation, &mut err, trait_ref, points_at_arg); + self.suggest_fn_call(&obligation, &mut err, trait_ref, points_at_arg); + self.suggest_remove_reference(&obligation, &mut err, trait_ref); + self.suggest_semicolon_removal(&obligation, &mut err, span, trait_ref); self.note_version_mismatch(&mut err, &trait_ref); if Some(trait_ref.def_id()) == tcx.lang_items().try_trait() { - self.suggest_await_before_try(&mut err, &obligation, &trait_ref, span); + self.suggest_await_before_try(&mut err, &obligation, trait_ref, span); } - if self.suggest_impl_trait(&mut err, span, &obligation, &trait_ref) { + if self.suggest_impl_trait(&mut err, span, &obligation, trait_ref) { err.emit(); return; } @@ -487,7 +488,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { self.suggest_change_mut( &obligation, &mut err, - &trait_ref, + trait_ref, points_at_arg, ); } @@ -533,7 +534,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { ty::PredicateAtom::RegionOutlives(predicate) => { let predicate = bound_predicate.rebind(predicate); - let predicate = self.resolve_vars_if_possible(&predicate); + let predicate = self.resolve_vars_if_possible(predicate); let err = self .region_outlives_predicate(&obligation.cause, predicate) .err() @@ -549,7 +550,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { } ty::PredicateAtom::Projection(..) | ty::PredicateAtom::TypeOutlives(..) 
=> { - let predicate = self.resolve_vars_if_possible(&obligation.predicate); + let predicate = self.resolve_vars_if_possible(obligation.predicate); struct_span_err!( self.tcx.sess, span, @@ -671,9 +672,9 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { } } - OutputTypeParameterMismatch(ref found_trait_ref, ref expected_trait_ref, _) => { - let found_trait_ref = self.resolve_vars_if_possible(&*found_trait_ref); - let expected_trait_ref = self.resolve_vars_if_possible(&*expected_trait_ref); + OutputTypeParameterMismatch(found_trait_ref, expected_trait_ref, _) => { + let found_trait_ref = self.resolve_vars_if_possible(found_trait_ref); + let expected_trait_ref = self.resolve_vars_if_possible(expected_trait_ref); if expected_trait_ref.self_ty().references_error() { return; @@ -1035,7 +1036,7 @@ trait InferCtxtPrivExt<'tcx> { fn mk_trait_obligation_with_new_self_ty( &self, param_env: ty::ParamEnv<'tcx>, - trait_ref: &ty::PolyTraitRef<'tcx>, + trait_ref: ty::PolyTraitRef<'tcx>, new_self_ty: Ty<'tcx>, ) -> PredicateObligation<'tcx>; @@ -1157,7 +1158,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'tcx> for InferCtxt<'a, 'tcx> { obligation: &PredicateObligation<'tcx>, error: &MismatchedProjectionTypes<'tcx>, ) { - let predicate = self.resolve_vars_if_possible(&obligation.predicate); + let predicate = self.resolve_vars_if_possible(obligation.predicate); if predicate.references_error() { return; @@ -1178,7 +1179,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'tcx> for InferCtxt<'a, 'tcx> { let (data, _) = self.replace_bound_vars_with_fresh_vars( obligation.cause.span, infer::LateBoundRegionConversionTime::HigherRankedType, - &bound_predicate.rebind(data), + bound_predicate.rebind(data), ); let mut obligations = vec![]; let normalized_ty = super::normalize_projection_type( @@ -1343,7 +1344,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'tcx> for InferCtxt<'a, 'tcx> { // Sort impl candidates so that ordering is consistent for UI tests. let mut normalized_impl_candidates = - impl_candidates.iter().map(normalize).collect::>(); + impl_candidates.iter().copied().map(normalize).collect::>(); // Sort before taking the `..end` range, // because the ordering of `impl_candidates` may not be deterministic: @@ -1364,7 +1365,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'tcx> for InferCtxt<'a, 'tcx> { ) -> Option<(String, Option)> { match code { &ObligationCauseCode::BuiltinDerivedObligation(ref data) => { - let parent_trait_ref = self.resolve_vars_if_possible(&data.parent_trait_ref); + let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_ref); match self.get_parent_trait_ref(&data.parent_code) { Some(t) => Some(t), None => { @@ -1414,7 +1415,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'tcx> for InferCtxt<'a, 'tcx> { fn mk_trait_obligation_with_new_self_ty( &self, param_env: ty::ParamEnv<'tcx>, - trait_ref: &ty::PolyTraitRef<'tcx>, + trait_ref: ty::PolyTraitRef<'tcx>, new_self_ty: Ty<'tcx>, ) -> PredicateObligation<'tcx> { assert!(!new_self_ty.has_escaping_bound_vars()); @@ -1441,7 +1442,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'tcx> for InferCtxt<'a, 'tcx> { // ambiguous impls. The latter *ought* to be a // coherence violation, so we don't report it here. 
- let predicate = self.resolve_vars_if_possible(&obligation.predicate); + let predicate = self.resolve_vars_if_possible(obligation.predicate); let span = obligation.cause.span; debug!( @@ -1673,7 +1674,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'tcx> for InferCtxt<'a, 'tcx> { &mut selcx, param_env, ObligationCause::dummy(), - &cleaned_pred, + cleaned_pred, ) .value; @@ -1700,6 +1701,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'tcx> for InferCtxt<'a, 'tcx> { &obligation.predicate, &obligation.cause.code, &mut vec![], + &mut Default::default(), ); self.suggest_unsized_bound_if_applicable(err, obligation); } @@ -1808,7 +1810,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'tcx> for InferCtxt<'a, 'tcx> { cause_code: &ObligationCauseCode<'tcx>, ) -> bool { if let ObligationCauseCode::BuiltinDerivedObligation(ref data) = cause_code { - let parent_trait_ref = self.resolve_vars_if_possible(&data.parent_trait_ref); + let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_ref); if obligated_types.iter().any(|ot| ot == &parent_trait_ref.skip_binder().self_ty()) { return true; diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs index 0f5aad5af12..1b5375938af 100644 --- a/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs +++ b/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs @@ -36,7 +36,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { ) -> Option { let tcx = self.tcx; let param_env = obligation.param_env; - let trait_ref = tcx.erase_late_bound_regions(&trait_ref); + let trait_ref = tcx.erase_late_bound_regions(trait_ref); let trait_self_ty = trait_ref.self_ty(); let mut self_match_impls = vec![]; diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs index 1c6e661782f..095483aa5a2 100644 --- a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs +++ b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs @@ -7,6 +7,7 @@ use crate::autoderef::Autoderef; use crate::infer::InferCtxt; use crate::traits::normalize_projection_type; +use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_errors::{error_code, struct_span_err, Applicability, DiagnosticBuilder, Style}; use rustc_hir as hir; @@ -49,7 +50,7 @@ pub trait InferCtxtExt<'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'tcx>, - trait_ref: &ty::PolyTraitRef<'tcx>, + trait_ref: ty::PolyTraitRef<'tcx>, points_at_arg: bool, ); @@ -64,7 +65,7 @@ pub trait InferCtxtExt<'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, - trait_ref: &ty::Binder>, + trait_ref: ty::Binder>, points_at_arg: bool, ); @@ -81,14 +82,14 @@ pub trait InferCtxtExt<'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, - trait_ref: &ty::Binder>, + trait_ref: ty::Binder>, ); fn suggest_change_mut( &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, - trait_ref: &ty::Binder>, + trait_ref: ty::Binder>, points_at_arg: bool, ); @@ -97,7 +98,7 @@ pub trait InferCtxtExt<'tcx> { obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, span: Span, - trait_ref: &ty::Binder>, + trait_ref: ty::Binder>, ); fn return_type_span(&self, obligation: &PredicateObligation<'tcx>) -> 
Option<Span>; @@ -107,7 +108,7 @@ pub trait InferCtxtExt<'tcx> { err: &mut DiagnosticBuilder<'_>, span: Span, obligation: &PredicateObligation<'tcx>, - trait_ref: &ty::Binder<ty::TraitRef<'tcx>>, + trait_ref: ty::Binder<ty::TraitRef<'tcx>>, ) -> bool; fn point_at_returns_when_relevant( @@ -158,6 +159,7 @@ pub trait InferCtxtExt<'tcx> { predicate: &T, cause_code: &ObligationCauseCode<'tcx>, obligated_types: &mut Vec<&ty::TyS<'tcx>>, + seen_requirements: &mut FxHashSet<DefId>, ) where T: fmt::Display; @@ -168,7 +170,7 @@ pub trait InferCtxtExt<'tcx> { &self, err: &mut DiagnosticBuilder<'_>, obligation: &PredicateObligation<'tcx>, - trait_ref: &ty::Binder<ty::TraitRef<'tcx>>, + trait_ref: ty::Binder<ty::TraitRef<'tcx>>, span: Span, ); } @@ -462,7 +464,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'tcx>, - trait_ref: &ty::PolyTraitRef<'tcx>, + trait_ref: ty::PolyTraitRef<'tcx>, points_at_arg: bool, ) { // It only make sense when suggesting dereferences for arguments @@ -475,7 +477,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { let real_trait_ref = match &obligation.cause.code { ObligationCauseCode::ImplDerivedObligation(cause) | ObligationCauseCode::DerivedObligation(cause) - | ObligationCauseCode::BuiltinDerivedObligation(cause) => &cause.parent_trait_ref, + | ObligationCauseCode::BuiltinDerivedObligation(cause) => cause.parent_trait_ref, _ => trait_ref, }; let real_ty = match real_trait_ref.self_ty().no_bound_vars() { @@ -556,7 +558,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, - trait_ref: &ty::Binder<ty::TraitRef<'tcx>>, + trait_ref: ty::Binder<ty::TraitRef<'tcx>>, points_at_arg: bool, ) { let self_ty = match trait_ref.self_ty().no_bound_vars() { @@ -734,7 +736,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, - trait_ref: &ty::Binder<ty::TraitRef<'tcx>>, + trait_ref: ty::Binder<ty::TraitRef<'tcx>>, ) { let span = obligation.cause.span; @@ -797,7 +799,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, - trait_ref: &ty::Binder<ty::TraitRef<'tcx>>, + trait_ref: ty::Binder<ty::TraitRef<'tcx>>, points_at_arg: bool, ) { let span = obligation.cause.span; @@ -832,7 +834,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { let new_obligation = self.mk_trait_obligation_with_new_self_ty( obligation.param_env, - &trait_ref, + trait_ref, suggested_ty, ); let suggested_ty_would_satisfy_obligation = self @@ -869,7 +871,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, span: Span, - trait_ref: &ty::Binder<ty::TraitRef<'tcx>>, + trait_ref: ty::Binder<ty::TraitRef<'tcx>>, ) { let is_empty_tuple = |ty: ty::Binder<Ty<'tcx>>| *ty.skip_binder().kind() == ty::Tuple(ty::List::empty()); @@ -919,7 +921,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { err: &mut DiagnosticBuilder<'_>, span: Span, obligation: &PredicateObligation<'tcx>, - trait_ref: &ty::Binder<ty::TraitRef<'tcx>>, + trait_ref: ty::Binder<ty::TraitRef<'tcx>>, ) -> bool { match obligation.cause.code.peel_derives() { // Only suggest `impl Trait` if the return type is unsized because it is `dyn Trait`.
@@ -976,12 +978,12 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { .returns .iter() .filter_map(|expr| typeck_results.node_type_opt(expr.hir_id)) - .map(|ty| self.resolve_vars_if_possible(&ty)); + .map(|ty| self.resolve_vars_if_possible(ty)); let (last_ty, all_returns_have_same_type, only_never_return) = ret_types.clone().fold( (None, true, true), |(last_ty, mut same, only_never_return): (std::option::Option>, bool, bool), ty| { - let ty = self.resolve_vars_if_possible(&ty); + let ty = self.resolve_vars_if_possible(ty); same &= !matches!(ty.kind(), ty::Error(_)) && last_ty.map_or(true, |last_ty| { @@ -1133,7 +1135,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { let typeck_results = self.in_progress_typeck_results.map(|t| t.borrow()).unwrap(); for expr in &visitor.returns { if let Some(returned_ty) = typeck_results.node_type_opt(expr.hir_id) { - let ty = self.resolve_vars_if_possible(&returned_ty); + let ty = self.resolve_vars_if_possible(returned_ty); err.span_label(expr.span, &format!("this returned value is of type `{}`", ty)); } } @@ -1406,7 +1408,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { // Look for a type inside the generator interior that matches the target type to get // a span. - let target_ty_erased = self.tcx.erase_regions(&target_ty); + let target_ty_erased = self.tcx.erase_regions(target_ty); let ty_matches = |ty| -> bool { // Careful: the regions for types that appear in the // generator interior are not generally known, so we @@ -1420,8 +1422,8 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { // generator frame. Bound regions are preserved by // `erase_regions` and so we must also call // `erase_late_bound_regions`. - let ty_erased = self.tcx.erase_late_bound_regions(&ty::Binder::bind(ty)); - let ty_erased = self.tcx.erase_regions(&ty_erased); + let ty_erased = self.tcx.erase_late_bound_regions(ty::Binder::bind(ty)); + let ty_erased = self.tcx.erase_regions(ty_erased); let eq = ty::TyS::same_type(ty_erased, target_ty_erased); debug!( "maybe_note_obligation_cause_for_async_await: ty_erased={:?} \ @@ -1437,7 +1439,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { if let Some(upvars) = self.tcx.upvars_mentioned(generator_did) { interior_or_upvar_span = upvars.iter().find_map(|(upvar_id, upvar)| { let upvar_ty = typeck_results.node_type(*upvar_id); - let upvar_ty = self.resolve_vars_if_possible(&upvar_ty); + let upvar_ty = self.resolve_vars_if_possible(upvar_ty); if ty_matches(&upvar_ty) { Some(GeneratorInteriorOrUpvar::Upvar(upvar.span)) } else { @@ -1787,6 +1789,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &obligation.predicate, next_code.unwrap(), &mut Vec::new(), + &mut Default::default(), ); } @@ -1796,6 +1799,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { predicate: &T, cause_code: &ObligationCauseCode<'tcx>, obligated_types: &mut Vec<&ty::TyS<'tcx>>, + seen_requirements: &mut FxHashSet, ) where T: fmt::Display, { @@ -2010,7 +2014,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { err.note("shared static variables must have a type that implements `Sync`"); } ObligationCauseCode::BuiltinDerivedObligation(ref data) => { - let parent_trait_ref = self.resolve_vars_if_possible(&data.parent_trait_ref); + let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_ref); let ty = parent_trait_ref.skip_binder().self_ty(); if parent_trait_ref.references_error() { err.cancel(); @@ -2025,8 +2029,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for 
InferCtxt<'a, 'tcx> { if let ObligationCauseCode::BuiltinDerivedObligation(ref data) = *data.parent_code { - let parent_trait_ref = - self.resolve_vars_if_possible(&data.parent_trait_ref); + let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_ref); let ty = parent_trait_ref.skip_binder().self_ty(); matches!(ty.kind(), ty::Generator(..)) || matches!(ty.kind(), ty::Closure(..)) @@ -2051,18 +2054,44 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &parent_predicate, &data.parent_code, obligated_types, + seen_requirements, ) }); } } ObligationCauseCode::ImplDerivedObligation(ref data) => { - let parent_trait_ref = self.resolve_vars_if_possible(&data.parent_trait_ref); + let mut parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_ref); + let parent_def_id = parent_trait_ref.def_id(); err.note(&format!( "required because of the requirements on the impl of `{}` for `{}`", parent_trait_ref.print_only_trait_path(), parent_trait_ref.skip_binder().self_ty() )); - let parent_predicate = parent_trait_ref.without_const().to_predicate(tcx); + + let mut parent_predicate = parent_trait_ref.without_const().to_predicate(tcx); + let mut data = data; + let mut count = 0; + seen_requirements.insert(parent_def_id); + while let ObligationCauseCode::ImplDerivedObligation(child) = &*data.parent_code { + // Skip redundant recursive obligation notes. See `ui/issue-20413.rs`. + let child_trait_ref = self.resolve_vars_if_possible(child.parent_trait_ref); + let child_def_id = child_trait_ref.def_id(); + if seen_requirements.insert(child_def_id) { + break; + } + count += 1; + data = child; + parent_predicate = child_trait_ref.without_const().to_predicate(tcx); + parent_trait_ref = child_trait_ref; + } + if count > 0 { + err.note(&format!("{} redundant requirements hidden", count)); + err.note(&format!( + "required because of the requirements on the impl of `{}` for `{}`", + parent_trait_ref.print_only_trait_path(), + parent_trait_ref.skip_binder().self_ty() + )); + } // #74711: avoid a stack overflow ensure_sufficient_stack(|| { self.note_obligation_cause_code( @@ -2070,11 +2099,12 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &parent_predicate, &data.parent_code, obligated_types, + seen_requirements, ) }); } ObligationCauseCode::DerivedObligation(ref data) => { - let parent_trait_ref = self.resolve_vars_if_possible(&data.parent_trait_ref); + let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_ref); let parent_predicate = parent_trait_ref.without_const().to_predicate(tcx); // #74711: avoid a stack overflow ensure_sufficient_stack(|| { @@ -2083,20 +2113,21 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &parent_predicate, &data.parent_code, obligated_types, + seen_requirements, ) }); } ObligationCauseCode::CompareImplMethodObligation { .. } => { err.note(&format!( - "the requirement `{}` appears on the impl method \ - but not on the corresponding trait method", + "the requirement `{}` appears on the impl method but not on the corresponding \ + trait method", predicate )); } ObligationCauseCode::CompareImplTypeObligation { .. 
} => { err.note(&format!( - "the requirement `{}` appears on the associated impl type \ - but not on the corresponding associated trait type", + "the requirement `{}` appears on the associated impl type but not on the \ + corresponding associated trait type", predicate )); } @@ -2132,7 +2163,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &self, err: &mut DiagnosticBuilder<'_>, obligation: &PredicateObligation<'tcx>, - trait_ref: &ty::Binder>, + trait_ref: ty::Binder>, span: Span, ) { debug!( @@ -2150,13 +2181,13 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { if let Some(hir::GeneratorKind::Async(_)) = body.generator_kind { let future_trait = self.tcx.require_lang_item(LangItem::Future, None); - let self_ty = self.resolve_vars_if_possible(&trait_ref.self_ty()); + let self_ty = self.resolve_vars_if_possible(trait_ref.self_ty()); // Do not check on infer_types to avoid panic in evaluate_obligation. if self_ty.has_infer_types() { return; } - let self_ty = self.tcx.erase_regions(&self_ty); + let self_ty = self.tcx.erase_regions(self_ty); let impls_future = self.tcx.type_implements_trait(( future_trait, @@ -2197,7 +2228,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { debug!( "suggest_await_before_try: normalized_projection_type {:?}", - self.resolve_vars_if_possible(&normalized_ty) + self.resolve_vars_if_possible(normalized_ty) ); let try_obligation = self.mk_trait_obligation_with_new_self_ty( obligation.param_env, diff --git a/compiler/rustc_trait_selection/src/traits/fulfill.rs b/compiler/rustc_trait_selection/src/traits/fulfill.rs index 538c14c6b72..a04f816b0f8 100644 --- a/compiler/rustc_trait_selection/src/traits/fulfill.rs +++ b/compiler/rustc_trait_selection/src/traits/fulfill.rs @@ -202,7 +202,7 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> { ) { // this helps to reduce duplicate errors, as well as making // debug output much nicer to read and so on. - let obligation = infcx.resolve_vars_if_possible(&obligation); + let obligation = infcx.resolve_vars_if_possible(obligation); debug!(?obligation, "register_predicate_obligation"); @@ -298,7 +298,7 @@ impl<'a, 'b, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'tcx> { if !change { debug!( "process_predicate: pending obligation {:?} still stalled on {:?}", - self.selcx.infcx().resolve_vars_if_possible(&pending_obligation.obligation), + self.selcx.infcx().resolve_vars_if_possible(pending_obligation.obligation.clone()), pending_obligation.stalled_on ); return ProcessResult::Unchanged; @@ -338,14 +338,14 @@ impl<'a, 'b, 'tcx> FulfillProcessor<'a, 'b, 'tcx> { if obligation.predicate.has_infer_types_or_consts() { obligation.predicate = - self.selcx.infcx().resolve_vars_if_possible(&obligation.predicate); + self.selcx.infcx().resolve_vars_if_possible(obligation.predicate); } debug!(?obligation, ?obligation.cause, "process_obligation"); let infcx = self.selcx.infcx(); - match obligation.predicate.kind() { + match *obligation.predicate.kind() { ty::PredicateKind::ForAll(binder) => match binder.skip_binder() { // Evaluation will discard candidates using the leak check. 
// This means we need to pass it the bound version of our @@ -384,9 +384,9 @@ impl<'a, 'b, 'tcx> FulfillProcessor<'a, 'b, 'tcx> { bug!("TypeWellFormedFromEnv is only used for Chalk") } }, - &ty::PredicateKind::Atom(atom) => match atom { - ty::PredicateAtom::Trait(ref data, _) => { - let trait_obligation = obligation.with(Binder::dummy(*data)); + ty::PredicateKind::Atom(atom) => match atom { + ty::PredicateAtom::Trait(data, _) => { + let trait_obligation = obligation.with(Binder::dummy(data)); self.process_trait_obligation( obligation, @@ -639,7 +639,7 @@ impl<'a, 'b, 'tcx> FulfillProcessor<'a, 'b, 'tcx> { debug!( "process_predicate: pending obligation {:?} now stalled on {:?}", - infcx.resolve_vars_if_possible(obligation), + infcx.resolve_vars_if_possible(obligation.clone()), stalled_on ); @@ -684,7 +684,7 @@ fn trait_ref_infer_vars<'a, 'tcx>( ) -> Vec> { selcx .infcx() - .resolve_vars_if_possible(&trait_ref) + .resolve_vars_if_possible(trait_ref) .skip_binder() .substs .iter() diff --git a/compiler/rustc_trait_selection/src/traits/misc.rs b/compiler/rustc_trait_selection/src/traits/misc.rs index e23f5a583b2..cedd1aa54b8 100644 --- a/compiler/rustc_trait_selection/src/traits/misc.rs +++ b/compiler/rustc_trait_selection/src/traits/misc.rs @@ -50,7 +50,7 @@ pub fn can_type_implement_copy( let span = tcx.def_span(field.did); let cause = ObligationCause::dummy_with_span(span); let ctx = traits::FulfillmentContext::new(); - match traits::fully_normalize(&infcx, ctx, cause, param_env, &ty) { + match traits::fully_normalize(&infcx, ctx, cause, param_env, ty) { Ok(ty) => { if !infcx.type_is_copy_modulo_regions(param_env, ty, span) { infringing.push(field); diff --git a/compiler/rustc_trait_selection/src/traits/mod.rs b/compiler/rustc_trait_selection/src/traits/mod.rs index c93087a18cf..2d7df2ddd11 100644 --- a/compiler/rustc_trait_selection/src/traits/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/mod.rs @@ -223,7 +223,7 @@ fn do_normalize_predicates<'tcx>( // we move over to lazy normalization *anyway*. 
let fulfill_cx = FulfillmentContext::new_ignoring_regions(); let predicates = - match fully_normalize(&infcx, fulfill_cx, cause, elaborated_env, &predicates) { + match fully_normalize(&infcx, fulfill_cx, cause, elaborated_env, predicates) { Ok(predicates) => predicates, Err(errors) => { infcx.report_fulfillment_errors(&errors, None, false); @@ -243,7 +243,7 @@ fn do_normalize_predicates<'tcx>( RegionckMode::default(), ); - let predicates = match infcx.fully_resolve(&predicates) { + let predicates = match infcx.fully_resolve(predicates) { Ok(predicates) => predicates, Err(fixup_err) => { // If we encounter a fixup error, it means that some type @@ -384,7 +384,7 @@ pub fn fully_normalize<'a, 'tcx, T>( mut fulfill_cx: FulfillmentContext<'tcx>, cause: ObligationCause<'tcx>, param_env: ty::ParamEnv<'tcx>, - value: &T, + value: T, ) -> Result>> where T: TypeFoldable<'tcx>, @@ -404,7 +404,7 @@ where debug!("fully_normalize: select_all_or_error start"); fulfill_cx.select_all_or_error(infcx)?; debug!("fully_normalize: select_all_or_error complete"); - let resolved_value = infcx.resolve_vars_if_possible(&normalized_value); + let resolved_value = infcx.resolve_vars_if_possible(normalized_value); debug!("fully_normalize: resolved_value={:?}", resolved_value); Ok(resolved_value) } @@ -424,7 +424,7 @@ pub fn impossible_predicates<'tcx>( let mut fulfill_cx = FulfillmentContext::new(); let cause = ObligationCause::dummy(); let Normalized { value: predicates, obligations } = - normalize(&mut selcx, param_env, cause.clone(), &predicates); + normalize(&mut selcx, param_env, cause.clone(), predicates); for obligation in obligations { fulfill_cx.register_predicate_obligation(&infcx, obligation); } @@ -435,7 +435,7 @@ pub fn impossible_predicates<'tcx>( fulfill_cx.select_all_or_error(&infcx).is_err() }); - debug!("impossible_predicates(predicates={:?}) = {:?}", predicates, result); + debug!("impossible_predicates = {:?}", result); result } @@ -494,7 +494,7 @@ fn vtable_methods<'tcx>( // erase them if they appear, so that we get the type // at some particular call site. let substs = - tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &substs); + tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), substs); // It's possible that the method relies on where-clauses that // do not hold for this particular set of type parameters. diff --git a/compiler/rustc_trait_selection/src/traits/object_safety.rs b/compiler/rustc_trait_selection/src/traits/object_safety.rs index 32e0991733b..d912a00d6b7 100644 --- a/compiler/rustc_trait_selection/src/traits/object_safety.rs +++ b/compiler/rustc_trait_selection/src/traits/object_safety.rs @@ -446,7 +446,7 @@ fn virtual_call_violation_for_method<'tcx>( } let receiver_ty = - tcx.liberate_late_bound_regions(method.def_id, &sig.map_bound(|sig| sig.inputs()[0])); + tcx.liberate_late_bound_regions(method.def_id, sig.map_bound(|sig| sig.inputs()[0])); // Until `unsized_locals` is fully implemented, `self: Self` can't be dispatched on. // However, this is already considered object-safe. We allow it as a special case here. 
@@ -771,7 +771,9 @@ fn contains_illegal_self_type_reference<'tcx, T: TypeFoldable<'tcx>>( } impl<'tcx> TypeVisitor<'tcx> for IllegalSelfTypeVisitor<'tcx> { - fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> { + type BreakTy = (); + + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> { match t.kind() { ty::Param(_) => { if t == self.tcx.types.self_param { @@ -812,7 +814,7 @@ fn contains_illegal_self_type_reference<'tcx, T: TypeFoldable<'tcx>>( } } - fn visit_const(&mut self, ct: &ty::Const<'tcx>) -> ControlFlow<()> { + fn visit_const(&mut self, ct: &ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> { // First check if the type of this constant references `Self`. self.visit_ty(ct.ty)?; @@ -844,7 +846,7 @@ fn contains_illegal_self_type_reference<'tcx, T: TypeFoldable<'tcx>>( } } - fn visit_predicate(&mut self, pred: ty::Predicate<'tcx>) -> ControlFlow<()> { + fn visit_predicate(&mut self, pred: ty::Predicate<'tcx>) -> ControlFlow<Self::BreakTy> { if let ty::PredicateAtom::ConstEvaluatable(def, substs) = pred.skip_binders() { // FIXME(const_evaluatable_checked): We should probably deduplicate the logic for // `AbstractConst`s here, it might make sense to change `ConstEvaluatable` to diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs index a85ffd3c961..df472e6ed7e 100644 --- a/compiler/rustc_trait_selection/src/traits/project.rs +++ b/compiler/rustc_trait_selection/src/traits/project.rs @@ -168,7 +168,7 @@ pub(super) fn poly_project_and_unify_type<'cx, 'tcx>( let infcx = selcx.infcx(); infcx.commit_if_ok(|_snapshot| { let placeholder_predicate = - infcx.replace_bound_vars_with_placeholders(&obligation.predicate); + infcx.replace_bound_vars_with_placeholders(obligation.predicate); let placeholder_obligation = obligation.with(placeholder_predicate); let result = project_and_unify_type(selcx, &placeholder_obligation)?; @@ -232,7 +232,7 @@ pub fn normalize<'a, 'b, 'tcx, T>( selcx: &'a mut SelectionContext<'b, 'tcx>, param_env: ty::ParamEnv<'tcx>, cause: ObligationCause<'tcx>, - value: &T, + value: T, ) -> Normalized<'tcx, T> where T: TypeFoldable<'tcx>, @@ -246,7 +246,7 @@ pub fn normalize_to<'a, 'b, 'tcx, T>( selcx: &'a mut SelectionContext<'b, 'tcx>, param_env: ty::ParamEnv<'tcx>, cause: ObligationCause<'tcx>, - value: &T, + value: T, obligations: &mut Vec<PredicateObligation<'tcx>>, ) -> T where @@ -261,7 +261,7 @@ pub fn normalize_with_depth<'a, 'b, 'tcx, T>( param_env: ty::ParamEnv<'tcx>, cause: ObligationCause<'tcx>, depth: usize, - value: &T, + value: T, ) -> Normalized<'tcx, T> where T: TypeFoldable<'tcx>, @@ -277,7 +277,7 @@ pub fn normalize_with_depth_to<'a, 'b, 'tcx, T>( param_env: ty::ParamEnv<'tcx>, cause: ObligationCause<'tcx>, depth: usize, - value: &T, + value: T, obligations: &mut Vec<PredicateObligation<'tcx>>, ) -> T where @@ -309,7 +309,7 @@ impl<'a, 'b, 'tcx> AssocTypeNormalizer<'a, 'b, 'tcx> { AssocTypeNormalizer { selcx, param_env, cause, obligations, depth } } - fn fold<T: TypeFoldable<'tcx>>(&mut self, value: &T) -> T { + fn fold<T: TypeFoldable<'tcx>>(&mut self, value: T) -> T { let value = self.selcx.infcx().resolve_vars_if_possible(value); if !value.has_projections() { value } else { value.fold_with(self) } @@ -365,7 +365,7 @@ impl<'a, 'b, 'tcx> TypeFolder<'tcx> for AssocTypeNormalizer<'a, 'b, 'tcx> { } } - ty::Projection(ref data) if !data.has_escaping_bound_vars() => { + ty::Projection(data) if !data.has_escaping_bound_vars() => { // This is kind of hacky -- we need to be able to // handle normalization within binders because // otherwise we wind up a need to normalize when doing @@ -381,7 +381,7 @@ impl<'a, 'b, 'tcx>
TypeFolder<'tcx> for AssocTypeNormalizer<'a, 'b, 'tcx> { let normalized_ty = normalize_projection_type( self.selcx, self.param_env, - *data, + data, self.cause.clone(), self.depth, &mut self.obligations, @@ -474,7 +474,7 @@ fn opt_normalize_projection_type<'a, 'b, 'tcx>( ) -> Result>, InProgress> { let infcx = selcx.infcx(); - let projection_ty = infcx.resolve_vars_if_possible(&projection_ty); + let projection_ty = infcx.resolve_vars_if_possible(projection_ty); let cache_key = ProjectionCacheKey::new(projection_ty); // FIXME(#20304) For now, I am caching here, which is good, but it @@ -567,7 +567,7 @@ fn opt_normalize_projection_type<'a, 'b, 'tcx>( depth + 1, &mut projected_obligations, ); - let normalized_ty = normalizer.fold(&projected_ty); + let normalized_ty = normalizer.fold(projected_ty); debug!(?normalized_ty, ?depth); @@ -997,7 +997,7 @@ fn assemble_candidates_from_impls<'cx, 'tcx>( // type. // // NOTE: This should be kept in sync with the similar code in - // `rustc_ty::instance::resolve_associated_item()`. + // `rustc_ty_utils::instance::resolve_associated_item()`. let node_item = assoc_ty_def(selcx, impl_data.impl_def_id, obligation.predicate.item_def_id) .map_err(|ErrorReported| ())?; @@ -1013,8 +1013,7 @@ fn assemble_candidates_from_impls<'cx, 'tcx>( if obligation.param_env.reveal() == Reveal::All { // NOTE(eddyb) inference variables can resolve to parameters, so // assume `poly_trait_ref` isn't monomorphic, if it contains any. - let poly_trait_ref = - selcx.infcx().resolve_vars_if_possible(&poly_trait_ref); + let poly_trait_ref = selcx.infcx().resolve_vars_if_possible(poly_trait_ref); !poly_trait_ref.still_further_specializable() } else { debug!( @@ -1192,7 +1191,7 @@ fn confirm_generator_candidate<'cx, 'tcx>( obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, - &gen_sig, + gen_sig, ); debug!(?obligation, ?gen_sig, ?obligations, "confirm_generator_candidate"); @@ -1263,7 +1262,7 @@ fn confirm_fn_pointer_candidate<'cx, 'tcx>( obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, - &sig, + sig, ); confirm_callable_candidate(selcx, obligation, sig, util::TupleArgumentsFlag::Yes) @@ -1282,7 +1281,7 @@ fn confirm_closure_candidate<'cx, 'tcx>( obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, - &closure_sig, + closure_sig, ); debug!(?obligation, ?closure_sig, ?obligations, "confirm_closure_candidate"); @@ -1336,7 +1335,7 @@ fn confirm_param_env_candidate<'cx, 'tcx>( let (cache_entry, _) = infcx.replace_bound_vars_with_fresh_vars( cause.span, LateBoundRegionConversionTime::HigherRankedType, - &poly_cache_entry, + poly_cache_entry, ); let cache_trait_ref = cache_entry.projection_ty.trait_ref(infcx.tcx); @@ -1349,7 +1348,7 @@ fn confirm_param_env_candidate<'cx, 'tcx>( obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, - &cache_trait_ref, + cache_trait_ref, &mut nested_obligations, ) }) @@ -1445,7 +1444,7 @@ fn assoc_ty_own_obligations<'cx, 'tcx>( obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, - &predicate, + predicate, nested, ); nested.push(Obligation::with_depth( @@ -1526,7 +1525,7 @@ impl<'tcx> ProjectionCacheKeyExt<'tcx> for ProjectionCacheKey<'tcx> { // from a specific call to `opt_normalize_projection_type` - if // there's no precise match, the original cache entry is "stranded" // anyway. 
- infcx.resolve_vars_if_possible(&predicate.projection_ty), + infcx.resolve_vars_if_possible(predicate.projection_ty), ) }) } diff --git a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs index 8212823a6db..f05582f0614 100644 --- a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs +++ b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs @@ -40,10 +40,10 @@ impl<'cx, 'tcx> AtExt<'tcx> for At<'cx, 'tcx> { } let mut orig_values = OriginalQueryValues::default(); - let c_ty = self.infcx.canonicalize_query(&self.param_env.and(ty), &mut orig_values); + let c_ty = self.infcx.canonicalize_query(self.param_env.and(ty), &mut orig_values); let span = self.cause.span; debug!("c_ty = {:?}", c_ty); - if let Ok(result) = &tcx.dropck_outlives(c_ty) { + if let Ok(result) = tcx.dropck_outlives(c_ty) { if result.is_proven() { if let Ok(InferOk { value, obligations }) = self.infcx.instantiate_query_response_and_region_obligations( @@ -53,7 +53,7 @@ impl<'cx, 'tcx> AtExt<'tcx> for At<'cx, 'tcx> { result, ) { - let ty = self.infcx.resolve_vars_if_possible(&ty); + let ty = self.infcx.resolve_vars_if_possible(ty); let kinds = value.into_kinds_reporting_overflows(tcx, span, ty); return InferOk { value: kinds, obligations }; } diff --git a/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs b/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs index 0569f6217da..b83a4cd1e57 100644 --- a/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs +++ b/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs @@ -65,7 +65,7 @@ impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> { ) -> Result { let mut _orig_values = OriginalQueryValues::default(); let c_pred = self - .canonicalize_query(&obligation.param_env.and(obligation.predicate), &mut _orig_values); + .canonicalize_query(obligation.param_env.and(obligation.predicate), &mut _orig_values); // Run canonical query. If overflow occurs, rerun from scratch but this time // in standard trait query mode so that overflow is handled appropriately // within `SelectionContext`. diff --git a/compiler/rustc_trait_selection/src/traits/query/normalize.rs b/compiler/rustc_trait_selection/src/traits/query/normalize.rs index 42a598ce3a0..54743ef9ce9 100644 --- a/compiler/rustc_trait_selection/src/traits/query/normalize.rs +++ b/compiler/rustc_trait_selection/src/traits/query/normalize.rs @@ -19,7 +19,7 @@ use super::NoSolution; pub use rustc_middle::traits::query::NormalizationResult; pub trait AtExt<'tcx> { - fn normalize(&self, value: &T) -> Result, NoSolution> + fn normalize(&self, value: T) -> Result, NoSolution> where T: TypeFoldable<'tcx>; } @@ -38,7 +38,7 @@ impl<'cx, 'tcx> AtExt<'tcx> for At<'cx, 'tcx> { /// normalizing, but for now should be used only when we actually /// know that normalization will succeed, since error reporting /// and other details are still "under development". 
- fn normalize(&self, value: &T) -> Result, NoSolution> + fn normalize(&self, value: T) -> Result, NoSolution> where T: TypeFoldable<'tcx>, { @@ -97,6 +97,7 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for QueryNormalizer<'cx, 'tcx> { self.infcx.tcx } + #[instrument(skip(self))] fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { if !ty.has_projections() { return ty; @@ -145,7 +146,7 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for QueryNormalizer<'cx, 'tcx> { } } - ty::Projection(ref data) if !data.has_escaping_bound_vars() => { + ty::Projection(data) if !data.has_escaping_bound_vars() => { // This is kind of hacky -- we need to be able to // handle normalization within binders because // otherwise we wind up a need to normalize when doing @@ -165,7 +166,7 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for QueryNormalizer<'cx, 'tcx> { // so we cannot canonicalize it. let c_data = self .infcx - .canonicalize_hr_query_hack(&self.param_env.and(*data), &mut orig_values); + .canonicalize_hr_query_hack(self.param_env.and(data), &mut orig_values); debug!("QueryNormalizer: c_data = {:#?}", c_data); debug!("QueryNormalizer: orig_values = {:#?}", orig_values); match tcx.normalize_projection_ty(c_data) { @@ -180,7 +181,7 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for QueryNormalizer<'cx, 'tcx> { self.cause, self.param_env, &orig_values, - &result, + result, ) { Ok(InferOk { value: result, obligations }) => { debug!("QueryNormalizer: result = {:#?}", result); diff --git a/compiler/rustc_trait_selection/src/traits/query/outlives_bounds.rs b/compiler/rustc_trait_selection/src/traits/query/outlives_bounds.rs index a42409515db..f5fa52c915d 100644 --- a/compiler/rustc_trait_selection/src/traits/query/outlives_bounds.rs +++ b/compiler/rustc_trait_selection/src/traits/query/outlives_bounds.rs @@ -51,7 +51,7 @@ impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> { debug!("implied_outlives_bounds(ty = {:?})", ty); let mut orig_values = OriginalQueryValues::default(); - let key = self.canonicalize_query(¶m_env.and(ty), &mut orig_values); + let key = self.canonicalize_query(param_env.and(ty), &mut orig_values); let result = match self.tcx.implied_outlives_bounds(key) { Ok(r) => r, Err(NoSolution) => { @@ -68,7 +68,7 @@ impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> { &ObligationCause::misc(span, body_id), param_env, &orig_values, - &result, + result, ); debug!("implied_outlives_bounds for {:?}: {:#?}", ty, result); let result = match result { diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs index 915e8ae4a7a..1688539165a 100644 --- a/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs +++ b/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs @@ -96,7 +96,7 @@ fn scrape_region_constraints<'tcx, R>( region_obligations .iter() .map(|(_, r_o)| (r_o.sup_type, r_o.sub_region)) - .map(|(ty, r)| (infcx.resolve_vars_if_possible(&ty), r)), + .map(|(ty, r)| (infcx.resolve_vars_if_possible(ty), r)), ®ion_constraint_data, ); diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs index ed6c6d0cc0a..130ffa1a33a 100644 --- a/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs @@ -81,16 +81,14 @@ pub trait QueryTypeOp<'tcx>: fmt::Debug + Sized + TypeFoldable<'tcx> + 'tcx { // like the subtype query, which go awry around // `'static` otherwise. 
let mut canonical_var_values = OriginalQueryValues::default(); - let canonical_self = - infcx.canonicalize_hr_query_hack(&query_key, &mut canonical_var_values); + let old_param_env = query_key.param_env; + let canonical_self = infcx.canonicalize_hr_query_hack(query_key, &mut canonical_var_values); let canonical_result = Self::perform_query(infcx.tcx, canonical_self)?; - let param_env = query_key.param_env; - let InferOk { value, obligations } = infcx .instantiate_nll_query_response_and_region_obligations( &ObligationCause::dummy(), - param_env, + old_param_env, &canonical_var_values, canonical_result, output_query_region_constraints, diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs index b0bfb4ad173..d2556c44fb4 100644 --- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs +++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs @@ -229,7 +229,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { param_env: obligation.param_env, cause: obligation.cause.clone(), recursion_depth: obligation.recursion_depth, - predicate: self.infcx().resolve_vars_if_possible(&obligation.predicate), + predicate: self.infcx().resolve_vars_if_possible(obligation.predicate), }; if obligation.predicate.skip_binder().self_ty().is_ty_var() { @@ -353,16 +353,13 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { all_bounds.filter(|p| p.def_id() == stack.obligation.predicate.def_id()); // Keep only those bounds which may apply, and propagate overflow if it occurs. - let mut param_candidates = vec![]; for bound in matching_bounds { let wc = self.evaluate_where_clause(stack, bound)?; if wc.may_apply() { - param_candidates.push(ParamCandidate(bound)); + candidates.vec.push(ParamCandidate(bound)); } } - candidates.vec.extend(param_candidates); - Ok(()) } @@ -607,7 +604,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { // The code below doesn't care about regions, and the // self-ty here doesn't escape this probe, so just erase // any LBR. - let self_ty = self.tcx().erase_late_bound_regions(&obligation.self_ty()); + let self_ty = self.tcx().erase_late_bound_regions(obligation.self_ty()); let poly_trait_ref = match self_ty.kind() { ty::Dynamic(ref data, ..) => { if data.auto_traits().any(|did| did == obligation.predicate.def_id()) { @@ -642,9 +639,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { debug!(?poly_trait_ref, "assemble_candidates_from_object_ty"); - let poly_trait_predicate = self.infcx().resolve_vars_if_possible(&obligation.predicate); + let poly_trait_predicate = self.infcx().resolve_vars_if_possible(obligation.predicate); let placeholder_trait_predicate = - self.infcx().replace_bound_vars_with_placeholders(&poly_trait_predicate); + self.infcx().replace_bound_vars_with_placeholders(poly_trait_predicate); // Count only those upcast versions that match the trait-ref // we are looking for. 
Specifically, do not only check for the diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs index 872b8e85f56..7c155c7684e 100644 --- a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs +++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs @@ -126,7 +126,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { let trait_predicate = self.infcx.shallow_resolve(obligation.predicate); let placeholder_trait_predicate = - self.infcx().replace_bound_vars_with_placeholders(&trait_predicate); + self.infcx().replace_bound_vars_with_placeholders(trait_predicate); let placeholder_self_ty = placeholder_trait_predicate.self_ty(); let (def_id, substs) = match *placeholder_self_ty.kind() { ty::Projection(proj) => (proj.item_def_id, proj.substs), @@ -144,7 +144,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, - &candidate, + candidate, &mut obligations, ); @@ -163,7 +163,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, - &predicate, + predicate, &mut obligations, ); obligations.push(Obligation::with_depth( @@ -285,8 +285,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { let trait_obligations: Vec> = self.infcx.commit_unconditionally(|_| { let poly_trait_ref = obligation.predicate.to_poly_trait_ref(); - let trait_ref = - self.infcx.replace_bound_vars_with_placeholders(&poly_trait_ref); + let trait_ref = self.infcx.replace_bound_vars_with_placeholders(poly_trait_ref); let cause = obligation.derived_cause(ImplDerivedObligation); self.impl_or_trait_obligations( cause, @@ -370,11 +369,10 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { let tcx = self.tcx(); debug!(?obligation, ?index, "confirm_object_candidate"); - let trait_predicate = - self.infcx.replace_bound_vars_with_placeholders(&obligation.predicate); + let trait_predicate = self.infcx.replace_bound_vars_with_placeholders(obligation.predicate); let self_ty = self.infcx.shallow_resolve(trait_predicate.self_ty()); let obligation_trait_ref = ty::Binder::dummy(trait_predicate.trait_ref); - let data = match self_ty.kind() { + let data = match *self_ty.kind() { ty::Dynamic(data, ..) 
=> { self.infcx .replace_bound_vars_with_fresh_vars( @@ -416,7 +414,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, - &unnormalized_upcast_trait_ref, + unnormalized_upcast_trait_ref, &mut nested, ); @@ -442,7 +440,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, - &super_trait, + super_trait, &mut nested, ); nested.push(Obligation::new( @@ -480,7 +478,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, - &subst_bound, + subst_bound, &mut nested, ); nested.push(Obligation::new( @@ -520,7 +518,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, - &trait_ref, + trait_ref, ) }); @@ -541,8 +539,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { debug!(?obligation, ?alias_def_id, "confirm_trait_alias_candidate"); self.infcx.commit_unconditionally(|_| { - let predicate = - self.infcx().replace_bound_vars_with_placeholders(&obligation.predicate); + let predicate = self.infcx().replace_bound_vars_with_placeholders(obligation.predicate); let trait_ref = predicate.trait_ref; let trait_def_id = trait_ref.def_id; let substs = trait_ref.substs; @@ -584,7 +581,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, - &trait_ref, + trait_ref, ) }); @@ -627,7 +624,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, - &trait_ref, + trait_ref, ) }); diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs index 4cc4bc0acda..05ff9a6fb9c 100644 --- a/compiler/rustc_trait_selection/src/traits/select/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs @@ -279,7 +279,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { /// tracking is not enabled, just returns an empty vector. 
pub fn take_intercrate_ambiguity_causes(&mut self) -> Vec { assert!(self.intercrate); - self.intercrate_ambiguity_causes.take().unwrap_or(vec![]) + self.intercrate_ambiguity_causes.take().unwrap_or_default() } pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'tcx> { @@ -1019,7 +1019,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { } let obligation = &stack.obligation; - let predicate = self.infcx().resolve_vars_if_possible(&obligation.predicate); + let predicate = self.infcx().resolve_vars_if_possible(obligation.predicate); // Okay to skip binder because of the nature of the // trait-ref-is-knowable check, which does not care about @@ -1138,9 +1138,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { &mut self, obligation: &TraitObligation<'tcx>, ) -> smallvec::SmallVec<[usize; 2]> { - let poly_trait_predicate = self.infcx().resolve_vars_if_possible(&obligation.predicate); + let poly_trait_predicate = self.infcx().resolve_vars_if_possible(obligation.predicate); let placeholder_trait_predicate = - self.infcx().replace_bound_vars_with_placeholders(&poly_trait_predicate); + self.infcx().replace_bound_vars_with_placeholders(poly_trait_predicate); debug!( ?placeholder_trait_predicate, "match_projection_obligation_against_definition_bounds" @@ -1220,7 +1220,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, - &trait_bound, + trait_bound, ) }); self.infcx @@ -1266,12 +1266,12 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, - &data.map_bound_ref(|data| data.projection_ty), + data.map_bound(|data| data.projection_ty), &mut nested_obligations, ) }) } else { - data.map_bound_ref(|data| data.projection_ty) + data.map_bound(|data| data.projection_ty) }; // FIXME(generic_associated_types): Compare the whole projections @@ -1737,7 +1737,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { let ty: ty::Binder> = ty::Binder::bind(ty); // <----/ self.infcx.commit_unconditionally(|_| { - let placeholder_ty = self.infcx.replace_bound_vars_with_placeholders(&ty); + let placeholder_ty = self.infcx.replace_bound_vars_with_placeholders(ty); let Normalized { value: normalized_ty, mut obligations } = ensure_sufficient_stack(|| { project::normalize_with_depth( @@ -1745,7 +1745,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { param_env, cause.clone(), recursion_depth, - &placeholder_ty, + placeholder_ty, ) }); let placeholder_obligation = predicate_for_trait_def( @@ -1807,7 +1807,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { } let placeholder_obligation = - self.infcx().replace_bound_vars_with_placeholders(&obligation.predicate); + self.infcx().replace_bound_vars_with_placeholders(obligation.predicate); let placeholder_obligation_trait_ref = placeholder_obligation.trait_ref; let impl_substs = self.infcx.fresh_substs_for_item(obligation.cause.span, impl_def_id); @@ -1821,7 +1821,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, - &impl_trait_ref, + impl_trait_ref, ) }); @@ -2028,7 +2028,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { param_env, cause.clone(), recursion_depth, - &predicate.subst(tcx, substs), + predicate.subst(tcx, substs), &mut obligations, ); obligations.push(Obligation { diff --git a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs index 4d81a3baa0e..512591960f5 100644 --- 
a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs @@ -158,7 +158,7 @@ pub(super) fn specializes(tcx: TyCtxt<'_>, (impl1_def_id, impl2_def_id): (DefId, FulfillmentContext::new(), ObligationCause::dummy(), penv, - &impl1_trait_ref, + impl1_trait_ref, ) { Ok(impl1_trait_ref) => impl1_trait_ref, Err(err) => { @@ -247,7 +247,7 @@ fn fulfill_implication<'a, 'tcx>( // Now resolve the *substitution* we built for the target earlier, replacing // the inference variables inside with whatever we got from fulfillment. - Ok(infcx.resolve_vars_if_possible(&target_substs)) + Ok(infcx.resolve_vars_if_possible(target_substs)) } } }) diff --git a/compiler/rustc_trait_selection/src/traits/structural_match.rs b/compiler/rustc_trait_selection/src/traits/structural_match.rs index ce0d3ef8a6a..3d20a8d5cf3 100644 --- a/compiler/rustc_trait_selection/src/traits/structural_match.rs +++ b/compiler/rustc_trait_selection/src/traits/structural_match.rs @@ -55,9 +55,7 @@ pub fn search_for_structural_match_violation<'tcx>( ) -> Option> { // FIXME: we should instead pass in an `infcx` from the outside. tcx.infer_ctxt().enter(|infcx| { - let mut search = Search { infcx, span, found: None, seen: FxHashSet::default() }; - ty.visit_with(&mut search); - search.found + ty.visit_with(&mut Search { infcx, span, seen: FxHashSet::default() }).break_value() }) } @@ -116,9 +114,6 @@ struct Search<'a, 'tcx> { infcx: InferCtxt<'a, 'tcx>, - /// Records first ADT that does not implement a structural-match trait. - found: Option>, - /// Tracks ADTs previously encountered during search, so that /// we will not recur on them again. seen: FxHashSet, @@ -135,38 +130,33 @@ impl Search<'a, 'tcx> { } impl<'a, 'tcx> TypeVisitor<'tcx> for Search<'a, 'tcx> { - fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> { + type BreakTy = NonStructuralMatchTy<'tcx>; + + fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow { debug!("Search visiting ty: {:?}", ty); let (adt_def, substs) = match *ty.kind() { ty::Adt(adt_def, substs) => (adt_def, substs), ty::Param(_) => { - self.found = Some(NonStructuralMatchTy::Param); - return ControlFlow::BREAK; + return ControlFlow::Break(NonStructuralMatchTy::Param); } ty::Dynamic(..) => { - self.found = Some(NonStructuralMatchTy::Dynamic); - return ControlFlow::BREAK; + return ControlFlow::Break(NonStructuralMatchTy::Dynamic); } ty::Foreign(_) => { - self.found = Some(NonStructuralMatchTy::Foreign); - return ControlFlow::BREAK; + return ControlFlow::Break(NonStructuralMatchTy::Foreign); } ty::Opaque(..) => { - self.found = Some(NonStructuralMatchTy::Opaque); - return ControlFlow::BREAK; + return ControlFlow::Break(NonStructuralMatchTy::Opaque); } ty::Projection(..) => { - self.found = Some(NonStructuralMatchTy::Projection); - return ControlFlow::BREAK; + return ControlFlow::Break(NonStructuralMatchTy::Projection); } ty::Generator(..) | ty::GeneratorWitness(..) => { - self.found = Some(NonStructuralMatchTy::Generator); - return ControlFlow::BREAK; + return ControlFlow::Break(NonStructuralMatchTy::Generator); } ty::Closure(..) => { - self.found = Some(NonStructuralMatchTy::Closure); - return ControlFlow::BREAK; + return ControlFlow::Break(NonStructuralMatchTy::Closure); } ty::RawPtr(..) => { // structural-match ignores substructure of @@ -206,8 +196,7 @@ impl<'a, 'tcx> TypeVisitor<'tcx> for Search<'a, 'tcx> { ty::Array(..) | ty::Slice(_) | ty::Ref(..) | ty::Tuple(..) 
=> { // First check all contained types and then tell the caller to continue searching. - ty.super_visit_with(self); - return ControlFlow::CONTINUE; + return ty.super_visit_with(self); } ty::Infer(_) | ty::Placeholder(_) | ty::Bound(..) => { bug!("unexpected type during structural-match checking: {:?}", ty); @@ -227,8 +216,7 @@ impl<'a, 'tcx> TypeVisitor<'tcx> for Search<'a, 'tcx> { if !self.type_marked_structural(ty) { debug!("Search found ty: {:?}", ty); - self.found = Some(NonStructuralMatchTy::Adt(&adt_def)); - return ControlFlow::BREAK; + return ControlFlow::Break(NonStructuralMatchTy::Adt(&adt_def)); } // structural-match does not care about the @@ -244,20 +232,11 @@ impl<'a, 'tcx> TypeVisitor<'tcx> for Search<'a, 'tcx> { // even though we skip super_visit_with, we must recur on // fields of ADT. let tcx = self.tcx(); - for field_ty in adt_def.all_fields().map(|field| field.ty(tcx, substs)) { + adt_def.all_fields().map(|field| field.ty(tcx, substs)).try_for_each(|field_ty| { let ty = self.tcx().normalize_erasing_regions(ty::ParamEnv::empty(), field_ty); debug!("structural-match ADT: field_ty={:?}, ty={:?}", field_ty, ty); - - if ty.visit_with(self).is_break() { - // found an ADT without structural-match; halt visiting! - assert!(self.found.is_some()); - return ControlFlow::BREAK; - } - } - - // Even though we do not want to recur on substs, we do - // want our caller to continue its own search. - ControlFlow::CONTINUE + ty.visit_with(self) + }) } } diff --git a/compiler/rustc_trait_selection/src/traits/util.rs b/compiler/rustc_trait_selection/src/traits/util.rs index f626bb0b7e3..2430620323f 100644 --- a/compiler/rustc_trait_selection/src/traits/util.rs +++ b/compiler/rustc_trait_selection/src/traits/util.rs @@ -205,12 +205,12 @@ pub fn impl_trait_ref_and_oblig<'a, 'tcx>( let impl_trait_ref = selcx.tcx().impl_trait_ref(impl_def_id).unwrap(); let impl_trait_ref = impl_trait_ref.subst(selcx.tcx(), impl_substs); let Normalized { value: impl_trait_ref, obligations: normalization_obligations1 } = - super::normalize(selcx, param_env, ObligationCause::dummy(), &impl_trait_ref); + super::normalize(selcx, param_env, ObligationCause::dummy(), impl_trait_ref); let predicates = selcx.tcx().predicates_of(impl_def_id); let predicates = predicates.instantiate(selcx.tcx(), impl_substs); let Normalized { value: predicates, obligations: normalization_obligations2 } = - super::normalize(selcx, param_env, ObligationCause::dummy(), &predicates); + super::normalize(selcx, param_env, ObligationCause::dummy(), predicates); let impl_obligations = predicates_for_generics(ObligationCause::dummy(), 0, param_env, predicates); diff --git a/compiler/rustc_trait_selection/src/traits/wf.rs b/compiler/rustc_trait_selection/src/traits/wf.rs index 496dff6c5b2..e5a792f229d 100644 --- a/compiler/rustc_trait_selection/src/traits/wf.rs +++ b/compiler/rustc_trait_selection/src/traits/wf.rs @@ -269,7 +269,7 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> { param_env, cause.clone(), self.recursion_depth, - &obligation.predicate, + obligation.predicate, &mut obligations, ); obligation.predicate = normalized_predicate; diff --git a/compiler/rustc_traits/Cargo.toml b/compiler/rustc_traits/Cargo.toml index a54fe08394e..8bd9e29629d 100644 --- a/compiler/rustc_traits/Cargo.toml +++ b/compiler/rustc_traits/Cargo.toml @@ -12,9 +12,9 @@ rustc_hir = { path = "../rustc_hir" } rustc_index = { path = "../rustc_index" } rustc_ast = { path = "../rustc_ast" } rustc_span = { path = "../rustc_span" } -chalk-ir = "0.32.0" -chalk-solve = "0.32.0" 
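
The `structural_match.rs` rewrite above drops the side-channel `found` field and instead returns the offending type through the visitor's new associated `BreakTy`, letting `ControlFlow::Break` carry the payload and `try_for_each` short-circuit automatically. A self-contained sketch of the same pattern over a toy tree, using only `std::ops::ControlFlow` (the names here are hypothetical, not the compiler's `TypeVisitor` API):

```rust
use std::ops::ControlFlow;

#[derive(Debug)]
enum Node {
    Leaf(bool), // `true` marks a "non-structural-match" leaf
    Branch(Vec<Node>),
}

// Returns the first offending node via ControlFlow::Break instead of storing
// it in a `found` field on the visitor.
fn search(node: &Node) -> ControlFlow<&Node> {
    match node {
        Node::Leaf(true) => ControlFlow::Break(node),
        Node::Leaf(false) => ControlFlow::Continue(()),
        // `try_for_each` stops at the first child that breaks and propagates
        // the break value to the caller, mirroring the ADT-field loop above.
        Node::Branch(children) => children.iter().try_for_each(|child| search(child)),
    }
}

fn main() {
    let tree = Node::Branch(vec![Node::Leaf(false), Node::Leaf(true)]);
    let found = match search(&tree) {
        ControlFlow::Break(node) => Some(node),
        ControlFlow::Continue(()) => None,
    };
    println!("first violation: {:?}", found);
}
```
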
-chalk-engine = "0.32.0" +chalk-ir = "0.36.0" +chalk-solve = "0.36.0" +chalk-engine = "0.36.0" smallvec = { version = "1.0", features = ["union", "may_dangle"] } rustc_infer = { path = "../rustc_infer" } rustc_trait_selection = { path = "../rustc_trait_selection" } diff --git a/compiler/rustc_traits/src/chalk/db.rs b/compiler/rustc_traits/src/chalk/db.rs index e5ae899a2f3..b1b9ef343d5 100644 --- a/compiler/rustc_traits/src/chalk/db.rs +++ b/compiler/rustc_traits/src/chalk/db.rs @@ -37,7 +37,7 @@ impl<'tcx> RustIrDatabase<'tcx> { def_id: DefId, bound_vars: SubstsRef<'tcx>, ) -> Vec>> { - let predicates = self.interner.tcx.predicates_of(def_id).predicates; + let predicates = self.interner.tcx.predicates_defined_on(def_id).predicates; let mut regions_substitutor = lowering::RegionsSubstitutor::new(self.interner.tcx, self.reempty_placeholder); predicates @@ -118,34 +118,27 @@ impl<'tcx> chalk_solve::RustIrDatabase> for RustIrDatabase<'t .map(|i| chalk_ir::AssocTypeId(i.def_id)) .collect(); - let well_known = if self.interner.tcx.lang_items().sized_trait() == Some(def_id) { + let lang_items = self.interner.tcx.lang_items(); + let well_known = if lang_items.sized_trait() == Some(def_id) { Some(chalk_solve::rust_ir::WellKnownTrait::Sized) - } else if self.interner.tcx.lang_items().copy_trait() == Some(def_id) { + } else if lang_items.copy_trait() == Some(def_id) { Some(chalk_solve::rust_ir::WellKnownTrait::Copy) - } else if self.interner.tcx.lang_items().clone_trait() == Some(def_id) { + } else if lang_items.clone_trait() == Some(def_id) { Some(chalk_solve::rust_ir::WellKnownTrait::Clone) - } else if self.interner.tcx.lang_items().drop_trait() == Some(def_id) { + } else if lang_items.drop_trait() == Some(def_id) { Some(chalk_solve::rust_ir::WellKnownTrait::Drop) - } else if self.interner.tcx.lang_items().fn_trait() == Some(def_id) { + } else if lang_items.fn_trait() == Some(def_id) { Some(chalk_solve::rust_ir::WellKnownTrait::Fn) - } else if self - .interner - .tcx - .lang_items() - .fn_once_trait() - .map(|t| def_id == t) - .unwrap_or(false) - { + } else if lang_items.fn_once_trait() == Some(def_id) { Some(chalk_solve::rust_ir::WellKnownTrait::FnOnce) - } else if self - .interner - .tcx - .lang_items() - .fn_mut_trait() - .map(|t| def_id == t) - .unwrap_or(false) - { + } else if lang_items.fn_mut_trait() == Some(def_id) { Some(chalk_solve::rust_ir::WellKnownTrait::FnMut) + } else if lang_items.unsize_trait() == Some(def_id) { + Some(chalk_solve::rust_ir::WellKnownTrait::Unsize) + } else if lang_items.unpin_trait() == Some(def_id) { + Some(chalk_solve::rust_ir::WellKnownTrait::Unpin) + } else if lang_items.coerce_unsized_trait() == Some(def_id) { + Some(chalk_solve::rust_ir::WellKnownTrait::CoerceUnsized) } else { None }; @@ -234,7 +227,7 @@ impl<'tcx> chalk_solve::RustIrDatabase> for RustIrDatabase<'t let (inputs_and_output, iobinders, _) = crate::chalk::lowering::collect_bound_vars( &self.interner, self.interner.tcx, - &sig.inputs_and_output().subst(self.interner.tcx, bound_vars), + sig.inputs_and_output().subst(self.interner.tcx, bound_vars), ); let argument_types = inputs_and_output[..inputs_and_output.len() - 1] @@ -281,11 +274,20 @@ impl<'tcx> chalk_solve::RustIrDatabase> for RustIrDatabase<'t where_clauses, }; + let associated_ty_value_ids: Vec<_> = self + .interner + .tcx + .associated_items(def_id) + .in_definition_order() + .filter(|i| i.kind == AssocKind::Type) + .map(|i| chalk_solve::rust_ir::AssociatedTyValueId(i.def_id)) + .collect(); + Arc::new(chalk_solve::rust_ir::ImplDatum { - 
polarity: chalk_solve::rust_ir::Polarity::Positive, + polarity: self.interner.tcx.impl_polarity(def_id).lower_into(&self.interner), binders: chalk_ir::Binders::new(binders, value), impl_type: chalk_solve::rust_ir::ImplType::Local, - associated_ty_value_ids: vec![], + associated_ty_value_ids, }) } @@ -324,19 +326,19 @@ impl<'tcx> chalk_solve::RustIrDatabase> for RustIrDatabase<'t fn impl_provided_for( &self, auto_trait_id: chalk_ir::TraitId>, - app_ty: &chalk_ir::ApplicationTy>, + chalk_ty: &chalk_ir::TyKind>, ) -> bool { use chalk_ir::Scalar::*; - use chalk_ir::TypeName::*; + use chalk_ir::TyKind::*; let trait_def_id = auto_trait_id.0; let all_impls = self.interner.tcx.all_impls(trait_def_id); for impl_def_id in all_impls { let trait_ref = self.interner.tcx.impl_trait_ref(impl_def_id).unwrap(); let self_ty = trait_ref.self_ty(); - let provides = match (self_ty.kind(), app_ty.name) { - (&ty::Adt(impl_adt_def, ..), Adt(id)) => impl_adt_def.did == id.0.did, - (_, AssociatedType(_ty_id)) => { + let provides = match (self_ty.kind(), chalk_ty) { + (&ty::Adt(impl_adt_def, ..), Adt(id, ..)) => impl_adt_def.did == id.0.did, + (_, AssociatedType(_ty_id, ..)) => { // FIXME(chalk): See https://github.com/rust-lang/rust/pull/77152#discussion_r494484774 false } @@ -365,10 +367,10 @@ impl<'tcx> chalk_solve::RustIrDatabase> for RustIrDatabase<'t (ast::FloatTy::F32, chalk_ir::FloatTy::F32) | (ast::FloatTy::F64, chalk_ir::FloatTy::F64) ), - (&ty::Tuple(..), Tuple(..)) => true, - (&ty::Array(..), Array) => true, - (&ty::Slice(..), Slice) => true, - (&ty::RawPtr(type_and_mut), Raw(mutability)) => { + (&ty::Tuple(substs), Tuple(len, _)) => substs.len() == *len, + (&ty::Array(..), Array(..)) => true, + (&ty::Slice(..), Slice(..)) => true, + (&ty::RawPtr(type_and_mut), Raw(mutability, _)) => { match (type_and_mut.mutbl, mutability) { (ast::Mutability::Mut, chalk_ir::Mutability::Mut) => true, (ast::Mutability::Mut, chalk_ir::Mutability::Not) => false, @@ -376,17 +378,19 @@ impl<'tcx> chalk_solve::RustIrDatabase> for RustIrDatabase<'t (ast::Mutability::Not, chalk_ir::Mutability::Not) => true, } } - (&ty::Ref(.., mutability1), Ref(mutability2)) => match (mutability1, mutability2) { - (ast::Mutability::Mut, chalk_ir::Mutability::Mut) => true, - (ast::Mutability::Mut, chalk_ir::Mutability::Not) => false, - (ast::Mutability::Not, chalk_ir::Mutability::Mut) => false, - (ast::Mutability::Not, chalk_ir::Mutability::Not) => true, - }, - (&ty::Opaque(def_id, ..), OpaqueType(opaque_ty_id)) => def_id == opaque_ty_id.0, - (&ty::FnDef(def_id, ..), FnDef(fn_def_id)) => def_id == fn_def_id.0, + (&ty::Ref(.., mutability1), Ref(mutability2, ..)) => { + match (mutability1, mutability2) { + (ast::Mutability::Mut, chalk_ir::Mutability::Mut) => true, + (ast::Mutability::Mut, chalk_ir::Mutability::Not) => false, + (ast::Mutability::Not, chalk_ir::Mutability::Mut) => false, + (ast::Mutability::Not, chalk_ir::Mutability::Not) => true, + } + } + (&ty::Opaque(def_id, ..), OpaqueType(opaque_ty_id, ..)) => def_id == opaque_ty_id.0, + (&ty::FnDef(def_id, ..), FnDef(fn_def_id, ..)) => def_id == fn_def_id.0, (&ty::Str, Str) => true, (&ty::Never, Never) => true, - (&ty::Closure(def_id, ..), Closure(closure_id)) => def_id == closure_id.0, + (&ty::Closure(def_id, ..), Closure(closure_id, _)) => def_id == closure_id.0, (&ty::Foreign(def_id), Foreign(foreign_def_id)) => def_id == foreign_def_id.0, (&ty::Error(..), Error) => false, _ => false, @@ -404,24 +408,38 @@ impl<'tcx> chalk_solve::RustIrDatabase> for RustIrDatabase<'t ) -> Arc>> { let def_id 
= associated_ty_id.0; let assoc_item = self.interner.tcx.associated_item(def_id); - let impl_id = match assoc_item.container { - AssocItemContainer::TraitContainer(def_id) => def_id, - _ => unimplemented!("Not possible??"), + let (impl_id, trait_id) = match assoc_item.container { + AssocItemContainer::TraitContainer(def_id) => (def_id, def_id), + AssocItemContainer::ImplContainer(def_id) => { + (def_id, self.interner.tcx.impl_trait_ref(def_id).unwrap().def_id) + } }; match assoc_item.kind { AssocKind::Type => {} _ => unimplemented!("Not possible??"), } + + let trait_item = self + .interner + .tcx + .associated_items(trait_id) + .find_by_name_and_kind(self.interner.tcx, assoc_item.ident, assoc_item.kind, trait_id) + .unwrap(); let bound_vars = bound_vars_for_item(self.interner.tcx, def_id); let binders = binders_for(&self.interner, bound_vars); - let ty = self.interner.tcx.type_of(def_id); + let ty = self + .interner + .tcx + .type_of(def_id) + .subst(self.interner.tcx, bound_vars) + .lower_into(&self.interner); Arc::new(chalk_solve::rust_ir::AssociatedTyValue { impl_id: chalk_ir::ImplId(impl_id), - associated_ty_id: chalk_ir::AssocTypeId(def_id), + associated_ty_id: chalk_ir::AssocTypeId(trait_item.def_id), value: chalk_ir::Binders::new( binders, - chalk_solve::rust_ir::AssociatedTyValueBound { ty: ty.lower_into(&self.interner) }, + chalk_solve::rust_ir::AssociatedTyValueBound { ty }, ), }) } @@ -441,19 +459,61 @@ impl<'tcx> chalk_solve::RustIrDatabase> for RustIrDatabase<'t &self, opaque_ty_id: chalk_ir::OpaqueTyId>, ) -> Arc>> { - let bound_vars = bound_vars_for_item(self.interner.tcx, opaque_ty_id.0); - let binders = binders_for(&self.interner, bound_vars); + let bound_vars = ty::fold::shift_vars( + self.interner.tcx, + bound_vars_for_item(self.interner.tcx, opaque_ty_id.0), + 1, + ); let where_clauses = self.where_clauses_for(opaque_ty_id.0, bound_vars); - let bounds = self.bounds_for(opaque_ty_id.0, bound_vars); + + let identity_substs = InternalSubsts::identity_for_item(self.interner.tcx, opaque_ty_id.0); + + let bounds = + self.interner + .tcx + .explicit_item_bounds(opaque_ty_id.0) + .iter() + .map(|(bound, _)| bound.subst(self.interner.tcx, &bound_vars)) + .map(|bound| { + bound.fold_with(&mut ty::fold::BottomUpFolder { + tcx: self.interner.tcx, + ty_op: |ty| { + if let ty::Opaque(def_id, substs) = *ty.kind() { + if def_id == opaque_ty_id.0 && substs == identity_substs { + return self.interner.tcx.mk_ty(ty::Bound( + ty::INNERMOST, + ty::BoundTy::from(ty::BoundVar::from_u32(0)), + )); + } + } + ty + }, + lt_op: |lt| lt, + ct_op: |ct| ct, + }) + }) + .filter_map(|bound| { + LowerInto::< + Option>> + >::lower_into(bound, &self.interner) + }) + .collect(); + + // Binder for the bound variable representing the concrete impl Trait type. 
+ let existential_binder = chalk_ir::VariableKinds::from1( + &self.interner, + chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General), + ); let value = chalk_solve::rust_ir::OpaqueTyDatumBound { - bounds: chalk_ir::Binders::new(binders.clone(), bounds), - where_clauses: chalk_ir::Binders::new(binders, where_clauses), + bounds: chalk_ir::Binders::new(existential_binder.clone(), bounds), + where_clauses: chalk_ir::Binders::new(existential_binder, where_clauses), }; + let binders = binders_for(&self.interner, bound_vars); Arc::new(chalk_solve::rust_ir::OpaqueTyDatum { opaque_ty_id, - bound: chalk_ir::Binders::empty(&self.interner, value), + bound: chalk_ir::Binders::new(binders, value), }) } @@ -506,17 +566,11 @@ impl<'tcx> chalk_solve::RustIrDatabase> for RustIrDatabase<'t substs: &chalk_ir::Substitution>, ) -> chalk_solve::rust_ir::ClosureKind { let kind = &substs.as_slice(&self.interner)[substs.len(&self.interner) - 3]; - match kind.assert_ty_ref(&self.interner).data(&self.interner) { - chalk_ir::TyData::Apply(apply) => match apply.name { - chalk_ir::TypeName::Scalar(scalar) => match scalar { - chalk_ir::Scalar::Int(int_ty) => match int_ty { - chalk_ir::IntTy::I8 => chalk_solve::rust_ir::ClosureKind::Fn, - chalk_ir::IntTy::I16 => chalk_solve::rust_ir::ClosureKind::FnMut, - chalk_ir::IntTy::I32 => chalk_solve::rust_ir::ClosureKind::FnOnce, - _ => bug!("bad closure kind"), - }, - _ => bug!("bad closure kind"), - }, + match kind.assert_ty_ref(&self.interner).kind(&self.interner) { + chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Int(int_ty)) => match int_ty { + chalk_ir::IntTy::I8 => chalk_solve::rust_ir::ClosureKind::Fn, + chalk_ir::IntTy::I16 => chalk_solve::rust_ir::ClosureKind::FnMut, + chalk_ir::IntTy::I32 => chalk_solve::rust_ir::ClosureKind::FnOnce, _ => bug!("bad closure kind"), }, _ => bug!("bad closure kind"), @@ -530,23 +584,19 @@ impl<'tcx> chalk_solve::RustIrDatabase> for RustIrDatabase<'t ) -> chalk_ir::Binders>> { let sig = &substs.as_slice(&self.interner)[substs.len(&self.interner) - 2]; - match sig.assert_ty_ref(&self.interner).data(&self.interner) { - chalk_ir::TyData::Function(f) => { + match sig.assert_ty_ref(&self.interner).kind(&self.interner) { + chalk_ir::TyKind::Function(f) => { let substitution = f.substitution.as_slice(&self.interner); let return_type = substitution.last().unwrap().assert_ty_ref(&self.interner).clone(); // Closure arguments are tupled let argument_tuple = substitution[0].assert_ty_ref(&self.interner); - let argument_types = match argument_tuple.data(&self.interner) { - chalk_ir::TyData::Apply(apply) => match apply.name { - chalk_ir::TypeName::Tuple(_) => apply - .substitution - .iter(&self.interner) - .map(|arg| arg.assert_ty_ref(&self.interner)) - .cloned() - .collect(), - _ => bug!("Expecting closure FnSig args to be tupled."), - }, + let argument_types = match argument_tuple.kind(&self.interner) { + chalk_ir::TyKind::Tuple(_len, substitution) => substitution + .iter(&self.interner) + .map(|arg| arg.assert_ty_ref(&self.interner)) + .cloned() + .collect(), _ => bug!("Expecting closure FnSig args to be tupled."), }; @@ -637,7 +687,7 @@ fn binders_for<'tcx>( bound_vars.iter().map(|arg| match arg.unpack() { ty::subst::GenericArgKind::Lifetime(_re) => chalk_ir::VariableKind::Lifetime, ty::subst::GenericArgKind::Type(_ty) => { - chalk_ir::VariableKind::Ty(chalk_ir::TyKind::General) + chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General) } ty::subst::GenericArgKind::Const(c) => { chalk_ir::VariableKind::Const(c.ty.lower_into(interner)) diff 
--git a/compiler/rustc_traits/src/chalk/lowering.rs b/compiler/rustc_traits/src/chalk/lowering.rs index 01c4dd12487..9afb980f84d 100644 --- a/compiler/rustc_traits/src/chalk/lowering.rs +++ b/compiler/rustc_traits/src/chalk/lowering.rs @@ -31,11 +31,12 @@ //! not. To lower anything wrapped in a `Binder`, we first deeply find any bound //! variables from the current `Binder`. +use rustc_ast::ast; use rustc_middle::traits::{ChalkEnvironmentAndGoal, ChalkRustInterner as RustInterner}; use rustc_middle::ty::fold::TypeFolder; use rustc_middle::ty::subst::{GenericArg, GenericArgKind, SubstsRef}; use rustc_middle::ty::{ - self, Binder, BoundRegion, Region, RegionKind, Ty, TyCtxt, TyKind, TypeFoldable, TypeVisitor, + self, Binder, BoundRegion, Region, RegionKind, Ty, TyCtxt, TypeFoldable, TypeVisitor, }; use rustc_span::def_id::DefId; @@ -85,7 +86,7 @@ impl<'tcx> LowerInto<'tcx, chalk_ir::InEnvironment { @@ -140,7 +141,7 @@ impl<'tcx> LowerInto<'tcx, chalk_ir::GoalData>> for ty::Predi let (predicate, binders, _named_regions) = collect_bound_vars( interner, interner.tcx, - &self.bound_atom_with_opt_escaping(interner.tcx), + self.bound_atom_with_opt_escaping(interner.tcx), ); let value = match predicate { @@ -240,24 +241,16 @@ impl<'tcx> LowerInto<'tcx, chalk_ir::AliasEq>> impl<'tcx> LowerInto<'tcx, chalk_ir::Ty>> for Ty<'tcx> { fn lower_into(self, interner: &RustInterner<'tcx>) -> chalk_ir::Ty> { - use chalk_ir::TyData; use rustc_ast as ast; - use TyKind::*; - let empty = || chalk_ir::Substitution::empty(interner); - let struct_ty = - |def_id| chalk_ir::TypeName::Adt(chalk_ir::AdtId(interner.tcx.adt_def(def_id))); - let apply = |name, substitution| { - TyData::Apply(chalk_ir::ApplicationTy { name, substitution }).intern(interner) - }; - let int = |i| apply(chalk_ir::TypeName::Scalar(chalk_ir::Scalar::Int(i)), empty()); - let uint = |i| apply(chalk_ir::TypeName::Scalar(chalk_ir::Scalar::Uint(i)), empty()); - let float = |f| apply(chalk_ir::TypeName::Scalar(chalk_ir::Scalar::Float(f)), empty()); + let int = |i| chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Int(i)); + let uint = |i| chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Uint(i)); + let float = |f| chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Float(f)); match *self.kind() { - Bool => apply(chalk_ir::TypeName::Scalar(chalk_ir::Scalar::Bool), empty()), - Char => apply(chalk_ir::TypeName::Scalar(chalk_ir::Scalar::Char), empty()), - Int(ty) => match ty { + ty::Bool => chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Bool), + ty::Char => chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Char), + ty::Int(ty) => match ty { ast::IntTy::Isize => int(chalk_ir::IntTy::Isize), ast::IntTy::I8 => int(chalk_ir::IntTy::I8), ast::IntTy::I16 => int(chalk_ir::IntTy::I16), @@ -265,7 +258,7 @@ impl<'tcx> LowerInto<'tcx, chalk_ir::Ty>> for Ty<'tcx> { ast::IntTy::I64 => int(chalk_ir::IntTy::I64), ast::IntTy::I128 => int(chalk_ir::IntTy::I128), }, - Uint(ty) => match ty { + ty::Uint(ty) => match ty { ast::UintTy::Usize => uint(chalk_ir::UintTy::Usize), ast::UintTy::U8 => uint(chalk_ir::UintTy::U8), ast::UintTy::U16 => uint(chalk_ir::UintTy::U16), @@ -273,80 +266,35 @@ impl<'tcx> LowerInto<'tcx, chalk_ir::Ty>> for Ty<'tcx> { ast::UintTy::U64 => uint(chalk_ir::UintTy::U64), ast::UintTy::U128 => uint(chalk_ir::UintTy::U128), }, - Float(ty) => match ty { + ty::Float(ty) => match ty { ast::FloatTy::F32 => float(chalk_ir::FloatTy::F32), ast::FloatTy::F64 => float(chalk_ir::FloatTy::F64), }, - Adt(def, substs) => apply(struct_ty(def.did), substs.lower_into(interner)), - Foreign(def_id) => 
apply(chalk_ir::TypeName::Foreign(ForeignDefId(def_id)), empty()), - Str => apply(chalk_ir::TypeName::Str, empty()), - Array(ty, len) => { - let value = match len.val { - ty::ConstKind::Value(val) => { - chalk_ir::ConstValue::Concrete(chalk_ir::ConcreteConst { interned: val }) - } - ty::ConstKind::Bound(db, bound) => { - chalk_ir::ConstValue::BoundVar(chalk_ir::BoundVar::new( - chalk_ir::DebruijnIndex::new(db.as_u32()), - bound.index(), - )) - } - _ => unimplemented!("Const not implemented. {:?}", len.val), - }; - apply( - chalk_ir::TypeName::Array, - chalk_ir::Substitution::from_iter( - interner, - &[ - chalk_ir::GenericArgData::Ty(ty.lower_into(interner)).intern(interner), - chalk_ir::GenericArgData::Const( - chalk_ir::ConstData { ty: len.ty.lower_into(interner), value } - .intern(interner), - ) - .intern(interner), - ], - ), - ) + ty::Adt(def, substs) => { + chalk_ir::TyKind::Adt(chalk_ir::AdtId(def), substs.lower_into(interner)) } - Slice(ty) => apply( - chalk_ir::TypeName::Slice, - chalk_ir::Substitution::from1( - interner, - chalk_ir::GenericArgData::Ty(ty.lower_into(interner)).intern(interner), - ), - ), - RawPtr(ptr) => { - let name = match ptr.mutbl { - ast::Mutability::Mut => chalk_ir::TypeName::Raw(chalk_ir::Mutability::Mut), - ast::Mutability::Not => chalk_ir::TypeName::Raw(chalk_ir::Mutability::Not), - }; - apply(name, chalk_ir::Substitution::from1(interner, ptr.ty.lower_into(interner))) + ty::Foreign(def_id) => chalk_ir::TyKind::Foreign(ForeignDefId(def_id)), + ty::Str => chalk_ir::TyKind::Str, + ty::Array(ty, len) => { + chalk_ir::TyKind::Array(ty.lower_into(interner), len.lower_into(interner)) } - Ref(region, ty, mutability) => { - let name = match mutability { - ast::Mutability::Mut => chalk_ir::TypeName::Ref(chalk_ir::Mutability::Mut), - ast::Mutability::Not => chalk_ir::TypeName::Ref(chalk_ir::Mutability::Not), - }; - apply( - name, - chalk_ir::Substitution::from_iter( - interner, - &[ - chalk_ir::GenericArgData::Lifetime(region.lower_into(interner)) - .intern(interner), - chalk_ir::GenericArgData::Ty(ty.lower_into(interner)).intern(interner), - ], - ), - ) + ty::Slice(ty) => chalk_ir::TyKind::Slice(ty.lower_into(interner)), + + ty::RawPtr(ptr) => { + chalk_ir::TyKind::Raw(ptr.mutbl.lower_into(interner), ptr.ty.lower_into(interner)) } - FnDef(def_id, substs) => apply( - chalk_ir::TypeName::FnDef(chalk_ir::FnDefId(def_id)), - substs.lower_into(interner), + ty::Ref(region, ty, mutability) => chalk_ir::TyKind::Ref( + mutability.lower_into(interner), + region.lower_into(interner), + ty.lower_into(interner), ), - FnPtr(sig) => { + ty::FnDef(def_id, substs) => { + chalk_ir::TyKind::FnDef(chalk_ir::FnDefId(def_id), substs.lower_into(interner)) + } + ty::FnPtr(sig) => { let (inputs_and_outputs, binders, _named_regions) = - collect_bound_vars(interner, interner.tcx, &sig.inputs_and_output()); - TyData::Function(chalk_ir::FnPointer { + collect_bound_vars(interner, interner.tcx, sig.inputs_and_output()); + chalk_ir::TyKind::Function(chalk_ir::FnPointer { num_binders: binders.len(interner), sig: sig.lower_into(interner), substitution: chalk_ir::Substitution::from_iter( @@ -356,148 +304,115 @@ impl<'tcx> LowerInto<'tcx, chalk_ir::Ty>> for Ty<'tcx> { }), ), }) - .intern(interner) } - Dynamic(predicates, region) => TyData::Dyn(chalk_ir::DynTy { + ty::Dynamic(predicates, region) => chalk_ir::TyKind::Dyn(chalk_ir::DynTy { bounds: predicates.lower_into(interner), lifetime: region.lower_into(interner), - }) - .intern(interner), - Closure(def_id, substs) => apply( - 
chalk_ir::TypeName::Closure(chalk_ir::ClosureId(def_id)), - substs.lower_into(interner), - ), - Generator(_def_id, _substs, _) => unimplemented!(), - GeneratorWitness(_) => unimplemented!(), - Never => apply(chalk_ir::TypeName::Never, empty()), - Tuple(substs) => { - apply(chalk_ir::TypeName::Tuple(substs.len()), substs.lower_into(interner)) + }), + ty::Closure(def_id, substs) => { + chalk_ir::TyKind::Closure(chalk_ir::ClosureId(def_id), substs.lower_into(interner)) } - Projection(proj) => TyData::Alias(proj.lower_into(interner)).intern(interner), - Opaque(def_id, substs) => { - TyData::Alias(chalk_ir::AliasTy::Opaque(chalk_ir::OpaqueTy { + ty::Generator(_def_id, _substs, _) => unimplemented!(), + ty::GeneratorWitness(_) => unimplemented!(), + ty::Never => chalk_ir::TyKind::Never, + ty::Tuple(substs) => chalk_ir::TyKind::Tuple(substs.len(), substs.lower_into(interner)), + ty::Projection(proj) => chalk_ir::TyKind::Alias(proj.lower_into(interner)), + ty::Opaque(def_id, substs) => { + chalk_ir::TyKind::Alias(chalk_ir::AliasTy::Opaque(chalk_ir::OpaqueTy { opaque_ty_id: chalk_ir::OpaqueTyId(def_id), substitution: substs.lower_into(interner), })) - .intern(interner) } // This should have been done eagerly prior to this, and all Params // should have been substituted to placeholders - Param(_) => panic!("Lowering Param when not expected."), - Bound(db, bound) => TyData::BoundVar(chalk_ir::BoundVar::new( + ty::Param(_) => panic!("Lowering Param when not expected."), + ty::Bound(db, bound) => chalk_ir::TyKind::BoundVar(chalk_ir::BoundVar::new( chalk_ir::DebruijnIndex::new(db.as_u32()), bound.var.index(), - )) - .intern(interner), - Placeholder(_placeholder) => TyData::Placeholder(chalk_ir::PlaceholderIndex { - ui: chalk_ir::UniverseIndex { counter: _placeholder.universe.as_usize() }, - idx: _placeholder.name.as_usize(), - }) - .intern(interner), - Infer(_infer) => unimplemented!(), - Error(_) => apply(chalk_ir::TypeName::Error, empty()), + )), + ty::Placeholder(_placeholder) => { + chalk_ir::TyKind::Placeholder(chalk_ir::PlaceholderIndex { + ui: chalk_ir::UniverseIndex { counter: _placeholder.universe.as_usize() }, + idx: _placeholder.name.as_usize(), + }) + } + ty::Infer(_infer) => unimplemented!(), + ty::Error(_) => chalk_ir::TyKind::Error, } + .intern(interner) } } impl<'tcx> LowerInto<'tcx, Ty<'tcx>> for &chalk_ir::Ty> { fn lower_into(self, interner: &RustInterner<'tcx>) -> Ty<'tcx> { - use chalk_ir::TyData; - use rustc_ast::ast; + use chalk_ir::TyKind; - let kind = match self.data(interner) { - TyData::Apply(application_ty) => match application_ty.name { - chalk_ir::TypeName::Adt(struct_id) => { - ty::Adt(struct_id.0, application_ty.substitution.lower_into(interner)) - } - chalk_ir::TypeName::Scalar(scalar) => match scalar { - chalk_ir::Scalar::Bool => ty::Bool, - chalk_ir::Scalar::Char => ty::Char, - chalk_ir::Scalar::Int(int_ty) => match int_ty { - chalk_ir::IntTy::Isize => ty::Int(ast::IntTy::Isize), - chalk_ir::IntTy::I8 => ty::Int(ast::IntTy::I8), - chalk_ir::IntTy::I16 => ty::Int(ast::IntTy::I16), - chalk_ir::IntTy::I32 => ty::Int(ast::IntTy::I32), - chalk_ir::IntTy::I64 => ty::Int(ast::IntTy::I64), - chalk_ir::IntTy::I128 => ty::Int(ast::IntTy::I128), - }, - chalk_ir::Scalar::Uint(int_ty) => match int_ty { - chalk_ir::UintTy::Usize => ty::Uint(ast::UintTy::Usize), - chalk_ir::UintTy::U8 => ty::Uint(ast::UintTy::U8), - chalk_ir::UintTy::U16 => ty::Uint(ast::UintTy::U16), - chalk_ir::UintTy::U32 => ty::Uint(ast::UintTy::U32), - chalk_ir::UintTy::U64 => ty::Uint(ast::UintTy::U64), - 
chalk_ir::UintTy::U128 => ty::Uint(ast::UintTy::U128), - }, - chalk_ir::Scalar::Float(float_ty) => match float_ty { - chalk_ir::FloatTy::F32 => ty::Float(ast::FloatTy::F32), - chalk_ir::FloatTy::F64 => ty::Float(ast::FloatTy::F64), - }, + let kind = match self.kind(interner) { + TyKind::Adt(struct_id, substitution) => { + ty::Adt(struct_id.0, substitution.lower_into(interner)) + } + TyKind::Scalar(scalar) => match scalar { + chalk_ir::Scalar::Bool => ty::Bool, + chalk_ir::Scalar::Char => ty::Char, + chalk_ir::Scalar::Int(int_ty) => match int_ty { + chalk_ir::IntTy::Isize => ty::Int(ast::IntTy::Isize), + chalk_ir::IntTy::I8 => ty::Int(ast::IntTy::I8), + chalk_ir::IntTy::I16 => ty::Int(ast::IntTy::I16), + chalk_ir::IntTy::I32 => ty::Int(ast::IntTy::I32), + chalk_ir::IntTy::I64 => ty::Int(ast::IntTy::I64), + chalk_ir::IntTy::I128 => ty::Int(ast::IntTy::I128), + }, + chalk_ir::Scalar::Uint(int_ty) => match int_ty { + chalk_ir::UintTy::Usize => ty::Uint(ast::UintTy::Usize), + chalk_ir::UintTy::U8 => ty::Uint(ast::UintTy::U8), + chalk_ir::UintTy::U16 => ty::Uint(ast::UintTy::U16), + chalk_ir::UintTy::U32 => ty::Uint(ast::UintTy::U32), + chalk_ir::UintTy::U64 => ty::Uint(ast::UintTy::U64), + chalk_ir::UintTy::U128 => ty::Uint(ast::UintTy::U128), + }, + chalk_ir::Scalar::Float(float_ty) => match float_ty { + chalk_ir::FloatTy::F32 => ty::Float(ast::FloatTy::F32), + chalk_ir::FloatTy::F64 => ty::Float(ast::FloatTy::F64), }, - chalk_ir::TypeName::Array => { - let substs = application_ty.substitution.as_slice(interner); - let ty = substs[0].assert_ty_ref(interner).lower_into(interner); - let c = substs[1].assert_const_ref(interner).lower_into(interner); - ty::Array(ty, interner.tcx.mk_const(c)) - } - chalk_ir::TypeName::FnDef(id) => { - ty::FnDef(id.0, application_ty.substitution.lower_into(interner)) - } - chalk_ir::TypeName::Closure(closure) => { - ty::Closure(closure.0, application_ty.substitution.lower_into(interner)) - } - chalk_ir::TypeName::Generator(_) => unimplemented!(), - chalk_ir::TypeName::GeneratorWitness(_) => unimplemented!(), - chalk_ir::TypeName::Never => ty::Never, - chalk_ir::TypeName::Tuple(_size) => { - ty::Tuple(application_ty.substitution.lower_into(interner)) - } - chalk_ir::TypeName::Slice => ty::Slice( - application_ty.substitution.as_slice(interner)[0] - .ty(interner) - .unwrap() - .lower_into(interner), - ), - chalk_ir::TypeName::Raw(mutbl) => ty::RawPtr(ty::TypeAndMut { - ty: application_ty.substitution.as_slice(interner)[0] - .ty(interner) - .unwrap() - .lower_into(interner), - mutbl: match mutbl { - chalk_ir::Mutability::Mut => ast::Mutability::Mut, - chalk_ir::Mutability::Not => ast::Mutability::Not, - }, - }), - chalk_ir::TypeName::Ref(mutbl) => ty::Ref( - application_ty.substitution.as_slice(interner)[0] - .lifetime(interner) - .unwrap() - .lower_into(interner), - application_ty.substitution.as_slice(interner)[1] - .ty(interner) - .unwrap() - .lower_into(interner), - match mutbl { - chalk_ir::Mutability::Mut => ast::Mutability::Mut, - chalk_ir::Mutability::Not => ast::Mutability::Not, - }, - ), - chalk_ir::TypeName::Str => ty::Str, - chalk_ir::TypeName::OpaqueType(opaque_ty) => { - ty::Opaque(opaque_ty.0, application_ty.substitution.lower_into(interner)) - } - chalk_ir::TypeName::AssociatedType(assoc_ty) => ty::Projection(ty::ProjectionTy { - substs: application_ty.substitution.lower_into(interner), - item_def_id: assoc_ty.0, - }), - chalk_ir::TypeName::Foreign(def_id) => ty::Foreign(def_id.0), - chalk_ir::TypeName::Error => unimplemented!(), }, - 
TyData::Placeholder(placeholder) => ty::Placeholder(ty::Placeholder { + TyKind::Array(ty, c) => { + let ty = ty.lower_into(interner); + let c = c.lower_into(interner); + ty::Array(ty, interner.tcx.mk_const(c)) + } + TyKind::FnDef(id, substitution) => ty::FnDef(id.0, substitution.lower_into(interner)), + TyKind::Closure(closure, substitution) => { + ty::Closure(closure.0, substitution.lower_into(interner)) + } + TyKind::Generator(..) => unimplemented!(), + TyKind::GeneratorWitness(..) => unimplemented!(), + TyKind::Never => ty::Never, + TyKind::Tuple(_len, substitution) => ty::Tuple(substitution.lower_into(interner)), + TyKind::Slice(ty) => ty::Slice(ty.lower_into(interner)), + TyKind::Raw(mutbl, ty) => ty::RawPtr(ty::TypeAndMut { + ty: ty.lower_into(interner), + mutbl: mutbl.lower_into(interner), + }), + TyKind::Ref(mutbl, lifetime, ty) => ty::Ref( + lifetime.lower_into(interner), + ty.lower_into(interner), + mutbl.lower_into(interner), + ), + TyKind::Str => ty::Str, + TyKind::OpaqueType(opaque_ty, substitution) => { + ty::Opaque(opaque_ty.0, substitution.lower_into(interner)) + } + TyKind::AssociatedType(assoc_ty, substitution) => ty::Projection(ty::ProjectionTy { + substs: substitution.lower_into(interner), + item_def_id: assoc_ty.0, + }), + TyKind::Foreign(def_id) => ty::Foreign(def_id.0), + TyKind::Error => return interner.tcx.ty_error(), + TyKind::Placeholder(placeholder) => ty::Placeholder(ty::Placeholder { universe: ty::UniverseIndex::from_usize(placeholder.ui.counter), name: ty::BoundVar::from_usize(placeholder.idx), }), - chalk_ir::TyData::Alias(alias_ty) => match alias_ty { + TyKind::Alias(alias_ty) => match alias_ty { chalk_ir::AliasTy::Projection(projection) => ty::Projection(ty::ProjectionTy { item_def_id: projection.associated_ty_id.0, substs: projection.substitution.lower_into(interner), @@ -506,16 +421,16 @@ impl<'tcx> LowerInto<'tcx, Ty<'tcx>> for &chalk_ir::Ty> { ty::Opaque(opaque.opaque_ty_id.0, opaque.substitution.lower_into(interner)) } }, - TyData::Function(_quantified_ty) => unimplemented!(), - TyData::BoundVar(_bound) => ty::Bound( + TyKind::Function(_quantified_ty) => unimplemented!(), + TyKind::BoundVar(_bound) => ty::Bound( ty::DebruijnIndex::from_usize(_bound.debruijn.depth() as usize), ty::BoundTy { var: ty::BoundVar::from_usize(_bound.index), kind: ty::BoundTyKind::Anon, }, ), - TyData::InferenceVar(_, _) => unimplemented!(), - TyData::Dyn(_) => unimplemented!(), + TyKind::InferenceVar(_, _) => unimplemented!(), + TyKind::Dyn(_) => unimplemented!(), }; interner.tcx.mk_ty(kind) } @@ -663,7 +578,7 @@ impl<'tcx> LowerInto<'tcx, Option { @@ -706,8 +621,16 @@ impl<'tcx> LowerInto<'tcx, chalk_ir::Binders, ) -> chalk_ir::Binders>> { + // `Self` has one binder: + // Binder<&'tcx ty::List>> + // The return type has two: + // Binders<&[Binders>]> + // This means that any variables that are escaping `self` need to be + // shifted in by one so that they are still escaping. 
+ let shifted_predicates = ty::fold::shift_vars(interner.tcx, self, 1); + let (predicates, binders, _named_regions) = - collect_bound_vars(interner, interner.tcx, &self); + collect_bound_vars(interner, interner.tcx, shifted_predicates); let self_ty = interner.tcx.mk_ty(ty::Bound( // This is going to be wrapped in a binder ty::DebruijnIndex::from_usize(1), @@ -716,7 +639,7 @@ impl<'tcx> LowerInto<'tcx, chalk_ir::Binders { chalk_ir::Binders::new( - chalk_ir::VariableKinds::empty(interner), + binders.clone(), chalk_ir::WhereClause::Implemented(chalk_ir::TraitRef { trait_id: chalk_ir::TraitId(def_id), substitution: interner @@ -727,25 +650,34 @@ impl<'tcx> LowerInto<'tcx, chalk_ir::Binders chalk_ir::Binders::new( - chalk_ir::VariableKinds::empty(interner), + binders.clone(), chalk_ir::WhereClause::AliasEq(chalk_ir::AliasEq { alias: chalk_ir::AliasTy::Projection(chalk_ir::ProjectionTy { associated_ty_id: chalk_ir::AssocTypeId(predicate.item_def_id), - substitution: predicate.substs.lower_into(interner), + substitution: interner + .tcx + .mk_substs_trait(self_ty, predicate.substs) + .lower_into(interner), }), ty: predicate.ty.lower_into(interner), }), ), ty::ExistentialPredicate::AutoTrait(def_id) => chalk_ir::Binders::new( - chalk_ir::VariableKinds::empty(interner), + binders.clone(), chalk_ir::WhereClause::Implemented(chalk_ir::TraitRef { trait_id: chalk_ir::TraitId(def_id), substitution: interner.tcx.mk_substs_trait(self_ty, &[]).lower_into(interner), }), ), }); + + // Binder for the bound variable representing the concrete underlying type. + let existential_binder = chalk_ir::VariableKinds::from1( + interner, + chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General), + ); let value = chalk_ir::QuantifiedWhereClauses::from_iter(interner, where_clauses); - chalk_ir::Binders::new(binders, value) + chalk_ir::Binders::new(existential_binder, value) } } @@ -775,7 +707,7 @@ impl<'tcx> LowerInto<'tcx, Option Some(chalk_ir::Binders::new( @@ -818,6 +750,35 @@ impl<'tcx> LowerInto<'tcx, chalk_solve::rust_ir::TraitBound>> } } +impl<'tcx> LowerInto<'tcx, chalk_ir::Mutability> for ast::Mutability { + fn lower_into(self, _interner: &RustInterner<'tcx>) -> chalk_ir::Mutability { + match self { + rustc_ast::Mutability::Mut => chalk_ir::Mutability::Mut, + rustc_ast::Mutability::Not => chalk_ir::Mutability::Not, + } + } +} + +impl<'tcx> LowerInto<'tcx, ast::Mutability> for chalk_ir::Mutability { + fn lower_into(self, _interner: &RustInterner<'tcx>) -> ast::Mutability { + match self { + chalk_ir::Mutability::Mut => ast::Mutability::Mut, + chalk_ir::Mutability::Not => ast::Mutability::Not, + } + } +} + +impl<'tcx> LowerInto<'tcx, chalk_solve::rust_ir::Polarity> for ty::ImplPolarity { + fn lower_into(self, _interner: &RustInterner<'tcx>) -> chalk_solve::rust_ir::Polarity { + match self { + ty::ImplPolarity::Positive => chalk_solve::rust_ir::Polarity::Positive, + ty::ImplPolarity::Negative => chalk_solve::rust_ir::Polarity::Negative, + // FIXME(chalk) reservation impls + ty::ImplPolarity::Reservation => chalk_solve::rust_ir::Polarity::Negative, + } + } +} + impl<'tcx> LowerInto<'tcx, chalk_solve::rust_ir::AliasEqBound>> for ty::ProjectionPredicate<'tcx> { @@ -847,10 +808,10 @@ impl<'tcx> LowerInto<'tcx, chalk_solve::rust_ir::AliasEqBound /// It's important to note that because of prior substitution, we may have /// late-bound regions, even outside of fn contexts, since this is the best way /// to prep types for chalk lowering. 
-crate fn collect_bound_vars<'a, 'tcx, T: TypeFoldable<'tcx>>( +crate fn collect_bound_vars<'tcx, T: TypeFoldable<'tcx>>( interner: &RustInterner<'tcx>, tcx: TyCtxt<'tcx>, - ty: &'a Binder, + ty: Binder, ) -> (T, chalk_ir::VariableKinds>, BTreeMap) { let mut bound_vars_collector = BoundVarsCollector::new(); ty.as_ref().skip_binder().visit_with(&mut bound_vars_collector); @@ -863,7 +824,7 @@ crate fn collect_bound_vars<'a, 'tcx, T: TypeFoldable<'tcx>>( .collect(); let mut bound_var_substitutor = NamedBoundVarSubstitutor::new(tcx, &named_parameters); - let new_ty = ty.as_ref().skip_binder().fold_with(&mut bound_var_substitutor); + let new_ty = ty.skip_binder().fold_with(&mut bound_var_substitutor); for var in named_parameters.values() { parameters.insert(*var, chalk_ir::VariableKind::Lifetime); @@ -872,7 +833,7 @@ crate fn collect_bound_vars<'a, 'tcx, T: TypeFoldable<'tcx>>( (0..parameters.len()).for_each(|i| { parameters .get(&(i as u32)) - .or_else(|| bug!("Skipped bound var index: ty={:?}, parameters={:?}", ty, parameters)); + .or_else(|| bug!("Skipped bound var index: parameters={:?}", parameters)); }); let binders = @@ -898,19 +859,19 @@ impl<'tcx> BoundVarsCollector<'tcx> { } impl<'tcx> TypeVisitor<'tcx> for BoundVarsCollector<'tcx> { - fn visit_binder>(&mut self, t: &Binder) -> ControlFlow<()> { + fn visit_binder>(&mut self, t: &Binder) -> ControlFlow { self.binder_index.shift_in(1); let result = t.super_visit_with(self); self.binder_index.shift_out(1); result } - fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> { + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { match *t.kind() { ty::Bound(debruijn, bound_ty) if debruijn == self.binder_index => { match self.parameters.entry(bound_ty.var.as_u32()) { Entry::Vacant(entry) => { - entry.insert(chalk_ir::VariableKind::Ty(chalk_ir::TyKind::General)); + entry.insert(chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General)); } Entry::Occupied(entry) => match entry.get() { chalk_ir::VariableKind::Ty(_) => {} @@ -925,7 +886,7 @@ impl<'tcx> TypeVisitor<'tcx> for BoundVarsCollector<'tcx> { t.super_visit_with(self) } - fn visit_region(&mut self, r: Region<'tcx>) -> ControlFlow<()> { + fn visit_region(&mut self, r: Region<'tcx>) -> ControlFlow { match r { ty::ReLateBound(index, br) if *index == self.binder_index => match br { ty::BoundRegion::BrNamed(def_id, _name) => { @@ -980,7 +941,7 @@ impl<'a, 'tcx> TypeFolder<'tcx> for NamedBoundVarSubstitutor<'a, 'tcx> { self.tcx } - fn fold_binder>(&mut self, t: &Binder) -> Binder { + fn fold_binder>(&mut self, t: Binder) -> Binder { self.binder_index.shift_in(1); let result = t.super_fold_with(self); self.binder_index.shift_out(1); @@ -1040,7 +1001,7 @@ impl<'tcx> TypeFolder<'tcx> for ParamsSubstitutor<'tcx> { self.tcx } - fn fold_binder>(&mut self, t: &Binder) -> Binder { + fn fold_binder>(&mut self, t: Binder) -> Binder { self.binder_index.shift_in(1); let result = t.super_fold_with(self); self.binder_index.shift_out(1); @@ -1115,7 +1076,7 @@ impl PlaceholdersCollector { } impl<'tcx> TypeVisitor<'tcx> for PlaceholdersCollector { - fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> { + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { match t.kind() { ty::Placeholder(p) if p.universe == self.universe_index => { self.next_ty_placeholder = self.next_ty_placeholder.max(p.name.as_usize() + 1); @@ -1127,7 +1088,7 @@ impl<'tcx> TypeVisitor<'tcx> for PlaceholdersCollector { t.super_visit_with(self) } - fn visit_region(&mut self, r: Region<'tcx>) -> ControlFlow<()> { + fn visit_region(&mut 
self, r: Region<'tcx>) -> ControlFlow { match r { ty::RePlaceholder(p) if p.universe == self.universe_index => { if let ty::BoundRegion::BrAnon(anon) = p.name { diff --git a/compiler/rustc_traits/src/chalk/mod.rs b/compiler/rustc_traits/src/chalk/mod.rs index f174a92274e..b117e28875e 100644 --- a/compiler/rustc_traits/src/chalk/mod.rs +++ b/compiler/rustc_traits/src/chalk/mod.rs @@ -69,15 +69,15 @@ crate fn evaluate_goal<'tcx>( CanonicalVarKind::PlaceholderRegion(_ui) => unimplemented!(), CanonicalVarKind::Ty(ty) => match ty { CanonicalTyVarKind::General(ui) => chalk_ir::WithKind::new( - chalk_ir::VariableKind::Ty(chalk_ir::TyKind::General), + chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General), chalk_ir::UniverseIndex { counter: ui.index() }, ), CanonicalTyVarKind::Int => chalk_ir::WithKind::new( - chalk_ir::VariableKind::Ty(chalk_ir::TyKind::Integer), + chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::Integer), chalk_ir::UniverseIndex::root(), ), CanonicalTyVarKind::Float => chalk_ir::WithKind::new( - chalk_ir::VariableKind::Ty(chalk_ir::TyKind::Float), + chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::Float), chalk_ir::UniverseIndex::root(), ), }, @@ -97,7 +97,8 @@ crate fn evaluate_goal<'tcx>( use chalk_solve::Solver; let mut solver = chalk_engine::solve::SLGSolver::new(32, None); let db = ChalkRustIrDatabase { interner, reempty_placeholder }; - let solution = chalk_solve::logging::with_tracing_logs(|| solver.solve(&db, &lowered_goal)); + let solution = solver.solve(&db, &lowered_goal); + debug!(?obligation, ?solution, "evaluatate goal"); // Ideally, the code to convert *back* to rustc types would live close to // the code to convert *from* rustc types. Right now though, we don't diff --git a/compiler/rustc_traits/src/dropck_outlives.rs b/compiler/rustc_traits/src/dropck_outlives.rs index 6cffa6d02a4..2827163d854 100644 --- a/compiler/rustc_traits/src/dropck_outlives.rs +++ b/compiler/rustc_traits/src/dropck_outlives.rs @@ -106,7 +106,7 @@ fn dropck_outlives<'tcx>( // do not themselves define a destructor", more or less. We have // to push them onto the stack to be expanded. for ty in constraints.dtorck_types.drain(..) 
{ - match infcx.at(&cause, param_env).normalize(&ty) { + match infcx.at(&cause, param_env).normalize(ty) { Ok(Normalized { value: ty, obligations }) => { fulfill_cx.register_predicate_obligations(infcx, obligations); diff --git a/compiler/rustc_traits/src/implied_outlives_bounds.rs b/compiler/rustc_traits/src/implied_outlives_bounds.rs index c44fd1d5859..97017fbf2e5 100644 --- a/compiler/rustc_traits/src/implied_outlives_bounds.rs +++ b/compiler/rustc_traits/src/implied_outlives_bounds.rs @@ -115,7 +115,7 @@ fn compute_implied_outlives_bounds<'tcx>( } ty::PredicateAtom::TypeOutlives(ty::OutlivesPredicate(ty_a, r_b)) => { - let ty_a = infcx.resolve_vars_if_possible(&ty_a); + let ty_a = infcx.resolve_vars_if_possible(ty_a); let mut components = smallvec![]; tcx.push_outlives_components(ty_a, &mut components); implied_bounds_from_components(r_b, components) diff --git a/compiler/rustc_traits/src/normalize_erasing_regions.rs b/compiler/rustc_traits/src/normalize_erasing_regions.rs index 3e7c9ac62eb..750a0922be4 100644 --- a/compiler/rustc_traits/src/normalize_erasing_regions.rs +++ b/compiler/rustc_traits/src/normalize_erasing_regions.rs @@ -21,7 +21,7 @@ fn normalize_generic_arg_after_erasing_regions<'tcx>( tcx.sess.perf_stats.normalize_generic_arg_after_erasing_regions.fetch_add(1, Ordering::Relaxed); tcx.infer_ctxt().enter(|infcx| { let cause = ObligationCause::dummy(); - match infcx.at(&cause, param_env).normalize(&value) { + match infcx.at(&cause, param_env).normalize(value) { Ok(Normalized { value: normalized_value, obligations: normalized_obligations }) => { // We don't care about the `obligations`; they are // always only region relations, and we are about to @@ -31,8 +31,8 @@ fn normalize_generic_arg_after_erasing_regions<'tcx>( None, ); - let normalized_value = infcx.resolve_vars_if_possible(&normalized_value); - infcx.tcx.erase_regions(&normalized_value) + let normalized_value = infcx.resolve_vars_if_possible(normalized_value); + infcx.tcx.erase_regions(normalized_value) } Err(NoSolution) => bug!("could not fully normalize `{:?}`", value), } diff --git a/compiler/rustc_traits/src/type_op.rs b/compiler/rustc_traits/src/type_op.rs index 139ed6dcd35..0addde5c44c 100644 --- a/compiler/rustc_traits/src/type_op.rs +++ b/compiler/rustc_traits/src/type_op.rs @@ -70,7 +70,7 @@ impl AscribeUserTypeCx<'me, 'tcx> { DUMMY_SP, hir::CRATE_HIR_ID, self.param_env, - &value, + value, ) .into_value_registering_obligations(self.infcx, self.fulfill_cx) } @@ -184,7 +184,7 @@ where { let (param_env, Normalize { value }) = key.into_parts(); let Normalized { value, obligations } = - infcx.at(&ObligationCause::dummy(), param_env).normalize(&value)?; + infcx.at(&ObligationCause::dummy(), param_env).normalize(value)?; fulfill_cx.register_predicate_obligations(infcx, obligations); Ok(value) } diff --git a/compiler/rustc_ty/Cargo.toml b/compiler/rustc_ty_utils/Cargo.toml similarity index 95% rename from compiler/rustc_ty/Cargo.toml rename to compiler/rustc_ty_utils/Cargo.toml index acb011b2dc0..5020437bcf9 100644 --- a/compiler/rustc_ty/Cargo.toml +++ b/compiler/rustc_ty_utils/Cargo.toml @@ -1,6 +1,6 @@ [package] authors = ["The Rust Project Developers"] -name = "rustc_ty" +name = "rustc_ty_utils" version = "0.0.0" edition = "2018" diff --git a/compiler/rustc_ty/src/common_traits.rs b/compiler/rustc_ty_utils/src/common_traits.rs similarity index 100% rename from compiler/rustc_ty/src/common_traits.rs rename to compiler/rustc_ty_utils/src/common_traits.rs diff --git a/compiler/rustc_ty/src/instance.rs 
b/compiler/rustc_ty_utils/src/instance.rs similarity index 98% rename from compiler/rustc_ty/src/instance.rs rename to compiler/rustc_ty_utils/src/instance.rs index 220f4cec742..cf2c6efb471 100644 --- a/compiler/rustc_ty/src/instance.rs +++ b/compiler/rustc_ty_utils/src/instance.rs @@ -51,7 +51,7 @@ fn inner_resolve_instance<'tcx>( resolve_associated_item(tcx, &item, param_env, trait_def_id, substs) } else { let ty = tcx.type_of(def.def_id_for_type_of()); - let item_type = tcx.subst_and_normalize_erasing_regions(substs, param_env, &ty); + let item_type = tcx.subst_and_normalize_erasing_regions(substs, param_env, ty); let def = match *item_type.kind() { ty::FnDef(..) @@ -146,7 +146,7 @@ fn resolve_associated_item<'tcx>( substs, leaf_def.defining_node, ); - infcx.tcx.erase_regions(&substs) + infcx.tcx.erase_regions(substs) }); // Since this is a trait item, we need to see if the item is either a trait default item @@ -172,7 +172,7 @@ fn resolve_associated_item<'tcx>( return Ok(None); } - let substs = tcx.erase_regions(&substs); + let substs = tcx.erase_regions(substs); // Check if we just resolved an associated `const` declaration from // a `trait` to an associated `const` definition in an `impl`, where @@ -192,7 +192,7 @@ fn resolve_associated_item<'tcx>( && leaf_def.item.def_id.is_local() { let normalized_type_of = |def_id, substs| { - tcx.subst_and_normalize_erasing_regions(substs, param_env, &tcx.type_of(def_id)) + tcx.subst_and_normalize_erasing_regions(substs, param_env, tcx.type_of(def_id)) }; let original_ty = normalized_type_of(trait_item.def_id, rcvr_substs); @@ -264,7 +264,7 @@ fn resolve_associated_item<'tcx>( assert_eq!(name, sym::clone_from); // Use the default `fn clone_from` from `trait Clone`. - let substs = tcx.erase_regions(&rcvr_substs); + let substs = tcx.erase_regions(rcvr_substs); Some(ty::Instance::new(def_id, substs)) } } else { diff --git a/compiler/rustc_ty/src/lib.rs b/compiler/rustc_ty_utils/src/lib.rs similarity index 100% rename from compiler/rustc_ty/src/lib.rs rename to compiler/rustc_ty_utils/src/lib.rs diff --git a/compiler/rustc_ty/src/needs_drop.rs b/compiler/rustc_ty_utils/src/needs_drop.rs similarity index 98% rename from compiler/rustc_ty/src/needs_drop.rs rename to compiler/rustc_ty_utils/src/needs_drop.rs index 0356bcec549..d62fc764c76 100644 --- a/compiler/rustc_ty/src/needs_drop.rs +++ b/compiler/rustc_ty_utils/src/needs_drop.rs @@ -107,7 +107,7 @@ where let witness = substs.witness(); let interior_tys = match witness.kind() { - ty::GeneratorWitness(tys) => tcx.erase_late_bound_regions(tys), + &ty::GeneratorWitness(tys) => tcx.erase_late_bound_regions(tys), _ => { tcx.sess.delay_span_bug( tcx.hir().span_if_local(def_id).unwrap_or(DUMMY_SP), diff --git a/compiler/rustc_ty/src/ty.rs b/compiler/rustc_ty_utils/src/ty.rs similarity index 99% rename from compiler/rustc_ty/src/ty.rs rename to compiler/rustc_ty_utils/src/ty.rs index 2562140bb5d..720ad42da2a 100644 --- a/compiler/rustc_ty/src/ty.rs +++ b/compiler/rustc_ty_utils/src/ty.rs @@ -363,7 +363,7 @@ fn well_formed_types_in_env<'tcx>( // well-formed. 
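Most of the mechanical `(&value)` to `(value)` churn in the hunks above and below comes from one underlying signature change: helpers bounded on `TypeFoldable<'tcx>` (such as `normalize`, `erase_regions`, and `resolve_vars_if_possible`) now take their argument by value rather than by reference. A minimal standalone sketch of that pattern; `Ty`, `Foldable`, and `normalize` below are illustrative stand-ins, not the rustc items themselves:

```rust
// Sketch of the by-value signature change behind the `&ty` -> `ty` edits.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Ty(u32); // stand-in for a cheap, interned `Ty<'tcx>`-style handle

trait Foldable: Copy {
    fn fold(self) -> Self;
}

impl Foldable for Ty {
    fn fold(self) -> Self {
        Ty(self.0 + 1)
    }
}

// Before: `fn normalize<T: Foldable>(value: &T) -> T`, forcing `normalize(&ty)`
// at every call site. After: take the value directly, so call sites pass `ty`.
fn normalize<T: Foldable>(value: T) -> T {
    value.fold()
}

fn main() {
    let ty = Ty(41);
    assert_eq!(normalize(ty), Ty(42));
}
```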
NodeKind::Fn => { let fn_sig = tcx.fn_sig(def_id); - let fn_sig = tcx.liberate_late_bound_regions(def_id, &fn_sig); + let fn_sig = tcx.liberate_late_bound_regions(def_id, fn_sig); inputs.extend(fn_sig.inputs().iter().flat_map(|ty| ty.walk())); } diff --git a/compiler/rustc_typeck/src/astconv/generics.rs b/compiler/rustc_typeck/src/astconv/generics.rs index 3bfb2d3f1b0..0db5fda272a 100644 --- a/compiler/rustc_typeck/src/astconv/generics.rs +++ b/compiler/rustc_typeck/src/astconv/generics.rs @@ -1,12 +1,13 @@ use crate::astconv::{ - AstConv, ExplicitLateBound, GenericArgCountMismatch, GenericArgCountResult, GenericArgPosition, + AstConv, CreateSubstsForGenericArgsCtxt, ExplicitLateBound, GenericArgCountMismatch, + GenericArgCountResult, GenericArgPosition, }; use crate::errors::AssocTypeBindingNotAllowed; use rustc_ast::ast::ParamKindOrd; use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticId, ErrorReported}; use rustc_hir as hir; use rustc_hir::def_id::DefId; -use rustc_hir::{GenericArg, GenericArgs}; +use rustc_hir::GenericArg; use rustc_middle::ty::{ self, subst, subst::SubstsRef, GenericParamDef, GenericParamDefKind, Ty, TyCtxt, }; @@ -22,6 +23,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { sess: &Session, arg: &GenericArg<'_>, kind: &'static str, + possible_ordering_error: bool, help: Option<&str>, ) { let mut err = struct_span_err!( @@ -48,8 +50,23 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { GenericArg::Const(_) => ParamKindOrd::Const { unordered }, }; + if matches!(arg, GenericArg::Type(hir::Ty { kind: hir::TyKind::Path { .. }, .. })) + && matches!(kind_ord, ParamKindOrd::Const { .. }) + { + let suggestions = vec![ + (arg.span().shrink_to_lo(), String::from("{ ")), + (arg.span().shrink_to_hi(), String::from(" }")), + ]; + err.multipart_suggestion( + "if this generic argument was intended as a const parameter, \ + try surrounding it with braces:", + suggestions, + Applicability::MaybeIncorrect, + ); + } + // This note is only true when generic parameters are strictly ordered by their kind. - if kind_ord.cmp(&arg_ord) != core::cmp::Ordering::Equal { + if possible_ordering_error && kind_ord.cmp(&arg_ord) != core::cmp::Ordering::Equal { let (first, last) = if kind_ord < arg_ord { (kind, arg.descr()) } else { (arg.descr(), kind) }; err.note(&format!("{} arguments must be provided before {} arguments", first, last)); @@ -90,20 +107,14 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { /// instantiate a `GenericArg`. /// - `inferred_kind`: if no parameter was provided, and inference is enabled, then /// creates a suitable inference variable. 
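The new `multipart_suggestion` above targets a path written in generic-argument position that was likely meant as a const argument. A standalone illustration of the user-facing situation it is aimed at; `Buffer` and `config::LEN` are invented names, and the exact diagnostic wording is the compiler's, not shown here:

```rust
mod config {
    pub const LEN: usize = 16;
}

struct Buffer<const N: usize>([u8; N]);

// A multi-segment path such as `config::LEN` is parsed as a *type* argument,
// so the commented-out line is rejected with a kind mismatch; wrapping the
// path in braces turns it into a const expression, which is what the
// suggestion proposes.
// type Broken = Buffer<config::LEN>;
type Works = Buffer<{ config::LEN }>;

fn main() {
    let _buf: Works = Buffer([0u8; config::LEN]);
}
```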
- pub fn create_substs_for_generic_args<'b>( + pub fn create_substs_for_generic_args<'a>( tcx: TyCtxt<'tcx>, def_id: DefId, parent_substs: &[subst::GenericArg<'tcx>], has_self: bool, self_ty: Option>, arg_count: GenericArgCountResult, - args_for_def_id: impl Fn(DefId) -> (Option<&'b GenericArgs<'b>>, bool), - mut provided_kind: impl FnMut(&GenericParamDef, &GenericArg<'_>) -> subst::GenericArg<'tcx>, - mut inferred_kind: impl FnMut( - Option<&[subst::GenericArg<'tcx>]>, - &GenericParamDef, - bool, - ) -> subst::GenericArg<'tcx>, + ctx: &mut impl CreateSubstsForGenericArgsCtxt<'a, 'tcx>, ) -> SubstsRef<'tcx> { // Collect the segments of the path; we need to substitute arguments // for parameters throughout the entire path (wherever there are @@ -142,7 +153,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { substs.push( self_ty .map(|ty| ty.into()) - .unwrap_or_else(|| inferred_kind(None, param, true)), + .unwrap_or_else(|| ctx.inferred_kind(None, param, true)), ); params.next(); } @@ -151,10 +162,10 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { } // Check whether this segment takes generic arguments and the user has provided any. - let (generic_args, infer_args) = args_for_def_id(def_id); + let (generic_args, infer_args) = ctx.args_for_def_id(def_id); - let mut args = - generic_args.iter().flat_map(|generic_args| generic_args.args.iter()).peekable(); + let args_iter = generic_args.iter().flat_map(|generic_args| generic_args.args.iter()); + let mut args = args_iter.clone().peekable(); // If we encounter a type or const when we expect a lifetime, we infer the lifetimes. // If we later encounter a lifetime, we know that the arguments were provided in the @@ -173,7 +184,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { (GenericArg::Lifetime(_), GenericParamDefKind::Lifetime, _) | (GenericArg::Type(_), GenericParamDefKind::Type { .. }, _) | (GenericArg::Const(_), GenericParamDefKind::Const, _) => { - substs.push(provided_kind(param, arg)); + substs.push(ctx.provided_kind(param, arg)); args.next(); params.next(); } @@ -184,7 +195,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { ) => { // We expected a lifetime argument, but got a type or const // argument. That means we're inferring the lifetimes. - substs.push(inferred_kind(None, param, infer_args)); + substs.push(ctx.inferred_kind(None, param, infer_args)); force_infer_lt = Some(arg); params.next(); } @@ -221,8 +232,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { GenericParamDefKind::Const => { ParamKindOrd::Const { unordered: tcx - .sess - .features_untracked() + .features() .const_generics, } } @@ -242,6 +252,13 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { tcx.sess, arg, kind.descr(), + !args_iter.clone().is_sorted_by_key(|arg| match arg { + GenericArg::Lifetime(_) => ParamKindOrd::Lifetime, + GenericArg::Type(_) => ParamKindOrd::Type, + GenericArg::Const(_) => ParamKindOrd::Const { + unordered: tcx.features().const_generics, + }, + }), Some(&format!( "reorder the arguments: {}: `<{}>`", param_types_present @@ -293,7 +310,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { assert_eq!(kind, "lifetime"); let provided = force_infer_lt.expect("lifetimes ought to have been inferred"); - Self::generic_arg_mismatch_err(tcx.sess, provided, kind, None); + Self::generic_arg_mismatch_err(tcx.sess, provided, kind, false, None); } break; @@ -302,7 +319,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { (None, Some(¶m)) => { // If there are fewer arguments than parameters, it means // we're inferring the remaining arguments. 
- substs.push(inferred_kind(Some(&substs), param, infer_args)); + substs.push(ctx.inferred_kind(Some(&substs), param, infer_args)); params.next(); } @@ -351,6 +368,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { // that lifetimes will proceed types. So it suffices to check the number of each generic // arguments in order to validate them with respect to the generic parameters. let param_counts = def.own_counts(); + let named_type_param_count = param_counts.types - has_self as usize; let arg_counts = args.own_counts(); let infer_lifetimes = position != GenericArgPosition::Type && arg_counts.lifetimes == 0; @@ -389,11 +407,11 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { // For kinds without defaults (e.g.., lifetimes), `required == permitted`. // For other kinds (i.e., types), `permitted` may be greater than `required`. if required <= provided && provided <= permitted { - return Ok(()); + return true; } if silent { - return Err((0i32, None)); + return false; } // Unfortunately lifetime and type parameter mismatches are typically styled @@ -409,25 +427,26 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { (required, "") }; - let (spans, label) = if required == permitted && provided > permitted { + let (spans, labels) = if provided > permitted { // In the case when the user has provided too many arguments, // we want to point to the unexpected arguments. - let spans: Vec = args.args[offset + permitted..offset + provided] + let (spans, labels): (Vec, Vec) = args.args + [offset + permitted..offset + provided] .iter() - .map(|arg| arg.span()) - .collect(); + .map(|arg| (arg.span(), format!("unexpected {} argument", arg.short_descr()))) + .unzip(); unexpected_spans.extend(spans.clone()); - (spans, format!("unexpected {} argument", kind)) + (spans, labels) } else { ( vec![span], - format!( + vec![format!( "expected {}{} {} argument{}", quantifier, bound, kind, pluralize!(bound), - ), + )], ) }; @@ -439,105 +458,57 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { ), DiagnosticId::Error("E0107".into()), ); - for span in spans { + for (span, label) in spans.into_iter().zip(labels) { err.span_label(span, label.as_str()); } - - assert_ne!(bound, provided); - Err((bound as i32 - provided as i32, Some(err))) + err.emit(); + false }; let mut unexpected_spans = vec![]; - let mut lifetime_count_correct = Ok(()); - if !infer_lifetimes || arg_counts.lifetimes > param_counts.lifetimes { - lifetime_count_correct = check_kind_count( - "lifetime", - param_counts.lifetimes, - param_counts.lifetimes, - arg_counts.lifetimes, - 0, - &mut unexpected_spans, - explicit_late_bound == ExplicitLateBound::Yes, - ); - } - - // FIXME(const_generics:defaults) - let mut const_count_correct = Ok(()); - if !infer_args || arg_counts.consts > param_counts.consts { - const_count_correct = check_kind_count( - "const", - param_counts.consts, - param_counts.consts, - arg_counts.consts, - arg_counts.lifetimes + arg_counts.types, - &mut unexpected_spans, - false, - ); - } - - // Note that type errors are currently be emitted *after* const errors. 
- let mut type_count_correct = Ok(()); - if !infer_args || arg_counts.types > param_counts.types - defaults.types - has_self as usize - { - type_count_correct = check_kind_count( - "type", - param_counts.types - defaults.types - has_self as usize, - param_counts.types - has_self as usize, - arg_counts.types, - arg_counts.lifetimes, - &mut unexpected_spans, - false, - ); - } - - // Emit a help message if it's possible that a type could be surrounded in braces - if let Err((c_mismatch, Some(ref mut _const_err))) = const_count_correct { - if let Err((_, Some(ref mut type_err))) = type_count_correct { - let possible_matches = args.args[arg_counts.lifetimes..] - .iter() - .filter(|arg| { - matches!( - arg, - GenericArg::Type(hir::Ty { kind: hir::TyKind::Path { .. }, .. }) - ) - }) - .take(c_mismatch.max(0) as usize); - for arg in possible_matches { - let suggestions = vec![ - (arg.span().shrink_to_lo(), String::from("{ ")), - (arg.span().shrink_to_hi(), String::from(" }")), - ]; - type_err.multipart_suggestion( - "If this generic argument was intended as a const parameter, \ - try surrounding it with braces:", - suggestions, - Applicability::MaybeIncorrect, - ); - } - } - } + let lifetime_count_correct = check_kind_count( + "lifetime", + if infer_lifetimes { 0 } else { param_counts.lifetimes }, + param_counts.lifetimes, + arg_counts.lifetimes, + 0, + &mut unexpected_spans, + explicit_late_bound == ExplicitLateBound::Yes, + ); - let emit_correct = - |correct: Result<(), (_, Option>)>| match correct { - Ok(()) => Ok(()), - Err((_, None)) => Err(()), - Err((_, Some(mut err))) => { - err.emit(); - Err(()) - } - }; + let kind_str = if param_counts.consts + arg_counts.consts == 0 { + "type" + } else if named_type_param_count + arg_counts.types == 0 { + "const" + } else { + "generic" + }; - let arg_count_correct = emit_correct(lifetime_count_correct) - .and(emit_correct(const_count_correct)) - .and(emit_correct(type_count_correct)); + let arg_count_correct = check_kind_count( + kind_str, + if infer_args { + 0 + } else { + param_counts.consts + named_type_param_count - defaults.types + }, + param_counts.consts + named_type_param_count, + arg_counts.consts + arg_counts.types, + arg_counts.lifetimes, + &mut unexpected_spans, + false, + ); GenericArgCountResult { explicit_late_bound, - correct: arg_count_correct.map_err(|()| GenericArgCountMismatch { - reported: Some(ErrorReported), - invalid_args: unexpected_spans, - }), + correct: if lifetime_count_correct && arg_count_correct { + Ok(()) + } else { + Err(GenericArgCountMismatch { + reported: Some(ErrorReported), + invalid_args: unexpected_spans, + }) + }, } } diff --git a/compiler/rustc_typeck/src/astconv/mod.rs b/compiler/rustc_typeck/src/astconv/mod.rs index 07e523af3eb..89c5adfa14c 100644 --- a/compiler/rustc_typeck/src/astconv/mod.rs +++ b/compiler/rustc_typeck/src/astconv/mod.rs @@ -165,6 +165,23 @@ pub struct GenericArgCountResult { pub correct: Result<(), GenericArgCountMismatch>, } +pub trait CreateSubstsForGenericArgsCtxt<'a, 'tcx> { + fn args_for_def_id(&mut self, def_id: DefId) -> (Option<&'a GenericArgs<'a>>, bool); + + fn provided_kind( + &mut self, + param: &ty::GenericParamDef, + arg: &GenericArg<'_>, + ) -> subst::GenericArg<'tcx>; + + fn inferred_kind( + &mut self, + substs: Option<&[subst::GenericArg<'tcx>]>, + param: &ty::GenericParamDef, + infer_args: bool, + ) -> subst::GenericArg<'tcx>; +} + impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { pub fn ast_region_to_region( &self, @@ -321,81 +338,102 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { ); 
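The hunks above replace the trio of closure parameters on `create_substs_for_generic_args` with a single `CreateSubstsForGenericArgsCtxt` implementor, so state such as `missing_type_params` and `inferred_params` can live on one context value instead of being threaded through three separate closures. A minimal sketch of that general "several closures, one context trait" shape; `ArgsCtxt`, `collect_args`, and `DemoCtxt` are illustrative names, not rustc items:

```rust
trait ArgsCtxt {
    fn provided(&mut self, idx: usize) -> Option<u32>;
    fn inferred(&mut self, idx: usize) -> u32;
}

// Before, this would take two `impl FnMut(..)` parameters; both closures
// wanting `&mut` access to the same caller state is awkward, while a single
// `&mut impl ArgsCtxt` is not.
fn collect_args(len: usize, ctx: &mut impl ArgsCtxt) -> Vec<u32> {
    (0..len).map(|i| ctx.provided(i).unwrap_or_else(|| ctx.inferred(i))).collect()
}

struct DemoCtxt {
    explicit: Vec<Option<u32>>,
    inferred_positions: Vec<usize>, // state shared by both callbacks
}

impl ArgsCtxt for DemoCtxt {
    fn provided(&mut self, idx: usize) -> Option<u32> {
        self.explicit.get(idx).copied().flatten()
    }
    fn inferred(&mut self, idx: usize) -> u32 {
        self.inferred_positions.push(idx);
        0
    }
}

fn main() {
    let mut ctx = DemoCtxt { explicit: vec![Some(7), None, Some(9)], inferred_positions: vec![] };
    assert_eq!(collect_args(3, &mut ctx), vec![7, 0, 9]);
    assert_eq!(ctx.inferred_positions, vec![1]);
}
```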
let is_object = self_ty.map_or(false, |ty| ty == self.tcx().types.trait_object_dummy_self); - let default_needs_object_self = |param: &ty::GenericParamDef| { - if let GenericParamDefKind::Type { has_default, .. } = param.kind { - if is_object && has_default { - let default_ty = tcx.at(span).type_of(param.def_id); - let self_param = tcx.types.self_param; - if default_ty.walk().any(|arg| arg == self_param.into()) { - // There is no suitable inference default for a type parameter - // that references self, in an object type. - return true; + + struct SubstsForAstPathCtxt<'a, 'tcx> { + astconv: &'a (dyn AstConv<'tcx> + 'a), + def_id: DefId, + generic_args: &'a GenericArgs<'a>, + span: Span, + missing_type_params: Vec, + inferred_params: Vec, + infer_args: bool, + is_object: bool, + } + + impl<'tcx, 'a> SubstsForAstPathCtxt<'tcx, 'a> { + fn default_needs_object_self(&mut self, param: &ty::GenericParamDef) -> bool { + let tcx = self.astconv.tcx(); + if let GenericParamDefKind::Type { has_default, .. } = param.kind { + if self.is_object && has_default { + let default_ty = tcx.at(self.span).type_of(param.def_id); + let self_param = tcx.types.self_param; + if default_ty.walk().any(|arg| arg == self_param.into()) { + // There is no suitable inference default for a type parameter + // that references self, in an object type. + return true; + } } } - } - false - }; + false + } + } - let mut missing_type_params = vec![]; - let mut inferred_params = vec![]; - let substs = Self::create_substs_for_generic_args( - tcx, - def_id, - parent_substs, - self_ty.is_some(), - self_ty, - arg_count.clone(), - // Provide the generic args, and whether types should be inferred. - |did| { - if did == def_id { - (Some(generic_args), infer_args) + impl<'a, 'tcx> CreateSubstsForGenericArgsCtxt<'a, 'tcx> for SubstsForAstPathCtxt<'a, 'tcx> { + fn args_for_def_id(&mut self, did: DefId) -> (Option<&'a GenericArgs<'a>>, bool) { + if did == self.def_id { + (Some(self.generic_args), self.infer_args) } else { // The last component of this tuple is unimportant. (None, false) } - }, - // Provide substitutions for parameters for which (valid) arguments have been provided. - |param, arg| match (¶m.kind, arg) { - (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => { - self.ast_region_to_region(<, Some(param)).into() - } - (GenericParamDefKind::Type { has_default, .. }, GenericArg::Type(ty)) => { - if *has_default { - tcx.check_optional_stability( - param.def_id, - Some(arg.id()), - arg.span(), - |_, _| { - // Default generic parameters may not be marked - // with stability attributes, i.e. when the - // default parameter was defined at the same time - // as the rest of the type. As such, we ignore missing - // stability attributes. + } + + fn provided_kind( + &mut self, + param: &ty::GenericParamDef, + arg: &GenericArg<'_>, + ) -> subst::GenericArg<'tcx> { + let tcx = self.astconv.tcx(); + match (¶m.kind, arg) { + (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => { + self.astconv.ast_region_to_region(<, Some(param)).into() + } + (&GenericParamDefKind::Type { has_default, .. }, GenericArg::Type(ty)) => { + if has_default { + tcx.check_optional_stability( + param.def_id, + Some(arg.id()), + arg.span(), + |_, _| { + // Default generic parameters may not be marked + // with stability attributes, i.e. when the + // default parameter was defined at the same time + // as the rest of the type. As such, we ignore missing + // stability attributes. 
+ }, + ) + } + if let (hir::TyKind::Infer, false) = + (&ty.kind, self.astconv.allow_ty_infer()) + { + self.inferred_params.push(ty.span); + tcx.ty_error().into() + } else { + self.astconv.ast_ty_to_ty(&ty).into() + } + } + (GenericParamDefKind::Const, GenericArg::Const(ct)) => { + ty::Const::from_opt_const_arg_anon_const( + tcx, + ty::WithOptConstParam { + did: tcx.hir().local_def_id(ct.value.hir_id), + const_param_did: Some(param.def_id), }, ) + .into() } - if let (hir::TyKind::Infer, false) = (&ty.kind, self.allow_ty_infer()) { - inferred_params.push(ty.span); - tcx.ty_error().into() - } else { - self.ast_ty_to_ty(&ty).into() - } - } - (GenericParamDefKind::Const, GenericArg::Const(ct)) => { - ty::Const::from_opt_const_arg_anon_const( - tcx, - ty::WithOptConstParam { - did: tcx.hir().local_def_id(ct.value.hir_id), - const_param_did: Some(param.def_id), - }, - ) - .into() + _ => unreachable!(), } - _ => unreachable!(), - }, - // Provide substitutions for parameters for which arguments are inferred. - |substs, param, infer_args| { + } + + fn inferred_kind( + &mut self, + substs: Option<&[subst::GenericArg<'tcx>]>, + param: &ty::GenericParamDef, + infer_args: bool, + ) -> subst::GenericArg<'tcx> { + let tcx = self.astconv.tcx(); match param.kind { GenericParamDefKind::Lifetime => tcx.lifetimes.re_static.into(), GenericParamDefKind::Type { has_default, .. } => { @@ -407,48 +445,72 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { // other type parameters may reference `Self` in their // defaults. This will lead to an ICE if we are not // careful! - if default_needs_object_self(param) { - missing_type_params.push(param.name.to_string()); + if self.default_needs_object_self(param) { + self.missing_type_params.push(param.name.to_string()); tcx.ty_error().into() } else { // This is a default type parameter. - self.normalize_ty( - span, - tcx.at(span).type_of(param.def_id).subst_spanned( - tcx, - substs.unwrap(), - Some(span), - ), - ) - .into() + self.astconv + .normalize_ty( + self.span, + tcx.at(self.span).type_of(param.def_id).subst_spanned( + tcx, + substs.unwrap(), + Some(self.span), + ), + ) + .into() } } else if infer_args { // No type parameters were provided, we can infer all. - let param = - if !default_needs_object_self(param) { Some(param) } else { None }; - self.ty_infer(param, span).into() + let param = if !self.default_needs_object_self(param) { + Some(param) + } else { + None + }; + self.astconv.ty_infer(param, self.span).into() } else { // We've already errored above about the mismatch. tcx.ty_error().into() } } GenericParamDefKind::Const => { - let ty = tcx.at(span).type_of(param.def_id); + let ty = tcx.at(self.span).type_of(param.def_id); // FIXME(const_generics:defaults) if infer_args { // No const parameters were provided, we can infer all. - self.ct_infer(ty, Some(param), span).into() + self.astconv.ct_infer(ty, Some(param), self.span).into() } else { // We've already errored above about the mismatch. 
tcx.const_error(ty).into() } } } - }, + } + } + + let mut substs_ctx = SubstsForAstPathCtxt { + astconv: self, + def_id, + span, + generic_args, + missing_type_params: vec![], + inferred_params: vec![], + infer_args, + is_object, + }; + let substs = Self::create_substs_for_generic_args( + tcx, + def_id, + parent_substs, + self_ty.is_some(), + self_ty, + arg_count.clone(), + &mut substs_ctx, ); self.complain_about_missing_type_params( - missing_type_params, + substs_ctx.missing_type_params, def_id, span, generic_args.args.is_empty(), diff --git a/compiler/rustc_typeck/src/check/_match.rs b/compiler/rustc_typeck/src/check/_match.rs index e8eea65137f..3a5eeb5381b 100644 --- a/compiler/rustc_typeck/src/check/_match.rs +++ b/compiler/rustc_typeck/src/check/_match.rs @@ -131,7 +131,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { id, self.body_id, self.param_env, - &ty, + ty, arm.body.span, ); let mut suggest_box = !impl_trait_ret_ty.obligations.is_empty(); diff --git a/compiler/rustc_typeck/src/check/callee.rs b/compiler/rustc_typeck/src/check/callee.rs index a38fb9642b9..ebfb401fcf3 100644 --- a/compiler/rustc_typeck/src/check/callee.rs +++ b/compiler/rustc_typeck/src/check/callee.rs @@ -133,7 +133,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { .replace_bound_vars_with_fresh_vars( call_expr.span, infer::FnCall, - &closure_sig, + closure_sig, ) .0; let adjustments = self.adjust_steps(autoderef); @@ -407,8 +407,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // previously appeared within a `Binder<>` and hence would not // have been normalized before. let fn_sig = - self.replace_bound_vars_with_fresh_vars(call_expr.span, infer::FnCall, &fn_sig).0; - let fn_sig = self.normalize_associated_types_in(call_expr.span, &fn_sig); + self.replace_bound_vars_with_fresh_vars(call_expr.span, infer::FnCall, fn_sig).0; + let fn_sig = self.normalize_associated_types_in(call_expr.span, fn_sig); // Call the generic checker. let expected_arg_tys = self.expected_inputs_for_expected_output( diff --git a/compiler/rustc_typeck/src/check/cast.rs b/compiler/rustc_typeck/src/check/cast.rs index 5c2bdb86f76..36240a9b41e 100644 --- a/compiler/rustc_typeck/src/check/cast.rs +++ b/compiler/rustc_typeck/src/check/cast.rs @@ -87,7 +87,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ) -> Result>, ErrorReported> { debug!("pointer_kind({:?}, {:?})", t, span); - let t = self.resolve_vars_if_possible(&t); + let t = self.resolve_vars_if_possible(t); if t.references_error() { return Err(ErrorReported); @@ -377,12 +377,12 @@ impl<'a, 'tcx> CastCheck<'tcx> { // Check `impl From for self.cast_ty {}` for accurate suggestion: if let Ok(snippet) = fcx.tcx.sess.source_map().span_to_snippet(self.expr.span) { if let Some(from_trait) = fcx.tcx.get_diagnostic_item(sym::from_trait) { - let ty = fcx.resolve_vars_if_possible(&self.cast_ty); + let ty = fcx.resolve_vars_if_possible(self.cast_ty); // Erase regions to avoid panic in `prove_value` when calling // `type_implements_trait`. - let ty = fcx.tcx.erase_regions(&ty); - let expr_ty = fcx.resolve_vars_if_possible(&self.expr_ty); - let expr_ty = fcx.tcx.erase_regions(&expr_ty); + let ty = fcx.tcx.erase_regions(ty); + let expr_ty = fcx.resolve_vars_if_possible(self.expr_ty); + let expr_ty = fcx.tcx.erase_regions(expr_ty); let ty_params = fcx.tcx.mk_substs_trait(expr_ty, &[]); // Check for infer types because cases like `Option<{integer}>` would // panic otherwise. 
@@ -471,7 +471,7 @@ impl<'a, 'tcx> CastCheck<'tcx> { self.expr_ty, E0620, "cast to unsized type: `{}` as `{}`", - fcx.resolve_vars_if_possible(&self.expr_ty), + fcx.resolve_vars_if_possible(self.expr_ty), tstr ); match self.expr_ty.kind() { @@ -607,7 +607,7 @@ impl<'a, 'tcx> CastCheck<'tcx> { // Attempt a coercion to a fn pointer type. let f = fcx.normalize_associated_types_in( self.expr.span, - &self.expr_ty.fn_sig(fcx.tcx), + self.expr_ty.fn_sig(fcx.tcx), ); let res = fcx.try_coerce( self.expr, diff --git a/compiler/rustc_typeck/src/check/check.rs b/compiler/rustc_typeck/src/check/check.rs index 70d94ef869d..d5518dfc15a 100644 --- a/compiler/rustc_typeck/src/check/check.rs +++ b/compiler/rustc_typeck/src/check/check.rs @@ -75,7 +75,7 @@ pub(super) fn check_fn<'a, 'tcx>( let declared_ret_ty = fn_sig.output(); let revealed_ret_ty = - fcx.instantiate_opaque_types_from_value(fn_id, &declared_ret_ty, decl.output.span()); + fcx.instantiate_opaque_types_from_value(fn_id, declared_ret_ty, decl.output.span()); debug!("check_fn: declared_ret_ty: {}, revealed_ret_ty: {}", declared_ret_ty, revealed_ret_ty); fcx.ret_coercion = Some(RefCell::new(CoerceMany::new(revealed_ret_ty))); fcx.ret_type_span = Some(decl.output.span()); @@ -94,6 +94,37 @@ pub(super) fn check_fn<'a, 'tcx>( fn_maybe_err(tcx, span, fn_sig.abi); + if fn_sig.abi == Abi::RustCall { + let expected_args = if let ImplicitSelfKind::None = decl.implicit_self { 1 } else { 2 }; + + let err = || { + let item = match tcx.hir().get(fn_id) { + Node::Item(hir::Item { kind: ItemKind::Fn(header, ..), .. }) => Some(header), + Node::ImplItem(hir::ImplItem { + kind: hir::ImplItemKind::Fn(header, ..), .. + }) => Some(header), + // Closures are RustCall, but they tuple their arguments, so shouldn't be checked + Node::Expr(hir::Expr { kind: hir::ExprKind::Closure(..), .. 
}) => None, + node => bug!("Item being checked wasn't a function/closure: {:?}", node), + }; + + if let Some(header) = item { + tcx.sess.span_err(header.span, "A function with the \"rust-call\" ABI must take a single non-self argument that is a tuple") + } + }; + + if fn_sig.inputs().len() != expected_args { + err() + } else { + // FIXME(CraftSpider) Add a check on parameter expansion, so we don't just make the ICE happen later on + // This will probably require wide-scale changes to support a TupleKind obligation + // We can't resolve this without knowing the type of the param + if !matches!(fn_sig.inputs()[expected_args - 1].kind(), ty::Tuple(_) | ty::Param(_)) { + err() + } + } + } + if body.generator_kind.is_some() && can_be_generator.is_some() { let yield_ty = fcx .next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::TypeInference, span }); @@ -446,24 +477,24 @@ pub(super) fn check_opaque_for_inheriting_lifetimes( struct ProhibitOpaqueVisitor<'tcx> { opaque_identity_ty: Ty<'tcx>, generics: &'tcx ty::Generics, - ty: Option>, }; impl<'tcx> ty::fold::TypeVisitor<'tcx> for ProhibitOpaqueVisitor<'tcx> { - fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> { + type BreakTy = Option>; + + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { debug!("check_opaque_for_inheriting_lifetimes: (visit_ty) t={:?}", t); if t != self.opaque_identity_ty && t.super_visit_with(self).is_break() { - self.ty = Some(t); - return ControlFlow::BREAK; + return ControlFlow::Break(Some(t)); } ControlFlow::CONTINUE } - fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> { + fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow { debug!("check_opaque_for_inheriting_lifetimes: (visit_region) r={:?}", r); if let RegionKind::ReEarlyBound(ty::EarlyBoundRegion { index, .. }) = r { if *index < self.generics.parent_count as u32 { - return ControlFlow::BREAK; + return ControlFlow::Break(None); } else { return ControlFlow::CONTINUE; } @@ -472,7 +503,7 @@ pub(super) fn check_opaque_for_inheriting_lifetimes( r.super_visit_with(self) } - fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<()> { + fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow { if let ty::ConstKind::Unevaluated(..) = c.val { // FIXME(#72219) We currenctly don't detect lifetimes within substs // which would violate this check. Even though the particular substitution is not used @@ -494,18 +525,17 @@ pub(super) fn check_opaque_for_inheriting_lifetimes( InternalSubsts::identity_for_item(tcx, def_id.to_def_id()), ), generics: tcx.generics_of(def_id), - ty: None, }; let prohibit_opaque = tcx .explicit_item_bounds(def_id) .iter() - .any(|(predicate, _)| predicate.visit_with(&mut visitor).is_break()); + .try_for_each(|(predicate, _)| predicate.visit_with(&mut visitor)); debug!( "check_opaque_for_inheriting_lifetimes: prohibit_opaque={:?}, visitor={:?}", prohibit_opaque, visitor ); - if prohibit_opaque { + if let Some(ty) = prohibit_opaque.break_value() { let is_async = match item.kind { ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. 
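`ProhibitOpaqueVisitor` above now reports its finding through `ControlFlow::Break` via the new `BreakTy` associated type, instead of stashing the offending type in a struct field and breaking with `()`. A standalone sketch of the same idea using the now-stable `std::ops::ControlFlow` (stable since Rust 1.55); `find_first_over` is an invented example, not rustc code:

```rust
use std::ops::ControlFlow;

// The value of interest rides along with the early exit, so the caller does
// not need a side channel (a mutable field) to get it back out.
fn find_first_over(limit: i32, values: &[i32]) -> ControlFlow<i32> {
    for &v in values {
        if v > limit {
            return ControlFlow::Break(v);
        }
    }
    ControlFlow::Continue(())
}

fn main() {
    match find_first_over(10, &[3, 7, 42, 5]) {
        ControlFlow::Break(v) => println!("first value over the limit: {}", v),
        ControlFlow::Continue(()) => println!("no value exceeded the limit"),
    }
}
```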
}) => match origin { hir::OpaqueTyOrigin::AsyncFn => true, @@ -525,7 +555,7 @@ pub(super) fn check_opaque_for_inheriting_lifetimes( if let Ok(snippet) = tcx.sess.source_map().span_to_snippet(span) { if snippet == "Self" { - if let Some(ty) = visitor.ty { + if let Some(ty) = ty { err.span_suggestion( span, "consider spelling out the type instead", @@ -601,7 +631,7 @@ fn check_opaque_meets_bounds<'tcx>( let misc_cause = traits::ObligationCause::misc(span, hir_id); let (_, opaque_type_map) = inh.register_infer_ok_obligations( - infcx.instantiate_opaque_types(def_id, hir_id, param_env, &opaque_ty, span), + infcx.instantiate_opaque_types(def_id, hir_id, param_env, opaque_ty, span), ); for (def_id, opaque_defn) in opaque_type_map { @@ -1455,7 +1485,7 @@ fn opaque_type_cycle_error(tcx: TyCtxt<'tcx>, def_id: LocalDefId, span: Span) { { struct VisitTypes(Vec); impl<'tcx> ty::fold::TypeVisitor<'tcx> for VisitTypes { - fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> { + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { match *t.kind() { ty::Opaque(def, _) => { self.0.push(def); diff --git a/compiler/rustc_typeck/src/check/closure.rs b/compiler/rustc_typeck/src/check/closure.rs index 2ba05071c05..8082a230216 100644 --- a/compiler/rustc_typeck/src/check/closure.rs +++ b/compiler/rustc_typeck/src/check/closure.rs @@ -257,7 +257,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let input_tys = if is_fn { let arg_param_ty = trait_ref.skip_binder().substs.type_at(1); - let arg_param_ty = self.resolve_vars_if_possible(&arg_param_ty); + let arg_param_ty = self.resolve_vars_if_possible(arg_param_ty); debug!("deduce_sig_from_projection: arg_param_ty={:?}", arg_param_ty); match arg_param_ty.kind() { @@ -271,7 +271,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { }; let ret_param_ty = projection.skip_binder().ty; - let ret_param_ty = self.resolve_vars_if_possible(&ret_param_ty); + let ret_param_ty = self.resolve_vars_if_possible(ret_param_ty); debug!("deduce_sig_from_projection: ret_param_ty={:?}", ret_param_ty); let sig = self.tcx.mk_fn_sig( @@ -400,7 +400,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // `deduce_expectations_from_expected_type` introduces // late-bound lifetimes defined elsewhere, which we now // anonymize away, so as not to confuse the user. - let bound_sig = self.tcx.anonymize_late_bound_regions(&bound_sig); + let bound_sig = self.tcx.anonymize_late_bound_regions(bound_sig); let closure_sigs = self.closure_sigs(expr_def_id, body, bound_sig); @@ -500,7 +500,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let (supplied_ty, _) = self.infcx.replace_bound_vars_with_fresh_vars( hir_ty.span, LateBoundRegionConversionTime::FnCall, - &ty::Binder::bind(supplied_ty), + ty::Binder::bind(supplied_ty), ); // recreated from (*) above // Check that E' = S'. @@ -513,7 +513,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let (supplied_output_ty, _) = self.infcx.replace_bound_vars_with_fresh_vars( decl.output.span(), LateBoundRegionConversionTime::FnCall, - &supplied_sig.output(), + supplied_sig.output(), ); let cause = &self.misc(decl.output.span()); let InferOk { value: (), obligations } = self @@ -578,7 +578,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { debug!("supplied_sig_of_closure: result={:?}", result); - let c_result = self.inh.infcx.canonicalize_response(&result); + let c_result = self.inh.infcx.canonicalize_response(result); self.typeck_results.borrow_mut().user_provided_sigs.insert(expr_def_id, c_result); result @@ -683,7 +683,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // Extract the type from the projection. 
Note that there can // be no bound variables in this type because the "self type" // does not have any regions in it. - let output_ty = self.resolve_vars_if_possible(&predicate.ty); + let output_ty = self.resolve_vars_if_possible(predicate.ty); debug!("deduce_future_output_from_projection: output_ty={:?}", output_ty); Some(output_ty) } @@ -723,12 +723,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { body: &hir::Body<'_>, bound_sig: ty::PolyFnSig<'tcx>, ) -> ClosureSignatures<'tcx> { - let liberated_sig = self.tcx().liberate_late_bound_regions(expr_def_id, &bound_sig); + let liberated_sig = self.tcx().liberate_late_bound_regions(expr_def_id, bound_sig); let liberated_sig = self.inh.normalize_associated_types_in( body.value.span, body.value.hir_id, self.param_env, - &liberated_sig, + liberated_sig, ); ClosureSignatures { bound_sig, liberated_sig } } diff --git a/compiler/rustc_typeck/src/check/coercion.rs b/compiler/rustc_typeck/src/check/coercion.rs index 6da3ecde329..0f5f0ab0260 100644 --- a/compiler/rustc_typeck/src/check/coercion.rs +++ b/compiler/rustc_typeck/src/check/coercion.rs @@ -606,7 +606,7 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { // Uncertain or unimplemented. Ok(None) => { if trait_pred.def_id() == unsize_did { - let trait_pred = self.resolve_vars_if_possible(&trait_pred); + let trait_pred = self.resolve_vars_if_possible(trait_pred); let self_ty = trait_pred.skip_binder().self_ty(); let unsize_ty = trait_pred.skip_binder().trait_ref.substs[1].expect_ty(); debug!("coerce_unsized: ambiguous unsize case for {:?}", trait_pred); @@ -732,7 +732,7 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { } let InferOk { value: a_sig, mut obligations } = - self.normalize_associated_types_in_as_infer_ok(self.cause.span, &a_sig); + self.normalize_associated_types_in_as_infer_ok(self.cause.span, a_sig); let a_fn_pointer = self.tcx.mk_fn_ptr(a_sig); let InferOk { value, obligations: o2 } = self.coerce_from_safe_fn( @@ -973,8 +973,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { }; if let (Some(a_sig), Some(b_sig)) = (a_sig, b_sig) { // The signature must match. 
- let a_sig = self.normalize_associated_types_in(new.span, &a_sig); - let b_sig = self.normalize_associated_types_in(new.span, &b_sig); + let a_sig = self.normalize_associated_types_in(new.span, a_sig); + let b_sig = self.normalize_associated_types_in(new.span, b_sig); let sig = self .at(cause, self.param_env) .trace(prev_ty, new_ty) @@ -1490,7 +1490,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> { *sp, &format!( "return type inferred to be `{}` here", - fcx.resolve_vars_if_possible(&expected) + fcx.resolve_vars_if_possible(expected) ), ); } diff --git a/compiler/rustc_typeck/src/check/compare_method.rs b/compiler/rustc_typeck/src/check/compare_method.rs index 4acc7451a21..20090d37606 100644 --- a/compiler/rustc_typeck/src/check/compare_method.rs +++ b/compiler/rustc_typeck/src/check/compare_method.rs @@ -224,11 +224,11 @@ fn compare_predicate_entailment<'tcx>( let (impl_m_own_bounds, _) = infcx.replace_bound_vars_with_fresh_vars( impl_m_span, infer::HigherRankedType, - &ty::Binder::bind(impl_m_own_bounds.predicates), + ty::Binder::bind(impl_m_own_bounds.predicates), ); for predicate in impl_m_own_bounds { let traits::Normalized { value: predicate, obligations } = - traits::normalize(&mut selcx, param_env, normalize_cause.clone(), &predicate); + traits::normalize(&mut selcx, param_env, normalize_cause.clone(), predicate); inh.register_predicates(obligations); inh.register_predicate(traits::Obligation::new(cause.clone(), param_env, predicate)); @@ -253,17 +253,17 @@ fn compare_predicate_entailment<'tcx>( let (impl_sig, _) = infcx.replace_bound_vars_with_fresh_vars( impl_m_span, infer::HigherRankedType, - &tcx.fn_sig(impl_m.def_id), + tcx.fn_sig(impl_m.def_id), ); let impl_sig = - inh.normalize_associated_types_in(impl_m_span, impl_m_hir_id, param_env, &impl_sig); + inh.normalize_associated_types_in(impl_m_span, impl_m_hir_id, param_env, impl_sig); let impl_fty = tcx.mk_fn_ptr(ty::Binder::bind(impl_sig)); debug!("compare_impl_method: impl_fty={:?}", impl_fty); - let trait_sig = tcx.liberate_late_bound_regions(impl_m.def_id, &tcx.fn_sig(trait_m.def_id)); + let trait_sig = tcx.liberate_late_bound_regions(impl_m.def_id, tcx.fn_sig(trait_m.def_id)); let trait_sig = trait_sig.subst(tcx, trait_to_placeholder_substs); let trait_sig = - inh.normalize_associated_types_in(impl_m_span, impl_m_hir_id, param_env, &trait_sig); + inh.normalize_associated_types_in(impl_m_span, impl_m_hir_id, param_env, trait_sig); let trait_fty = tcx.mk_fn_ptr(ty::Binder::bind(trait_sig)); debug!("compare_impl_method: trait_fty={:?}", trait_fty); @@ -499,7 +499,7 @@ fn compare_self_type<'tcx>( tcx.infer_ctxt().enter(|infcx| { let self_arg_ty = - tcx.liberate_late_bound_regions(method.def_id, &ty::Binder::bind(self_arg_ty)); + tcx.liberate_late_bound_regions(method.def_id, ty::Binder::bind(self_arg_ty)); let can_eq_self = |ty| infcx.can_eq(param_env, untransformed_self_ty, ty).is_ok(); match ExplicitSelf::determine(self_arg_ty, can_eq_self) { ExplicitSelf::ByValue => "self".to_owned(), @@ -968,12 +968,12 @@ crate fn compare_const_impl<'tcx>( // There is no "body" here, so just pass dummy id. 
let impl_ty = - inh.normalize_associated_types_in(impl_c_span, impl_c_hir_id, param_env, &impl_ty); + inh.normalize_associated_types_in(impl_c_span, impl_c_hir_id, param_env, impl_ty); debug!("compare_const_impl: impl_ty={:?}", impl_ty); let trait_ty = - inh.normalize_associated_types_in(impl_c_span, impl_c_hir_id, param_env, &trait_ty); + inh.normalize_associated_types_in(impl_c_span, impl_c_hir_id, param_env, trait_ty); debug!("compare_const_impl: trait_ty={:?}", trait_ty); @@ -1136,7 +1136,7 @@ fn compare_type_predicate_entailment<'tcx>( for predicate in impl_ty_own_bounds.predicates { let traits::Normalized { value: predicate, obligations } = - traits::normalize(&mut selcx, param_env, normalize_cause.clone(), &predicate); + traits::normalize(&mut selcx, param_env, normalize_cause.clone(), predicate); inh.register_predicates(obligations); inh.register_predicate(traits::Obligation::new(cause.clone(), param_env, predicate)); @@ -1261,7 +1261,7 @@ pub fn check_type_bounds<'tcx>( &mut selcx, normalize_param_env, normalize_cause.clone(), - &obligation.predicate, + obligation.predicate, ); debug!("compare_projection_bounds: normalized predicate = {:?}", normalized_predicate); obligation.predicate = normalized_predicate; diff --git a/compiler/rustc_typeck/src/check/dropck.rs b/compiler/rustc_typeck/src/check/dropck.rs index 5650b2cdd3c..ad675f1e383 100644 --- a/compiler/rustc_typeck/src/check/dropck.rs +++ b/compiler/rustc_typeck/src/check/dropck.rs @@ -366,8 +366,8 @@ impl TypeRelation<'tcx> for SimpleEqRelation<'tcx> { // Anonymizing the LBRs is necessary to solve (Issue #59497). // After we do so, it should be totally fine to skip the binders. - let anon_a = self.tcx.anonymize_late_bound_regions(&a); - let anon_b = self.tcx.anonymize_late_bound_regions(&b); + let anon_a = self.tcx.anonymize_late_bound_regions(a); + let anon_b = self.tcx.anonymize_late_bound_regions(b); self.relate(anon_a.skip_binder(), anon_b.skip_binder())?; Ok(a) diff --git a/compiler/rustc_typeck/src/check/expectation.rs b/compiler/rustc_typeck/src/check/expectation.rs index fd6fe1406c8..5a5fc893d65 100644 --- a/compiler/rustc_typeck/src/check/expectation.rs +++ b/compiler/rustc_typeck/src/check/expectation.rs @@ -83,9 +83,9 @@ impl<'a, 'tcx> Expectation<'tcx> { fn resolve(self, fcx: &FnCtxt<'a, 'tcx>) -> Expectation<'tcx> { match self { NoExpectation => NoExpectation, - ExpectCastableToType(t) => ExpectCastableToType(fcx.resolve_vars_if_possible(&t)), - ExpectHasType(t) => ExpectHasType(fcx.resolve_vars_if_possible(&t)), - ExpectRvalueLikeUnsized(t) => ExpectRvalueLikeUnsized(fcx.resolve_vars_if_possible(&t)), + ExpectCastableToType(t) => ExpectCastableToType(fcx.resolve_vars_if_possible(t)), + ExpectHasType(t) => ExpectHasType(fcx.resolve_vars_if_possible(t)), + ExpectRvalueLikeUnsized(t) => ExpectRvalueLikeUnsized(fcx.resolve_vars_if_possible(t)), } } diff --git a/compiler/rustc_typeck/src/check/expr.rs b/compiler/rustc_typeck/src/check/expr.rs index af19ad08c1d..f7f9e607a74 100644 --- a/compiler/rustc_typeck/src/check/expr.rs +++ b/compiler/rustc_typeck/src/check/expr.rs @@ -494,7 +494,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { .replace_bound_vars_with_fresh_vars( expr.span, infer::LateBoundRegionConversionTime::FnCall, - &fn_sig.input(i), + fn_sig.input(i), ) .0; self.require_type_is_sized_deferred( @@ -514,7 +514,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { .replace_bound_vars_with_fresh_vars( expr.span, infer::LateBoundRegionConversionTime::FnCall, - &fn_sig.output(), + fn_sig.output(), ) .0; 
self.require_type_is_sized_deferred(output, expr.span, traits::SizedReturnType); @@ -963,9 +963,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // Find the type of `e`. Supply hints based on the type we are casting to, // if appropriate. let t_cast = self.to_ty_saving_user_provided_ty(t); - let t_cast = self.resolve_vars_if_possible(&t_cast); + let t_cast = self.resolve_vars_if_possible(t_cast); let t_expr = self.check_expr_with_expectation(e, ExpectCastableToType(t_cast)); - let t_cast = self.resolve_vars_if_possible(&t_cast); + let t_cast = self.resolve_vars_if_possible(t_cast); // Eagerly check for some obvious errors. if t_expr.references_error() || t_cast.references_error() { @@ -1139,7 +1139,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { .map(|f| { self.normalize_associated_types_in( expr.span, - &f.ty(self.tcx, substs), + f.ty(self.tcx, substs), ) }) .collect(); @@ -1571,7 +1571,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ty: Ty<'tcx>, ) { let output_ty = match self.infcx.get_impl_future_output_ty(ty) { - Some(output_ty) => self.resolve_vars_if_possible(&output_ty), + Some(output_ty) => self.resolve_vars_if_possible(output_ty), _ => return, }; let mut add_label = true; diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs b/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs index f87e6b607d4..e1a2f593b8d 100644 --- a/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs +++ b/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs @@ -1,5 +1,6 @@ use crate::astconv::{ - AstConv, ExplicitLateBound, GenericArgCountMismatch, GenericArgCountResult, PathSeg, + AstConv, CreateSubstsForGenericArgsCtxt, ExplicitLateBound, GenericArgCountMismatch, + GenericArgCountResult, PathSeg, }; use crate::check::callee::{self, DeferredCallResolution}; use crate::check::method::{self, MethodCallee, SelfSource}; @@ -87,7 +88,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } // If `ty` is a type variable, see whether we already know what it is. - ty = self.resolve_vars_if_possible(&ty); + ty = self.resolve_vars_if_possible(ty); if !ty.has_infer_types_or_consts() { debug!("resolve_vars_with_obligations: ty={:?}", ty); return ty; @@ -98,7 +99,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // indirect dependencies that don't seem worth tracking // precisely. 
self.select_obligations_where_possible(false, |_| {}); - ty = self.resolve_vars_if_possible(&ty); + ty = self.resolve_vars_if_possible(ty); debug!("resolve_vars_with_obligations: ty={:?}", ty); ty @@ -118,7 +119,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { closure_def_id: DefId, ) -> Vec> { let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut(); - deferred_call_resolutions.remove(&closure_def_id).unwrap_or(vec![]) + deferred_call_resolutions.remove(&closure_def_id).unwrap_or_default() } pub fn tag(&self) -> String { @@ -133,12 +134,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { #[inline] pub fn write_ty(&self, id: hir::HirId, ty: Ty<'tcx>) { - debug!( - "write_ty({:?}, {:?}) in fcx {}", - id, - self.resolve_vars_if_possible(&ty), - self.tag() - ); + debug!("write_ty({:?}, {:?}) in fcx {}", id, self.resolve_vars_if_possible(ty), self.tag()); self.typeck_results.borrow_mut().node_types_mut().insert(id, ty); if ty.references_error() { @@ -194,7 +190,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { user_self_ty: None, // not relevant here }; - self.infcx.canonicalize_user_type_annotation(&UserType::TypeOf( + self.infcx.canonicalize_user_type_annotation(UserType::TypeOf( method.def_id, user_substs, )) @@ -239,7 +235,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ); if Self::can_contain_user_lifetime_bounds((substs, user_self_ty)) { - let canonicalized = self.infcx.canonicalize_user_type_annotation(&UserType::TypeOf( + let canonicalized = self.infcx.canonicalize_user_type_annotation(UserType::TypeOf( def_id, UserSubsts { substs, user_self_ty }, )); @@ -325,13 +321,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { /// Basically whenever we are converting from a type scheme into /// the fn body space, we always want to normalize associated /// types as well. This function combines the two. 
- fn instantiate_type_scheme(&self, span: Span, substs: SubstsRef<'tcx>, value: &T) -> T + fn instantiate_type_scheme(&self, span: Span, substs: SubstsRef<'tcx>, value: T) -> T where T: TypeFoldable<'tcx>, { + debug!("instantiate_type_scheme(value={:?}, substs={:?})", value, substs); let value = value.subst(self.tcx, substs); - let result = self.normalize_associated_types_in(span, &value); - debug!("instantiate_type_scheme(value={:?}, substs={:?}) = {:?}", value, substs, result); + let result = self.normalize_associated_types_in(span, value); + debug!("instantiate_type_scheme = {:?}", result); result } @@ -346,7 +343,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let bounds = self.tcx.predicates_of(def_id); let spans: Vec = bounds.predicates.iter().map(|(_, span)| *span).collect(); let result = bounds.instantiate(self.tcx, substs); - let result = self.normalize_associated_types_in(span, &result); + let result = self.normalize_associated_types_in(span, result); debug!( "instantiate_bounds(bounds={:?}, substs={:?}) = {:?}, {:?}", bounds, substs, result, spans, @@ -360,7 +357,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { pub(in super::super) fn instantiate_opaque_types_from_value>( &self, parent_id: hir::HirId, - value: &T, + value: T, value_span: Span, ) -> T { let parent_def_id = self.tcx.hir().local_def_id(parent_id); @@ -388,7 +385,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { value } - pub(in super::super) fn normalize_associated_types_in(&self, span: Span, value: &T) -> T + pub(in super::super) fn normalize_associated_types_in(&self, span: Span, value: T) -> T where T: TypeFoldable<'tcx>, { @@ -398,7 +395,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { pub(in super::super) fn normalize_associated_types_in_as_infer_ok( &self, span: Span, - value: &T, + value: T, ) -> InferOk<'tcx, T> where T: TypeFoldable<'tcx>, @@ -467,7 +464,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { debug!("to_ty_saving_user_provided_ty: ty={:?}", ty); if Self::can_contain_user_lifetime_bounds(ty) { - let c_ty = self.infcx.canonicalize_response(&UserType::Ty(ty)); + let c_ty = self.infcx.canonicalize_response(UserType::Ty(ty)); debug!("to_ty_saving_user_provided_ty: c_ty={:?}", c_ty); self.typeck_results.borrow_mut().user_provided_types_mut().insert(ast_ty.hir_id, c_ty); } @@ -850,7 +847,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // Record all the argument types, with the substitutions // produced from the above subtyping unification. - Ok(formal_args.iter().map(|ty| self.resolve_vars_if_possible(ty)).collect()) + Ok(formal_args.iter().map(|&ty| self.resolve_vars_if_possible(ty)).collect()) }) .unwrap_or_default(); debug!( @@ -1203,7 +1200,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { if let Res::Local(hid) = res { let ty = self.local_ty(span, hid).decl_ty; - let ty = self.normalize_associated_types_in(span, &ty); + let ty = self.normalize_associated_types_in(span, ty); self.write_ty(hir_id, ty); return (ty, res); } @@ -1298,76 +1295,108 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { }, }; - let substs = self_ctor_substs.unwrap_or_else(|| { - AstConv::create_substs_for_generic_args( - tcx, - def_id, - &[][..], - has_self, - self_ty, - arg_count, - // Provide the generic args, and whether types should be inferred. - |def_id| { - if let Some(&PathSeg(_, index)) = - path_segs.iter().find(|&PathSeg(did, _)| *did == def_id) - { - // If we've encountered an `impl Trait`-related error, we're just - // going to infer the arguments for better error messages. - if !infer_args_for_err.contains(&index) { - // Check whether the user has provided generic arguments. 
- if let Some(ref data) = segments[index].args { - return (Some(data), segments[index].infer_args); - } + struct CreateCtorSubstsContext<'a, 'tcx> { + fcx: &'a FnCtxt<'a, 'tcx>, + span: Span, + path_segs: &'a [PathSeg], + infer_args_for_err: &'a FxHashSet, + segments: &'a [hir::PathSegment<'a>], + } + impl<'tcx, 'a> CreateSubstsForGenericArgsCtxt<'a, 'tcx> for CreateCtorSubstsContext<'a, 'tcx> { + fn args_for_def_id( + &mut self, + def_id: DefId, + ) -> (Option<&'a hir::GenericArgs<'a>>, bool) { + if let Some(&PathSeg(_, index)) = + self.path_segs.iter().find(|&PathSeg(did, _)| *did == def_id) + { + // If we've encountered an `impl Trait`-related error, we're just + // going to infer the arguments for better error messages. + if !self.infer_args_for_err.contains(&index) { + // Check whether the user has provided generic arguments. + if let Some(ref data) = self.segments[index].args { + return (Some(data), self.segments[index].infer_args); } - return (None, segments[index].infer_args); } + return (None, self.segments[index].infer_args); + } - (None, true) - }, - // Provide substitutions for parameters for which (valid) arguments have been provided. - |param, arg| match (¶m.kind, arg) { + (None, true) + } + + fn provided_kind( + &mut self, + param: &ty::GenericParamDef, + arg: &GenericArg<'_>, + ) -> subst::GenericArg<'tcx> { + match (¶m.kind, arg) { (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => { - AstConv::ast_region_to_region(self, lt, Some(param)).into() + AstConv::ast_region_to_region(self.fcx, lt, Some(param)).into() } (GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => { - self.to_ty(ty).into() + self.fcx.to_ty(ty).into() } (GenericParamDefKind::Const, GenericArg::Const(ct)) => { - self.const_arg_to_const(&ct.value, param.def_id).into() + self.fcx.const_arg_to_const(&ct.value, param.def_id).into() } _ => unreachable!(), - }, - // Provide substitutions for parameters for which arguments are inferred. - |substs, param, infer_args| { - match param.kind { - GenericParamDefKind::Lifetime => { - self.re_infer(Some(param), span).unwrap().into() - } - GenericParamDefKind::Type { has_default, .. } => { - if !infer_args && has_default { - // If we have a default, then we it doesn't matter that we're not - // inferring the type arguments: we provide the default where any - // is missing. - let default = tcx.type_of(param.def_id); - self.normalize_ty( - span, - default.subst_spanned(tcx, substs.unwrap(), Some(span)), + } + } + + fn inferred_kind( + &mut self, + substs: Option<&[subst::GenericArg<'tcx>]>, + param: &ty::GenericParamDef, + infer_args: bool, + ) -> subst::GenericArg<'tcx> { + let tcx = self.fcx.tcx(); + match param.kind { + GenericParamDefKind::Lifetime => { + self.fcx.re_infer(Some(param), self.span).unwrap().into() + } + GenericParamDefKind::Type { has_default, .. } => { + if !infer_args && has_default { + // If we have a default, then we it doesn't matter that we're not + // inferring the type arguments: we provide the default where any + // is missing. + let default = tcx.type_of(param.def_id); + self.fcx + .normalize_ty( + self.span, + default.subst_spanned(tcx, substs.unwrap(), Some(self.span)), ) .into() - } else { - // If no type arguments were provided, we have to infer them. - // This case also occurs as a result of some malformed input, e.g. - // a lifetime argument being given instead of a type parameter. - // Using inference instead of `Error` gives better error messages. 
- self.var_for_def(span, param) - } - } - GenericParamDefKind::Const => { - // FIXME(const_generics:defaults) - // No const parameters were provided, we have to infer them. - self.var_for_def(span, param) + } else { + // If no type arguments were provided, we have to infer them. + // This case also occurs as a result of some malformed input, e.g. + // a lifetime argument being given instead of a type parameter. + // Using inference instead of `Error` gives better error messages. + self.fcx.var_for_def(self.span, param) } } + GenericParamDefKind::Const => { + // FIXME(const_generics:defaults) + // No const parameters were provided, we have to infer them. + self.fcx.var_for_def(self.span, param) + } + } + } + } + + let substs = self_ctor_substs.unwrap_or_else(|| { + AstConv::create_substs_for_generic_args( + tcx, + def_id, + &[][..], + has_self, + self_ty, + arg_count, + &mut CreateCtorSubstsContext { + fcx: self, + span, + path_segs: &path_segs, + infer_args_for_err: &infer_args_for_err, + segments, }, ) }); @@ -1381,7 +1410,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // Substitute the values for the type parameters into the type of // the referenced item. - let ty_substituted = self.instantiate_type_scheme(span, &substs, &ty); + let ty_substituted = self.instantiate_type_scheme(span, &substs, ty); if let Some(UserSelfTy { impl_def_id, self_ty }) = user_self_ty { // In the case of `Foo::method` and `>::method`, if `method` @@ -1391,7 +1420,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // This also occurs for an enum variant on a type alias. let ty = tcx.type_of(impl_def_id); - let impl_ty = self.instantiate_type_scheme(span, &substs, &ty); + let impl_ty = self.instantiate_type_scheme(span, &substs, ty); match self.at(&self.misc(span), self.param_env).sup(impl_ty, self_ty) { Ok(ok) => self.register_infer_ok_obligations(ok), Err(_) => { diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs b/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs index a820661d843..333bda00dbe 100644 --- a/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs +++ b/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs @@ -261,9 +261,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } else { // is the missing argument of type `()`? let sugg_unit = if expected_arg_tys.len() == 1 && supplied_arg_count == 0 { - self.resolve_vars_if_possible(&expected_arg_tys[0]).is_unit() + self.resolve_vars_if_possible(expected_arg_tys[0]).is_unit() } else if fn_inputs.len() == 1 && supplied_arg_count == 0 { - self.resolve_vars_if_possible(&fn_inputs[0]).is_unit() + self.resolve_vars_if_possible(fn_inputs[0]).is_unit() } else { false }; @@ -384,7 +384,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } ty::FnDef(..) => { let ptr_ty = self.tcx.mk_fn_ptr(arg_ty.fn_sig(self.tcx)); - let ptr_ty = self.resolve_vars_if_possible(&ptr_ty); + let ptr_ty = self.resolve_vars_if_possible(ptr_ty); variadic_error(tcx.sess, arg.span, arg_ty, &ptr_ty.to_string()); } _ => {} @@ -927,7 +927,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { .map(|&(i, checked_ty, _)| (i, checked_ty)) .chain(final_arg_types.iter().map(|&(i, _, coerced_ty)| (i, coerced_ty))) .flat_map(|(i, ty)| { - let ty = self.resolve_vars_if_possible(&ty); + let ty = self.resolve_vars_if_possible(ty); // We walk the argument type because the argument's type could have // been `Option`, but the `FulfillmentError` references `T`. if ty.walk().any(|arg| arg == predicate.self_ty().into()) { @@ -989,7 +989,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // from `typeck-default-trait-impl-assoc-type.rs`. 
} else { let ty = AstConv::ast_ty_to_ty(self, hir_ty); - let ty = self.resolve_vars_if_possible(&ty); + let ty = self.resolve_vars_if_possible(ty); if ty == predicate.self_ty() { error.obligation.cause.make_mut().span = hir_ty.span; } diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs b/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs index 72c3b233ed9..f635e0b6f93 100644 --- a/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs +++ b/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs @@ -262,7 +262,7 @@ impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> { let (trait_ref, _) = self.replace_bound_vars_with_fresh_vars( span, infer::LateBoundRegionConversionTime::AssocTypeProjection(item_def_id), - &poly_trait_ref, + poly_trait_ref, ); let item_substs = >::create_substs_for_associated_item( diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs b/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs index a8ad9f4fdf8..17dbf989d66 100644 --- a/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs +++ b/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs @@ -73,8 +73,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { _ => return false, }; - let sig = self.replace_bound_vars_with_fresh_vars(expr.span, infer::FnCall, &sig).0; - let sig = self.normalize_associated_types_in(expr.span, &sig); + let sig = self.replace_bound_vars_with_fresh_vars(expr.span, infer::FnCall, sig).0; + let sig = self.normalize_associated_types_in(expr.span, sig); if self.can_coerce(sig.output(), expected) { let (mut sugg_call, applicability) = if sig.inputs().is_empty() { (String::new(), Applicability::MachineApplicable) diff --git a/compiler/rustc_typeck/src/check/gather_locals.rs b/compiler/rustc_typeck/src/check/gather_locals.rs index af552389de0..825ebc19fa6 100644 --- a/compiler/rustc_typeck/src/check/gather_locals.rs +++ b/compiler/rustc_typeck/src/check/gather_locals.rs @@ -59,16 +59,13 @@ impl<'a, 'tcx> Visitor<'tcx> for GatherLocalsVisitor<'a, 'tcx> { let o_ty = self.fcx.to_ty(&ty); let revealed_ty = if self.fcx.tcx.features().impl_trait_in_bindings { - self.fcx.instantiate_opaque_types_from_value(self.parent_id, &o_ty, ty.span) + self.fcx.instantiate_opaque_types_from_value(self.parent_id, o_ty, ty.span) } else { o_ty }; - let c_ty = self - .fcx - .inh - .infcx - .canonicalize_user_type_annotation(&UserType::Ty(revealed_ty)); + let c_ty = + self.fcx.inh.infcx.canonicalize_user_type_annotation(UserType::Ty(revealed_ty)); debug!( "visit_local: ty.hir_id={:?} o_ty={:?} revealed_ty={:?} c_ty={:?}", ty.hir_id, o_ty, revealed_ty, c_ty diff --git a/compiler/rustc_typeck/src/check/generator_interior.rs b/compiler/rustc_typeck/src/check/generator_interior.rs index 293a995887c..602b79802b3 100644 --- a/compiler/rustc_typeck/src/check/generator_interior.rs +++ b/compiler/rustc_typeck/src/check/generator_interior.rs @@ -80,7 +80,7 @@ impl<'a, 'tcx> InteriorVisitor<'a, 'tcx> { }); if let Some(yield_data) = live_across_yield { - let ty = self.fcx.resolve_vars_if_possible(&ty); + let ty = self.fcx.resolve_vars_if_possible(ty); debug!( "type in expr = {:?}, scope = {:?}, type = {:?}, count = {}, yield_span = {:?}", expr, scope, ty, self.expr_count, yield_data.span @@ -120,7 +120,7 @@ impl<'a, 'tcx> InteriorVisitor<'a, 'tcx> { self.expr_count, expr.map(|e| e.span) ); - let ty = self.fcx.resolve_vars_if_possible(&ty); + let ty = self.fcx.resolve_vars_if_possible(ty); if let Some((unresolved_type, unresolved_type_span)) = self.fcx.unresolved_type_vars(&ty) { @@ -179,13 +179,13 @@ pub fn resolve_interior<'a, 'tcx>( 
.filter_map(|mut cause| { // Erase regions and canonicalize late-bound regions to deduplicate as many types as we // can. - let erased = fcx.tcx.erase_regions(&cause.ty); + let erased = fcx.tcx.erase_regions(cause.ty); if captured_tys.insert(erased) { // Replace all regions inside the generator interior with late bound regions. // Note that each region slot in the types gets a new fresh late bound region, // which means that none of the regions inside relate to any other, even if // typeck had previously found constraints that would cause them to be related. - let folded = fcx.tcx.fold_regions(&erased, &mut false, |_, current_depth| { + let folded = fcx.tcx.fold_regions(erased, &mut false, |_, current_depth| { let r = fcx.tcx.mk_region(ty::ReLateBound(current_depth, ty::BrAnon(counter))); counter += 1; r diff --git a/compiler/rustc_typeck/src/check/inherited.rs b/compiler/rustc_typeck/src/check/inherited.rs index 7e580485c3d..0011a3fc71b 100644 --- a/compiler/rustc_typeck/src/check/inherited.rs +++ b/compiler/rustc_typeck/src/check/inherited.rs @@ -156,7 +156,7 @@ impl Inherited<'a, 'tcx> { span: Span, body_id: hir::HirId, param_env: ty::ParamEnv<'tcx>, - value: &T, + value: T, ) -> T where T: TypeFoldable<'tcx>, diff --git a/compiler/rustc_typeck/src/check/method/confirm.rs b/compiler/rustc_typeck/src/check/method/confirm.rs index fd2700b85e2..8ef723d5902 100644 --- a/compiler/rustc_typeck/src/check/method/confirm.rs +++ b/compiler/rustc_typeck/src/check/method/confirm.rs @@ -1,6 +1,6 @@ use super::{probe, MethodCallee}; -use crate::astconv::AstConv; +use crate::astconv::{AstConv, CreateSubstsForGenericArgsCtxt}; use crate::check::{callee, FnCtxt}; use crate::hir::def_id::DefId; use crate::hir::GenericArg; @@ -10,7 +10,7 @@ use rustc_middle::traits::{ObligationCauseCode, UnifyReceiverContext}; use rustc_middle::ty::adjustment::{Adjust, Adjustment, PointerCast}; use rustc_middle::ty::adjustment::{AllowTwoPhase, AutoBorrow, AutoBorrowMutability}; use rustc_middle::ty::fold::TypeFoldable; -use rustc_middle::ty::subst::{Subst, SubstsRef}; +use rustc_middle::ty::subst::{self, Subst, SubstsRef}; use rustc_middle::ty::{self, GenericParamDefKind, Ty}; use rustc_span::Span; use rustc_trait_selection::traits; @@ -90,8 +90,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> { // traits, no trait system method can be called before this point because they // could alter our Self-type, except for normalizing the receiver from the // signature (which is also done during probing). - let method_sig_rcvr = - self.normalize_associated_types_in(self.span, &method_sig.inputs()[0]); + let method_sig_rcvr = self.normalize_associated_types_in(self.span, method_sig.inputs()[0]); debug!( "confirm: self_ty={:?} method_sig_rcvr={:?} method_sig={:?} method_predicates={:?}", self_ty, method_sig_rcvr, method_sig, method_predicates @@ -99,7 +98,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> { self.unify_receivers(self_ty, method_sig_rcvr, &pick, all_substs); let (method_sig, method_predicates) = - self.normalize_associated_types_in(self.span, &(method_sig, method_predicates)); + self.normalize_associated_types_in(self.span, (method_sig, method_predicates)); // Make sure nobody calls `drop()` explicitly. 
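Many of the hunks above and below are mechanical fallout from one API change, visible in the `inherited.rs` hunk: helpers such as `resolve_vars_if_possible`, `erase_regions`, and `normalize_associated_types_in` now take their `TypeFoldable` argument by value (`value: T`) rather than by reference (`value: &T`), so call sites drop the `&`. A minimal sketch of that kind of signature change, using invented `Ty`/`Foldable` stand-ins rather than the real `TypeFoldable` machinery:

```rust
// Toy stand-in for an interned, cheaply copyable compiler type.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Ty(u32);

trait Foldable {
    fn fold(self) -> Self;
}

impl Foldable for Ty {
    fn fold(self) -> Self {
        Ty(self.0 + 1)
    }
}

// Before: the helper took `&T` and had to clone (or otherwise rebuild) a `T`.
fn resolve_by_ref<T: Foldable + Clone>(value: &T) -> T {
    value.clone().fold()
}

// After: the helper takes `T` by value; for `Copy` values like interned types
// this is free, and call sites no longer write `&value`.
fn resolve_by_value<T: Foldable>(value: T) -> T {
    value.fold()
}

fn main() {
    let ty = Ty(7);
    assert_eq!(resolve_by_ref(&ty), resolve_by_value(ty));
    println!("both forms agree: {:?}", resolve_by_value(ty));
}
```

Presumably the motivation is that most of these values are interned and `Copy`, so passing them by value costs nothing and removes a layer of reference-taking at every call site.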
self.enforce_illegal_method_limitations(&pick); @@ -229,7 +228,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> { let original_poly_trait_ref = principal.with_self_ty(this.tcx, object_ty); let upcast_poly_trait_ref = this.upcast(original_poly_trait_ref, trait_def_id); let upcast_trait_ref = - this.replace_bound_vars_with_fresh_vars(&upcast_poly_trait_ref); + this.replace_bound_vars_with_fresh_vars(upcast_poly_trait_ref); debug!( "original_poly_trait_ref={:?} upcast_trait_ref={:?} target_trait={:?}", original_poly_trait_ref, upcast_trait_ref, trait_def_id @@ -249,10 +248,10 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> { self.fresh_substs_for_item(self.span, trait_def_id) } - probe::WhereClausePick(ref poly_trait_ref) => { + probe::WhereClausePick(poly_trait_ref) => { // Where clauses can have bound regions in them. We need to instantiate // those to convert from a poly-trait-ref to a trait-ref. - self.replace_bound_vars_with_fresh_vars(&poly_trait_ref).substs + self.replace_bound_vars_with_fresh_vars(poly_trait_ref).substs } } } @@ -307,6 +306,52 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> { // parameters from the type and those from the method. assert_eq!(generics.parent_count, parent_substs.len()); + struct MethodSubstsCtxt<'a, 'tcx> { + cfcx: &'a ConfirmContext<'a, 'tcx>, + pick: &'a probe::Pick<'tcx>, + seg: &'a hir::PathSegment<'a>, + } + impl<'a, 'tcx> CreateSubstsForGenericArgsCtxt<'a, 'tcx> for MethodSubstsCtxt<'a, 'tcx> { + fn args_for_def_id( + &mut self, + def_id: DefId, + ) -> (Option<&'a hir::GenericArgs<'a>>, bool) { + if def_id == self.pick.item.def_id { + if let Some(ref data) = self.seg.args { + return (Some(data), false); + } + } + (None, false) + } + + fn provided_kind( + &mut self, + param: &ty::GenericParamDef, + arg: &GenericArg<'_>, + ) -> subst::GenericArg<'tcx> { + match (¶m.kind, arg) { + (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => { + AstConv::ast_region_to_region(self.cfcx.fcx, lt, Some(param)).into() + } + (GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => { + self.cfcx.to_ty(ty).into() + } + (GenericParamDefKind::Const, GenericArg::Const(ct)) => { + self.cfcx.const_arg_to_const(&ct.value, param.def_id).into() + } + _ => unreachable!(), + } + } + + fn inferred_kind( + &mut self, + _substs: Option<&[subst::GenericArg<'tcx>]>, + param: &ty::GenericParamDef, + _infer_args: bool, + ) -> subst::GenericArg<'tcx> { + self.cfcx.var_for_def(self.cfcx.span, param) + } + } AstConv::create_substs_for_generic_args( self.tcx, pick.item.def_id, @@ -314,29 +359,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> { false, None, arg_count_correct, - // Provide the generic args, and whether types should be inferred. - |def_id| { - // The last component of the returned tuple here is unimportant. - if def_id == pick.item.def_id { - if let Some(ref data) = seg.args { - return (Some(data), false); - } - } - (None, false) - }, - // Provide substitutions for parameters for which (valid) arguments have been provided. - |param, arg| match (¶m.kind, arg) { - (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => { - AstConv::ast_region_to_region(self.fcx, lt, Some(param)).into() - } - (GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => self.to_ty(ty).into(), - (GenericParamDefKind::Const, GenericArg::Const(ct)) => { - self.const_arg_to_const(&ct.value, param.def_id).into() - } - _ => unreachable!(), - }, - // Provide substitutions for parameters for which arguments are inferred. 
- |_, param, _| self.var_for_def(self.span, param), + &mut MethodSubstsCtxt { cfcx: self, pick, seg }, ) } @@ -400,7 +423,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> { // N.B., instantiate late-bound regions first so that // `instantiate_type_scheme` can normalize associated types that // may reference those regions. - let method_sig = self.replace_bound_vars_with_fresh_vars(&sig); + let method_sig = self.replace_bound_vars_with_fresh_vars(sig); debug!("late-bound lifetimes from method instantiated, method_sig={:?}", method_sig); let method_sig = method_sig.subst(self.tcx, all_substs); @@ -506,7 +529,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> { upcast_trait_refs.into_iter().next().unwrap() } - fn replace_bound_vars_with_fresh_vars(&self, value: &ty::Binder) -> T + fn replace_bound_vars_with_fresh_vars(&self, value: ty::Binder) -> T where T: TypeFoldable<'tcx>, { diff --git a/compiler/rustc_typeck/src/check/method/mod.rs b/compiler/rustc_typeck/src/check/method/mod.rs index 84bc3979e12..8e13b374699 100644 --- a/compiler/rustc_typeck/src/check/method/mod.rs +++ b/compiler/rustc_typeck/src/check/method/mod.rs @@ -265,7 +265,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { scope: ProbeScope, ) -> probe::PickResult<'tcx> { let mode = probe::Mode::MethodCall; - let self_ty = self.resolve_vars_if_possible(&self_ty); + let self_ty = self.resolve_vars_if_possible(self_ty); self.probe_for_name( span, mode, @@ -358,11 +358,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // `instantiate_type_scheme` can normalize associated types that // may reference those regions. let fn_sig = tcx.fn_sig(def_id); - let fn_sig = self.replace_bound_vars_with_fresh_vars(span, infer::FnCall, &fn_sig).0; + let fn_sig = self.replace_bound_vars_with_fresh_vars(span, infer::FnCall, fn_sig).0; let fn_sig = fn_sig.subst(self.tcx, substs); let InferOk { value, obligations: o } = - self.normalize_associated_types_in_as_infer_ok(span, &fn_sig); + self.normalize_associated_types_in_as_infer_ok(span, fn_sig); let fn_sig = { obligations.extend(o); value @@ -379,7 +379,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let bounds = self.tcx.predicates_of(def_id).instantiate(self.tcx, substs); let InferOk { value, obligations: o } = - self.normalize_associated_types_in_as_infer_ok(span, &bounds); + self.normalize_associated_types_in_as_infer_ok(span, bounds); let bounds = { obligations.extend(o); value diff --git a/compiler/rustc_typeck/src/check/method/probe.rs b/compiler/rustc_typeck/src/check/method/probe.rs index d403e259398..478f8a16169 100644 --- a/compiler/rustc_typeck/src/check/method/probe.rs +++ b/compiler/rustc_typeck/src/check/method/probe.rs @@ -25,7 +25,6 @@ use rustc_middle::ty::GenericParamDefKind; use rustc_middle::ty::{ self, ParamEnvAnd, ToPolyTraitRef, ToPredicate, Ty, TyCtxt, TypeFoldable, WithConstness, }; -use rustc_session::config::nightly_options; use rustc_session::lint; use rustc_span::def_id::LocalDefId; use rustc_span::{symbol::Ident, Span, Symbol, DUMMY_SP}; @@ -244,7 +243,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ProbeScope::AllTraits, |probe_cx| Ok(probe_cx.candidate_method_names()), ) - .unwrap_or(vec![]); + .unwrap_or_default(); method_names .iter() .flat_map(|&method_name| { @@ -309,7 +308,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { { let mut orig_values = OriginalQueryValues::default(); let param_env_and_self_ty = self.infcx.canonicalize_query( - &ParamEnvAnd { param_env: self.param_env, value: self_ty }, + ParamEnvAnd { param_env: self.param_env, value: self_ty }, &mut orig_values, ); @@ -731,7 +730,7 @@ impl<'a, 'tcx> 
ProbeContext<'a, 'tcx> { let cause = traits::ObligationCause::misc(self.span, self.body_id); let selcx = &mut traits::SelectionContext::new(self.fcx); let traits::Normalized { value: (xform_self_ty, xform_ret_ty), obligations } = - traits::normalize(selcx, self.param_env, cause, &xform_tys); + traits::normalize(selcx, self.param_env, cause, xform_tys); debug!( "assemble_inherent_impl_probe: xform_self_ty = {:?}/{:?}", xform_self_ty, xform_ret_ty @@ -775,7 +774,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { // argument type like `&Trait`. let trait_ref = principal.with_self_ty(self.tcx, self_ty); self.elaborate_bounds(iter::once(trait_ref), |this, new_trait_ref, item| { - let new_trait_ref = this.erase_late_bound_regions(&new_trait_ref); + let new_trait_ref = this.erase_late_bound_regions(new_trait_ref); let (xform_self_ty, xform_ret_ty) = this.xform_self_ty(&item, new_trait_ref.self_ty(), new_trait_ref.substs); @@ -821,7 +820,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { }); self.elaborate_bounds(bounds, |this, poly_trait_ref, item| { - let trait_ref = this.erase_late_bound_regions(&poly_trait_ref); + let trait_ref = this.erase_late_bound_regions(poly_trait_ref); let (xform_self_ty, xform_ret_ty) = this.xform_self_ty(&item, trait_ref.self_ty(), trait_ref.substs); @@ -912,7 +911,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { let substs = self.fresh_substs_for_item(self.span, method.def_id); let fty = fty.subst(self.tcx, substs); let (fty, _) = - self.replace_bound_vars_with_fresh_vars(self.span, infer::FnCall, &fty); + self.replace_bound_vars_with_fresh_vars(self.span, infer::FnCall, fty); if let Some(self_ty) = self_ty { if self @@ -943,7 +942,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { // For trait aliases, assume all super-traits are relevant. let bounds = iter::once(trait_ref.to_poly_trait_ref()); self.elaborate_bounds(bounds, |this, new_trait_ref, item| { - let new_trait_ref = this.erase_late_bound_regions(&new_trait_ref); + let new_trait_ref = this.erase_late_bound_regions(new_trait_ref); let (xform_self_ty, xform_ret_ty) = this.xform_self_ty(&item, new_trait_ref.self_ty(), new_trait_ref.substs); @@ -1272,7 +1271,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { self.tcx.def_path_str(stable_pick.item.def_id), )); - if nightly_options::is_nightly_build() { + if self.tcx.sess.is_nightly_build() { for (candidate, feature) in unstable_candidates { diag.help(&format!( "add `#![feature({})]` to the crate attributes to enable `{}`", @@ -1356,7 +1355,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { let impl_bounds = self.tcx.predicates_of(impl_def_id); let impl_bounds = impl_bounds.instantiate(self.tcx, substs); let traits::Normalized { value: impl_bounds, obligations: norm_obligations } = - traits::normalize(selcx, self.param_env, cause.clone(), &impl_bounds); + traits::normalize(selcx, self.param_env, cause.clone(), impl_bounds); // Convert the bounds into obligations. let impl_obligations = @@ -1367,7 +1366,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { .chain(ref_obligations.iter().cloned()); // Evaluate those obligations to see if they might possibly hold. 
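One small cleanup in `probe.rs` above replaces `.unwrap_or(vec![])` with `.unwrap_or_default()`. The two are equivalent here (an empty `Vec` does not allocate), but `unwrap_or` always evaluates its argument eagerly, so `unwrap_or_default`/`unwrap_or_else` is the idiomatic spelling whenever the fallback has to be constructed. A tiny illustration:

```rust
fn expensive_default() -> Vec<u32> {
    println!("building fallback"); // runs even when the Option is Some
    Vec::new()
}

fn main() {
    let names: Option<Vec<u32>> = Some(vec![1, 2, 3]);

    // Eager: `expensive_default()` runs regardless of whether `names` is Some.
    let a = names.clone().unwrap_or(expensive_default());

    // Lazy: the fallback is only built when it is actually needed.
    let b = names.clone().unwrap_or_else(Vec::new);

    // Shortest spelling when the fallback is just `Default::default()`.
    let c = names.unwrap_or_default();

    assert_eq!(a, b);
    assert_eq!(b, c);
}
```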
for o in candidate_obligations { - let o = self.resolve_vars_if_possible(&o); + let o = self.resolve_vars_if_possible(o); if !self.predicate_may_hold(&o) { result = ProbeResult::NoMatch; possibly_unsatisfied_predicates.push((o.predicate, None)); @@ -1393,25 +1392,27 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { for obligation in impl_source.borrow_nested_obligations() { // Determine exactly which obligation wasn't met, so // that we can give more context in the error. - if !self.predicate_may_hold(&obligation) { - let o = self.resolve_vars_if_possible(obligation); + if !self.predicate_may_hold(obligation) { + let nested_predicate = + self.resolve_vars_if_possible(obligation.predicate); let predicate = - self.resolve_vars_if_possible(&predicate); - let p = if predicate == o.predicate { + self.resolve_vars_if_possible(predicate); + let p = if predicate == nested_predicate { // Avoid "`MyStruct: Foo` which is required by // `MyStruct: Foo`" in E0599. None } else { Some(predicate) }; - possibly_unsatisfied_predicates.push((o.predicate, p)); + possibly_unsatisfied_predicates + .push((nested_predicate, p)); } } } _ => { // Some nested subobligation of this predicate // failed. - let predicate = self.resolve_vars_if_possible(&predicate); + let predicate = self.resolve_vars_if_possible(predicate); possibly_unsatisfied_predicates.push((predicate, None)); } } @@ -1428,7 +1429,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { // Evaluate those obligations to see if they might possibly hold. for o in sub_obligations { - let o = self.resolve_vars_if_possible(&o); + let o = self.resolve_vars_if_possible(o); if !self.predicate_may_hold(&o) { result = ProbeResult::NoMatch; possibly_unsatisfied_predicates.push((o.predicate, None)); @@ -1439,7 +1440,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { if let (Some(return_ty), Some(xform_ret_ty)) = (self.return_type, probe.xform_ret_ty) { - let xform_ret_ty = self.resolve_vars_if_possible(&xform_ret_ty); + let xform_ret_ty = self.resolve_vars_if_possible(xform_ret_ty); debug!( "comparing return_ty {:?} with xform ret ty {:?}", return_ty, probe.xform_ret_ty @@ -1605,7 +1606,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { // Erase any late-bound regions from the method and substitute // in the values from the substitution. - let xform_fn_sig = self.erase_late_bound_regions(&fn_sig); + let xform_fn_sig = self.erase_late_bound_regions(fn_sig); if generics.params.is_empty() { xform_fn_sig.subst(self.tcx, substs) @@ -1673,7 +1674,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { /// region got replaced with the same variable, which requires a bit more coordination /// and/or tracking the substitution and /// so forth. 
- fn erase_late_bound_regions(&self, value: &ty::Binder) -> T + fn erase_late_bound_regions(&self, value: ty::Binder) -> T where T: TypeFoldable<'tcx>, { diff --git a/compiler/rustc_typeck/src/check/method/suggest.rs b/compiler/rustc_typeck/src/check/method/suggest.rs index 46afe4892db..3d5ce57a491 100644 --- a/compiler/rustc_typeck/src/check/method/suggest.rs +++ b/compiler/rustc_typeck/src/check/method/suggest.rs @@ -248,7 +248,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { }) => { let tcx = self.tcx; - let actual = self.resolve_vars_if_possible(&rcvr_ty); + let actual = self.resolve_vars_if_possible(rcvr_ty); let ty_str = self.ty_to_string(actual); let is_method = mode == Mode::MethodCall; let item_kind = if is_method { @@ -870,7 +870,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { span: Span, ) { let output_ty = match self.infcx.get_impl_future_output_ty(ty) { - Some(output_ty) => self.resolve_vars_if_possible(&output_ty), + Some(output_ty) => self.resolve_vars_if_possible(output_ty), _ => return, }; let method_exists = self.method_exists(item_name, output_ty, call.hir_id, true); diff --git a/compiler/rustc_typeck/src/check/mod.rs b/compiler/rustc_typeck/src/check/mod.rs index 169ad0df3a5..1479eadf1b0 100644 --- a/compiler/rustc_typeck/src/check/mod.rs +++ b/compiler/rustc_typeck/src/check/mod.rs @@ -108,7 +108,7 @@ use rustc_hir::def::Res; use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE}; use rustc_hir::intravisit::Visitor; use rustc_hir::itemlikevisit::ItemLikeVisitor; -use rustc_hir::{HirIdMap, Node}; +use rustc_hir::{HirIdMap, ImplicitSelfKind, Node}; use rustc_index::bit_set::BitSet; use rustc_index::vec::Idx; use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; @@ -362,7 +362,7 @@ fn used_trait_imports(tcx: TyCtxt<'_>, def_id: LocalDefId) -> &FxHashSet(tcx: TyCtxt<'tcx>, val: &T) -> T +fn fixup_opaque_types<'tcx, T>(tcx: TyCtxt<'tcx>, val: T) -> T where T: TypeFoldable<'tcx>, { @@ -510,15 +510,15 @@ fn typeck_with_fallback<'tcx>( check_abi(tcx, span, fn_sig.abi()); // Compute the fty from point of view of inside the fn. 
- let fn_sig = tcx.liberate_late_bound_regions(def_id.to_def_id(), &fn_sig); + let fn_sig = tcx.liberate_late_bound_regions(def_id.to_def_id(), fn_sig); let fn_sig = inh.normalize_associated_types_in( body.value.span, body_id.hir_id, param_env, - &fn_sig, + fn_sig, ); - let fn_sig = fixup_opaque_types(tcx, &fn_sig); + let fn_sig = fixup_opaque_types(tcx, fn_sig); let fcx = check_fn(&inh, param_env, fn_sig, decl, id, body, None).0; fcx @@ -543,11 +543,11 @@ fn typeck_with_fallback<'tcx>( _ => fallback(), }); - let expected_type = fcx.normalize_associated_types_in(body.value.span, &expected_type); + let expected_type = fcx.normalize_associated_types_in(body.value.span, expected_type); fcx.require_type_is_sized(expected_type, body.value.span, traits::ConstSized); let revealed_ty = if tcx.features().impl_trait_in_bindings { - fcx.instantiate_opaque_types_from_value(id, &expected_type, body.value.span) + fcx.instantiate_opaque_types_from_value(id, expected_type, body.value.span) } else { expected_type }; diff --git a/compiler/rustc_typeck/src/check/op.rs b/compiler/rustc_typeck/src/check/op.rs index 247b5256726..854bc70108f 100644 --- a/compiler/rustc_typeck/src/check/op.rs +++ b/compiler/rustc_typeck/src/check/op.rs @@ -660,7 +660,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { method.sig.output() } Err(()) => { - let actual = self.resolve_vars_if_possible(&operand_ty); + let actual = self.resolve_vars_if_possible(operand_ty); if !actual.references_error() { let mut err = struct_span_err!( self.tcx.sess, @@ -983,7 +983,7 @@ fn suggest_constraining_param( struct TypeParamVisitor<'tcx>(Vec>); impl<'tcx> TypeVisitor<'tcx> for TypeParamVisitor<'tcx> { - fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<()> { + fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow { if let ty::Param(_) = ty.kind() { self.0.push(ty); } diff --git a/compiler/rustc_typeck/src/check/pat.rs b/compiler/rustc_typeck/src/check/pat.rs index 6489b7838d6..a729912126e 100644 --- a/compiler/rustc_typeck/src/check/pat.rs +++ b/compiler/rustc_typeck/src/check/pat.rs @@ -149,6 +149,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { /// /// Outside of this module, `check_pat_top` should always be used. /// Conversely, inside this module, `check_pat_top` should never be used. + #[instrument(skip(self, ti))] fn check_pat( &self, pat: &'tcx Pat<'tcx>, @@ -156,8 +157,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { def_bm: BindingMode, ti: TopInfo<'tcx>, ) { - debug!("check_pat(pat={:?},expected={:?},def_bm={:?})", pat, expected, def_bm); - let path_res = match &pat.kind { PatKind::Path(qpath) => Some(self.resolve_ty_and_res_ufcs(qpath, pat.hir_id, pat.span)), _ => None, @@ -398,6 +397,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { if let ty::Ref(_, inner_ty, _) = expected.kind() { if matches!(inner_ty.kind(), ty::Slice(_)) { let tcx = self.tcx; + trace!(?lt.hir_id.local_id, "polymorphic byte string lit"); + self.typeck_results + .borrow_mut() + .treat_byte_string_as_slice + .insert(lt.hir_id.local_id); pat_ty = tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_slice(tcx.types.u8)); } } @@ -459,7 +463,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // Now that we know the types can be unified we find the unified type // and use it to type the entire expression. - let common_type = self.resolve_vars_if_possible(&lhs_ty.or(rhs_ty).unwrap_or(expected)); + let common_type = self.resolve_vars_if_possible(lhs_ty.or(rhs_ty).unwrap_or(expected)); // Subtyping doesn't matter here, as the value is some kind of scalar. 
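The `pat.rs` hunk above records the pattern's `HirId` in the new `treat_byte_string_as_slice` table whenever a byte-string literal pattern is checked against an expected `&[u8]`, so the pattern is typed as `&'static [u8]` rather than `&'static [u8; N]`. A small user-level example of the kind of code this is meant to accept (illustrative, not taken from the PR's test suite, and it relies on that slice-typing behaviour):

```rust
// When the scrutinee is a `&[u8]` slice, a byte-string literal pattern is treated
// as having type `&[u8]` instead of `&[u8; 5]`, so this match type-checks.
fn classify(bytes: &[u8]) -> &'static str {
    match bytes {
        b"hello" => "greeting",
        b"bye" => "farewell",
        _ => "unknown",
    }
}

fn main() {
    assert_eq!(classify(b"hello"), "greeting");
    assert_eq!(classify(&[1, 2, 3]), "unknown");
    println!("byte-string patterns matched against a slice");
}
```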
let demand_eqtype = |x, y| { diff --git a/compiler/rustc_typeck/src/check/regionck.rs b/compiler/rustc_typeck/src/check/regionck.rs index 7b31b9f3915..b8b98cef763 100644 --- a/compiler/rustc_typeck/src/check/regionck.rs +++ b/compiler/rustc_typeck/src/check/regionck.rs @@ -229,7 +229,7 @@ impl<'a, 'tcx> RegionCtxt<'a, 'tcx> { /// of b will be `&.i32` and then `*b` will require that `` be bigger than the let and /// the `*b` expression, so we will effectively resolve `` to be the block B. pub fn resolve_type(&self, unresolved_ty: Ty<'tcx>) -> Ty<'tcx> { - self.resolve_vars_if_possible(&unresolved_ty) + self.resolve_vars_if_possible(unresolved_ty) } /// Try to resolve the type for the given node. diff --git a/compiler/rustc_typeck/src/check/upvar.rs b/compiler/rustc_typeck/src/check/upvar.rs index e9dfef718fd..019fa78fb1e 100644 --- a/compiler/rustc_typeck/src/check/upvar.rs +++ b/compiler/rustc_typeck/src/check/upvar.rs @@ -39,10 +39,21 @@ use rustc_hir::def_id::DefId; use rustc_hir::def_id::LocalDefId; use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor}; use rustc_infer::infer::UpvarRegion; -use rustc_middle::hir::place::{PlaceBase, PlaceWithHirId}; +use rustc_middle::hir::place::{Place, PlaceBase, PlaceWithHirId, ProjectionKind}; use rustc_middle::ty::{self, Ty, TyCtxt, UpvarSubsts}; +use rustc_span::sym; use rustc_span::{Span, Symbol}; -use std::collections::hash_map::Entry; + +/// Describe the relationship between the paths of two places +/// eg: +/// - `foo` is ancestor of `foo.bar.baz` +/// - `foo.bar.baz` is an descendant of `foo.bar` +/// - `foo.bar` and `foo.baz` are divergent +enum PlaceAncestryRelation { + Ancestor, + Descendant, + Divergent, +} impl<'a, 'tcx> FnCtxt<'a, 'tcx> { pub fn closure_analyze(&self, body: &'tcx hir::Body<'tcx>) { @@ -111,40 +122,23 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { None }; - if let Some(upvars) = self.tcx.upvars_mentioned(closure_def_id) { - let mut closure_captures: FxIndexMap = - FxIndexMap::with_capacity_and_hasher(upvars.len(), Default::default()); - for (&var_hir_id, _) in upvars.iter() { - let upvar_id = ty::UpvarId { - var_path: ty::UpvarPath { hir_id: var_hir_id }, - closure_expr_id: closure_def_id.expect_local(), - }; - debug!("seed upvar_id {:?}", upvar_id); - // Adding the upvar Id to the list of Upvars, which will be added - // to the map for the closure at the end of the for loop. - closure_captures.insert(var_hir_id, upvar_id); - - let capture_kind = match capture_clause { - hir::CaptureBy::Value => ty::UpvarCapture::ByValue(None), - hir::CaptureBy::Ref => { - let origin = UpvarRegion(upvar_id, span); - let upvar_region = self.next_region_var(origin); - let upvar_borrow = - ty::UpvarBorrow { kind: ty::ImmBorrow, region: upvar_region }; - ty::UpvarCapture::ByRef(upvar_borrow) - } - }; + let local_def_id = closure_def_id.expect_local(); - self.typeck_results.borrow_mut().upvar_capture_map.insert(upvar_id, capture_kind); - } - // Add the vector of upvars to the map keyed with the closure id. - // This gives us an easier access to them without having to call - // tcx.upvars again.. 
- if !closure_captures.is_empty() { - self.typeck_results - .borrow_mut() - .closure_captures - .insert(closure_def_id, closure_captures); + let mut capture_information: FxIndexMap, ty::CaptureInfo<'tcx>> = + Default::default(); + if !self.tcx.features().capture_disjoint_fields { + if let Some(upvars) = self.tcx.upvars_mentioned(closure_def_id) { + for (&var_hir_id, _) in upvars.iter() { + let place = self.place_for_root_variable(local_def_id, var_hir_id); + + debug!("seed place {:?}", place); + + let upvar_id = ty::UpvarId::new(var_hir_id, local_def_id); + let capture_kind = self.init_capture_kind(capture_clause, upvar_id, span); + let info = ty::CaptureInfo { expr_id: None, capture_kind }; + + capture_information.insert(place, info); + } } } @@ -153,9 +147,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let mut delegate = InferBorrowKind { fcx: self, closure_def_id, + closure_span: span, + capture_clause, current_closure_kind: ty::ClosureKind::LATTICE_BOTTOM, current_origin: None, - adjust_upvar_captures: ty::UpvarCaptureMap::default(), + capture_information, }; euv::ExprUseVisitor::new( &mut delegate, @@ -166,6 +162,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ) .consume_body(body); + debug!( + "For closure={:?}, capture_information={:#?}", + closure_def_id, delegate.capture_information + ); + self.log_capture_analysis_first_pass(closure_def_id, &delegate.capture_information, span); + if let Some(closure_substs) = infer_kind { // Unify the (as yet unbound) type variable in the closure // substs with the kind we inferred. @@ -182,7 +184,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } } - self.typeck_results.borrow_mut().upvar_capture_map.extend(delegate.adjust_upvar_captures); + self.compute_min_captures(closure_def_id, delegate); + self.log_closure_min_capture_info(closure_def_id, span); + + self.min_captures_to_closure_captures_bridge(closure_def_id); // Now that we've analyzed the closure, we know how each // variable is borrowed, and we know what traits the closure @@ -226,15 +231,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let tcx = self.tcx; let closure_def_id = tcx.hir().local_def_id(closure_id); - tcx.upvars_mentioned(closure_def_id) + self.typeck_results + .borrow() + .closure_captures + .get(&closure_def_id.to_def_id()) .iter() .flat_map(|upvars| { upvars.iter().map(|(&var_hir_id, _)| { let upvar_ty = self.node_ty(var_hir_id); - let upvar_id = ty::UpvarId { - var_path: ty::UpvarPath { hir_id: var_hir_id }, - closure_expr_id: closure_def_id, - }; + let upvar_id = ty::UpvarId::new(var_hir_id, closure_def_id); let capture = self.typeck_results.borrow().upvar_capture(upvar_id); debug!("var_id={:?} upvar_ty={:?} capture={:?}", var_hir_id, upvar_ty, capture); @@ -250,6 +255,296 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { }) .collect() } + + /// Bridge for closure analysis + /// ---------------------------- + /// + /// For closure with DefId `c`, the bridge converts structures required for supporting RFC 2229, + /// to structures currently used in the compiler for handling closure captures. + /// + /// For example the following structure will be converted: + /// + /// closure_min_captures + /// foo -> [ {foo.x, ImmBorrow}, {foo.y, MutBorrow} ] + /// bar -> [ {bar.z, ByValue}, {bar.q, MutBorrow} ] + /// + /// to + /// + /// 1. closure_captures + /// foo -> UpvarId(foo, c), bar -> UpvarId(bar, c) + /// + /// 2. 
upvar_capture_map + /// UpvarId(foo,c) -> MutBorrow, UpvarId(bar, c) -> ByValue + fn min_captures_to_closure_captures_bridge(&self, closure_def_id: DefId) { + let mut closure_captures: FxIndexMap = Default::default(); + let mut upvar_capture_map = ty::UpvarCaptureMap::default(); + + if let Some(min_captures) = + self.typeck_results.borrow().closure_min_captures.get(&closure_def_id) + { + for (var_hir_id, min_list) in min_captures.iter() { + for captured_place in min_list { + let place = &captured_place.place; + let capture_info = captured_place.info; + + let upvar_id = match place.base { + PlaceBase::Upvar(upvar_id) => upvar_id, + base => bug!("Expected upvar, found={:?}", base), + }; + + assert_eq!(upvar_id.var_path.hir_id, *var_hir_id); + assert_eq!(upvar_id.closure_expr_id, closure_def_id.expect_local()); + + closure_captures.insert(*var_hir_id, upvar_id); + + let new_capture_kind = if let Some(capture_kind) = + upvar_capture_map.get(&upvar_id) + { + // upvar_capture_map only stores the UpvarCapture (CaptureKind), + // so we create a fake capture info with no expression. + let fake_capture_info = + ty::CaptureInfo { expr_id: None, capture_kind: capture_kind.clone() }; + determine_capture_info(fake_capture_info, capture_info).capture_kind + } else { + capture_info.capture_kind + }; + upvar_capture_map.insert(upvar_id, new_capture_kind); + } + } + } + debug!("For closure_def_id={:?}, closure_captures={:#?}", closure_def_id, closure_captures); + debug!( + "For closure_def_id={:?}, upvar_capture_map={:#?}", + closure_def_id, upvar_capture_map + ); + + if !closure_captures.is_empty() { + self.typeck_results + .borrow_mut() + .closure_captures + .insert(closure_def_id, closure_captures); + + self.typeck_results.borrow_mut().upvar_capture_map.extend(upvar_capture_map); + } + } + + /// Analyzes the information collected by `InferBorrowKind` to compute the min number of + /// Places (and corresponding capture kind) that we need to keep track of to support all + /// the required captured paths. + /// + /// Eg: + /// ```rust,no_run + /// struct Point { x: i32, y: i32 } + /// + /// let s: String; // hir_id_s + /// let mut p: Point; // his_id_p + /// let c = || { + /// println!("{}", s); // L1 + /// p.x += 10; // L2 + /// println!("{}" , p.y) // L3 + /// println!("{}", p) // L4 + /// drop(s); // L5 + /// }; + /// ``` + /// and let hir_id_L1..5 be the expressions pointing to use of a captured variable on + /// the lines L1..5 respectively. + /// + /// InferBorrowKind results in a structure like this: + /// + /// ``` + /// { + /// Place(base: hir_id_s, projections: [], ....) -> (hir_id_L5, ByValue), + /// Place(base: hir_id_p, projections: [Field(0, 0)], ...) -> (hir_id_L2, ByRef(MutBorrow)) + /// Place(base: hir_id_p, projections: [Field(1, 0)], ...) -> (hir_id_L3, ByRef(ImmutBorrow)) + /// Place(base: hir_id_p, projections: [], ...) -> (hir_id_L4, ByRef(ImmutBorrow)) + /// ``` + /// + /// After the min capture analysis, we get: + /// ``` + /// { + /// hir_id_s -> [ + /// Place(base: hir_id_s, projections: [], ....) -> (hir_id_L4, ByValue) + /// ], + /// hir_id_p -> [ + /// Place(base: hir_id_p, projections: [], ...) 
-> (hir_id_L2, ByRef(MutBorrow)), + /// ], + /// ``` + fn compute_min_captures( + &self, + closure_def_id: DefId, + inferred_info: InferBorrowKind<'_, 'tcx>, + ) { + let mut root_var_min_capture_list: ty::RootVariableMinCaptureList<'_> = Default::default(); + + for (place, capture_info) in inferred_info.capture_information.into_iter() { + let var_hir_id = match place.base { + PlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id, + base => bug!("Expected upvar, found={:?}", base), + }; + + // Arrays are captured in their entirety; drop Index projections and projections + // after Index projections. + let first_index_projection = + place.projections.split(|proj| ProjectionKind::Index == proj.kind).next(); + let place = Place { + base_ty: place.base_ty, + base: place.base, + projections: first_index_projection.map_or(Vec::new(), |p| p.to_vec()), + }; + + let min_cap_list = match root_var_min_capture_list.get_mut(&var_hir_id) { + None => { + let min_cap_list = vec![ty::CapturedPlace { place: place, info: capture_info }]; + root_var_min_capture_list.insert(var_hir_id, min_cap_list); + continue; + } + Some(min_cap_list) => min_cap_list, + }; + + // Go through each entry in the current list of min_captures. + // - if an ancestor is found, update its capture kind to account for the current place's + // capture information. + // + // - if a descendant is found, remove it from the list, and update the current place's + // capture information to account for the descendant's capture kind. + // + // We can never be in a case where the list contains both an ancestor and a descendant. + // Also, there can only be one ancestor, but there might be + // multiple descendants. + + let mut descendant_found = false; + let mut updated_capture_info = capture_info; + min_cap_list.retain(|possible_descendant| { + match determine_place_ancestry_relation(&place, &possible_descendant.place) { + // current place is ancestor of possible_descendant + PlaceAncestryRelation::Ancestor => { + descendant_found = true; + updated_capture_info = + determine_capture_info(updated_capture_info, possible_descendant.info); + false + } + + _ => true, + } + }); + + let mut ancestor_found = false; + if !descendant_found { + for possible_ancestor in min_cap_list.iter_mut() { + match determine_place_ancestry_relation(&place, &possible_ancestor.place) { + // current place is descendant of possible_ancestor + PlaceAncestryRelation::Descendant => { + ancestor_found = true; + possible_ancestor.info = + determine_capture_info(possible_ancestor.info, capture_info); + + // Only one ancestor of the current place will be in the list.
+ break; + } + _ => {} + } + } + } + + // Only need to insert when we don't have an ancestor in the existing min capture list + if !ancestor_found { + let captured_place = + ty::CapturedPlace { place: place.clone(), info: updated_capture_info }; + min_cap_list.push(captured_place); + } + } + + debug!("For closure={:?}, min_captures={:#?}", closure_def_id, root_var_min_capture_list); + + if !root_var_min_capture_list.is_empty() { + self.typeck_results + .borrow_mut() + .closure_min_captures + .insert(closure_def_id, root_var_min_capture_list); + } + } + + fn init_capture_kind( + &self, + capture_clause: hir::CaptureBy, + upvar_id: ty::UpvarId, + closure_span: Span, + ) -> ty::UpvarCapture<'tcx> { + match capture_clause { + hir::CaptureBy::Value => ty::UpvarCapture::ByValue(None), + hir::CaptureBy::Ref => { + let origin = UpvarRegion(upvar_id, closure_span); + let upvar_region = self.next_region_var(origin); + let upvar_borrow = ty::UpvarBorrow { kind: ty::ImmBorrow, region: upvar_region }; + ty::UpvarCapture::ByRef(upvar_borrow) + } + } + } + + fn place_for_root_variable( + &self, + closure_def_id: LocalDefId, + var_hir_id: hir::HirId, + ) -> Place<'tcx> { + let upvar_id = ty::UpvarId::new(var_hir_id, closure_def_id); + + Place { + base_ty: self.node_ty(var_hir_id), + base: PlaceBase::Upvar(upvar_id), + projections: Default::default(), + } + } + + fn should_log_capture_analysis(&self, closure_def_id: DefId) -> bool { + self.tcx.has_attr(closure_def_id, sym::rustc_capture_analysis) + } + + fn log_capture_analysis_first_pass( + &self, + closure_def_id: rustc_hir::def_id::DefId, + capture_information: &FxIndexMap, ty::CaptureInfo<'tcx>>, + closure_span: Span, + ) { + if self.should_log_capture_analysis(closure_def_id) { + let mut diag = + self.tcx.sess.struct_span_err(closure_span, "First Pass analysis includes:"); + for (place, capture_info) in capture_information { + let capture_str = construct_capture_info_string(self.tcx, place, capture_info); + let output_str = format!("Capturing {}", capture_str); + + let span = capture_info.expr_id.map_or(closure_span, |e| self.tcx.hir().span(e)); + diag.span_note(span, &output_str); + } + diag.emit(); + } + } + + fn log_closure_min_capture_info(&self, closure_def_id: DefId, closure_span: Span) { + if self.should_log_capture_analysis(closure_def_id) { + if let Some(min_captures) = + self.typeck_results.borrow().closure_min_captures.get(&closure_def_id) + { + let mut diag = + self.tcx.sess.struct_span_err(closure_span, "Min Capture analysis includes:"); + + for (_, min_captures_for_var) in min_captures { + for capture in min_captures_for_var { + let place = &capture.place; + let capture_info = &capture.info; + + let capture_str = + construct_capture_info_string(self.tcx, place, capture_info); + let output_str = format!("Min Capture {}", capture_str); + + let span = + capture_info.expr_id.map_or(closure_span, |e| self.tcx.hir().span(e)); + diag.span_note(span, &output_str); + } + } + diag.emit(); + } + } + } } struct InferBorrowKind<'a, 'tcx> { @@ -258,6 +553,10 @@ struct InferBorrowKind<'a, 'tcx> { // The def-id of the closure whose kind and upvar accesses are being inferred. closure_def_id: DefId, + closure_span: Span, + + capture_clause: hir::CaptureBy, + // The kind that we have inferred that the current closure // requires. Note that we *always* infer a minimal kind, even if // we don't always *use* that in the final result (i.e., sometimes @@ -270,9 +569,31 @@ struct InferBorrowKind<'a, 'tcx> { // variable access that caused us to do so. 
current_origin: Option<(Span, Symbol)>, - // For each upvar that we access, we track the minimal kind of - // access we need (ref, ref mut, move, etc). - adjust_upvar_captures: ty::UpvarCaptureMap<'tcx>, + /// For each Place that is captured by the closure, we track the minimal kind of + /// access we need (ref, ref mut, move, etc) and the expression that resulted in such access. + /// + /// Consider closure where s.str1 is captured via an ImmutableBorrow and + /// s.str2 via a MutableBorrow + /// + /// ```rust,no_run + /// struct SomeStruct { str1: String, str2: String } + /// + /// // Assume that the HirId for the variable definition is `V1` + /// let mut s = SomeStruct { str1: format!("s1"), str2: format!("s2") } + /// + /// let fix_s = |new_s2| { + /// // Assume that the HirId for the expression `s.str1` is `E1` + /// println!("Updating SomeStruct with str1=", s.str1); + /// // Assume that the HirId for the expression `*s.str2` is `E2` + /// s.str2 = new_s2; + /// }; + /// ``` + /// + /// For closure `fix_s`, (at a high level) the map contains + /// + /// Place { V1, [ProjectionKind::Field(Index=0, Variant=0)] } : CaptureKind { E1, ImmutableBorrow } + /// Place { V1, [ProjectionKind::Field(Index=1, Variant=0)] } : CaptureKind { E2, MutableBorrow } + capture_information: FxIndexMap, ty::CaptureInfo<'tcx>>, } impl<'a, 'tcx> InferBorrowKind<'a, 'tcx> { @@ -314,26 +635,15 @@ impl<'a, 'tcx> InferBorrowKind<'a, 'tcx> { var_name(tcx, upvar_id.var_path.hir_id), ); - let new_capture = ty::UpvarCapture::ByValue(Some(usage_span)); - match self.adjust_upvar_captures.entry(upvar_id) { - Entry::Occupied(mut e) => { - match e.get() { - // We always overwrite `ByRef`, since we require - // that the upvar be available by value. - // - // If we had a previous by-value usage without a specific - // span, use ours instead. Otherwise, keep the first span - // we encountered, since there isn't an obviously better one. - ty::UpvarCapture::ByRef(_) | ty::UpvarCapture::ByValue(None) => { - e.insert(new_capture); - } - _ => {} - } - } - Entry::Vacant(e) => { - e.insert(new_capture); - } - } + let capture_info = ty::CaptureInfo { + expr_id: Some(diag_expr_id), + capture_kind: ty::UpvarCapture::ByValue(Some(usage_span)), + }; + + let curr_info = self.capture_information[&place_with_id.place]; + let updated_info = determine_capture_info(curr_info, capture_info); + + self.capture_information[&place_with_id.place] = updated_info; } /// Indicates that `place_with_id` is being directly mutated (e.g., assigned @@ -349,7 +659,7 @@ impl<'a, 'tcx> InferBorrowKind<'a, 'tcx> { place_with_id, diag_expr_id ); - if let PlaceBase::Upvar(upvar_id) = place_with_id.place.base { + if let PlaceBase::Upvar(_) = place_with_id.place.base { let mut borrow_kind = ty::MutBorrow; for pointer_ty in place_with_id.place.deref_tys() { match pointer_ty.kind() { @@ -363,7 +673,7 @@ impl<'a, 'tcx> InferBorrowKind<'a, 'tcx> { _ => (), } } - self.adjust_upvar_deref(upvar_id, self.fcx.tcx.hir().span(diag_expr_id), borrow_kind); + self.adjust_upvar_deref(place_with_id, diag_expr_id, borrow_kind); } } @@ -377,24 +687,20 @@ impl<'a, 'tcx> InferBorrowKind<'a, 'tcx> { place_with_id, diag_expr_id ); - if let PlaceBase::Upvar(upvar_id) = place_with_id.place.base { + if let PlaceBase::Upvar(_) = place_with_id.place.base { if place_with_id.place.deref_tys().any(ty::TyS::is_unsafe_ptr) { // Raw pointers don't inherit mutability. 
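The `capture_information` map documented in the hunk above keys capture data by a `Place` (a root variable plus a projection path such as `.str1`) instead of by the variable alone, which is the core of the RFC 2229 disjoint-capture analysis; the min-capture pass then collapses that map using the ancestor/prefix rule described for `PlaceAncestryRelation` earlier in this file. The following is a deliberately simplified, self-contained model of those two ideas; `Place`, `CaptureKind`, `is_ancestor`, and `record` are invented stand-ins, and the real escalation logic lives in `determine_capture_info` further down in this diff.

```rust
use std::collections::HashMap;

// Invented stand-ins: a "place" is a root variable plus a field path, and the
// capture kinds mirror ImmBorrow < UniqueImmBorrow < MutBorrow < ByValue.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct Place {
    root: &'static str,
    path: Vec<&'static str>,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum CaptureKind {
    ImmBorrow,
    UniqueImmBorrow,
    MutBorrow,
    ByValue,
}

/// `a` is an ancestor of `b` if they share a root and `a.path` is a prefix of `b.path`.
fn is_ancestor(a: &Place, b: &Place) -> bool {
    a.root == b.root && b.path.starts_with(&a.path)
}

/// Record a use of `place`, keeping the strongest capture kind seen per place.
fn record(info: &mut HashMap<Place, CaptureKind>, place: Place, kind: CaptureKind) {
    let entry = info.entry(place).or_insert(kind);
    *entry = (*entry).max(kind);
}

fn main() {
    let mut info = HashMap::new();
    let s_str1 = Place { root: "s", path: vec!["str1"] };
    let s_str2 = Place { root: "s", path: vec!["str2"] };

    record(&mut info, s_str1.clone(), CaptureKind::ImmBorrow); // println!("{}", s.str1)
    record(&mut info, s_str2.clone(), CaptureKind::MutBorrow); // s.str2 = new_s2
    record(&mut info, s_str2.clone(), CaptureKind::ImmBorrow); // a later read does not weaken it

    let whole_s = Place { root: "s", path: vec![] };
    assert!(is_ancestor(&whole_s, &s_str1) && is_ancestor(&whole_s, &s_str2));
    assert_eq!(info[&s_str2], CaptureKind::MutBorrow);
    println!("capture_information (toy): {:?}", info);
}
```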
return; } // for a borrowed pointer to be unique, its base must be unique - self.adjust_upvar_deref( - upvar_id, - self.fcx.tcx.hir().span(diag_expr_id), - ty::UniqueImmBorrow, - ); + self.adjust_upvar_deref(place_with_id, diag_expr_id, ty::UniqueImmBorrow); } } fn adjust_upvar_deref( &mut self, - upvar_id: ty::UpvarId, - place_span: Span, + place_with_id: &PlaceWithHirId<'tcx>, + diag_expr_id: hir::HirId, borrow_kind: ty::BorrowKind, ) { assert!(match borrow_kind { @@ -411,15 +717,16 @@ impl<'a, 'tcx> InferBorrowKind<'a, 'tcx> { // upvar, then we need to modify the // borrow_kind of the upvar to make sure it // is inferred to mutable if necessary - self.adjust_upvar_borrow_kind(upvar_id, borrow_kind); + self.adjust_upvar_borrow_kind(place_with_id, diag_expr_id, borrow_kind); - // also need to be in an FnMut closure since this is not an ImmBorrow - self.adjust_closure_kind( - upvar_id.closure_expr_id, - ty::ClosureKind::FnMut, - place_span, - var_name(tcx, upvar_id.var_path.hir_id), - ); + if let PlaceBase::Upvar(upvar_id) = place_with_id.place.base { + self.adjust_closure_kind( + upvar_id.closure_expr_id, + ty::ClosureKind::FnMut, + tcx.hir().span(diag_expr_id), + var_name(tcx, upvar_id.var_path.hir_id), + ); + } } /// We infer the borrow_kind with which to borrow upvars in a stack closure. @@ -427,37 +734,34 @@ impl<'a, 'tcx> InferBorrowKind<'a, 'tcx> { /// moving from left to right as needed (but never right to left). /// Here the argument `mutbl` is the borrow_kind that is required by /// some particular use. - fn adjust_upvar_borrow_kind(&mut self, upvar_id: ty::UpvarId, kind: ty::BorrowKind) { - let upvar_capture = self - .adjust_upvar_captures - .get(&upvar_id) - .copied() - .unwrap_or_else(|| self.fcx.typeck_results.borrow().upvar_capture(upvar_id)); + fn adjust_upvar_borrow_kind( + &mut self, + place_with_id: &PlaceWithHirId<'tcx>, + diag_expr_id: hir::HirId, + kind: ty::BorrowKind, + ) { + let curr_capture_info = self.capture_information[&place_with_id.place]; + debug!( - "adjust_upvar_borrow_kind(upvar_id={:?}, upvar_capture={:?}, kind={:?})", - upvar_id, upvar_capture, kind + "adjust_upvar_borrow_kind(place={:?}, diag_expr_id={:?}, capture_info={:?}, kind={:?})", + place_with_id, diag_expr_id, curr_capture_info, kind ); - match upvar_capture { - ty::UpvarCapture::ByValue(_) => { - // Upvar is already by-value, the strongest criteria. - } - ty::UpvarCapture::ByRef(mut upvar_borrow) => { - match (upvar_borrow.kind, kind) { - // Take RHS: - (ty::ImmBorrow, ty::UniqueImmBorrow | ty::MutBorrow) - | (ty::UniqueImmBorrow, ty::MutBorrow) => { - upvar_borrow.kind = kind; - self.adjust_upvar_captures - .insert(upvar_id, ty::UpvarCapture::ByRef(upvar_borrow)); - } - // Take LHS: - (ty::ImmBorrow, ty::ImmBorrow) - | (ty::UniqueImmBorrow, ty::ImmBorrow | ty::UniqueImmBorrow) - | (ty::MutBorrow, _) => {} - } - } - } + if let ty::UpvarCapture::ByValue(_) = curr_capture_info.capture_kind { + // It's already captured by value, we don't need to do anything here + return; + } else if let ty::UpvarCapture::ByRef(curr_upvar_borrow) = curr_capture_info.capture_kind { + // Use the same region as the current capture information + // Doesn't matter since only one of the UpvarBorrow will be used. 
+ let new_upvar_borrow = ty::UpvarBorrow { kind, region: curr_upvar_borrow.region }; + + let capture_info = ty::CaptureInfo { + expr_id: Some(diag_expr_id), + capture_kind: ty::UpvarCapture::ByRef(new_upvar_borrow), + }; + let updated_info = determine_capture_info(curr_capture_info, capture_info); + self.capture_information[&place_with_id.place] = updated_info; + }; } fn adjust_closure_kind( @@ -501,6 +805,28 @@ impl<'a, 'tcx> InferBorrowKind<'a, 'tcx> { } } } + + fn init_capture_info_for_place( + &mut self, + place_with_id: &PlaceWithHirId<'tcx>, + diag_expr_id: hir::HirId, + ) { + if let PlaceBase::Upvar(upvar_id) = place_with_id.place.base { + assert_eq!(self.closure_def_id.expect_local(), upvar_id.closure_expr_id); + + let capture_kind = + self.fcx.init_capture_kind(self.capture_clause, upvar_id, self.closure_span); + + let expr_id = Some(diag_expr_id); + let capture_info = ty::CaptureInfo { expr_id, capture_kind }; + + debug!("Capturing new place {:?}, capture_info={:?}", place_with_id, capture_info); + + self.capture_information.insert(place_with_id.place.clone(), capture_info); + } else { + debug!("Not upvar: {:?}", place_with_id); + } + } } impl<'a, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'tcx> { @@ -514,7 +840,11 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'tcx> { "consume(place_with_id={:?}, diag_expr_id={:?}, mode={:?})", place_with_id, diag_expr_id, mode ); - self.adjust_upvar_borrow_kind_for_consume(&place_with_id, diag_expr_id, mode); + if !self.capture_information.contains_key(&place_with_id.place) { + self.init_capture_info_for_place(place_with_id, diag_expr_id); + } + + self.adjust_upvar_borrow_kind_for_consume(place_with_id, diag_expr_id, mode); } fn borrow( @@ -528,6 +858,10 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'tcx> { place_with_id, diag_expr_id, bk ); + if !self.capture_information.contains_key(&place_with_id.place) { + self.init_capture_info_for_place(place_with_id, diag_expr_id); + } + match bk { ty::ImmBorrow => {} ty::UniqueImmBorrow => { @@ -541,10 +875,175 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'tcx> { fn mutate(&mut self, assignee_place: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) { debug!("mutate(assignee_place={:?}, diag_expr_id={:?})", assignee_place, diag_expr_id); + + if !self.capture_information.contains_key(&assignee_place.place) { + self.init_capture_info_for_place(assignee_place, diag_expr_id); + } + self.adjust_upvar_borrow_kind_for_mut(assignee_place, diag_expr_id); } } +fn construct_capture_info_string( + tcx: TyCtxt<'_>, + place: &Place<'tcx>, + capture_info: &ty::CaptureInfo<'tcx>, +) -> String { + let variable_name = match place.base { + PlaceBase::Upvar(upvar_id) => var_name(tcx, upvar_id.var_path.hir_id).to_string(), + _ => bug!("Capture_information should only contain upvars"), + }; + + let mut projections_str = String::new(); + for (i, item) in place.projections.iter().enumerate() { + let proj = match item.kind { + ProjectionKind::Field(a, b) => format!("({:?}, {:?})", a, b), + ProjectionKind::Deref => String::from("Deref"), + ProjectionKind::Index => String::from("Index"), + ProjectionKind::Subslice => String::from("Subslice"), + }; + if i != 0 { + projections_str.push_str(","); + } + projections_str.push_str(proj.as_str()); + } + + let capture_kind_str = match capture_info.capture_kind { + ty::UpvarCapture::ByValue(_) => "ByValue".into(), + ty::UpvarCapture::ByRef(borrow) => format!("{:?}", borrow.kind), + }; + format!("{}[{}] -> {}", variable_name, 
projections_str, capture_kind_str) +} + fn var_name(tcx: TyCtxt<'_>, var_hir_id: hir::HirId) -> Symbol { tcx.hir().name(var_hir_id) } + +/// Helper function to determine if we need to escalate CaptureKind from +/// CaptureInfo A to B and return the escalated CaptureInfo. +/// (Note: CaptureInfo contains CaptureKind and an expression that led to capture it in that way) +/// +/// If both `CaptureKind`s are considered equivalent, then the CaptureInfo is selected based +/// on the `CaptureInfo` containing an associated expression id. +/// +/// If both the CaptureKind and Expression are considered to be equivalent, +/// then `CaptureInfo` A is preferred. This can be useful in cases where we want to prioritize +/// expressions reported back to the user as part of diagnostics based on which appears earlier +/// in the closure. This can be achieved simply by calling +/// `determine_capture_info(existing_info, current_info)`. This works out because the +/// expressions that occur earlier in the closure body than the current expression are processed first. +/// Consider the following example: +/// ```rust,no_run +/// struct Point { x: i32, y: i32 } +/// let mut p = Point { x: 10, y: 10 }; +/// +/// let c = || { +/// p.x += 10; +/// // ^ E1 ^ +/// // ... +/// // More code +/// // ... +/// p.x += 10; // E2 +/// // ^ E2 ^ +/// }; +/// ``` +/// `CaptureKind` associated with both `E1` and `E2` will be ByRef(MutBorrow), +/// and both have an expression associated; however, for diagnostics we prefer reporting +/// `E1` since it appears earlier in the closure body. When `E2` is being processed we +/// would've already handled `E1`, and have an existing capture_information for it. +/// Calling `determine_capture_info(existing_info_e1, current_info_e2)` will return +/// `existing_info_e1` in this case, allowing us to point to `E1` in case of diagnostics. +fn determine_capture_info( + capture_info_a: ty::CaptureInfo<'tcx>, + capture_info_b: ty::CaptureInfo<'tcx>, +) -> ty::CaptureInfo<'tcx> { + // If the capture kind is equivalent, then we don't need to escalate and can compare the + // expressions. + let eq_capture_kind = match (capture_info_a.capture_kind, capture_info_b.capture_kind) { + (ty::UpvarCapture::ByValue(_), ty::UpvarCapture::ByValue(_)) => { + // We don't need to worry about the spans being ignored here. + // + // The expr_id in capture_info corresponds to the span that is stored within + // ByValue(span) and therefore it gets handled by prioritizing based on + // expressions below.
+ true + } + (ty::UpvarCapture::ByRef(ref_a), ty::UpvarCapture::ByRef(ref_b)) => { + ref_a.kind == ref_b.kind + } + (ty::UpvarCapture::ByValue(_), _) | (ty::UpvarCapture::ByRef(_), _) => false, + }; + + if eq_capture_kind { + match (capture_info_a.expr_id, capture_info_b.expr_id) { + (Some(_), _) | (None, None) => capture_info_a, + (None, Some(_)) => capture_info_b, + } + } else { + // We select the CaptureKind which ranks higher based on the following priority order: + // ByValue > MutBorrow > UniqueImmBorrow > ImmBorrow + match (capture_info_a.capture_kind, capture_info_b.capture_kind) { + (ty::UpvarCapture::ByValue(_), _) => capture_info_a, + (_, ty::UpvarCapture::ByValue(_)) => capture_info_b, + (ty::UpvarCapture::ByRef(ref_a), ty::UpvarCapture::ByRef(ref_b)) => { + match (ref_a.kind, ref_b.kind) { + // Take LHS: + (ty::UniqueImmBorrow | ty::MutBorrow, ty::ImmBorrow) + | (ty::MutBorrow, ty::UniqueImmBorrow) => capture_info_a, + + // Take RHS: + (ty::ImmBorrow, ty::UniqueImmBorrow | ty::MutBorrow) + | (ty::UniqueImmBorrow, ty::MutBorrow) => capture_info_b, + + (ty::ImmBorrow, ty::ImmBorrow) + | (ty::UniqueImmBorrow, ty::UniqueImmBorrow) + | (ty::MutBorrow, ty::MutBorrow) => { + bug!("Expected unequal capture kinds"); + } + } + } + } + } +} + +/// Determines the ancestry relationship of Place A relative to Place B. +/// +/// `PlaceAncestryRelation::Ancestor` implies Place A is an ancestor of Place B +/// `PlaceAncestryRelation::Descendant` implies Place A is a descendant of Place B +/// `PlaceAncestryRelation::Divergent` implies neither of them is the ancestor of the other. +fn determine_place_ancestry_relation( + place_a: &Place<'tcx>, + place_b: &Place<'tcx>, +) -> PlaceAncestryRelation { + // If Place A and Place B don't start off from the same root variable, they are divergent.
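`determine_capture_info` above applies two rules: capture kinds escalate along ByValue > MutBorrow > UniqueImmBorrow > ImmBorrow, and when the kinds tie, the already-recorded (earlier) expression is kept so diagnostics point at the first use. A compressed, self-contained sketch of the same decision with simplified stand-in types (`Kind`, `Info`, plain integer expression ids instead of `hir::HirId`):

```rust
// Simplified stand-ins for ty::UpvarCapture / ty::CaptureInfo.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Kind {
    ImmBorrow,
    UniqueImmBorrow,
    MutBorrow,
    ByValue,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Info {
    expr_id: Option<u32>, // expression that caused this capture, if any
    kind: Kind,
}

// Keep the stronger kind; on a tie, prefer the existing (earlier) expression.
fn determine(existing: Info, current: Info) -> Info {
    if existing.kind == current.kind {
        match (existing.expr_id, current.expr_id) {
            (Some(_), _) | (None, None) => existing,
            (None, Some(_)) => current,
        }
    } else if existing.kind > current.kind {
        existing
    } else {
        current
    }
}

fn main() {
    let e1 = Info { expr_id: Some(1), kind: Kind::MutBorrow };
    let e2 = Info { expr_id: Some(2), kind: Kind::MutBorrow };
    // Same kind: the first expression is kept for diagnostics.
    assert_eq!(determine(e1, e2), e1);

    let by_value = Info { expr_id: Some(5), kind: Kind::ByValue };
    // Stronger kind wins regardless of order.
    assert_eq!(determine(e1, by_value), by_value);
    println!("escalation behaves like the real helper in this toy model");
}
```

The derived `Ord` on `Kind` collapses the real code's explicit match over borrow kinds (and ignores the span carried by `ByValue`), but the observable choice is the same.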
+ if place_a.base != place_b.base { + return PlaceAncestryRelation::Divergent; + } + + // Assume of length of projections_a = n + let projections_a = &place_a.projections; + + // Assume of length of projections_b = m + let projections_b = &place_b.projections; + + let mut same_initial_projections = true; + + for (proj_a, proj_b) in projections_a.iter().zip(projections_b.iter()) { + if proj_a != proj_b { + same_initial_projections = false; + break; + } + } + + if same_initial_projections { + // First min(n, m) projections are the same + // Select Ancestor/Descendant + if projections_b.len() >= projections_a.len() { + PlaceAncestryRelation::Ancestor + } else { + PlaceAncestryRelation::Descendant + } + } else { + PlaceAncestryRelation::Divergent + } +} diff --git a/compiler/rustc_typeck/src/check/wfcheck.rs b/compiler/rustc_typeck/src/check/wfcheck.rs index 1e27357ce44..5dffe5107b5 100644 --- a/compiler/rustc_typeck/src/check/wfcheck.rs +++ b/compiler/rustc_typeck/src/check/wfcheck.rs @@ -403,12 +403,12 @@ fn check_associated_item( match item.kind { ty::AssocKind::Const => { let ty = fcx.tcx.type_of(item.def_id); - let ty = fcx.normalize_associated_types_in(span, &ty); + let ty = fcx.normalize_associated_types_in(span, ty); fcx.register_wf_obligation(ty.into(), span, code.clone()); } ty::AssocKind::Fn => { let sig = fcx.tcx.fn_sig(item.def_id); - let sig = fcx.normalize_associated_types_in(span, &sig); + let sig = fcx.normalize_associated_types_in(span, sig); let hir_sig = sig_if_method.expect("bad signature for method"); check_fn_or_method( tcx, @@ -427,7 +427,7 @@ fn check_associated_item( } if item.defaultness.has_value() { let ty = fcx.tcx.type_of(item.def_id); - let ty = fcx.normalize_associated_types_in(span, &ty); + let ty = fcx.normalize_associated_types_in(span, ty); fcx.register_wf_obligation(ty.into(), span, code.clone()); } } @@ -480,7 +480,7 @@ fn check_type_defn<'tcx, F>( let needs_drop_copy = || { packed && { let ty = variant.fields.last().unwrap().ty; - let ty = fcx.tcx.erase_regions(&ty); + let ty = fcx.tcx.erase_regions(ty); if ty.needs_infer() { fcx_tcx .sess @@ -592,7 +592,7 @@ fn check_associated_type_bounds(fcx: &FnCtxt<'_, '_>, item: &ty::AssocItem, span debug!("check_associated_type_bounds: bounds={:?}", bounds); let wf_obligations = bounds.iter().flat_map(|&(bound, bound_span)| { - let normalized_bound = fcx.normalize_associated_types_in(span, &bound); + let normalized_bound = fcx.normalize_associated_types_in(span, bound); traits::wf::predicate_obligations( fcx, fcx.param_env, @@ -618,7 +618,7 @@ fn check_item_fn( for_id(tcx, item_id, span).with_fcx(|fcx, tcx| { let def_id = fcx.tcx.hir().local_def_id(item_id); let sig = fcx.tcx.fn_sig(def_id); - let sig = fcx.normalize_associated_types_in(span, &sig); + let sig = fcx.normalize_associated_types_in(span, sig); let mut implied_bounds = vec![]; check_fn_or_method( tcx, @@ -638,7 +638,7 @@ fn check_item_type(tcx: TyCtxt<'_>, item_id: hir::HirId, ty_span: Span, allow_fo for_id(tcx, item_id, ty_span).with_fcx(|fcx, tcx| { let ty = tcx.type_of(tcx.hir().local_def_id(item_id)); - let item_ty = fcx.normalize_associated_types_in(ty_span, &ty); + let item_ty = fcx.normalize_associated_types_in(ty_span, ty); let mut forbid_unsized = true; if allow_foreign_ty { @@ -680,7 +680,7 @@ fn check_impl<'tcx>( // won't hold). 
let trait_ref = fcx.tcx.impl_trait_ref(item_def_id).unwrap(); let trait_ref = - fcx.normalize_associated_types_in(ast_trait_ref.path.span, &trait_ref); + fcx.normalize_associated_types_in(ast_trait_ref.path.span, trait_ref); let obligations = traits::wf::trait_obligations( fcx, fcx.param_env, @@ -695,7 +695,7 @@ fn check_impl<'tcx>( } None => { let self_ty = fcx.tcx.type_of(item_def_id); - let self_ty = fcx.normalize_associated_types_in(item.span, &self_ty); + let self_ty = fcx.normalize_associated_types_in(item.span, self_ty); fcx.register_wf_obligation( self_ty.into(), ast_self_ty.span, @@ -800,18 +800,20 @@ fn check_where_clauses<'tcx, 'fcx>( params: FxHashSet, } impl<'tcx> ty::fold::TypeVisitor<'tcx> for CountParams { - fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> { + type BreakTy = (); + + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { if let ty::Param(param) = t.kind() { self.params.insert(param.index); } t.super_visit_with(self) } - fn visit_region(&mut self, _: ty::Region<'tcx>) -> ControlFlow<()> { + fn visit_region(&mut self, _: ty::Region<'tcx>) -> ControlFlow { ControlFlow::BREAK } - fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<()> { + fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow { if let ty::ConstKind::Param(param) = c.val { self.params.insert(param.index); } @@ -845,7 +847,7 @@ fn check_where_clauses<'tcx, 'fcx>( // Note the subtle difference from how we handle `predicates` // below: there, we are not trying to prove those predicates // to be *true* but merely *well-formed*. - let pred = fcx.normalize_associated_types_in(sp, &pred); + let pred = fcx.normalize_associated_types_in(sp, pred); let cause = traits::ObligationCause::new(sp, fcx.body_id, traits::ItemObligation(def_id)); traits::Obligation::new(cause, fcx.param_env, pred) @@ -856,12 +858,12 @@ fn check_where_clauses<'tcx, 'fcx>( if let Some((mut return_ty, span)) = return_ty { if return_ty.has_infer_types_or_consts() { fcx.select_obligations_where_possible(false, |_| {}); - return_ty = fcx.resolve_vars_if_possible(&return_ty); + return_ty = fcx.resolve_vars_if_possible(return_ty); } check_opaque_types(tcx, fcx, def_id.expect_local(), span, return_ty); } - let predicates = fcx.normalize_associated_types_in(span, &predicates); + let predicates = fcx.normalize_associated_types_in(span, predicates); debug!("check_where_clauses: predicates={:?}", predicates.predicates); assert_eq!(predicates.predicates.len(), predicates.spans.len()); @@ -885,8 +887,8 @@ fn check_fn_or_method<'fcx, 'tcx>( def_id: DefId, implied_bounds: &mut Vec>, ) { - let sig = fcx.normalize_associated_types_in(span, &sig); - let sig = fcx.tcx.liberate_late_bound_regions(def_id, &sig); + let sig = fcx.normalize_associated_types_in(span, sig); + let sig = fcx.tcx.liberate_late_bound_regions(def_id, sig); for (&input_ty, span) in sig.inputs().iter().zip(hir_decl.inputs.iter().map(|t| t.span)) { fcx.register_wf_obligation(input_ty.into(), span, ObligationCauseCode::MiscObligation); @@ -1063,19 +1065,19 @@ fn check_method_receiver<'fcx, 'tcx>( let span = fn_sig.decl.inputs[0].span; let sig = fcx.tcx.fn_sig(method.def_id); - let sig = fcx.normalize_associated_types_in(span, &sig); - let sig = fcx.tcx.liberate_late_bound_regions(method.def_id, &sig); + let sig = fcx.normalize_associated_types_in(span, sig); + let sig = fcx.tcx.liberate_late_bound_regions(method.def_id, sig); debug!("check_method_receiver: sig={:?}", sig); - let self_ty = fcx.normalize_associated_types_in(span, &self_ty); - let self_ty = 
fcx.tcx.liberate_late_bound_regions(method.def_id, &ty::Binder::bind(self_ty)); + let self_ty = fcx.normalize_associated_types_in(span, self_ty); + let self_ty = fcx.tcx.liberate_late_bound_regions(method.def_id, ty::Binder::bind(self_ty)); let receiver_ty = sig.inputs()[0]; - let receiver_ty = fcx.normalize_associated_types_in(span, &receiver_ty); + let receiver_ty = fcx.normalize_associated_types_in(span, receiver_ty); let receiver_ty = - fcx.tcx.liberate_late_bound_regions(method.def_id, &ty::Binder::bind(receiver_ty)); + fcx.tcx.liberate_late_bound_regions(method.def_id, ty::Binder::bind(receiver_ty)); if fcx.tcx.features().arbitrary_self_types { if !receiver_is_valid(fcx, span, receiver_ty, self_ty, true) { @@ -1307,7 +1309,7 @@ fn check_false_global_bounds(fcx: &FnCtxt<'_, '_>, span: Span, id: hir::HirId) { let pred = obligation.predicate; // Match the existing behavior. if pred.is_global() && !pred.has_late_bound_regions() { - let pred = fcx.normalize_associated_types_in(span, &pred); + let pred = fcx.normalize_associated_types_in(span, pred); let obligation = traits::Obligation::new( traits::ObligationCause::new(span, id, traits::TrivialBound), empty_env, @@ -1405,8 +1407,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { .iter() .map(|field| { let field_ty = self.tcx.type_of(self.tcx.hir().local_def_id(field.hir_id)); - let field_ty = self.normalize_associated_types_in(field.ty.span, &field_ty); - let field_ty = self.resolve_vars_if_possible(&field_ty); + let field_ty = self.normalize_associated_types_in(field.ty.span, field_ty); + let field_ty = self.resolve_vars_if_possible(field_ty); debug!("non_enum_variant: type of field {:?} is {:?}", field, field_ty); AdtField { ty: field_ty, span: field.ty.span } }) @@ -1429,7 +1431,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { pub(super) fn impl_implied_bounds(&self, impl_def_id: DefId, span: Span) -> Vec> { match self.tcx.impl_trait_ref(impl_def_id) { - Some(ref trait_ref) => { + Some(trait_ref) => { // Trait impl: take implied bounds from all types that // appear in the trait reference. let trait_ref = self.normalize_associated_types_in(span, trait_ref); @@ -1439,7 +1441,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { None => { // Inherent impl: take implied bounds from the `self` type. 
let self_ty = self.tcx.type_of(impl_def_id); - let self_ty = self.normalize_associated_types_in(span, &self_ty); + let self_ty = self.normalize_associated_types_in(span, self_ty); vec![self_ty] } } diff --git a/compiler/rustc_typeck/src/check/writeback.rs b/compiler/rustc_typeck/src/check/writeback.rs index 5363702a5be..335f2cc2716 100644 --- a/compiler/rustc_typeck/src/check/writeback.rs +++ b/compiler/rustc_typeck/src/check/writeback.rs @@ -70,6 +70,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { debug!("used_trait_imports({:?}) = {:?}", item_def_id, used_trait_imports); wbcx.typeck_results.used_trait_imports = used_trait_imports; + wbcx.typeck_results.treat_byte_string_as_slice = + mem::take(&mut self.typeck_results.borrow_mut().treat_byte_string_as_slice); + wbcx.typeck_results.closure_captures = mem::take(&mut self.typeck_results.borrow_mut().closure_captures); @@ -136,7 +139,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { match e.kind { hir::ExprKind::Unary(hir::UnOp::UnNeg | hir::UnOp::UnNot, ref inner) => { let inner_ty = self.fcx.node_ty(inner.hir_id); - let inner_ty = self.fcx.resolve_vars_if_possible(&inner_ty); + let inner_ty = self.fcx.resolve_vars_if_possible(inner_ty); if inner_ty.is_scalar() { let mut typeck_results = self.fcx.typeck_results.borrow_mut(); @@ -147,10 +150,10 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { hir::ExprKind::Binary(ref op, ref lhs, ref rhs) | hir::ExprKind::AssignOp(ref op, ref lhs, ref rhs) => { let lhs_ty = self.fcx.node_ty(lhs.hir_id); - let lhs_ty = self.fcx.resolve_vars_if_possible(&lhs_ty); + let lhs_ty = self.fcx.resolve_vars_if_possible(lhs_ty); let rhs_ty = self.fcx.node_ty(rhs.hir_id); - let rhs_ty = self.fcx.resolve_vars_if_possible(&rhs_ty); + let rhs_ty = self.fcx.resolve_vars_if_possible(rhs_ty); if lhs_ty.is_scalar() && rhs_ty.is_scalar() { let mut typeck_results = self.fcx.typeck_results.borrow_mut(); @@ -209,7 +212,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { &format!("bad index {:?} for base: `{:?}`", index, base), ) }); - let index_ty = self.fcx.resolve_vars_if_possible(&index_ty); + let index_ty = self.fcx.resolve_vars_if_possible(index_ty); if base_ty.builtin_index().is_some() && index_ty == self.fcx.tcx.types.usize { // Remove the method call record @@ -313,14 +316,14 @@ impl<'cx, 'tcx> Visitor<'tcx> for WritebackCx<'cx, 'tcx> { fn visit_local(&mut self, l: &'tcx hir::Local<'tcx>) { intravisit::walk_local(self, l); let var_ty = self.fcx.local_ty(l.span, l.hir_id).decl_ty; - let var_ty = self.resolve(&var_ty, &l.span); + let var_ty = self.resolve(var_ty, &l.span); self.write_ty_to_typeck_results(l.hir_id, var_ty); } fn visit_ty(&mut self, hir_ty: &'tcx hir::Ty<'tcx>) { intravisit::walk_ty(self, hir_ty); let ty = self.fcx.node_ty(hir_ty.hir_id); - let ty = self.resolve(&ty, &hir_ty.span); + let ty = self.resolve(ty, &hir_ty.span); self.write_ty_to_typeck_results(hir_ty.hir_id, ty); } } @@ -432,7 +435,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { fn visit_opaque_types(&mut self, span: Span) { for (&def_id, opaque_defn) in self.fcx.opaque_types.borrow().iter() { let hir_id = self.tcx().hir().local_def_id_to_hir_id(def_id.expect_local()); - let instantiated_ty = self.resolve(&opaque_defn.concrete_ty, &hir_id); + let instantiated_ty = self.resolve(opaque_defn.concrete_ty, &hir_id); debug_assert!(!instantiated_ty.has_escaping_bound_vars()); @@ -522,13 +525,13 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { // Resolve the type of the node with id `node_id` let n_ty = self.fcx.node_ty(hir_id); - let n_ty = self.resolve(&n_ty, &span); + let n_ty = 
self.resolve(n_ty, &span); self.write_ty_to_typeck_results(hir_id, n_ty); debug!("node {:?} has type {:?}", hir_id, n_ty); // Resolve any substitutions if let Some(substs) = self.fcx.typeck_results.borrow().node_substs_opt(hir_id) { - let substs = self.resolve(&substs, &span); + let substs = self.resolve(substs, &span); debug!("write_substs_to_tcx({:?}, {:?})", hir_id, substs); assert!(!substs.needs_infer() && !substs.has_placeholders()); self.typeck_results.node_substs_mut().insert(hir_id, substs); @@ -543,7 +546,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { } Some(adjustment) => { - let resolved_adjustment = self.resolve(&adjustment, &span); + let resolved_adjustment = self.resolve(adjustment, &span); debug!("adjustments for node {:?}: {:?}", hir_id, resolved_adjustment); self.typeck_results.adjustments_mut().insert(hir_id, resolved_adjustment); } @@ -558,7 +561,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { } Some(adjustment) => { - let resolved_adjustment = self.resolve(&adjustment, &span); + let resolved_adjustment = self.resolve(adjustment, &span); debug!("pat_adjustments for node {:?}: {:?}", hir_id, resolved_adjustment); self.typeck_results.pat_adjustments_mut().insert(hir_id, resolved_adjustment); } @@ -570,7 +573,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner); let common_hir_owner = fcx_typeck_results.hir_owner; - for (&local_id, fn_sig) in fcx_typeck_results.liberated_fn_sigs().iter() { + for (&local_id, &fn_sig) in fcx_typeck_results.liberated_fn_sigs().iter() { let hir_id = hir::HirId { owner: common_hir_owner, local_id }; let fn_sig = self.resolve(fn_sig, &hir_id); self.typeck_results.liberated_fn_sigs_mut().insert(hir_id, fn_sig); @@ -584,12 +587,12 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { for (&local_id, ftys) in fcx_typeck_results.fru_field_types().iter() { let hir_id = hir::HirId { owner: common_hir_owner, local_id }; - let ftys = self.resolve(ftys, &hir_id); + let ftys = self.resolve(ftys.clone(), &hir_id); self.typeck_results.fru_field_types_mut().insert(hir_id, ftys); } } - fn resolve(&mut self, x: &T, span: &dyn Locatable) -> T + fn resolve(&mut self, x: T, span: &dyn Locatable) -> T where T: TypeFoldable<'tcx>, { @@ -681,8 +684,8 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for Resolver<'cx, 'tcx> { } fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { - match self.infcx.fully_resolve(&t) { - Ok(t) => self.infcx.tcx.erase_regions(&t), + match self.infcx.fully_resolve(t) { + Ok(t) => self.infcx.tcx.erase_regions(t), Err(_) => { debug!("Resolver::fold_ty: input type `{:?}` not fully resolvable", t); self.report_type_error(t); @@ -698,8 +701,8 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for Resolver<'cx, 'tcx> { } fn fold_const(&mut self, ct: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> { - match self.infcx.fully_resolve(&ct) { - Ok(ct) => self.infcx.tcx.erase_regions(&ct), + match self.infcx.fully_resolve(ct) { + Ok(ct) => self.infcx.tcx.erase_regions(ct), Err(_) => { debug!("Resolver::fold_const: input const `{:?}` not fully resolvable", ct); self.report_const_error(ct); diff --git a/compiler/rustc_typeck/src/collect.rs b/compiler/rustc_typeck/src/collect.rs index f014ea3d5a9..dee0e6c2ebb 100644 --- a/compiler/rustc_typeck/src/collect.rs +++ b/compiler/rustc_typeck/src/collect.rs @@ -386,7 +386,7 @@ impl AstConv<'tcx> for ItemCtxt<'tcx> { "{}::{}", // Replace the existing lifetimes with a new named lifetime. 
self.tcx - .replace_late_bound_regions(&poly_trait_ref, |_| { + .replace_late_bound_regions(poly_trait_ref, |_| { self.tcx.mk_region(ty::ReEarlyBound( ty::EarlyBoundRegion { def_id: item_def_id, @@ -424,7 +424,7 @@ impl AstConv<'tcx> for ItemCtxt<'tcx> { format!( "{}::{}", // Erase named lt, we want `::C`, not `::C`. - self.tcx.anonymize_late_bound_regions(&poly_trait_ref).skip_binder(), + self.tcx.anonymize_late_bound_regions(poly_trait_ref).skip_binder(), item_segment.ident ), Applicability::MaybeIncorrect, @@ -2062,7 +2062,7 @@ fn const_evaluatable_predicates_of<'tcx>( } impl<'a, 'tcx> TypeVisitor<'tcx> for TyAliasVisitor<'a, 'tcx> { - fn visit_const(&mut self, ct: &'tcx Const<'tcx>) -> ControlFlow<()> { + fn visit_const(&mut self, ct: &'tcx Const<'tcx>) -> ControlFlow { if let ty::ConstKind::Unevaluated(def, substs, None) = ct.val { self.preds.insert(( ty::PredicateAtom::ConstEvaluatable(def, substs).to_predicate(self.tcx), @@ -2653,7 +2653,7 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, id: DefId) -> CodegenFnAttrs { set.path.segments.iter().map(|x| x.ident.name).collect::>(); match segments.as_slice() { [sym::arm, sym::a32] | [sym::arm, sym::t32] => { - if !tcx.sess.target.options.has_thumb_interworking { + if !tcx.sess.target.has_thumb_interworking { struct_span_err!( tcx.sess.diagnostic(), attr.span, diff --git a/compiler/rustc_typeck/src/collect/item_bounds.rs b/compiler/rustc_typeck/src/collect/item_bounds.rs index 9c29ceeb593..e596dd1a396 100644 --- a/compiler/rustc_typeck/src/collect/item_bounds.rs +++ b/compiler/rustc_typeck/src/collect/item_bounds.rs @@ -61,23 +61,23 @@ fn opaque_type_bounds<'tcx>( bounds: &'tcx [hir::GenericBound<'tcx>], span: Span, ) -> &'tcx [(ty::Predicate<'tcx>, Span)] { - let item_ty = - tcx.mk_opaque(opaque_def_id, InternalSubsts::identity_for_item(tcx, opaque_def_id)); + ty::print::with_no_queries(|| { + let item_ty = + tcx.mk_opaque(opaque_def_id, InternalSubsts::identity_for_item(tcx, opaque_def_id)); - let bounds = ty::print::with_no_queries(|| { - AstConv::compute_bounds( + let bounds = AstConv::compute_bounds( &ItemCtxt::new(tcx, opaque_def_id), item_ty, bounds, SizedByDefault::Yes, span, ) - }); + .predicates(tcx, item_ty); - let bounds = bounds.predicates(tcx, item_ty); - debug!("opaque_type_bounds({}) = {:?}", tcx.def_path_str(opaque_def_id), bounds); + debug!("opaque_type_bounds({}) = {:?}", tcx.def_path_str(opaque_def_id), bounds); - tcx.arena.alloc_slice(&bounds) + tcx.arena.alloc_slice(&bounds) + }) } pub(super) fn explicit_item_bounds( diff --git a/compiler/rustc_typeck/src/collect/type_of.rs b/compiler/rustc_typeck/src/collect/type_of.rs index 61d1efc837b..c4f4c8bc76b 100644 --- a/compiler/rustc_typeck/src/collect/type_of.rs +++ b/compiler/rustc_typeck/src/collect/type_of.rs @@ -637,7 +637,7 @@ fn infer_placeholder_type( } // Typeck doesn't expect erased regions to be returned from `type_of`. 
- tcx.fold_regions(&ty, &mut false, |r, _| match r { + tcx.fold_regions(ty, &mut false, |r, _| match r { ty::ReErased => tcx.lifetimes.re_static, _ => r, }) diff --git a/compiler/rustc_typeck/src/constrained_generic_params.rs b/compiler/rustc_typeck/src/constrained_generic_params.rs index bae5bde7002..e389fd4d9f0 100644 --- a/compiler/rustc_typeck/src/constrained_generic_params.rs +++ b/compiler/rustc_typeck/src/constrained_generic_params.rs @@ -57,7 +57,7 @@ struct ParameterCollector { } impl<'tcx> TypeVisitor<'tcx> for ParameterCollector { - fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<()> { + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow { match *t.kind() { ty::Projection(..) | ty::Opaque(..) if !self.include_nonconstraining => { // projections are not injective @@ -72,14 +72,14 @@ impl<'tcx> TypeVisitor<'tcx> for ParameterCollector { t.super_visit_with(self) } - fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> { + fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow { if let ty::ReEarlyBound(data) = *r { self.parameters.push(Parameter::from(data)); } ControlFlow::CONTINUE } - fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<()> { + fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow { match c.val { ty::ConstKind::Unevaluated(..) if !self.include_nonconstraining => { // Constant expressions are not injective diff --git a/compiler/rustc_typeck/src/expr_use_visitor.rs b/compiler/rustc_typeck/src/expr_use_visitor.rs index 57bd89b9d3d..1b51d5e0182 100644 --- a/compiler/rustc_typeck/src/expr_use_visitor.rs +++ b/compiler/rustc_typeck/src/expr_use_visitor.rs @@ -15,10 +15,10 @@ use rustc_index::vec::Idx; use rustc_infer::infer::InferCtxt; use rustc_middle::hir::place::ProjectionKind; use rustc_middle::ty::{self, adjustment, TyCtxt}; +use rustc_span::Span; use rustc_target::abi::VariantIdx; use crate::mem_categorization as mc; -use rustc_span::Span; /////////////////////////////////////////////////////////////////////////// // The Delegate trait @@ -73,6 +73,7 @@ pub enum MutateMode { // This is the code that actually walks the tree. pub struct ExprUseVisitor<'a, 'tcx> { mc: mc::MemCategorizationContext<'a, 'tcx>, + body_owner: LocalDefId, delegate: &'a mut dyn Delegate<'tcx>, } @@ -110,6 +111,7 @@ impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx> { ) -> Self { ExprUseVisitor { mc: mc::MemCategorizationContext::new(infcx, param_env, body_owner, typeck_results), + body_owner, delegate, } } @@ -329,8 +331,8 @@ impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx> { self.consume_expr(base); } - hir::ExprKind::Closure(_, _, _, fn_decl_span, _) => { - self.walk_captures(expr, fn_decl_span); + hir::ExprKind::Closure(..) => { + self.walk_captures(expr); } hir::ExprKind::Box(ref base) => { @@ -529,7 +531,7 @@ impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx> { debug!("walk_pat(discr_place={:?}, pat={:?})", discr_place, pat); let tcx = self.tcx(); - let ExprUseVisitor { ref mc, ref mut delegate } = *self; + let ExprUseVisitor { ref mc, body_owner: _, ref mut delegate } = *self; return_if_err!(mc.cat_pattern(discr_place.clone(), pat, |place, pat| { if let PatKind::Binding(_, canonical_id, ..) = pat.kind { debug!("walk_pat: binding place={:?} pat={:?}", place, pat,); @@ -569,36 +571,112 @@ impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx> { })); } - fn walk_captures(&mut self, closure_expr: &hir::Expr<'_>, fn_decl_span: Span) { + /// Walk closure captures but using `closure_caputes` instead + /// of `closure_min_captures`. 
+ /// + /// This is needed because clippy uses `ExprUseVisitor` after TypeckResults + /// are written back. We don't currently writeback min_captures to + /// TypeckResults. + fn walk_captures_closure_captures(&mut self, closure_expr: &hir::Expr<'_>) { + // FIXME(arora-aman): Remove this function once rust-lang/project-rfc-2229#18 + // is completed. + debug!("walk_captures_closure_captures({:?}), ", closure_expr); + + let closure_def_id = self.tcx().hir().local_def_id(closure_expr.hir_id).to_def_id(); + let cl_span = self.tcx().hir().span(closure_expr.hir_id); + + let captures = &self.mc.typeck_results.closure_captures[&closure_def_id]; + + for (&var_id, &upvar_id) in captures { + let upvar_capture = self.mc.typeck_results.upvar_capture(upvar_id); + let captured_place = + return_if_err!(self.cat_captured_var(closure_expr.hir_id, cl_span, var_id)); + match upvar_capture { + ty::UpvarCapture::ByValue(_) => { + let mode = copy_or_move(&self.mc, &captured_place); + self.delegate.consume(&captured_place, captured_place.hir_id, mode); + } + ty::UpvarCapture::ByRef(upvar_borrow) => { + self.delegate.borrow(&captured_place, captured_place.hir_id, upvar_borrow.kind); + } + } + } + } + + /// Handle the case where the current body contains a closure. + /// + /// When the current body being handled is a closure, then we must make sure that + /// - The parent closure only captures Places from the nested closure that are not local to it. + /// + /// In the following example the closures `c` only captures `p.x`` even though `incr` + /// is a capture of the nested closure + /// + /// ```rust,ignore(cannot-test-this-because-pseduo-code) + /// let p = ..; + /// let c = || { + /// let incr = 10; + /// let nested = || p.x += incr; + /// } + /// ``` + /// + /// - When reporting the Place back to the Delegate, ensure that the UpvarId uses the enclosing + /// closure as the DefId. + fn walk_captures(&mut self, closure_expr: &hir::Expr<'_>) { debug!("walk_captures({:?})", closure_expr); - let closure_def_id = self.tcx().hir().local_def_id(closure_expr.hir_id); - if let Some(upvars) = self.tcx().upvars_mentioned(closure_def_id) { - for &var_id in upvars.keys() { - let upvar_id = ty::UpvarId { - var_path: ty::UpvarPath { hir_id: var_id }, - closure_expr_id: closure_def_id, - }; - let upvar_capture = self.mc.typeck_results.upvar_capture(upvar_id); - let captured_place = return_if_err!(self.cat_captured_var( - closure_expr.hir_id, - fn_decl_span, - var_id, - )); - match upvar_capture { - ty::UpvarCapture::ByValue(_) => { - let mode = copy_or_move(&self.mc, &captured_place); - self.delegate.consume(&captured_place, captured_place.hir_id, mode); - } - ty::UpvarCapture::ByRef(upvar_borrow) => { - self.delegate.borrow( - &captured_place, - captured_place.hir_id, - upvar_borrow.kind, - ); + let closure_def_id = self.tcx().hir().local_def_id(closure_expr.hir_id).to_def_id(); + let upvars = self.tcx().upvars_mentioned(self.body_owner); + + // For purposes of this function, generator and closures are equivalent. + let body_owner_is_closure = match self.tcx().type_of(self.body_owner.to_def_id()).kind() { + ty::Closure(..) | ty::Generator(..) => true, + _ => false, + }; + + if let Some(min_captures) = self.mc.typeck_results.closure_min_captures.get(&closure_def_id) + { + for (var_hir_id, min_list) in min_captures.iter() { + if upvars.map_or(body_owner_is_closure, |upvars| !upvars.contains_key(var_hir_id)) { + // The nested closure might be capturing the current (enclosing) closure's local variables. 
+ // We check if the root variable is ever mentioned within the enclosing closure, if not + // then for the current body (if it's a closure) these aren't captures, we will ignore them. + continue; + } + for captured_place in min_list { + let place = &captured_place.place; + let capture_info = captured_place.info; + + let upvar_id = if body_owner_is_closure { + // Mark the place to be captured by the enclosing closure + ty::UpvarId::new(*var_hir_id, self.body_owner) + } else { + ty::UpvarId::new(*var_hir_id, closure_def_id.expect_local()) + }; + let place_with_id = PlaceWithHirId::new( + capture_info.expr_id.unwrap_or(closure_expr.hir_id), + place.base_ty, + PlaceBase::Upvar(upvar_id), + place.projections.clone(), + ); + + match capture_info.capture_kind { + ty::UpvarCapture::ByValue(_) => { + let mode = copy_or_move(&self.mc, &place_with_id); + self.delegate.consume(&place_with_id, place_with_id.hir_id, mode); + } + ty::UpvarCapture::ByRef(upvar_borrow) => { + self.delegate.borrow( + &place_with_id, + place_with_id.hir_id, + upvar_borrow.kind, + ); + } } } } + } else if self.mc.typeck_results.closure_captures.contains_key(&closure_def_id) { + // Handle the case where clippy calls ExprUseVisitor after + self.walk_captures_closure_captures(closure_expr) } } diff --git a/compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs b/compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs index 4cf3efcf513..5db9ff9524d 100644 --- a/compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs +++ b/compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs @@ -165,7 +165,7 @@ fn get_impl_substs<'tcx>( // Conservatively use an empty `ParamEnv`. let outlives_env = OutlivesEnvironment::new(ty::ParamEnv::empty()); infcx.resolve_regions_and_report_errors(impl1_def_id, &outlives_env, RegionckMode::default()); - let impl2_substs = match infcx.fully_resolve(&impl2_substs) { + let impl2_substs = match infcx.fully_resolve(impl2_substs) { Ok(s) => s, Err(_) => { tcx.sess.struct_span_err(span, "could not resolve substs on overridden impl").emit(); diff --git a/compiler/rustc_typeck/src/lib.rs b/compiler/rustc_typeck/src/lib.rs index 30904091c1b..929c88455f0 100644 --- a/compiler/rustc_typeck/src/lib.rs +++ b/compiler/rustc_typeck/src/lib.rs @@ -61,6 +61,7 @@ This API is completely unstable and subject to change. 
#![feature(box_syntax)] #![feature(crate_visibility_modifier)] #![feature(in_band_lifetimes)] +#![feature(is_sorted)] #![feature(nll)] #![feature(or_patterns)] #![feature(try_blocks)] diff --git a/compiler/rustc_typeck/src/mem_categorization.rs b/compiler/rustc_typeck/src/mem_categorization.rs index f6ac7aa9155..9992094117d 100644 --- a/compiler/rustc_typeck/src/mem_categorization.rs +++ b/compiler/rustc_typeck/src/mem_categorization.rs @@ -124,7 +124,7 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> { self.infcx.type_is_copy_modulo_regions(self.param_env, ty, span) } - fn resolve_vars_if_possible(&self, value: &T) -> T + fn resolve_vars_if_possible(&self, value: T) -> T where T: TypeFoldable<'tcx>, { @@ -142,7 +142,7 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> { ) -> McResult> { match ty { Some(ty) => { - let ty = self.resolve_vars_if_possible(&ty); + let ty = self.resolve_vars_if_possible(ty); if ty.references_error() || ty.is_ty_var() { debug!("resolve_type_vars_or_error: error from {:?}", ty); Err(()) @@ -274,7 +274,7 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> { F: FnOnce() -> McResult>, { debug!("cat_expr_adjusted_with({:?}): {:?}", adjustment, expr); - let target = self.resolve_vars_if_possible(&adjustment.target); + let target = self.resolve_vars_if_possible(adjustment.target); match adjustment.kind { adjustment::Adjust::Deref(overloaded) => { // Equivalent to *expr or something similar. diff --git a/config.toml.example b/config.toml.example index 1edb390e0fe..5b045d4e32d 100644 --- a/config.toml.example +++ b/config.toml.example @@ -138,6 +138,9 @@ changelog-seen = 2 # Whether or not to specify `-DLLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN=YES` #allow-old-toolchain = false +# Whether to include the Polly optimizer. +#polly = false + # ============================================================================= # General build configuration options # ============================================================================= @@ -443,6 +446,11 @@ changelog-seen = 2 # nightly features #channel = "dev" +# A descriptive string to be appended to `rustc --version` output, which is +# also used in places like debuginfo `DW_AT_producer`. This may be useful for +# supplementary build information, like distro-specific package versions. +#description = "" + # The root location of the musl installation directory. #musl-root = "..." @@ -583,6 +591,15 @@ changelog-seen = 2 # build native code. #android-ndk = "/path/to/ndk" +# Build the sanitizer runtimes for this target. +# This option will override the same option under [build] section. +#sanitizers = false + +# Build the profiler runtime for this target(required when compiling with options that depend +# on this runtime, such as `-C profile-generate` or `-Z instrument-coverage`). +# This option will override the same option under [build] section. +#profiler = false + # Force static or dynamic linkage of the standard library for this target. If # this target is a host for rustc, this will also affect the linkage of the # compiler itself. 
This is useful for building rustc on targets that normally diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml index 381750a5198..eff197d9988 100644 --- a/library/alloc/Cargo.toml +++ b/library/alloc/Cargo.toml @@ -31,3 +31,5 @@ harness = false [features] compiler-builtins-mem = ['compiler_builtins/mem'] compiler-builtins-c = ["compiler_builtins/c"] +compiler-builtins-asm = ["compiler_builtins/asm"] +compiler-builtins-mangled-names = ["compiler_builtins/mangled-names"] diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs index 0a4f88dedbb..b1bfc2abe44 100644 --- a/library/alloc/src/alloc.rs +++ b/library/alloc/src/alloc.rs @@ -23,6 +23,8 @@ extern "Rust" { // (the code expanding that attribute macro generates those functions), or to call // the default implementations in libstd (`__rdl_alloc` etc. in `library/std/src/alloc.rs`) // otherwise. + // The rustc fork of LLVM also special-cases these function names to be able to optimize them + // like `malloc`, `realloc`, and `free`, respectively. #[rustc_allocator] #[rustc_allocator_nounwind] fn __rust_alloc(size: usize, align: usize) -> *mut u8; @@ -356,8 +358,9 @@ extern "Rust" { /// [`set_alloc_error_hook`]: ../../std/alloc/fn.set_alloc_error_hook.html /// [`take_alloc_error_hook`]: ../../std/alloc/fn.take_alloc_error_hook.html #[stable(feature = "global_alloc", since = "1.28.0")] -#[cfg(not(any(test, bootstrap)))] +#[cfg(not(test))] #[rustc_allocator_nounwind] +#[cold] pub fn handle_alloc_error(layout: Layout) -> ! { unsafe { __rust_alloc_error_handler(layout.size(), layout.align()); @@ -368,22 +371,7 @@ pub fn handle_alloc_error(layout: Layout) -> ! { #[cfg(test)] pub use std::alloc::handle_alloc_error; -// In stage0 (bootstrap) `__rust_alloc_error_handler`, -// might not be generated yet, because an old compiler is used, -// so use the old direct call. -#[cfg(all(bootstrap, not(test)))] -#[stable(feature = "global_alloc", since = "1.28.0")] -#[doc(hidden)] -#[rustc_allocator_nounwind] -pub fn handle_alloc_error(layout: Layout) -> ! { - extern "Rust" { - #[lang = "oom"] - fn oom_impl(layout: Layout) -> !; - } - unsafe { oom_impl(layout) } -} - -#[cfg(not(any(target_os = "hermit", test, bootstrap)))] +#[cfg(not(any(target_os = "hermit", test)))] #[doc(hidden)] #[allow(unused_attributes)] #[unstable(feature = "alloc_internals", issue = "none")] diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index 1512235da6a..f56e3af4ff2 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -62,6 +62,13 @@ //! T` obtained from [`Box::::into_raw`] may be deallocated using the //! [`Global`] allocator with [`Layout::for_value(&*value)`]. //! +//! For zero-sized values, the `Box` pointer still has to be [valid] for reads +//! and writes and sufficiently aligned. In particular, casting any aligned +//! non-zero integer literal to a raw pointer produces a valid pointer, but a +//! pointer pointing into previously allocated memory that since got freed is +//! not valid. The recommended way to build a Box to a ZST if `Box::new` cannot +//! be used is to use [`ptr::NonNull::dangling`]. +//! //! So long as `T: Sized`, a `Box` is guaranteed to be represented //! as a single pointer and is also ABI-compatible with C pointers //! (i.e. the C type `T*`). This means that if you have extern "C" @@ -125,6 +132,7 @@ //! [`Global`]: crate::alloc::Global //! [`Layout`]: crate::alloc::Layout //! [`Layout::for_value(&*value)`]: crate::alloc::Layout::for_value +//! 
[valid]: ptr#safety #![stable(feature = "rust1", since = "1.0.0")] @@ -530,7 +538,10 @@ impl Box { /// memory problems. For example, a double-free may occur if the /// function is called twice on the same raw pointer. /// + /// The safety conditions are described in the [memory layout] section. + /// /// # Examples + /// /// Recreate a `Box` which was previously converted to a raw pointer /// using [`Box::into_raw`]: /// ``` @@ -1336,9 +1347,10 @@ impl FromIterator for Box<[I]> { } #[stable(feature = "box_slice_clone", since = "1.3.0")] -impl Clone for Box<[T]> { +impl Clone for Box<[T], A> { fn clone(&self) -> Self { - self.to_vec().into_boxed_slice() + let alloc = Box::alloc_ref(self).clone(); + self.to_vec_in(alloc).into_boxed_slice() } fn clone_from(&mut self, other: &Self) { diff --git a/library/alloc/src/collections/binary_heap.rs b/library/alloc/src/collections/binary_heap.rs index b67c72d7136..97ebc12175f 100644 --- a/library/alloc/src/collections/binary_heap.rs +++ b/library/alloc/src/collections/binary_heap.rs @@ -495,7 +495,14 @@ impl BinaryHeap { let mut end = self.len(); while end > 1 { end -= 1; - self.data.swap(0, end); + // SAFETY: `end` goes from `self.len() - 1` to 1 (both included), + // so it's always a valid index to access. + // It is safe to access index 0 (i.e. `ptr`), because + // 1 <= end < self.len(), which means self.len() >= 2. + unsafe { + let ptr = self.data.as_mut_ptr(); + ptr::swap(ptr, ptr.add(end)); + } self.sift_down_range(0, end); } self.into_vec() @@ -531,19 +538,19 @@ impl BinaryHeap { unsafe { let mut hole = Hole::new(&mut self.data, pos); let mut child = 2 * pos + 1; - while child < end { - let right = child + 1; + while child < end - 1 { // compare with the greater of the two children - if right < end && hole.get(child) <= hole.get(right) { - child = right; - } + child += (hole.get(child) <= hole.get(child + 1)) as usize; // if we are already in order, stop. if hole.element() >= hole.get(child) { - break; + return; } hole.move_to(child); child = 2 * hole.pos() + 1; } + if child == end - 1 && hole.element() < hole.get(child) { + hole.move_to(child); + } } } @@ -563,15 +570,14 @@ impl BinaryHeap { unsafe { let mut hole = Hole::new(&mut self.data, pos); let mut child = 2 * pos + 1; - while child < end { - let right = child + 1; - // compare with the greater of the two children - if right < end && hole.get(child) <= hole.get(right) { - child = right; - } + while child < end - 1 { + child += (hole.get(child) <= hole.get(child + 1)) as usize; hole.move_to(child); child = 2 * hole.pos() + 1; } + if child == end - 1 { + hole.move_to(child); + } pos = hole.pos; } self.sift_up(start, pos); diff --git a/library/alloc/src/collections/btree/append.rs b/library/alloc/src/collections/btree/append.rs new file mode 100644 index 00000000000..bd99c4ed2f1 --- /dev/null +++ b/library/alloc/src/collections/btree/append.rs @@ -0,0 +1,119 @@ +use super::map::MIN_LEN; +use super::merge_iter::MergeIterInner; +use super::node::{self, ForceResult::*, Root}; +use core::iter::FusedIterator; + +impl Root { + /// Appends all key-value pairs from the union of two ascending iterators, + /// incrementing a `length` variable along the way. The latter makes it + /// easier for the caller to avoid a leak when a drop handler panicks. + /// + /// If both iterators produce the same key, this method drops the pair from + /// the left iterator and appends the pair from the right iterator. 
+ /// + /// If you want the tree to end up in a strictly ascending order, like for + /// a `BTreeMap`, both iterators should produce keys in strictly ascending + /// order, each greater than all keys in the tree, including any keys + /// already in the tree upon entry. + pub fn append_from_sorted_iters(&mut self, left: I, right: I, length: &mut usize) + where + K: Ord, + I: Iterator + FusedIterator, + { + // We prepare to merge `left` and `right` into a sorted sequence in linear time. + let iter = MergeIter(MergeIterInner::new(left, right)); + + // Meanwhile, we build a tree from the sorted sequence in linear time. + self.bulk_push(iter, length) + } + + /// Pushes all key-value pairs to the end of the tree, incrementing a + /// `length` variable along the way. The latter makes it easier for the + /// caller to avoid a leak when the iterator panicks. + fn bulk_push(&mut self, iter: I, length: &mut usize) + where + I: Iterator, + { + let mut cur_node = self.borrow_mut().last_leaf_edge().into_node(); + // Iterate through all key-value pairs, pushing them into nodes at the right level. + for (key, value) in iter { + // Try to push key-value pair into the current leaf node. + if cur_node.len() < node::CAPACITY { + cur_node.push(key, value); + } else { + // No space left, go up and push there. + let mut open_node; + let mut test_node = cur_node.forget_type(); + loop { + match test_node.ascend() { + Ok(parent) => { + let parent = parent.into_node(); + if parent.len() < node::CAPACITY { + // Found a node with space left, push here. + open_node = parent; + break; + } else { + // Go up again. + test_node = parent.forget_type(); + } + } + Err(_) => { + // We are at the top, create a new root node and push there. + open_node = self.push_internal_level(); + break; + } + } + } + + // Push key-value pair and new right subtree. + let tree_height = open_node.height() - 1; + let mut right_tree = Root::new(); + for _ in 0..tree_height { + right_tree.push_internal_level(); + } + open_node.push(key, value, right_tree); + + // Go down to the right-most leaf again. + cur_node = open_node.forget_type().last_leaf_edge().into_node(); + } + + // Increment length every iteration, to make sure the map drops + // the appended elements even if advancing the iterator panicks. + *length += 1; + } + self.fix_right_edge(); + } + + fn fix_right_edge(&mut self) { + // Handle underfull nodes, start from the top. + let mut cur_node = self.borrow_mut(); + while let Internal(internal) = cur_node.force() { + // Check if right-most child is underfull. + let mut last_kv = internal.last_kv().consider_for_balancing(); + let right_child_len = last_kv.right_child_len(); + if right_child_len < MIN_LEN { + // We need to steal. + last_kv.bulk_steal_left(MIN_LEN - right_child_len); + } + + // Go further down. + cur_node = last_kv.into_right_child(); + } + } +} + +// An iterator for merging two sorted sequences into one +struct MergeIter>(MergeIterInner); + +impl Iterator for MergeIter +where + I: Iterator + FusedIterator, +{ + type Item = (K, V); + + /// If two keys are equal, returns the key-value pair from the right source. 
+ fn next(&mut self) -> Option<(K, V)> { + let (a_next, b_next) = self.0.nexts(|a: &(K, V), b: &(K, V)| K::cmp(&a.0, &b.0)); + b_next.or(a_next) + } +} diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs index 07c23d29e20..383f4487aff 100644 --- a/library/alloc/src/collections/btree/map.rs +++ b/library/alloc/src/collections/btree/map.rs @@ -9,8 +9,7 @@ use core::ops::{Index, RangeBounds}; use core::ptr; use super::borrow::DormantMutRef; -use super::merge_iter::MergeIterInner; -use super::node::{self, marker, ForceResult::*, Handle, NodeRef}; +use super::node::{self, marker, ForceResult::*, Handle, NodeRef, Root}; use super::search::{self, SearchResult::*}; use super::unwrap_unchecked; @@ -129,7 +128,7 @@ pub(super) const MIN_LEN: usize = node::MIN_LEN_AFTER_SPLIT; /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub struct BTreeMap { - root: Option>, + root: Option>, length: usize, } @@ -146,7 +145,7 @@ unsafe impl<#[may_dangle] K, #[may_dangle] V> Drop for BTreeMap { impl Clone for BTreeMap { fn clone(&self) -> BTreeMap { fn clone_subtree<'a, K: Clone, V: Clone>( - node: node::NodeRef, K, V, marker::LeafOrInternal>, + node: NodeRef, K, V, marker::LeafOrInternal>, ) -> BTreeMap where K: 'a, @@ -154,11 +153,11 @@ impl Clone for BTreeMap { { match node.force() { Leaf(leaf) => { - let mut out_tree = BTreeMap { root: Some(node::Root::new_leaf()), length: 0 }; + let mut out_tree = BTreeMap { root: Some(Root::new()), length: 0 }; { let root = out_tree.root.as_mut().unwrap(); // unwrap succeeds because we just wrapped - let mut out_node = match root.node_as_mut().force() { + let mut out_node = match root.borrow_mut().force() { Leaf(leaf) => leaf, Internal(_) => unreachable!(), }; @@ -199,7 +198,7 @@ impl Clone for BTreeMap { (root, length) }; - out_node.push(k, v, subroot.unwrap_or_else(node::Root::new_leaf)); + out_node.push(k, v, subroot.unwrap_or_else(Root::new)); out_tree.length += 1 + sublength; } } @@ -214,7 +213,7 @@ impl Clone for BTreeMap { // Ord` constraint, which this method lacks. 
BTreeMap { root: None, length: 0 } } else { - clone_subtree(self.root.as_ref().unwrap().node_as_ref()) // unwrap succeeds because not empty + clone_subtree(self.root.as_ref().unwrap().reborrow()) // unwrap succeeds because not empty } } } @@ -227,7 +226,7 @@ where type Key = K; fn get(&self, key: &Q) -> Option<&K> { - let root_node = self.root.as_ref()?.node_as_ref(); + let root_node = self.root.as_ref()?.reborrow(); match search::search_tree(root_node, key) { Found(handle) => Some(handle.into_kv().0), GoDown(_) => None, @@ -236,7 +235,7 @@ where fn take(&mut self, key: &Q) -> Option { let (map, dormant_map) = DormantMutRef::new(self); - let root_node = map.root.as_mut()?.node_as_mut(); + let root_node = map.root.as_mut()?.borrow_mut(); match search::search_tree(root_node, key) { Found(handle) => { Some(OccupiedEntry { handle, dormant_map, _marker: PhantomData }.remove_kv().0) @@ -247,7 +246,7 @@ where fn replace(&mut self, key: K) -> Option { let (map, dormant_map) = DormantMutRef::new(self); - let root_node = Self::ensure_is_owned(&mut map.root).node_as_mut(); + let root_node = Self::ensure_is_owned(&mut map.root).borrow_mut(); match search::search_tree::, K, (), K>(root_node, &key) { Found(handle) => Some(mem::replace(handle.into_key_mut(), key)), GoDown(handle) => { @@ -458,9 +457,6 @@ impl fmt::Debug for RangeMut<'_, K, V> { } } -// An iterator for merging two sorted sequences into one -struct MergeIter>(MergeIterInner); - impl BTreeMap { /// Makes a new empty BTreeMap. /// @@ -526,7 +522,7 @@ impl BTreeMap { K: Borrow, Q: Ord, { - let root_node = self.root.as_ref()?.node_as_ref(); + let root_node = self.root.as_ref()?.reborrow(); match search::search_tree(root_node, key) { Found(handle) => Some(handle.into_kv().1), GoDown(_) => None, @@ -554,7 +550,7 @@ impl BTreeMap { K: Borrow, Q: Ord, { - let root_node = self.root.as_ref()?.node_as_ref(); + let root_node = self.root.as_ref()?.reborrow(); match search::search_tree(root_node, k) { Found(handle) => Some(handle.into_kv()), GoDown(_) => None, @@ -580,7 +576,7 @@ impl BTreeMap { /// ``` #[unstable(feature = "map_first_last", issue = "62924")] pub fn first_key_value(&self) -> Option<(&K, &V)> { - let root_node = self.root.as_ref()?.node_as_ref(); + let root_node = self.root.as_ref()?.reborrow(); root_node.first_leaf_edge().right_kv().ok().map(Handle::into_kv) } @@ -607,7 +603,7 @@ impl BTreeMap { #[unstable(feature = "map_first_last", issue = "62924")] pub fn first_entry(&mut self) -> Option> { let (map, dormant_map) = DormantMutRef::new(self); - let root_node = map.root.as_mut()?.node_as_mut(); + let root_node = map.root.as_mut()?.borrow_mut(); let kv = root_node.first_leaf_edge().right_kv().ok()?; Some(OccupiedEntry { handle: kv.forget_node_type(), dormant_map, _marker: PhantomData }) } @@ -654,7 +650,7 @@ impl BTreeMap { /// ``` #[unstable(feature = "map_first_last", issue = "62924")] pub fn last_key_value(&self) -> Option<(&K, &V)> { - let root_node = self.root.as_ref()?.node_as_ref(); + let root_node = self.root.as_ref()?.reborrow(); root_node.last_leaf_edge().left_kv().ok().map(Handle::into_kv) } @@ -681,7 +677,7 @@ impl BTreeMap { #[unstable(feature = "map_first_last", issue = "62924")] pub fn last_entry(&mut self) -> Option> { let (map, dormant_map) = DormantMutRef::new(self); - let root_node = map.root.as_mut()?.node_as_mut(); + let root_node = map.root.as_mut()?.borrow_mut(); let kv = root_node.last_leaf_edge().left_kv().ok()?; Some(OccupiedEntry { handle: kv.forget_node_type(), dormant_map, _marker: PhantomData }) } @@ -762,7 
+758,7 @@ impl BTreeMap { K: Borrow, Q: Ord, { - let root_node = self.root.as_mut()?.node_as_mut(); + let root_node = self.root.as_mut()?.borrow_mut(); match search::search_tree(root_node, key) { Found(handle) => Some(handle.into_val_mut()), GoDown(_) => None, @@ -858,7 +854,7 @@ impl BTreeMap { Q: Ord, { let (map, dormant_map) = DormantMutRef::new(self); - let root_node = map.root.as_mut()?.node_as_mut(); + let root_node = map.root.as_mut()?.borrow_mut(); match search::search_tree(root_node, key) { Found(handle) => { Some(OccupiedEntry { handle, dormant_map, _marker: PhantomData }.remove_entry()) @@ -867,6 +863,30 @@ impl BTreeMap { } } + /// Retains only the elements specified by the predicate. + /// + /// In other words, remove all pairs `(k, v)` such that `f(&k, &mut v)` returns `false`. + /// + /// # Examples + /// + /// ``` + /// #![feature(btree_retain)] + /// use std::collections::BTreeMap; + /// + /// let mut map: BTreeMap = (0..8).map(|x| (x, x*10)).collect(); + /// // Keep only the elements with even-numbered keys. + /// map.retain(|&k, _| k % 2 == 0); + /// assert!(map.into_iter().eq(vec![(0, 0), (2, 20), (4, 40), (6, 60)])); + /// ``` + #[inline] + #[unstable(feature = "btree_retain", issue = "79025")] + pub fn retain(&mut self, mut f: F) + where + F: FnMut(&K, &mut V) -> bool, + { + self.drain_filter(|k, v| !f(k, v)); + } + /// Moves all elements from `other` into `Self`, leaving `other` empty. /// /// # Examples @@ -908,13 +928,10 @@ impl BTreeMap { return; } - // First, we merge `self` and `other` into a sorted sequence in linear time. let self_iter = mem::take(self).into_iter(); let other_iter = mem::take(other).into_iter(); - let iter = MergeIter(MergeIterInner::new(self_iter, other_iter)); - - // Second, we build a tree from the sorted sequence in linear time. - self.from_sorted_iter(iter); + let root = BTreeMap::ensure_is_owned(&mut self.root); + root.append_from_sorted_iters(self_iter, other_iter, &mut self.length) } /// Constructs a double-ended iterator over a sub-range of elements in the map. 
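The rewritten `append` above delegates the merge to `Root::append_from_sorted_iters`, whose merge iterator keeps the pair from the right-hand source when both maps produce the same key. A small usage sketch of that observable behaviour, using only the stable `BTreeMap` API (nothing from this patch is required to run it):

```rust
use std::collections::BTreeMap;

fn main() {
    let mut left = BTreeMap::new();
    left.insert(1, "left");
    left.insert(2, "left");

    let mut right = BTreeMap::new();
    right.insert(2, "right");
    right.insert(3, "right");

    // `append` drains `right` into `left`; on a duplicate key the value
    // from `right` wins, matching `MergeIter::next`, which returns
    // `b_next.or(a_next)`.
    left.append(&mut right);

    assert_eq!(left[&1], "left");
    assert_eq!(left[&2], "right");
    assert_eq!(left[&3], "right");
    assert!(right.is_empty());
}
```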
@@ -954,7 +971,7 @@ impl BTreeMap { R: RangeBounds, { if let Some(root) = &self.root { - let (f, b) = root.node_as_ref().range_search(range); + let (f, b) = root.reborrow().range_search(range); Range { front: Some(f), back: Some(b) } } else { @@ -1000,7 +1017,7 @@ impl BTreeMap { R: RangeBounds, { if let Some(root) = &mut self.root { - let (f, b) = root.node_as_valmut().range_search(range); + let (f, b) = root.borrow_valmut().range_search(range); RangeMut { front: Some(f), back: Some(b), _marker: PhantomData } } else { @@ -1020,7 +1037,7 @@ impl BTreeMap { /// let mut count: BTreeMap<&str, usize> = BTreeMap::new(); /// /// // count the number of occurrences of letters in the vec - /// for x in vec!["a","b","a","c","a","b"] { + /// for x in vec!["a", "b", "a", "c", "a", "b"] { /// *count.entry(x).or_insert(0) += 1; /// } /// @@ -1030,7 +1047,7 @@ impl BTreeMap { pub fn entry(&mut self, key: K) -> Entry<'_, K, V> { // FIXME(@porglezomp) Avoid allocating if we don't insert let (map, dormant_map) = DormantMutRef::new(self); - let root_node = Self::ensure_is_owned(&mut map.root).node_as_mut(); + let root_node = Self::ensure_is_owned(&mut map.root).borrow_mut(); match search::search_tree(root_node, &key) { Found(handle) => Occupied(OccupiedEntry { handle, dormant_map, _marker: PhantomData }), GoDown(handle) => { @@ -1039,78 +1056,6 @@ impl BTreeMap { } } - fn from_sorted_iter>(&mut self, iter: I) { - let root = Self::ensure_is_owned(&mut self.root); - let mut cur_node = root.node_as_mut().last_leaf_edge().into_node(); - // Iterate through all key-value pairs, pushing them into nodes at the right level. - for (key, value) in iter { - // Try to push key-value pair into the current leaf node. - if cur_node.len() < node::CAPACITY { - cur_node.push(key, value); - } else { - // No space left, go up and push there. - let mut open_node; - let mut test_node = cur_node.forget_type(); - loop { - match test_node.ascend() { - Ok(parent) => { - let parent = parent.into_node(); - if parent.len() < node::CAPACITY { - // Found a node with space left, push here. - open_node = parent; - break; - } else { - // Go up again. - test_node = parent.forget_type(); - } - } - Err(_) => { - // We are at the top, create a new root node and push there. - open_node = root.push_internal_level(); - break; - } - } - } - - // Push key-value pair and new right subtree. - let tree_height = open_node.height() - 1; - let mut right_tree = node::Root::new_leaf(); - for _ in 0..tree_height { - right_tree.push_internal_level(); - } - open_node.push(key, value, right_tree); - - // Go down to the right-most leaf again. - cur_node = open_node.forget_type().last_leaf_edge().into_node(); - } - - self.length += 1; - } - Self::fix_right_edge(root) - } - - fn fix_right_edge(root: &mut node::Root) { - // Handle underfull nodes, start from the top. - let mut cur_node = root.node_as_mut(); - while let Internal(internal) = cur_node.force() { - // Check if right-most child is underfull. - let mut last_edge = internal.last_edge(); - let right_child_len = last_edge.reborrow().descend().len(); - if right_child_len < MIN_LEN { - // We need to steal. - let mut last_kv = match last_edge.left_kv() { - Ok(left) => left, - Err(_) => unreachable!(), - }; - last_kv.bulk_steal_left(MIN_LEN - right_child_len); - last_edge = last_kv.right_edge(); - } - - // Go further down. - cur_node = last_edge.descend(); - } - } - /// Splits the collection into two at the given key. Returns everything after the given key, /// including the key. 
/// @@ -1158,10 +1103,10 @@ impl BTreeMap { left_root.split_off(right_root, key); if left_root.height() < right_root.height() { - self.length = left_root.node_as_ref().calc_length(); + self.length = left_root.reborrow().calc_length(); right.length = total_num - self.len(); } else { - right.length = right_root.node_as_ref().calc_length(); + right.length = right_root.reborrow().calc_length(); self.length = total_num - right.len(); } @@ -1209,7 +1154,7 @@ impl BTreeMap { pub(super) fn drain_filter_inner(&mut self) -> DrainFilterInner<'_, K, V> { if let Some(root) = self.root.as_mut() { let (root, dormant_root) = DormantMutRef::new(root); - let front = root.node_as_mut().first_leaf_edge(); + let front = root.borrow_mut().first_leaf_edge(); DrainFilterInner { length: &mut self.length, dormant_root: Some(dormant_root), @@ -1416,7 +1361,7 @@ impl IntoIterator for BTreeMap { fn into_iter(self) -> IntoIter { let mut me = ManuallyDrop::new(self); if let Some(root) = me.root.take() { - let (f, b) = root.into_ref().full_range(); + let (f, b) = root.full_range(); IntoIter { front: Some(f), back: Some(b), length: me.length } } else { @@ -1613,7 +1558,7 @@ pub(super) struct DrainFilterInner<'a, K: 'a, V: 'a> { length: &'a mut usize, /// Burried reference to the root field in the borrowed map. /// Wrapped in `Option` to allow drop handler to `take` it. - dormant_root: Option>>, + dormant_root: Option>>, /// Contains a leaf edge preceding the next element to be returned, or the last leaf edge. /// Empty if the map has no root, if iteration went beyond the last leaf edge, /// or if a panic occurred in the predicate. @@ -2062,7 +2007,7 @@ impl BTreeMap { #[stable(feature = "rust1", since = "1.0.0")] pub fn iter(&self) -> Iter<'_, K, V> { if let Some(root) = &self.root { - let (f, b) = root.node_as_ref().full_range(); + let (f, b) = root.reborrow().full_range(); Iter { range: Range { front: Some(f), back: Some(b) }, length: self.length } } else { @@ -2094,7 +2039,7 @@ impl BTreeMap { #[stable(feature = "rust1", since = "1.0.0")] pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { if let Some(root) = &mut self.root { - let (f, b) = root.node_as_valmut().full_range(); + let (f, b) = root.borrow_valmut().full_range(); IterMut { range: RangeMut { front: Some(f), back: Some(b), _marker: PhantomData }, @@ -2215,21 +2160,8 @@ impl BTreeMap { /// If the root node is the empty (non-allocated) root node, allocate our /// own node. Is an associated function to avoid borrowing the entire BTreeMap. - fn ensure_is_owned(root: &mut Option>) -> &mut node::Root { - root.get_or_insert_with(node::Root::new_leaf) - } -} - -impl Iterator for MergeIter -where - I: Iterator + ExactSizeIterator + FusedIterator, -{ - type Item = (K, V); - - /// If two keys are equal, returns the key/value-pair from the right source. - fn next(&mut self) -> Option<(K, V)> { - let (a_next, b_next) = self.0.nexts(|a: &(K, V), b: &(K, V)| K::cmp(&a.0, &b.0)); - b_next.or(a_next) + fn ensure_is_owned(root: &mut Option>) -> &mut Root { + root.get_or_insert_with(Root::new) } } diff --git a/library/alloc/src/collections/btree/map/entry.rs b/library/alloc/src/collections/btree/map/entry.rs index 73a0ca21f67..69926ac2aff 100644 --- a/library/alloc/src/collections/btree/map/entry.rs +++ b/library/alloc/src/collections/btree/map/entry.rs @@ -286,7 +286,7 @@ impl<'a, K: Ord, V> VacantEntry<'a, K, V> { // Safety: We have consumed self.handle and the reference returned. 
let map = unsafe { self.dormant_map.awaken() }; let root = map.root.as_mut().unwrap(); - root.push_internal_level().push(ins.k, ins.v, ins.right); + root.push_internal_level().push(ins.kv.0, ins.kv.1, ins.right); map.length += 1; val_ptr } diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs index 4fea6adf541..23cd4f3d83d 100644 --- a/library/alloc/src/collections/btree/map/tests.rs +++ b/library/alloc/src/collections/btree/map/tests.rs @@ -6,13 +6,17 @@ use crate::fmt::Debug; use crate::rc::Rc; use crate::string::{String, ToString}; use crate::vec::Vec; +use std::cmp::Ordering; use std::convert::TryFrom; use std::iter::{self, FromIterator}; use std::mem; use std::ops::Bound::{self, Excluded, Included, Unbounded}; use std::ops::RangeBounds; use std::panic::{catch_unwind, AssertUnwindSafe}; -use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::atomic::{AtomicUsize, Ordering::SeqCst}; + +mod ord_chaos; +use ord_chaos::{Cyclic3, Governed, Governor}; // Capacity of a tree with a single level, // i.e., a tree who's root is a leaf node at height 0. @@ -28,7 +32,7 @@ const MIN_INSERTS_HEIGHT_1: usize = NODE_CAPACITY + 1; // It's not the minimum size: removing an element from such a tree does not always reduce height. const MIN_INSERTS_HEIGHT_2: usize = 89; -// Gather all references from a mutable iterator and make sure Miri notices if +// Gathers all references from a mutable iterator and makes sure Miri notices if // using them is dangerous. fn test_all_refs<'a, T: 'a>(dummy: &mut T, iter: impl Iterator) { // Gather all those references. @@ -43,28 +47,42 @@ fn test_all_refs<'a, T: 'a>(dummy: &mut T, iter: impl Iterator } impl BTreeMap { - /// Panics if the map (or the code navigating it) is corrupted. - fn check(&self) - where - K: Copy + Debug + Ord, - { + // Panics if the map (or the code navigating it) is corrupted. + fn check_invariants(&self) { if let Some(root) = &self.root { - let root_node = root.node_as_ref(); + let root_node = root.reborrow(); + // Check the back pointers top-down, before we attempt to rely on + // more serious navigation code. assert!(root_node.ascend().is_err()); root_node.assert_back_pointers(); + // Check consistency of `length` with what navigation code encounters. assert_eq!(self.length, root_node.calc_length()); + // Lastly, check the invariant causing the least harm. root_node.assert_min_len(if root_node.height() > 0 { 1 } else { 0 }); } else { assert_eq!(self.length, 0); } - self.assert_ascending(); + // Check that `assert_strictly_ascending` will encounter all keys. + assert_eq!(self.length, self.keys().count()); } - /// Returns the height of the root, if any. + // Panics if the map is corrupted or if the keys are not in strictly + // ascending order, in the current opinion of the `Ord` implementation. + // If the `Ord` implementation violates transitivity, this method does not + // guarantee that all keys are unique, just that adjacent keys are unique. + fn check(&self) + where + K: Debug + Ord, + { + self.check_invariants(); + self.assert_strictly_ascending(); + } + + // Returns the height of the root, if any. fn height(&self) -> Option { self.root.as_ref().map(node::Root::height) } @@ -74,28 +92,24 @@ impl BTreeMap { K: Debug, { if let Some(root) = self.root.as_ref() { - root.node_as_ref().dump_keys() + root.reborrow().dump_keys() } else { String::from("not yet allocated") } } - /// Asserts that the keys are in strictly ascending order. 
- fn assert_ascending(&self) + // Panics if the keys are not in strictly ascending order. + fn assert_strictly_ascending(&self) where - K: Copy + Debug + Ord, + K: Debug + Ord, { - let mut num_seen = 0; let mut keys = self.keys(); if let Some(mut previous) = keys.next() { - num_seen = 1; for next in keys { assert!(previous < next, "{:?} >= {:?}", previous, next); previous = next; - num_seen += 1; } } - assert_eq!(num_seen, self.len()); } } @@ -111,7 +125,7 @@ impl<'a, K: 'a, V: 'a> NodeRef, K, V, marker::LeafOrInternal> } } -// Test our value of MIN_INSERTS_HEIGHT_2. It may change according to the +// Tests our value of MIN_INSERTS_HEIGHT_2. It may change according to the // implementation of insertion, but it's best to be aware of when it does. #[test] fn test_levels() { @@ -149,6 +163,25 @@ fn test_levels() { assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2, "{}", map.dump_keys()); } +// Ensures the testing infrastructure usually notices order violations. +#[test] +#[should_panic] +fn test_check_ord_chaos() { + let gov = Governor::new(); + let map: BTreeMap<_, _> = (0..2).map(|i| (Governed(i, &gov), ())).collect(); + gov.flip(); + map.check(); +} + +// Ensures the testing infrastructure doesn't always mind order violations. +#[test] +fn test_check_invariants_ord_chaos() { + let gov = Governor::new(); + let map: BTreeMap<_, _> = (0..2).map(|i| (Governed(i, &gov), ())).collect(); + gov.flip(); + map.check_invariants(); +} + #[test] fn test_basic_large() { let mut map = BTreeMap::new(); @@ -334,7 +367,7 @@ fn test_iter_rev() { test(size, map.into_iter().rev()); } -/// Specifically tests iter_mut's ability to mutate the value of pairs in-line +// Specifically tests iter_mut's ability to mutate the value of pairs in-line. fn do_test_iter_mut_mutation(size: usize) where T: Copy + Debug + Ord + TryFrom, @@ -439,6 +472,8 @@ fn test_iter_entering_root_twice() { *back.1 = 42; assert_eq!(front, (&0, &mut 24)); assert_eq!(back, (&1, &mut 42)); + assert_eq!(it.next(), None); + assert_eq!(it.next_back(), None); map.check(); } @@ -591,11 +626,12 @@ fn test_range_small() { #[test] fn test_range_height_1() { - // Tests tree with a root and 2 leaves. Depending on details we don't want or need - // to rely upon, the single key at the root will be 6 or 7. + // Tests tree with a root and 2 leaves. The single key in the root node is + // close to the middle among the keys. - let map: BTreeMap<_, _> = (1..=MIN_INSERTS_HEIGHT_1 as i32).map(|i| (i, i)).collect(); - for &root in &[6, 7] { + let map: BTreeMap<_, _> = (0..MIN_INSERTS_HEIGHT_1 as i32).map(|i| (i, i)).collect(); + let middle = MIN_INSERTS_HEIGHT_1 as i32 / 2; + for root in middle - 2..=middle + 2 { assert_eq!(range_keys(&map, (Excluded(root), Excluded(root + 1))), vec![]); assert_eq!(range_keys(&map, (Excluded(root), Included(root + 1))), vec![root + 1]); assert_eq!(range_keys(&map, (Included(root), Excluded(root + 1))), vec![root]); @@ -727,6 +763,19 @@ fn test_range_backwards_4() { map.range((Excluded(3), Excluded(2))); } +#[test] +#[should_panic] +fn test_range_backwards_5() { + let mut map = BTreeMap::new(); + map.insert(Cyclic3::B, ()); + // Lacking static_assert, call `range` conditionally, to emphasise that + // we cause a different panic than `test_range_backwards_1` does. + // A more refined `should_panic` would be welcome. 
+ if Cyclic3::C < Cyclic3::A { + map.range(Cyclic3::C..=Cyclic3::A); + } +} + #[test] fn test_range_1000() { // Miri is too slow @@ -808,6 +857,17 @@ fn test_range_mut() { map.check(); } +#[test] +fn test_retain() { + let mut map: BTreeMap = (0..100).map(|x| (x, x * 10)).collect(); + + map.retain(|&k, _| k % 2 == 0); + assert_eq!(map.len(), 50); + assert_eq!(map[&2], 20); + assert_eq!(map[&4], 40); + assert_eq!(map[&6], 60); +} + mod test_drain_filter { use super::*; @@ -819,22 +879,26 @@ mod test_drain_filter { map.check(); } + // Explicitly consumes the iterator, where most test cases drop it instantly. #[test] - fn consuming_nothing() { + fn consumed_keeping_all() { let pairs = (0..3).map(|i| (i, i)); let mut map: BTreeMap<_, _> = pairs.collect(); assert!(map.drain_filter(|_, _| false).eq(iter::empty())); map.check(); } + // Explicitly consumes the iterator, where most test cases drop it instantly. #[test] - fn consuming_all() { + fn consumed_removing_all() { let pairs = (0..3).map(|i| (i, i)); let mut map: BTreeMap<_, _> = pairs.clone().collect(); assert!(map.drain_filter(|_, _| true).eq(pairs)); + assert!(map.is_empty()); map.check(); } + // Explicitly consumes the iterator and modifies values through it. #[test] fn mutating_and_keeping() { let pairs = (0..3).map(|i| (i, i)); @@ -851,6 +915,7 @@ mod test_drain_filter { map.check(); } + // Explicitly consumes the iterator and modifies values through it. #[test] fn mutating_and_removing() { let pairs = (0..3).map(|i| (i, i)); @@ -1024,7 +1089,7 @@ mod test_drain_filter { struct D; impl Drop for D { fn drop(&mut self) { - if DROPS.fetch_add(1, Ordering::SeqCst) == 1 { + if DROPS.fetch_add(1, SeqCst) == 1 { panic!("panic in `drop`"); } } @@ -1035,14 +1100,14 @@ mod test_drain_filter { catch_unwind(move || { drop(map.drain_filter(|i, _| { - PREDS.fetch_add(1usize << i, Ordering::SeqCst); + PREDS.fetch_add(1usize << i, SeqCst); true })) }) .unwrap_err(); - assert_eq!(PREDS.load(Ordering::SeqCst), 0x011); - assert_eq!(DROPS.load(Ordering::SeqCst), 3); + assert_eq!(PREDS.load(SeqCst), 0x011); + assert_eq!(DROPS.load(SeqCst), 3); } #[test] @@ -1053,7 +1118,7 @@ mod test_drain_filter { struct D; impl Drop for D { fn drop(&mut self) { - DROPS.fetch_add(1, Ordering::SeqCst); + DROPS.fetch_add(1, SeqCst); } } @@ -1062,7 +1127,7 @@ mod test_drain_filter { catch_unwind(AssertUnwindSafe(|| { drop(map.drain_filter(|i, _| { - PREDS.fetch_add(1usize << i, Ordering::SeqCst); + PREDS.fetch_add(1usize << i, SeqCst); match i { 0 => true, _ => panic!(), @@ -1071,8 +1136,8 @@ mod test_drain_filter { })) .unwrap_err(); - assert_eq!(PREDS.load(Ordering::SeqCst), 0x011); - assert_eq!(DROPS.load(Ordering::SeqCst), 1); + assert_eq!(PREDS.load(SeqCst), 0x011); + assert_eq!(DROPS.load(SeqCst), 1); assert_eq!(map.len(), 2); assert_eq!(map.first_entry().unwrap().key(), &4); assert_eq!(map.last_entry().unwrap().key(), &8); @@ -1088,7 +1153,7 @@ mod test_drain_filter { struct D; impl Drop for D { fn drop(&mut self) { - DROPS.fetch_add(1, Ordering::SeqCst); + DROPS.fetch_add(1, SeqCst); } } @@ -1097,7 +1162,7 @@ mod test_drain_filter { { let mut it = map.drain_filter(|i, _| { - PREDS.fetch_add(1usize << i, Ordering::SeqCst); + PREDS.fetch_add(1usize << i, SeqCst); match i { 0 => true, _ => panic!(), @@ -1110,8 +1175,8 @@ mod test_drain_filter { assert!(matches!(result, Ok(None))); } - assert_eq!(PREDS.load(Ordering::SeqCst), 0x011); - assert_eq!(DROPS.load(Ordering::SeqCst), 1); + assert_eq!(PREDS.load(SeqCst), 0x011); + assert_eq!(DROPS.load(SeqCst), 1); 
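// Background for the `test_range_backwards_*` group above: `BTreeMap::range`
// is documented to panic when the range start exceeds the end (and when equal
// bounds are both excluded). A tiny standalone illustration using only the
// public API; the silenced panic hook is just to keep the output clean.
use std::collections::BTreeMap;
use std::panic::catch_unwind;

fn main() {
    let map: BTreeMap<i32, i32> = (0..10).map(|i| (i, i)).collect();
    // A well-formed range works as expected.
    assert_eq!(map.range(3..5).count(), 2);
    // A reversed range panics, which is what the `should_panic` tests assert.
    std::panic::set_hook(Box::new(|_| {}));
    assert!(catch_unwind(|| map.range(5..3).count()).is_err());
}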
assert_eq!(map.len(), 2); assert_eq!(map.first_entry().unwrap().key(), &4); assert_eq!(map.last_entry().unwrap().key(), &8); @@ -1245,8 +1310,6 @@ fn test_zst() { // undefined. #[test] fn test_bad_zst() { - use std::cmp::Ordering; - #[derive(Clone, Copy, Debug)] struct Bad; @@ -1685,6 +1748,54 @@ create_append_test!(test_append_239, 239); #[cfg(not(miri))] // Miri is too slow create_append_test!(test_append_1700, 1700); +#[test] +fn test_append_drop_leak() { + static DROPS: AtomicUsize = AtomicUsize::new(0); + + struct D; + + impl Drop for D { + fn drop(&mut self) { + if DROPS.fetch_add(1, SeqCst) == 0 { + panic!("panic in `drop`"); + } + } + } + + let mut left = BTreeMap::new(); + let mut right = BTreeMap::new(); + left.insert(0, D); + left.insert(1, D); // first to be dropped during append + left.insert(2, D); + right.insert(1, D); + right.insert(2, D); + + catch_unwind(move || left.append(&mut right)).unwrap_err(); + + assert_eq!(DROPS.load(SeqCst), 4); // Rust issue #47949 ate one little piggy +} + +#[test] +fn test_append_ord_chaos() { + let mut map1 = BTreeMap::new(); + map1.insert(Cyclic3::A, ()); + map1.insert(Cyclic3::B, ()); + let mut map2 = BTreeMap::new(); + map2.insert(Cyclic3::A, ()); + map2.insert(Cyclic3::B, ()); + map2.insert(Cyclic3::C, ()); // lands first, before A + map2.insert(Cyclic3::B, ()); // lands first, before C + map1.check(); + map2.check(); // keys are not unique but still strictly ascending + assert_eq!(map1.len(), 2); + assert_eq!(map2.len(), 4); + map1.append(&mut map2); + assert_eq!(map1.len(), 5); + assert_eq!(map2.len(), 0); + map1.check(); + map2.check(); +} + fn rand_data(len: usize) -> Vec<(u32, u32)> { assert!(len * 2 <= 70029); // from that point on numbers repeat let mut rng = DeterministicRng::new(); @@ -1776,7 +1887,7 @@ fn test_into_iter_drop_leak_height_0() { impl Drop for D { fn drop(&mut self) { - if DROPS.fetch_add(1, Ordering::SeqCst) == 3 { + if DROPS.fetch_add(1, SeqCst) == 3 { panic!("panic in `drop`"); } } @@ -1791,7 +1902,7 @@ fn test_into_iter_drop_leak_height_0() { catch_unwind(move || drop(map.into_iter())).unwrap_err(); - assert_eq!(DROPS.load(Ordering::SeqCst), 5); + assert_eq!(DROPS.load(SeqCst), 5); } #[test] @@ -1803,18 +1914,18 @@ fn test_into_iter_drop_leak_height_1() { struct D; impl Drop for D { fn drop(&mut self) { - if DROPS.fetch_add(1, Ordering::SeqCst) == PANIC_POINT.load(Ordering::SeqCst) { + if DROPS.fetch_add(1, SeqCst) == PANIC_POINT.load(SeqCst) { panic!("panic in `drop`"); } } } for panic_point in vec![0, 1, size - 2, size - 1] { - DROPS.store(0, Ordering::SeqCst); - PANIC_POINT.store(panic_point, Ordering::SeqCst); + DROPS.store(0, SeqCst); + PANIC_POINT.store(panic_point, SeqCst); let map: BTreeMap<_, _> = (0..size).map(|i| (i, D)).collect(); catch_unwind(move || drop(map.into_iter())).unwrap_err(); - assert_eq!(DROPS.load(Ordering::SeqCst), size); + assert_eq!(DROPS.load(SeqCst), size); } } @@ -1847,11 +1958,27 @@ fn test_insert_remove_intertwined() { let loops = if cfg!(miri) { 100 } else { 1_000_000 }; let mut map = BTreeMap::new(); let mut i = 1; + let offset = 165; // somewhat arbitrarily chosen to cover some code paths for _ in 0..loops { - i = (i + 421) & 0xFF; + i = (i + offset) & 0xFF; map.insert(i, i); map.remove(&(0xFF - i)); } - map.check(); } + +#[test] +fn test_insert_remove_intertwined_ord_chaos() { + let loops = if cfg!(miri) { 100 } else { 1_000_000 }; + let gov = Governor::new(); + let mut map = BTreeMap::new(); + let mut i = 1; + let offset = 165; // more arbitrarily copied from above + for _ 
in 0..loops { + i = (i + offset) & 0xFF; + map.insert(Governed(i, &gov), ()); + map.remove(&Governed(0xFF - i, &gov)); + gov.flip(); + } + map.check_invariants(); +} diff --git a/library/alloc/src/collections/btree/map/tests/ord_chaos.rs b/library/alloc/src/collections/btree/map/tests/ord_chaos.rs new file mode 100644 index 00000000000..96ce7c15790 --- /dev/null +++ b/library/alloc/src/collections/btree/map/tests/ord_chaos.rs @@ -0,0 +1,81 @@ +use std::cell::Cell; +use std::cmp::Ordering::{self, *}; +use std::ptr; + +// Minimal type with an `Ord` implementation violating transitivity. +#[derive(Debug)] +pub enum Cyclic3 { + A, + B, + C, +} +use Cyclic3::*; + +impl PartialOrd for Cyclic3 { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Cyclic3 { + fn cmp(&self, other: &Self) -> Ordering { + match (self, other) { + (A, A) | (B, B) | (C, C) => Equal, + (A, B) | (B, C) | (C, A) => Less, + (A, C) | (B, A) | (C, B) => Greater, + } + } +} + +impl PartialEq for Cyclic3 { + fn eq(&self, other: &Self) -> bool { + self.cmp(&other) == Equal + } +} + +impl Eq for Cyclic3 {} + +// Controls the ordering of values wrapped by `Governed`. +#[derive(Debug)] +pub struct Governor { + flipped: Cell, +} + +impl Governor { + pub fn new() -> Self { + Governor { flipped: Cell::new(false) } + } + + pub fn flip(&self) { + self.flipped.set(!self.flipped.get()); + } +} + +// Type with an `Ord` implementation that forms a total order at any moment +// (assuming that `T` respects total order), but can suddenly be made to invert +// that total order. +#[derive(Debug)] +pub struct Governed<'a, T>(pub T, pub &'a Governor); + +impl PartialOrd for Governed<'_, T> { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Governed<'_, T> { + fn cmp(&self, other: &Self) -> Ordering { + assert!(ptr::eq(self.1, other.1)); + let ord = self.0.cmp(&other.0); + if self.1.flipped.get() { ord.reverse() } else { ord } + } +} + +impl PartialEq for Governed<'_, T> { + fn eq(&self, other: &Self) -> bool { + assert!(ptr::eq(self.1, other.1)); + self.0.eq(&other.0) + } +} + +impl Eq for Governed<'_, T> {} diff --git a/library/alloc/src/collections/btree/mem.rs b/library/alloc/src/collections/btree/mem.rs index 5e7d9fa3f91..e1363d1ae1f 100644 --- a/library/alloc/src/collections/btree/mem.rs +++ b/library/alloc/src/collections/btree/mem.rs @@ -6,6 +6,7 @@ use core::ptr; /// relevant function. /// /// If a panic occurs in the `change` closure, the entire process will be aborted. +#[allow(dead_code)] // keep as illustration and for future use #[inline] pub fn take_mut(v: &mut T, change: impl FnOnce(T) -> T) { replace(v, |value| (change(value), ())) diff --git a/library/alloc/src/collections/btree/merge_iter.rs b/library/alloc/src/collections/btree/merge_iter.rs index 88e6f86c2c6..7f23d93b990 100644 --- a/library/alloc/src/collections/btree/merge_iter.rs +++ b/library/alloc/src/collections/btree/merge_iter.rs @@ -2,27 +2,25 @@ use core::cmp::Ordering; use core::fmt::{self, Debug}; use core::iter::FusedIterator; -/// Core of an iterator that merges the output of two ascending iterators, +/// Core of an iterator that merges the output of two strictly ascending iterators, /// for instance a union or a symmetric difference. -pub struct MergeIterInner -where - I: Iterator, -{ +pub struct MergeIterInner { a: I, b: I, peeked: Option>, } -/// Benchmarks faster than wrapping both iterators in a Peekable. 
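// The `take_mut`/`replace` helpers kept in `mem.rs` above promise to abort the
// process if the closure panics, because the value has been moved out of `*v`
// and must not be observed by unwinding. A hedged, standalone sketch of that
// common pattern (`take_mut_sketch` is illustrative, not the module's actual
// `replace` body):
use std::ptr;

fn take_mut_sketch<T>(v: &mut T, change: impl FnOnce(T) -> T) {
    struct AbortOnDrop;
    impl Drop for AbortOnDrop {
        fn drop(&mut self) {
            // Reached only by unwinding out of `change`.
            std::process::abort();
        }
    }
    let guard = AbortOnDrop;
    unsafe {
        let old = ptr::read(v);
        let new = change(old);
        ptr::write(v, new);
    }
    std::mem::forget(guard);
}

fn main() {
    let mut s = String::from("btree");
    take_mut_sketch(&mut s, |s| s + "map");
    assert_eq!(s, "btreemap");
}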
+/// Benchmarks faster than wrapping both iterators in a Peekable, +/// probably because we can afford to impose a FusedIterator bound. #[derive(Clone, Debug)] enum Peeked { A(I::Item), B(I::Item), } -impl Clone for MergeIterInner +impl Clone for MergeIterInner where - I: Clone + Iterator, + I: Clone, I::Item: Clone, { fn clone(&self) -> Self { @@ -30,20 +28,17 @@ where } } -impl Debug for MergeIterInner +impl Debug for MergeIterInner where - I: Iterator + Debug, + I: Debug, I::Item: Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("MergeIterInner").field(&self.a).field(&self.b).finish() + f.debug_tuple("MergeIterInner").field(&self.a).field(&self.b).field(&self.peeked).finish() } } -impl MergeIterInner -where - I: ExactSizeIterator + FusedIterator, -{ +impl MergeIterInner { /// Creates a new core for an iterator merging a pair of sources. pub fn new(a: I, b: I) -> Self { MergeIterInner { a, b, peeked: None } @@ -52,13 +47,17 @@ where /// Returns the next pair of items stemming from the pair of sources /// being merged. If both returned options contain a value, that value /// is equal and occurs in both sources. If one of the returned options - /// contains a value, that value doesn't occur in the other source. - /// If neither returned option contains a value, iteration has finished - /// and subsequent calls will return the same empty pair. + /// contains a value, that value doesn't occur in the other source (or + /// the sources are not strictly ascending). If neither returned option + /// contains a value, iteration has finished and subsequent calls will + /// return the same empty pair. pub fn nexts Ordering>( &mut self, cmp: Cmp, - ) -> (Option, Option) { + ) -> (Option, Option) + where + I: FusedIterator, + { let mut a_next; let mut b_next; match self.peeked.take() { @@ -86,7 +85,10 @@ where } /// Returns a pair of upper bounds for the `size_hint` of the final iterator. - pub fn lens(&self) -> (usize, usize) { + pub fn lens(&self) -> (usize, usize) + where + I: ExactSizeIterator, + { match self.peeked { Some(Peeked::A(_)) => (1 + self.a.len(), self.b.len()), Some(Peeked::B(_)) => (self.a.len(), 1 + self.b.len()), diff --git a/library/alloc/src/collections/btree/mod.rs b/library/alloc/src/collections/btree/mod.rs index 7bf1706dd6d..ebcbb0e467c 100644 --- a/library/alloc/src/collections/btree/mod.rs +++ b/library/alloc/src/collections/btree/mod.rs @@ -1,3 +1,4 @@ +mod append; mod borrow; pub mod map; mod mem; diff --git a/library/alloc/src/collections/btree/navigate.rs b/library/alloc/src/collections/btree/navigate.rs index de78148fc82..ef6f888693f 100644 --- a/library/alloc/src/collections/btree/navigate.rs +++ b/library/alloc/src/collections/btree/navigate.rs @@ -362,20 +362,6 @@ impl<'a, K, V> Handle, K, V, marker::Leaf>, marker::E } } -impl<'a, K, V> Handle, K, V, marker::Leaf>, marker::Edge> { - /// Moves the leaf edge handle to the next leaf edge. - /// - /// # Safety - /// There must be another KV in the direction travelled. 
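// A standalone sketch of the merging step `MergeIterInner::nexts` performs:
// peek one item from each strictly ascending source and emit the smaller side,
// or both sides when they compare equal. This uses `Peekable` purely for
// brevity; the comment above notes that the real type deliberately avoids
// `Peekable`, and `next_merged` is an illustrative free function, not the
// crate-private API.
use std::cmp::Ordering;
use std::iter::Peekable;

fn next_merged<I: Iterator>(
    a: &mut Peekable<I>,
    b: &mut Peekable<I>,
    cmp: impl Fn(&I::Item, &I::Item) -> Ordering,
) -> (Option<I::Item>, Option<I::Item>) {
    match (a.peek(), b.peek()) {
        (Some(x), Some(y)) => match cmp(x, y) {
            Ordering::Less => (a.next(), None),
            Ordering::Greater => (None, b.next()),
            Ordering::Equal => (a.next(), b.next()),
        },
        (Some(_), None) => (a.next(), None),
        (None, Some(_)) => (None, b.next()),
        (None, None) => (None, None),
    }
}

fn main() {
    let mut a = vec![1, 3, 5].into_iter().peekable();
    let mut b = vec![3, 4].into_iter().peekable();
    assert_eq!(next_merged(&mut a, &mut b, i32::cmp), (Some(1), None));
    assert_eq!(next_merged(&mut a, &mut b, i32::cmp), (Some(3), Some(3)));
    assert_eq!(next_merged(&mut a, &mut b, i32::cmp), (None, Some(4)));
    assert_eq!(next_merged(&mut a, &mut b, i32::cmp), (Some(5), None));
    assert_eq!(next_merged(&mut a, &mut b, i32::cmp), (None, None));
}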
- pub unsafe fn move_next_unchecked(&mut self) { - super::mem::take_mut(self, |leaf_edge| { - let kv = leaf_edge.next_kv(); - let kv = unsafe { unwrap_unchecked(kv.ok()) }; - kv.next_leaf_edge() - }) - } -} - impl Handle, marker::Edge> { /// Moves the leaf edge handle to the next leaf edge and returns the key and value /// in between, deallocating any node left behind while leaving the corresponding diff --git a/library/alloc/src/collections/btree/node.rs b/library/alloc/src/collections/btree/node.rs index 433074027e7..e3e555a72de 100644 --- a/library/alloc/src/collections/btree/node.rs +++ b/library/alloc/src/collections/btree/node.rs @@ -31,8 +31,7 @@ use core::cmp::Ordering; use core::marker::PhantomData; use core::mem::{self, MaybeUninit}; -use core::ptr::{self, NonNull, Unique}; -use core::slice; +use core::ptr::{self, NonNull}; use crate::alloc::{AllocRef, Global, Layout}; use crate::boxed::Box; @@ -115,95 +114,80 @@ impl InternalNode { /// of nodes it actually contains, and, partially due to this lack of information, /// has no destructor. struct BoxedNode { - ptr: Unique>, + ptr: NonNull>, } impl BoxedNode { - fn from_leaf(node: Box>) -> Self { - BoxedNode { ptr: Box::into_unique(node).0 } - } - - fn from_internal(node: Box>) -> Self { - BoxedNode { ptr: Unique::from(&mut Box::leak(node).data) } + fn from_owned(ptr: NonNull>) -> Self { + BoxedNode { ptr } } fn as_ptr(&self) -> NonNull> { - NonNull::from(self.ptr) + self.ptr } } /// An owned tree. /// /// Note that this does not have a destructor, and must be cleaned up manually. -pub struct Root { - node: BoxedNode, - /// The number of levels below the root node. - height: usize, -} - -unsafe impl Sync for Root {} -unsafe impl Send for Root {} +pub type Root = NodeRef; impl Root { - /// Returns the number of levels below the root. - pub fn height(&self) -> usize { - self.height - } - /// Returns a new owned tree, with its own root node that is initially empty. - pub fn new_leaf() -> Self { - Root { node: BoxedNode::from_leaf(Box::new(unsafe { LeafNode::new() })), height: 0 } + pub fn new() -> Self { + NodeRef::new_leaf().forget_type() } +} - /// Borrows and returns an immutable reference to the node owned by the root. - pub fn node_as_ref(&self) -> NodeRef, K, V, marker::LeafOrInternal> { - NodeRef { height: self.height, node: self.node.as_ptr(), _marker: PhantomData } +impl NodeRef { + fn new_leaf() -> Self { + Self::from_new_leaf(Box::new(unsafe { LeafNode::new() })) } - /// Borrows and returns a mutable reference to the node owned by the root. - pub fn node_as_mut(&mut self) -> NodeRef, K, V, marker::LeafOrInternal> { - NodeRef { height: self.height, node: self.node.as_ptr(), _marker: PhantomData } + fn from_new_leaf(leaf: Box>) -> Self { + NodeRef { height: 0, node: NonNull::from(Box::leak(leaf)), _marker: PhantomData } } +} - /// Borrows and returns a mutable reference to the leaf node owned by the root. - /// # Safety - /// The root node is a leaf. - unsafe fn leaf_node_as_mut(&mut self) -> NodeRef, K, V, marker::Leaf> { - debug_assert!(self.height == 0); - NodeRef { height: self.height, node: self.node.as_ptr(), _marker: PhantomData } +impl NodeRef { + fn from_new_internal(internal: Box>, height: usize) -> Self { + NodeRef { height, node: NonNull::from(Box::leak(internal)).cast(), _marker: PhantomData } } +} - /// Borrows and returns a mutable reference to the internal node owned by the root. - /// # Safety - /// The root node is not a leaf. 
- unsafe fn internal_node_as_mut(&mut self) -> NodeRef, K, V, marker::Internal> { - debug_assert!(self.height > 0); - NodeRef { height: self.height, node: self.node.as_ptr(), _marker: PhantomData } +impl NodeRef { + /// Mutably borrows the owned node. Unlike `reborrow_mut`, this is safe, + /// because the return value cannot be used to destroy the node itself, + /// and there cannot be other references to the tree (except during the + /// process of `into_iter` or `drop`, but that is a horrific already). + pub fn borrow_mut(&mut self) -> NodeRef, K, V, Type> { + NodeRef { height: self.height, node: self.node, _marker: PhantomData } } - pub fn node_as_valmut(&mut self) -> NodeRef, K, V, marker::LeafOrInternal> { - NodeRef { height: self.height, node: self.node.as_ptr(), _marker: PhantomData } + /// Slightly mutably borrows the owned node. + pub fn borrow_valmut(&mut self) -> NodeRef, K, V, Type> { + NodeRef { height: self.height, node: self.node, _marker: PhantomData } } - pub fn into_ref(self) -> NodeRef { - NodeRef { height: self.height, node: self.node.as_ptr(), _marker: PhantomData } + /// Packs the reference, aware of type and height, into a type-agnostic pointer. + fn into_boxed_node(self) -> BoxedNode { + BoxedNode::from_owned(self.node) } +} +impl NodeRef { /// Adds a new internal node with a single edge pointing to the previous root node, /// make that new node the root node, and return it. This increases the height by 1 /// and is the opposite of `pop_internal_level`. pub fn push_internal_level(&mut self) -> NodeRef, K, V, marker::Internal> { let mut new_node = Box::new(unsafe { InternalNode::new() }); - new_node.edges[0].write(unsafe { ptr::read(&mut self.node) }); + new_node.edges[0].write(BoxedNode::from_owned(self.node)); + let mut new_root = NodeRef::from_new_internal(new_node, self.height + 1); + new_root.borrow_mut().first_edge().correct_parent_link(); + *self = new_root.forget_type(); - self.node = BoxedNode::from_internal(new_node); - self.height += 1; - - unsafe { - let mut ret = self.internal_node_as_mut(); - ret.reborrow_mut().first_edge().correct_parent_link(); - ret - } + // `self.borrow_mut()`, except that we just forgot we're internal now: + NodeRef { height: self.height, node: self.node, _marker: PhantomData } } /// Removes the internal root node, using its first child as the new root node. @@ -212,21 +196,20 @@ impl Root { /// This decreases the height by 1 and is the opposite of `push_internal_level`. /// /// Requires exclusive access to the `Root` object but not to the root node; - /// it will not invalidate existing handles or references to the root node. + /// it will not invalidate other handles or references to the root node. /// /// Panics if there is no internal level, i.e., if the root node is a leaf. pub fn pop_internal_level(&mut self) { assert!(self.height > 0); - let top = self.node.ptr; + let top = self.node; - let mut internal_node = unsafe { self.internal_node_as_mut() }; - self.node = unsafe { internal_node.as_internal_mut().edges[0].assume_init_read() }; - self.height -= 1; - self.node_as_mut().as_leaf_mut().parent = None; + let internal_node = NodeRef { height: self.height, node: top, _marker: PhantomData }; + *self = internal_node.first_edge().descend(); + self.borrow_mut().clear_parent_link(); unsafe { - Global.dealloc(NonNull::from(top).cast(), Layout::new::>()); + Global.dealloc(top.cast(), Layout::new::>()); } } } @@ -236,21 +219,49 @@ impl Root { // internal use of `NodeRef` because we stay completely generic over `K` and `V`. 
// However, whenever a public type wraps `NodeRef`, make sure that it has the // correct variance. +/// /// A reference to a node. /// /// This type has a number of parameters that controls how it acts: -/// - `BorrowType`: This can be `Immut<'a>`, `Mut<'a>` or `ValMut<'a>' for some `'a` -/// or `Owned`. -/// When this is `Immut<'a>`, the `NodeRef` acts roughly like `&'a Node`, -/// when this is `Mut<'a>`, the `NodeRef` acts roughly like `&'a mut Node`, -/// when this is `ValMut<'a>`, the `NodeRef` acts as immutable with respect -/// to keys and tree structure, but allows mutable references to values, -/// and when this is `Owned`, the `NodeRef` acts roughly like `Box`. -/// - `K` and `V`: These control what types of things are stored in the nodes. +/// - `BorrowType`: A dummy type that describes the kind of borrow and carries a lifetime. +/// - When this is `Immut<'a>`, the `NodeRef` acts roughly like `&'a Node`. +/// - When this is `ValMut<'a>`, the `NodeRef` acts roughly like `&'a Node` +/// with respect to keys and tree structure, but also allows many +/// mutable references to values throughout the tree to coexist. +/// - When this is `Mut<'a>`, the `NodeRef` acts roughly like `&'a mut Node`, +/// although insert methods allow a mutable pointer to a value to coexist. +/// - When this is `Owned`, the `NodeRef` acts roughly like `Box`, +/// but does not have a destructor, and must be cleaned up manually. +/// - `K` and `V`: These are the types of keys and values stored in the nodes. /// - `Type`: This can be `Leaf`, `Internal`, or `LeafOrInternal`. When this is /// `Leaf`, the `NodeRef` points to a leaf node, when this is `Internal` the /// `NodeRef` points to an internal node, and when this is `LeafOrInternal` the /// `NodeRef` could be pointing to either type of node. +/// `Type` is named `NodeType` when used outside `NodeRef`. +/// +/// Both `BorrowType` and `NodeType` restrict what methods we implement, to +/// exploit static type safety. There are limitations in the way we can apply +/// such restrictions: +/// - For each type parameter, we can only define a method either generically +/// or for one particular type. For example, we cannot define a method like +/// `key_at` generically for all `BorrowType`, because we want to return +/// `&'a K` for most choices of `BorrowType`, but plain `K` for `Owned`. +/// We cannot define `key_at` once for all types that have a lifetime. +/// Therefore, we define it only for the least powerful type `Immut<'a>`. +/// - We cannot get implicit coercion from say `Mut<'a>` to `Immut<'a>`. +/// Therefore, we have to explicitly call `reborrow` on a more powerful +/// `NodeRef` in order to reach a method like `key_at`. +/// - All methods on `NodeRef` that return some kind of reference, except +/// `reborrow` and `reborrow_mut`, take `self` by value and not by reference. +/// This avoids silently returning a second reference somewhere in the tree. +/// That is irrelevant when `BorrowType` is `Immut<'a>`, but the rule does +/// no harm because we make those `NodeRef` implicitly `Copy`. +/// The rule also avoids implicitly returning the lifetime of `&self`, +/// instead of the lifetime contained in `BorrowType`. +/// An exception to this rule is the insert functions. +/// - Given the above, we need a `reborrow_mut` to explicitly copy a `Mut<'a>` +/// `NodeRef` whenever we want to invoke a method returning an extra reference +/// somewhere in the tree.
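// A minimal, self-contained sketch (illustrative only, not the module's real
// definitions) of the type-state idea the comment above describes: a dummy
// `BorrowType` parameter behind `PhantomData` decides which methods exist,
// and `reborrow` is the explicit downgrade to the least powerful state.
use std::marker::PhantomData;

#[allow(dead_code)]
struct Immut<'a>(PhantomData<&'a ()>);
#[allow(dead_code)]
struct Mut<'a>(PhantomData<&'a mut ()>);

struct Ref<BorrowType, T> {
    ptr: *mut T,
    _marker: PhantomData<BorrowType>,
}

impl<BorrowType, T> Ref<BorrowType, T> {
    // Available in every borrow state: an explicit copy as `Immut`.
    fn reborrow(&self) -> Ref<Immut<'_>, T> {
        Ref { ptr: self.ptr, _marker: PhantomData }
    }
}

impl<'a, T> Ref<Immut<'a>, T> {
    // Reading is only offered on the least powerful borrow type,
    // and `self` is taken by value, mirroring the rule above.
    fn get(self) -> &'a T {
        unsafe { &*self.ptr }
    }
}

impl<'a, T> Ref<Mut<'a>, T> {
    fn set(self, new: T) {
        unsafe { *self.ptr = new }
    }
}

fn main() {
    let mut x = 41;
    let r: Ref<Mut<'_>, i32> = Ref { ptr: &mut x, _marker: PhantomData };
    // `r.get()` would not compile; the downgrade has to be spelled out.
    assert_eq!(*r.reborrow().get(), 41);
    r.set(42);
    assert_eq!(x, 42);
}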
pub struct NodeRef { /// The number of levels below the node, a property of the node that cannot be /// entirely described by `Type` and that the node does not store itself either. @@ -277,30 +288,45 @@ unsafe impl<'a, K: Send + 'a, V: Send + 'a, Type> Send for NodeRef Send for NodeRef, K, V, Type> {} unsafe impl Send for NodeRef {} +impl NodeRef { + /// Unpack a node reference that was packed by `Root::into_boxed_node`. + fn from_boxed_node(boxed_node: BoxedNode, height: usize) -> Self { + NodeRef { height, node: boxed_node.as_ptr(), _marker: PhantomData } + } +} + +impl NodeRef { + /// Unpack a node reference that was packed as `NodeRef::parent`. + fn from_internal(node: NonNull>, height: usize) -> Self { + debug_assert!(height > 0); + NodeRef { height, node: node.cast(), _marker: PhantomData } + } +} + impl NodeRef { - /// Exposes the data of an internal node for reading. + /// Exposes the data of an internal node. /// - /// Returns a raw ptr to avoid invalidating other references to this node, - /// which is possible when BorrowType is marker::ValMut. - fn as_internal_ptr(&self) -> *const InternalNode { - self.node.as_ptr() as *const InternalNode + /// Returns a raw ptr to avoid invalidating other references to this node. + fn as_internal_ptr(this: &Self) -> *mut InternalNode { + // SAFETY: the static node type is `Internal`. + this.node.as_ptr() as *mut InternalNode } } -impl<'a, K, V> NodeRef, K, V, marker::Internal> { - /// Exposes the data of an internal node for reading, - /// when we know we have exclusive access. - fn as_internal(&mut self) -> &InternalNode { - unsafe { &*self.as_internal_ptr() } +impl<'a, K, V> NodeRef, K, V, marker::Internal> { + /// Exposes the data of an internal node in an immutable tree. + fn as_internal(this: &Self) -> &'a InternalNode { + let ptr = Self::as_internal_ptr(this); + // SAFETY: there can be no mutable references into this tree borrowed as `Immut`. + unsafe { &*ptr } } } impl<'a, K, V> NodeRef, K, V, marker::Internal> { - /// Exposes the data of an internal node for writing. - /// - /// We don't need to return a raw ptr because we have unique access to the entire node. - fn as_internal_mut(&mut self) -> &mut InternalNode { - unsafe { &mut *(self.node.as_ptr() as *mut InternalNode) } + /// Offers exclusive access to the data of an internal node. + fn as_internal_mut(this: &mut Self) -> &'a mut InternalNode { + let ptr = Self::as_internal_ptr(this); + unsafe { &mut *ptr } } } @@ -312,7 +338,7 @@ impl NodeRef { pub fn len(&self) -> usize { // Crucially, we only access the `len` field here. If BorrowType is marker::ValMut, // there might be outstanding mutable references to values that we must not invalidate. - unsafe { usize::from((*self.as_leaf_ptr()).len) } + unsafe { usize::from((*Self::as_leaf_ptr(self)).len) } } /// Returns the height of this node with respect to the leaf level. Zero height means the @@ -322,48 +348,49 @@ impl NodeRef { } /// Temporarily takes out another, immutable reference to the same node. - fn reborrow(&self) -> NodeRef, K, V, Type> { + pub fn reborrow(&self) -> NodeRef, K, V, Type> { NodeRef { height: self.height, node: self.node, _marker: PhantomData } } /// Exposes the leaf portion of any leaf or internal node. /// - /// Returns a raw ptr to avoid invalidating other references to this node, - /// which is possible when BorrowType is marker::ValMut. - fn as_leaf_ptr(&self) -> *const LeafNode { + /// Returns a raw ptr to avoid invalidating other references to this node. 
+ fn as_leaf_ptr(this: &Self) -> *mut LeafNode { // The node must be valid for at least the LeafNode portion. // This is not a reference in the NodeRef type because we don't know if // it should be unique or shared. - self.node.as_ptr() + this.node.as_ptr() } +} - /// Borrows a reference to one of the keys stored in the node. +impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { + /// Exposes one of the keys stored in the node. /// /// # Safety /// The node has more than `idx` initialized elements. - pub unsafe fn key_at(&self, idx: usize) -> &K { - unsafe { self.reborrow().into_key_at(idx) } + pub unsafe fn key_at(self, idx: usize) -> &'a K { + debug_assert!(idx < self.len()); + unsafe { Self::as_leaf(&self).keys.get_unchecked(idx).assume_init_ref() } } - /// Borrows a reference to one of the values stored in the node. + /// Exposes one of the values stored in the node. /// /// # Safety /// The node has more than `idx` initialized elements. - unsafe fn val_at(&self, idx: usize) -> &V { - unsafe { self.reborrow().into_val_at(idx) } + unsafe fn val_at(self, idx: usize) -> &'a V { + debug_assert!(idx < self.len()); + unsafe { Self::as_leaf(&self).vals.get_unchecked(idx).assume_init_ref() } } } -impl NodeRef { - /// Borrows a reference to the contents of one of the edges that delimit - /// the elements of the node, without invalidating other references. +impl<'a, K, V> NodeRef, K, V, marker::Internal> { + /// Exposes the contents of one of the edges in the node. /// /// # Safety /// The node has more than `idx` initialized elements. - unsafe fn edge_at(&self, idx: usize) -> &BoxedNode { + unsafe fn edge_at(self, idx: usize) -> &'a BoxedNode { debug_assert!(idx <= self.len()); - let node = self.as_internal_ptr(); - unsafe { (*node).edges.get_unchecked(idx).assume_init_ref() } + unsafe { Self::as_internal(&self).edges.get_unchecked(idx).assume_init_ref() } } } @@ -380,15 +407,11 @@ impl NodeRef { ) -> Result, marker::Edge>, Self> { // We need to use raw pointers to nodes because, if BorrowType is marker::ValMut, // there might be outstanding mutable references to values that we must not invalidate. - let leaf_ptr = self.as_leaf_ptr(); + let leaf_ptr: *const _ = Self::as_leaf_ptr(&self); unsafe { (*leaf_ptr).parent } .as_ref() .map(|parent| Handle { - node: NodeRef { - height: self.height + 1, - node: parent.cast(), - _marker: PhantomData, - }, + node: NodeRef::from_internal(*parent, self.height + 1), idx: unsafe { usize::from((*leaf_ptr).parent_idx.assume_init()) }, _marker: PhantomData, }) @@ -420,11 +443,11 @@ impl NodeRef { } impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { - /// Exposes the data of a leaf node for reading in an immutable tree. - fn into_leaf(self) -> &'a LeafNode { - // SAFETY: we can access the entire node freely and do no need raw pointers, - // because there can be no mutable references to this Immut tree. - unsafe { &(*self.as_leaf_ptr()) } + /// Exposes the leaf portion of any leaf or internal node in an immutable tree. + fn as_leaf(this: &Self) -> &'a LeafNode { + let ptr = Self::as_leaf_ptr(this); + // SAFETY: there can be no mutable references into this tree borrowed as `Immut`. + unsafe { &*ptr } } } @@ -453,6 +476,12 @@ impl NodeRef { } impl<'a, K, V, Type> NodeRef, K, V, Type> { + /// Unsafely asserts to the compiler the static information that this node is a `Leaf`. 
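// Context for the `key_at`/`val_at` and `*_area_*` accessors here: node
// storage is a fixed-size array of `MaybeUninit` plus a separate `len`, so
// reads must pair `get_unchecked` with `assume_init_ref` under the "more than
// `idx` initialized elements" contract. A self-contained miniature of that
// layout (illustrative; the real `LeafNode` also stores keys, a parent link,
// etc., and its `CAPACITY` is 11, i.e. 2 * B - 1 with B = 6):
use std::mem::MaybeUninit;

const CAPACITY: usize = 11;

struct MiniLeaf<T> {
    len: u16,
    vals: [MaybeUninit<T>; CAPACITY],
}

impl<T> MiniLeaf<T> {
    fn new() -> Self {
        // An array of `MaybeUninit` needs no initialization itself. Like the
        // real node type, this has no destructor, so it would leak `Drop`
        // values unless they are taken out manually.
        MiniLeaf { len: 0, vals: unsafe { MaybeUninit::uninit().assume_init() } }
    }

    fn push(&mut self, val: T) {
        let idx = usize::from(self.len);
        assert!(idx < CAPACITY);
        self.vals[idx].write(val);
        self.len += 1;
    }

    /// # Safety
    /// The leaf has more than `idx` initialized elements.
    unsafe fn val_at(&self, idx: usize) -> &T {
        debug_assert!(idx < usize::from(self.len));
        unsafe { self.vals.get_unchecked(idx).assume_init_ref() }
    }
}

fn main() {
    let mut leaf = MiniLeaf::new();
    leaf.push("a");
    leaf.push("b");
    assert_eq!(unsafe { *leaf.val_at(1) }, "b");
}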
+ unsafe fn cast_to_leaf_unchecked(self) -> NodeRef, K, V, marker::Leaf> { + debug_assert!(self.height == 0); + NodeRef { height: self.height, node: self.node, _marker: PhantomData } + } + /// Unsafely asserts to the compiler the static information that this node is an `Internal`. unsafe fn cast_to_internal_unchecked(self) -> NodeRef, K, V, marker::Internal> { debug_assert!(self.height > 0); @@ -473,139 +502,155 @@ impl<'a, K, V, Type> NodeRef, K, V, Type> { NodeRef { height: self.height, node: self.node, _marker: PhantomData } } - /// Exposes the leaf portion of any leaf or internal node for writing. - /// - /// We don't need to return a raw ptr because we have unique access to the entire node. - fn as_leaf_mut(&mut self) -> &'a mut LeafNode { - unsafe { &mut (*self.node.as_ptr()) } + /// Offers exclusive access to the leaf portion of any leaf or internal node. + fn as_leaf_mut(this: &mut Self) -> &'a mut LeafNode { + let ptr = Self::as_leaf_ptr(this); + // SAFETY: we have exclusive access to the entire node. + unsafe { &mut *ptr } } +} - /// Borrows a mutable reference to one of the keys stored in the node. +impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { + /// Offers exclusive access to a part of the key storage area. /// /// # Safety /// The node has more than `idx` initialized elements. - unsafe fn key_mut_at(&mut self, idx: usize) -> &mut K { - unsafe { self.reborrow_mut().into_key_mut_at(idx) } + unsafe fn into_key_area_mut_at(mut self, idx: usize) -> &'a mut MaybeUninit { + debug_assert!(idx < self.len()); + unsafe { Self::as_leaf_mut(&mut self).keys.get_unchecked_mut(idx) } } - /// Borrows a mutable reference to one of the values stored in the node. + /// Offers exclusive access to a part of the value storage area. /// /// # Safety /// The node has more than `idx` initialized elements. - unsafe fn val_mut_at(&mut self, idx: usize) -> &mut V { - unsafe { self.reborrow_mut().into_val_mut_at(idx) } - } - - fn keys_mut(&mut self) -> &mut [K] - where - K: 'a, - V: 'a, - { - // SAFETY: the caller will not be able to call further methods on self - // until the key slice reference is dropped, as we have unique access - // for the lifetime of the borrow. - // SAFETY: The keys of a node must always be initialized up to length. - unsafe { - slice::from_raw_parts_mut( - MaybeUninit::slice_as_mut_ptr(&mut self.as_leaf_mut().keys), - self.len(), - ) - } - } - - fn vals_mut(&mut self) -> &mut [V] - where - K: 'a, - V: 'a, - { - // SAFETY: the caller will not be able to call further methods on self - // until the value slice reference is dropped, as we have unique access - // for the lifetime of the borrow. - // SAFETY: The values of a node must always be initialized up to length. - unsafe { - slice::from_raw_parts_mut( - MaybeUninit::slice_as_mut_ptr(&mut self.as_leaf_mut().vals), - self.len(), - ) - } + unsafe fn into_val_area_mut_at(mut self, idx: usize) -> &'a mut MaybeUninit { + debug_assert!(idx < self.len()); + unsafe { Self::as_leaf_mut(&mut self).vals.get_unchecked_mut(idx) } } } -impl<'a, K, V> NodeRef, K, V, marker::Internal> { - fn edges_mut(&mut self) -> &mut [BoxedNode] { - unsafe { - slice::from_raw_parts_mut( - MaybeUninit::slice_as_mut_ptr(&mut self.as_internal_mut().edges), - self.len() + 1, - ) - } +impl<'a, K: 'a, V: 'a> NodeRef, K, V, marker::Internal> { + /// Offers exclusive access to a part of the storage area for edge contents. + /// + /// # Safety + /// The node has at least `idx` initialized elements. 
+ unsafe fn into_edge_area_mut_at(mut self, idx: usize) -> &'a mut MaybeUninit> { + debug_assert!(idx <= self.len()); + unsafe { Self::as_internal_mut(&mut self).edges.get_unchecked_mut(idx) } } } impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { - /// # Safety - /// The node has more than `idx` initialized elements. - unsafe fn into_key_at(self, idx: usize) -> &'a K { - unsafe { self.into_leaf().keys.get_unchecked(idx).assume_init_ref() } + /// Exposes the entire key storage area in the node, + /// regardless of the node's current length, + /// having exclusive access to the entire node. + unsafe fn key_area(self) -> &'a [MaybeUninit] { + Self::as_leaf(&self).keys.as_slice() } - /// # Safety - /// The node has more than `idx` initialized elements. - unsafe fn into_val_at(self, idx: usize) -> &'a V { - unsafe { self.into_leaf().vals.get_unchecked(idx).assume_init_ref() } + /// Exposes the entire value storage area in the node, + /// regardless of the node's current length, + /// having exclusive access to the entire node. + unsafe fn val_area(self) -> &'a [MaybeUninit] { + Self::as_leaf(&self).vals.as_slice() } } -impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { - /// # Safety - /// The node has more than `idx` initialized elements. - unsafe fn into_key_mut_at(mut self, idx: usize) -> &'a mut K { - debug_assert!(idx < self.len()); +impl<'a, K: 'a, V: 'a> NodeRef, K, V, marker::Internal> { + /// Exposes the entire storage area for edge contents in the node, + /// regardless of the node's current length, + /// having exclusive access to the entire node. + unsafe fn edge_area(self) -> &'a [MaybeUninit>] { + Self::as_internal(&self).edges.as_slice() + } +} - let leaf = self.as_leaf_mut(); - unsafe { leaf.keys.get_unchecked_mut(idx).assume_init_mut() } +impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { + /// Offers exclusive access to a sized slice of key storage area in the node. + unsafe fn into_key_area_slice(mut self) -> &'a mut [MaybeUninit] { + let len = self.len(); + // SAFETY: the caller will not be able to call further methods on self + // until the key slice reference is dropped, as we have unique access + // for the lifetime of the borrow. + unsafe { Self::as_leaf_mut(&mut self).keys.get_unchecked_mut(..len) } } - /// # Safety - /// The node has more than `idx` initialized elements. - unsafe fn into_val_mut_at(mut self, idx: usize) -> &'a mut V { - debug_assert!(idx < self.len()); + /// Offers exclusive access to a sized slice of value storage area in the node. + unsafe fn into_val_area_slice(mut self) -> &'a mut [MaybeUninit] { + let len = self.len(); + // SAFETY: the caller will not be able to call further methods on self + // until the value slice reference is dropped, as we have unique access + // for the lifetime of the borrow. + unsafe { Self::as_leaf_mut(&mut self).vals.get_unchecked_mut(..len) } + } +} - let leaf = self.as_leaf_mut(); - unsafe { leaf.vals.get_unchecked_mut(idx).assume_init_mut() } +impl<'a, K: 'a, V: 'a> NodeRef, K, V, marker::Internal> { + /// Offers exclusive access to a sized slice of storage area for edge contents in the node. + unsafe fn into_edge_area_slice(mut self) -> &'a mut [MaybeUninit>] { + let len = self.len(); + // SAFETY: the caller will not be able to call further methods on self + // until the edge slice reference is dropped, as we have unique access + // for the lifetime of the borrow. 
+ unsafe { Self::as_internal_mut(&mut self).edges.get_unchecked_mut(..len + 1) } } } impl<'a, K, V, Type> NodeRef, K, V, Type> { /// # Safety - /// The node has more than `idx` initialized elements. - unsafe fn into_key_val_mut_at(self, idx: usize) -> (&'a K, &'a mut V) { + /// - The node has more than `idx` initialized elements. + /// - The keys and values of the node must be initialized up to its current length. + unsafe fn into_key_val_mut_at(mut self, idx: usize) -> (&'a K, &'a mut V) { // We only create a reference to the one element we are interested in, // to avoid aliasing with outstanding references to other elements, // in particular, those returned to the caller in earlier iterations. - let leaf = self.node.as_ptr(); + let leaf = Self::as_leaf_ptr(&mut self); let keys = unsafe { &raw const (*leaf).keys }; let vals = unsafe { &raw mut (*leaf).vals }; // We must coerce to unsized array pointers because of Rust issue #74679. let keys: *const [_] = keys; let vals: *mut [_] = vals; - // SAFETY: The keys and values of a node must always be initialized up to length. let key = unsafe { (&*keys.get_unchecked(idx)).assume_init_ref() }; let val = unsafe { (&mut *vals.get_unchecked_mut(idx)).assume_init_mut() }; (key, val) } } +impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { + /// Exposes exclusive access to the length of the node. + pub fn into_len_mut(mut self) -> &'a mut u16 { + &mut (*Self::as_leaf_mut(&mut self)).len + } +} + +impl<'a, K: 'a, V: 'a> NodeRef, K, V, marker::LeafOrInternal> { + /// Set or clear the node's link to its parent edge, + /// without invalidating other references to the node. + fn set_parent_link(&mut self, parent: NonNull>, parent_idx: usize) { + let leaf = Self::as_leaf_ptr(self); + unsafe { (*leaf).parent = Some(parent) }; + unsafe { (*leaf).parent_idx.write(parent_idx as u16) }; + } + + /// Clear the node's link to its parent edge, freeing it from its tree. + /// This only makes sense when there are no other references to the node. + fn clear_parent_link(&mut self) { + let leaf = Self::as_leaf_mut(self); + leaf.parent = None; + } +} + impl<'a, K: 'a, V: 'a> NodeRef, K, V, marker::Leaf> { /// Adds a key/value pair to the end of the node. 
pub fn push(&mut self, key: K, val: V) { - let len = &mut self.as_leaf_mut().len; + let len = unsafe { self.reborrow_mut().into_len_mut() }; let idx = usize::from(*len); assert!(idx < CAPACITY); *len += 1; unsafe { - ptr::write(self.key_mut_at(idx), key); - ptr::write(self.val_mut_at(idx), val); + self.reborrow_mut().into_key_area_mut_at(idx).write(key); + self.reborrow_mut().into_val_area_mut_at(idx).write(val); } } @@ -614,10 +659,10 @@ impl<'a, K: 'a, V: 'a> NodeRef, K, V, marker::Leaf> { assert!(self.len() < CAPACITY); unsafe { - slice_insert(self.keys_mut(), 0, key); - slice_insert(self.vals_mut(), 0, val); + *self.reborrow_mut().into_len_mut() += 1; + slice_insert(self.reborrow_mut().into_key_area_slice(), 0, key); + slice_insert(self.reborrow_mut().into_val_area_slice(), 0, val); } - self.as_leaf_mut().len += 1; } } @@ -643,14 +688,14 @@ impl<'a, K: 'a, V: 'a> NodeRef, K, V, marker::Internal> { pub fn push(&mut self, key: K, val: V, edge: Root) { assert!(edge.height == self.height - 1); - let len = &mut self.as_leaf_mut().len; + let len = unsafe { self.reborrow_mut().into_len_mut() }; let idx = usize::from(*len); assert!(idx < CAPACITY); *len += 1; unsafe { - ptr::write(self.key_mut_at(idx), key); - ptr::write(self.val_mut_at(idx), val); - self.as_internal_mut().edges.get_unchecked_mut(idx + 1).write(edge.node); + self.reborrow_mut().into_key_area_mut_at(idx).write(key); + self.reborrow_mut().into_val_area_mut_at(idx).write(val); + self.reborrow_mut().into_edge_area_mut_at(idx + 1).write(edge.into_boxed_node()); Handle::new_edge(self.reborrow_mut(), idx + 1).correct_parent_link(); } } @@ -662,13 +707,12 @@ impl<'a, K: 'a, V: 'a> NodeRef, K, V, marker::Internal> { assert!(self.len() < CAPACITY); unsafe { - slice_insert(self.keys_mut(), 0, key); - slice_insert(self.vals_mut(), 0, val); - slice_insert(self.edges_mut(), 0, edge.node); + *self.reborrow_mut().into_len_mut() += 1; + slice_insert(self.reborrow_mut().into_key_area_slice(), 0, key); + slice_insert(self.reborrow_mut().into_val_area_slice(), 0, val); + slice_insert(self.reborrow_mut().into_edge_area_slice(), 0, edge.into_boxed_node()); } - self.as_leaf_mut().len += 1; - self.correct_all_childrens_parent_links(); } } @@ -683,19 +727,21 @@ impl<'a, K: 'a, V: 'a> NodeRef, K, V, marker::LeafOrInternal> { let idx = self.len() - 1; unsafe { - let key = ptr::read(self.key_at(idx)); - let val = ptr::read(self.val_at(idx)); + let key = ptr::read(self.reborrow().key_at(idx)); + let val = ptr::read(self.reborrow().val_at(idx)); let edge = match self.reborrow_mut().force() { ForceResult::Leaf(_) => None, ForceResult::Internal(internal) => { - let edge = ptr::read(internal.edge_at(idx + 1)); - let mut new_root = Root { node: edge, height: internal.height - 1 }; - new_root.node_as_mut().as_leaf_mut().parent = None; - Some(new_root) + let boxed_node = ptr::read(internal.reborrow().edge_at(idx + 1)); + let mut edge = Root::from_boxed_node(boxed_node, internal.height - 1); + // In practice, clearing the parent is a waste of time, because we will + // insert the node elsewhere and set its parent link again. 
+ edge.borrow_mut().clear_parent_link(); + Some(edge) } }; - self.as_leaf_mut().len -= 1; + *self.reborrow_mut().into_len_mut() -= 1; (key, val, edge) } } @@ -709,29 +755,35 @@ impl<'a, K: 'a, V: 'a> NodeRef, K, V, marker::LeafOrInternal> { let old_len = self.len(); unsafe { - let key = slice_remove(self.keys_mut(), 0); - let val = slice_remove(self.vals_mut(), 0); + let key = slice_remove(self.reborrow_mut().into_key_area_slice(), 0); + let val = slice_remove(self.reborrow_mut().into_val_area_slice(), 0); let edge = match self.reborrow_mut().force() { ForceResult::Leaf(_) => None, ForceResult::Internal(mut internal) => { - let edge = slice_remove(internal.edges_mut(), 0); - let mut new_root = Root { node: edge, height: internal.height - 1 }; - new_root.node_as_mut().as_leaf_mut().parent = None; + let boxed_node = + slice_remove(internal.reborrow_mut().into_edge_area_slice(), 0); + let mut edge = Root::from_boxed_node(boxed_node, internal.height - 1); + // In practice, clearing the parent is a waste of time, because we will + // insert the node elsewhere and set its parent link again. + edge.borrow_mut().clear_parent_link(); internal.correct_childrens_parent_links(0..old_len); - Some(new_root) + Some(edge) } }; - self.as_leaf_mut().len -= 1; + *self.reborrow_mut().into_len_mut() -= 1; (key, val, edge) } } fn into_kv_pointers_mut(mut self) -> (*mut K, *mut V) { - (self.keys_mut().as_mut_ptr(), self.vals_mut().as_mut_ptr()) + let leaf = Self::as_leaf_mut(&mut self); + let keys = MaybeUninit::slice_as_mut_ptr(&mut leaf.keys); + let vals = MaybeUninit::slice_as_mut_ptr(&mut leaf.vals); + (keys, vals) } } @@ -816,7 +868,7 @@ impl NodeRef { /// Could be a public implementation of PartialEq, but only used in this module. fn eq(&self, other: &Self) -> bool { let Self { node, height, _marker: _ } = self; - if *node == other.node { + if node.eq(&other.node) { debug_assert_eq!(*height, other.height); true } else { @@ -854,6 +906,14 @@ impl } impl<'a, K, V, NodeType, HandleType> Handle, K, V, NodeType>, HandleType> { + /// Unsafely asserts to the compiler the static information that the handle's node is a `Leaf`. + pub unsafe fn cast_to_leaf_unchecked( + self, + ) -> Handle, K, V, marker::Leaf>, HandleType> { + let node = unsafe { self.node.cast_to_leaf_unchecked() }; + Handle { node, idx: self.idx, _marker: PhantomData } + } + /// Temporarily takes out another, mutable handle on the same location. Beware, as /// this method is very dangerous, doubly so since it may not immediately appear /// dangerous. @@ -893,9 +953,9 @@ impl Handle, mar } } -enum InsertionPlace { - Left(usize), - Right(usize), +pub enum LeftOrRight { + Left(T), + Right(T), } /// Given an edge index where we want to insert into a node filled to capacity, @@ -903,14 +963,14 @@ enum InsertionPlace { /// The goal of the split point is for its key and value to end up in a parent node; /// the keys, values and edges to the left of the split point become the left child; /// the keys, values and edges to the right of the split point become the right child. -fn splitpoint(edge_idx: usize) -> (usize, InsertionPlace) { +fn splitpoint(edge_idx: usize) -> (usize, LeftOrRight) { debug_assert!(edge_idx <= CAPACITY); // Rust issue #74834 tries to explain these symmetric rules. 
match edge_idx { - 0..EDGE_IDX_LEFT_OF_CENTER => (KV_IDX_CENTER - 1, InsertionPlace::Left(edge_idx)), - EDGE_IDX_LEFT_OF_CENTER => (KV_IDX_CENTER, InsertionPlace::Left(edge_idx)), - EDGE_IDX_RIGHT_OF_CENTER => (KV_IDX_CENTER, InsertionPlace::Right(0)), - _ => (KV_IDX_CENTER + 1, InsertionPlace::Right(edge_idx - (KV_IDX_CENTER + 1 + 1))), + 0..EDGE_IDX_LEFT_OF_CENTER => (KV_IDX_CENTER - 1, LeftOrRight::Left(edge_idx)), + EDGE_IDX_LEFT_OF_CENTER => (KV_IDX_CENTER, LeftOrRight::Left(edge_idx)), + EDGE_IDX_RIGHT_OF_CENTER => (KV_IDX_CENTER, LeftOrRight::Right(0)), + _ => (KV_IDX_CENTER + 1, LeftOrRight::Right(edge_idx - (KV_IDX_CENTER + 1 + 1))), } } @@ -924,11 +984,11 @@ impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Leaf>, mark debug_assert!(self.node.len() < CAPACITY); unsafe { - slice_insert(self.node.keys_mut(), self.idx, key); - slice_insert(self.node.vals_mut(), self.idx, val); - self.node.as_leaf_mut().len += 1; + *self.node.reborrow_mut().into_len_mut() += 1; + slice_insert(self.node.reborrow_mut().into_key_area_slice(), self.idx, key); + slice_insert(self.node.reborrow_mut().into_val_area_slice(), self.idx, val); - self.node.val_mut_at(self.idx) + self.node.reborrow_mut().into_val_area_mut_at(self.idx).assume_init_mut() } } } @@ -946,17 +1006,17 @@ impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Leaf>, mark } else { let (middle_kv_idx, insertion) = splitpoint(self.idx); let middle = unsafe { Handle::new_kv(self.node, middle_kv_idx) }; - let (mut left, k, v, mut right) = middle.split(); + let mut result = middle.split(); let mut insertion_edge = match insertion { - InsertionPlace::Left(insert_idx) => unsafe { - Handle::new_edge(left.reborrow_mut(), insert_idx) + LeftOrRight::Left(insert_idx) => unsafe { + Handle::new_edge(result.left.reborrow_mut(), insert_idx) }, - InsertionPlace::Right(insert_idx) => unsafe { - Handle::new_edge(right.leaf_node_as_mut(), insert_idx) + LeftOrRight::Right(insert_idx) => unsafe { + Handle::new_edge(result.right.borrow_mut(), insert_idx) }, }; let val_ptr = insertion_edge.insert_fit(key, val); - (InsertResult::Split(SplitResult { left: left.forget_type(), k, v, right }), val_ptr) + (InsertResult::Split(result), val_ptr) } } } @@ -964,12 +1024,12 @@ impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Leaf>, mark impl<'a, K, V> Handle, K, V, marker::Internal>, marker::Edge> { /// Fixes the parent pointer and index in the child node below this edge. This is useful /// when the ordering of edges has been changed, such as in the various `insert` methods. - fn correct_parent_link(mut self) { - let idx = self.idx as u16; - let ptr = NonNull::new(self.node.as_internal_mut()); + fn correct_parent_link(self) { + // Create backpointer without invalidating other references to the node. 
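// A standalone restatement of the `splitpoint` rule above, using if/else in
// place of the module's feature-gated exclusive range patterns, with the
// constants spelled out for B = 6 as in the real module. Running it checks
// the symmetry: for every insertion edge of a full node, the chosen split
// leaves room for the new element on the side it is routed to.
const B: usize = 6;
const CAPACITY: usize = 2 * B - 1;
const KV_IDX_CENTER: usize = B - 1;
const EDGE_IDX_LEFT_OF_CENTER: usize = B - 1;
const EDGE_IDX_RIGHT_OF_CENTER: usize = B;

enum LeftOrRight<T> {
    Left(T),
    Right(T),
}

fn splitpoint(edge_idx: usize) -> (usize, LeftOrRight<usize>) {
    debug_assert!(edge_idx <= CAPACITY);
    if edge_idx < EDGE_IDX_LEFT_OF_CENTER {
        (KV_IDX_CENTER - 1, LeftOrRight::Left(edge_idx))
    } else if edge_idx == EDGE_IDX_LEFT_OF_CENTER {
        (KV_IDX_CENTER, LeftOrRight::Left(edge_idx))
    } else if edge_idx == EDGE_IDX_RIGHT_OF_CENTER {
        (KV_IDX_CENTER, LeftOrRight::Right(0))
    } else {
        (KV_IDX_CENTER + 1, LeftOrRight::Right(edge_idx - (KV_IDX_CENTER + 1 + 1)))
    }
}

fn main() {
    for edge_idx in 0..=CAPACITY {
        let (middle_kv_idx, insertion) = splitpoint(edge_idx);
        // After the split, the KV at `middle_kv_idx` moves up to the parent.
        let left_len = middle_kv_idx;
        let right_len = CAPACITY - middle_kv_idx - 1;
        assert_eq!(left_len + 1 + right_len, CAPACITY);
        match insertion {
            LeftOrRight::Left(insert_idx) => assert!(insert_idx <= left_len),
            LeftOrRight::Right(insert_idx) => assert!(insert_idx <= right_len),
        }
    }
}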
+ let ptr = unsafe { NonNull::new_unchecked(NodeRef::as_internal_ptr(&self.node)) }; + let idx = self.idx; let mut child = self.descend(); - child.as_leaf_mut().parent = ptr; - child.as_leaf_mut().parent_idx.write(idx); + child.set_parent_link(ptr, idx); } } @@ -981,11 +1041,12 @@ impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Internal>, debug_assert!(self.node.len() < CAPACITY); debug_assert!(edge.height == self.node.height - 1); + let boxed_node = edge.into_boxed_node(); unsafe { - slice_insert(self.node.keys_mut(), self.idx, key); - slice_insert(self.node.vals_mut(), self.idx, val); - slice_insert(self.node.edges_mut(), self.idx + 1, edge.node); - self.node.as_leaf_mut().len += 1; + *self.node.reborrow_mut().into_len_mut() += 1; + slice_insert(self.node.reborrow_mut().into_key_area_slice(), self.idx, key); + slice_insert(self.node.reborrow_mut().into_val_area_slice(), self.idx, val); + slice_insert(self.node.reborrow_mut().into_edge_area_slice(), self.idx + 1, boxed_node); self.node.correct_childrens_parent_links((self.idx + 1)..=self.node.len()); } @@ -1009,17 +1070,17 @@ impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Internal>, } else { let (middle_kv_idx, insertion) = splitpoint(self.idx); let middle = unsafe { Handle::new_kv(self.node, middle_kv_idx) }; - let (mut left, k, v, mut right) = middle.split(); + let mut result = middle.split(); let mut insertion_edge = match insertion { - InsertionPlace::Left(insert_idx) => unsafe { - Handle::new_edge(left.reborrow_mut(), insert_idx) + LeftOrRight::Left(insert_idx) => unsafe { + Handle::new_edge(result.left.reborrow_mut(), insert_idx) }, - InsertionPlace::Right(insert_idx) => unsafe { - Handle::new_edge(right.internal_node_as_mut(), insert_idx) + LeftOrRight::Right(insert_idx) => unsafe { + Handle::new_edge(result.right.borrow_mut(), insert_idx) }, }; insertion_edge.insert_fit(key, val, edge); - InsertResult::Split(SplitResult { left: left.forget_type(), k, v, right }) + InsertResult::Split(result) } } } @@ -1041,16 +1102,16 @@ impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Leaf>, mark (InsertResult::Fit(handle), ptr) => { return (InsertResult::Fit(handle.forget_node_type()), ptr); } - (InsertResult::Split(split), val_ptr) => (split, val_ptr), + (InsertResult::Split(split), val_ptr) => (split.forget_node_type(), val_ptr), }; loop { split = match split.left.ascend() { - Ok(parent) => match parent.insert(split.k, split.v, split.right) { + Ok(parent) => match parent.insert(split.kv.0, split.kv.1, split.right) { InsertResult::Fit(handle) => { return (InsertResult::Fit(handle.forget_node_type()), val_ptr); } - InsertResult::Split(split) => split, + InsertResult::Split(split) => split.forget_node_type(), }, Err(root) => { return (InsertResult::Split(SplitResult { left: root, ..split }), val_ptr); @@ -1073,28 +1134,25 @@ impl Handle, marke // node pointer is dereferenced, we access the edges array with a // reference (Rust issue #73987) and invalidate any other references // to or inside the array, should any be around. 
- let internal_node = self.node.as_internal_ptr(); - NodeRef { - height: self.node.height - 1, - node: unsafe { (&*(*internal_node).edges.get_unchecked(self.idx).as_ptr()).as_ptr() }, - _marker: PhantomData, - } + let parent_ptr = NodeRef::as_internal_ptr(&self.node); + let boxed_node = unsafe { (*parent_ptr).edges.get_unchecked(self.idx).assume_init_read() }; + NodeRef::from_boxed_node(boxed_node, self.node.height - 1) } } impl<'a, K: 'a, V: 'a, NodeType> Handle, K, V, NodeType>, marker::KV> { pub fn into_kv(self) -> (&'a K, &'a V) { - (unsafe { self.node.into_key_at(self.idx) }, unsafe { self.node.into_val_at(self.idx) }) + (unsafe { self.node.key_at(self.idx) }, unsafe { self.node.val_at(self.idx) }) } } impl<'a, K: 'a, V: 'a, NodeType> Handle, K, V, NodeType>, marker::KV> { pub fn into_key_mut(self) -> &'a mut K { - unsafe { self.node.into_key_mut_at(self.idx) } + unsafe { self.node.into_key_area_mut_at(self.idx).assume_init_mut() } } pub fn into_val_mut(self) -> &'a mut V { - unsafe { self.node.into_val_mut_at(self.idx) } + unsafe { self.node.into_val_area_mut_at(self.idx).assume_init_mut() } } } @@ -1106,12 +1164,14 @@ impl<'a, K, V, NodeType> Handle, K, V, NodeType>, mar impl<'a, K: 'a, V: 'a, NodeType> Handle, K, V, NodeType>, marker::KV> { pub fn kv_mut(&mut self) -> (&mut K, &mut V) { - // We cannot call into_key_mut_at and into_val_mut_at, because calling the second one + // We cannot call separate key and value methods, because calling the second one // invalidates the reference returned by the first. - let leaf = self.node.as_leaf_mut(); - let key = unsafe { leaf.keys.get_unchecked_mut(self.idx).assume_init_mut() }; - let val = unsafe { leaf.vals.get_unchecked_mut(self.idx).assume_init_mut() }; - (key, val) + unsafe { + let leaf = NodeRef::as_leaf_mut(&mut self.node.reborrow_mut()); + let key = leaf.keys.get_unchecked_mut(self.idx).assume_init_mut(); + let val = leaf.vals.get_unchecked_mut(self.idx).assume_init_mut(); + (key, val) + } } } @@ -1127,23 +1187,23 @@ impl<'a, K: 'a, V: 'a, NodeType> Handle, K, V, NodeType> /// by taking care of leaf data. fn split_leaf_data(&mut self, new_node: &mut LeafNode) -> (K, V) { let new_len = self.split_new_node_len(); + new_node.len = new_len as u16; unsafe { - let k = ptr::read(self.node.key_at(self.idx)); - let v = ptr::read(self.node.val_at(self.idx)); + let k = ptr::read(self.node.reborrow().key_at(self.idx)); + let v = ptr::read(self.node.reborrow().val_at(self.idx)); ptr::copy_nonoverlapping( - self.node.key_at(self.idx + 1), - MaybeUninit::slice_as_mut_ptr(&mut new_node.keys), + self.node.reborrow().key_area().as_ptr().add(self.idx + 1), + new_node.keys.as_mut_ptr(), new_len, ); ptr::copy_nonoverlapping( - self.node.val_at(self.idx + 1), - MaybeUninit::slice_as_mut_ptr(&mut new_node.vals), + self.node.reborrow().val_area().as_ptr().add(self.idx + 1), + new_node.vals.as_mut_ptr(), new_len, ); - self.node.as_leaf_mut().len = self.idx as u16; - new_node.len = new_len as u16; + *self.node.reborrow_mut().into_len_mut() = self.idx as u16; (k, v) } } @@ -1157,14 +1217,14 @@ impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Leaf>, mark /// - The key and value pointed to by this handle are extracted. /// - All the key/value pairs to the right of this handle are put into a newly /// allocated node. 
- pub fn split(mut self) -> (NodeRef, K, V, marker::Leaf>, K, V, Root) { + pub fn split(mut self) -> SplitResult<'a, K, V, marker::Leaf> { unsafe { let mut new_node = Box::new(LeafNode::new()); - let (k, v) = self.split_leaf_data(&mut new_node); + let kv = self.split_leaf_data(&mut new_node); - let right = Root { node: BoxedNode::from_leaf(new_node), height: 0 }; - (self.node, k, v, right) + let right = NodeRef::from_new_leaf(new_node); + SplitResult { left: self.node, kv, right } } } @@ -1174,26 +1234,14 @@ impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Leaf>, mark mut self, ) -> ((K, V), Handle, K, V, marker::Leaf>, marker::Edge>) { unsafe { - let k = slice_remove(self.node.keys_mut(), self.idx); - let v = slice_remove(self.node.vals_mut(), self.idx); - self.node.as_leaf_mut().len -= 1; + let k = slice_remove(self.node.reborrow_mut().into_key_area_slice(), self.idx); + let v = slice_remove(self.node.reborrow_mut().into_val_area_slice(), self.idx); + *self.node.reborrow_mut().into_len_mut() -= 1; ((k, v), self.left_edge()) } } } -impl<'a, K, V> Handle, K, V, marker::Internal>, marker::KV> { - /// Returns `true` if it is valid to call `.merge()`, i.e., whether there is enough room in - /// a node to hold the combination of the nodes to the left and right of this handle along - /// with the key/value pair at this handle. - pub fn can_merge(&self) -> bool { - (self.reborrow().left_edge().descend().len() - + self.reborrow().right_edge().descend().len() - + 1) - <= CAPACITY - } -} - impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Internal>, marker::KV> { /// Splits the underlying node into three parts: /// @@ -1202,79 +1250,172 @@ impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Internal>, /// - The key and value pointed to by this handle are extracted. /// - All the edges and key/value pairs to the right of this handle are put into /// a newly allocated node. - pub fn split(mut self) -> (NodeRef, K, V, marker::Internal>, K, V, Root) { + pub fn split(mut self) -> SplitResult<'a, K, V, marker::Internal> { unsafe { let mut new_node = Box::new(InternalNode::new()); - // Move edges out before reducing length: let new_len = self.split_new_node_len(); + // Move edges out before reducing length: ptr::copy_nonoverlapping( - self.node.edge_at(self.idx + 1), - MaybeUninit::slice_as_mut_ptr(&mut new_node.edges), + self.node.reborrow().edge_area().as_ptr().add(self.idx + 1), + new_node.edges.as_mut_ptr(), new_len + 1, ); - let (k, v) = self.split_leaf_data(&mut new_node.data); + let kv = self.split_leaf_data(&mut new_node.data); let height = self.node.height; - let mut right = Root { node: BoxedNode::from_internal(new_node), height }; + let mut right = NodeRef::from_new_internal(new_node, height); - right.internal_node_as_mut().correct_childrens_parent_links(0..=new_len); + right.borrow_mut().correct_childrens_parent_links(0..=new_len); - (self.node, k, v, right) + SplitResult { left: self.node, kv, right } } } +} + +/// Represents a session for evaluating and performing a balancing operation +/// around an internal key/value pair. +pub struct BalancingContext<'a, K, V> { + parent: Handle, K, V, marker::Internal>, marker::KV>, + left_child: NodeRef, K, V, marker::LeafOrInternal>, + right_child: NodeRef, K, V, marker::LeafOrInternal>, +} - /// Combines the node immediately to the left of this handle, the key/value pair pointed - /// to by this handle, and the node immediately to the right of this handle into one new - /// child of the underlying node, returning an edge referencing that new child. 
+impl<'a, K, V> Handle, K, V, marker::Internal>, marker::KV> { + pub fn consider_for_balancing(self) -> BalancingContext<'a, K, V> { + let self1 = unsafe { ptr::read(&self) }; + let self2 = unsafe { ptr::read(&self) }; + BalancingContext { + parent: self, + left_child: self1.left_edge().descend(), + right_child: self2.right_edge().descend(), + } + } +} + +impl<'a, K, V> NodeRef, K, V, marker::LeafOrInternal> { + /// Chooses a balancing context involving the node as a child, thus between + /// the KV immediately to the left or to the right in the parent node. + /// Returns an `Err` if there is no parent. /// - /// Panics unless this edge `.can_merge()`. + /// This method optimizes for a node that has fewer elements than its left + /// and right siblings, if they exist, by preferring the left parent KV. + /// Merging with the left sibling is faster, since we only need to move + /// the node's N elements, instead of shifting them to the right and moving + /// more than N elements in front. Stealing from the left sibling is also + /// typically faster, since we only need to shift the node's N elements to + /// the right, instead of shifting at least N of the sibling's elements to + /// the left. + pub fn choose_parent_kv(self) -> Result>, Self> { + match unsafe { ptr::read(&self) }.ascend() { + Ok(parent) => match parent.left_kv() { + Ok(left_parent_kv) => Ok(LeftOrRight::Left(BalancingContext { + parent: unsafe { ptr::read(&left_parent_kv) }, + left_child: left_parent_kv.left_edge().descend(), + right_child: self, + })), + Err(parent) => match parent.right_kv() { + Ok(right_parent_kv) => Ok(LeftOrRight::Right(BalancingContext { + parent: unsafe { ptr::read(&right_parent_kv) }, + left_child: self, + right_child: right_parent_kv.right_edge().descend(), + })), + Err(_) => unreachable!("empty non-root node"), + }, + }, + Err(root) => Err(root), + } + } +} + +impl<'a, K, V> BalancingContext<'a, K, V> { + pub fn left_child_len(&self) -> usize { + self.left_child.len() + } + + pub fn right_child_len(&self) -> usize { + self.right_child.len() + } + + pub fn into_left_child(self) -> NodeRef, K, V, marker::LeafOrInternal> { + self.left_child + } + + pub fn into_right_child(self) -> NodeRef, K, V, marker::LeafOrInternal> { + self.right_child + } + + /// Returns `true` if it is valid to call `.merge()` in the balancing context, + /// i.e., whether there is enough room in a node to hold the combination of + /// both adjacent child nodes, along with the key/value pair in the parent. + pub fn can_merge(&self) -> bool { + self.left_child.len() + 1 + self.right_child.len() <= CAPACITY + } +} + +impl<'a, K: 'a, V: 'a> BalancingContext<'a, K, V> { + /// Merges the parent's key/value pair and both adjacent child nodes into + /// the left node and returns an edge handle in that expanded left node. + /// If `track_edge_idx` is given some value, the returned edge corresponds + /// to where the edge in that child node ended up, + /// + /// Panics unless we `.can_merge()`. 
pub fn merge( mut self, - ) -> Handle, K, V, marker::Internal>, marker::Edge> { - let self1 = unsafe { ptr::read(&self) }; - let self2 = unsafe { ptr::read(&self) }; - let mut left_node = self1.left_edge().descend(); + track_edge_idx: Option>, + ) -> Handle, K, V, marker::LeafOrInternal>, marker::Edge> { + let mut left_node = self.left_child; let left_len = left_node.len(); - let right_node = self2.right_edge().descend(); + let right_node = self.right_child; let right_len = right_node.len(); assert!(left_len + right_len < CAPACITY); + assert!(match track_edge_idx { + None => true, + Some(LeftOrRight::Left(idx)) => idx <= left_len, + Some(LeftOrRight::Right(idx)) => idx <= right_len, + }); unsafe { - ptr::write( - left_node.keys_mut().get_unchecked_mut(left_len), - slice_remove(self.node.keys_mut(), self.idx), + *left_node.reborrow_mut().into_len_mut() += right_len as u16 + 1; + + let parent_key = slice_remove( + self.parent.node.reborrow_mut().into_key_area_slice(), + self.parent.idx, ); + left_node.reborrow_mut().into_key_area_mut_at(left_len).write(parent_key); ptr::copy_nonoverlapping( - right_node.key_at(0), - left_node.keys_mut().as_mut_ptr().add(left_len + 1), + right_node.reborrow().key_area().as_ptr(), + left_node.reborrow_mut().into_key_area_slice().as_mut_ptr().add(left_len + 1), right_len, ); - ptr::write( - left_node.vals_mut().get_unchecked_mut(left_len), - slice_remove(self.node.vals_mut(), self.idx), + + let parent_val = slice_remove( + self.parent.node.reborrow_mut().into_val_area_slice(), + self.parent.idx, ); + left_node.reborrow_mut().into_val_area_mut_at(left_len).write(parent_val); ptr::copy_nonoverlapping( - right_node.val_at(0), - left_node.vals_mut().as_mut_ptr().add(left_len + 1), + right_node.reborrow().val_area().as_ptr(), + left_node.reborrow_mut().into_val_area_slice().as_mut_ptr().add(left_len + 1), right_len, ); - slice_remove(&mut self.node.edges_mut(), self.idx + 1); - let self_len = self.node.len(); - self.node.correct_childrens_parent_links(self.idx + 1..self_len); - self.node.as_leaf_mut().len -= 1; - - left_node.as_leaf_mut().len += right_len as u16 + 1; + slice_remove( + &mut self.parent.node.reborrow_mut().into_edge_area_slice(), + self.parent.idx + 1, + ); + let parent_old_len = self.parent.node.len(); + self.parent.node.correct_childrens_parent_links(self.parent.idx + 1..parent_old_len); + *self.parent.node.reborrow_mut().into_len_mut() -= 1; - if self.node.height > 1 { + if self.parent.node.height > 1 { // SAFETY: the height of the nodes being merged is one below the height // of the node of this edge, thus above zero, so they are internal. 
- let mut left_node = left_node.cast_to_internal_unchecked(); + let mut left_node = left_node.reborrow_mut().cast_to_internal_unchecked(); let right_node = right_node.cast_to_internal_unchecked(); ptr::copy_nonoverlapping( - right_node.edge_at(0), - left_node.edges_mut().as_mut_ptr().add(left_len + 1), + right_node.reborrow().edge_area().as_ptr(), + left_node.reborrow_mut().into_edge_area_slice().as_mut_ptr().add(left_len + 1), right_len + 1, ); @@ -1285,50 +1426,67 @@ impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Internal>, Global.dealloc(right_node.node.cast(), Layout::new::>()); } - Handle::new_edge(self.node, self.idx) + let new_idx = match track_edge_idx { + None => 0, + Some(LeftOrRight::Left(idx)) => idx, + Some(LeftOrRight::Right(idx)) => left_len + 1 + idx, + }; + Handle::new_edge(left_node, new_idx) } } - /// This removes a key/value pair from the left child and places it in the key/value storage - /// pointed to by this handle while pushing the old key/value pair of this handle into the right - /// child. - pub fn steal_left(&mut self) { + /// Removes a key/value pair from the left child and places it in the key/value storage + /// of the parent, while pushing the old parent key/value pair into the right child. + /// Returns a handle to the edge in the right child corresponding to where the original + /// edge specified by `track_right_edge_idx` ended up. + pub fn steal_left( + mut self, + track_right_edge_idx: usize, + ) -> Handle, K, V, marker::LeafOrInternal>, marker::Edge> { unsafe { - let (k, v, edge) = self.reborrow_mut().left_edge().descend().pop(); + let (k, v, edge) = self.left_child.pop(); - let k = mem::replace(self.kv_mut().0, k); - let v = mem::replace(self.kv_mut().1, v); + let k = mem::replace(self.parent.kv_mut().0, k); + let v = mem::replace(self.parent.kv_mut().1, v); - match self.reborrow_mut().right_edge().descend().force() { + match self.right_child.reborrow_mut().force() { ForceResult::Leaf(mut leaf) => leaf.push_front(k, v), ForceResult::Internal(mut internal) => internal.push_front(k, v, edge.unwrap()), } + + Handle::new_edge(self.right_child, 1 + track_right_edge_idx) } } - /// This removes a key/value pair from the right child and places it in the key/value storage - /// pointed to by this handle while pushing the old key/value pair of this handle into the left - /// child. - pub fn steal_right(&mut self) { + /// Removes a key/value pair from the right child and places it in the key/value storage + /// of the parent, while pushing the old parent key/value pair onto the left child. + /// Returns a handle to the edge in the left child specified by `track_left_edge_idx`, + /// which didn't move. + pub fn steal_right( + mut self, + track_left_edge_idx: usize, + ) -> Handle, K, V, marker::LeafOrInternal>, marker::Edge> { unsafe { - let (k, v, edge) = self.reborrow_mut().right_edge().descend().pop_front(); + let (k, v, edge) = self.right_child.pop_front(); - let k = mem::replace(self.kv_mut().0, k); - let v = mem::replace(self.kv_mut().1, v); + let k = mem::replace(self.parent.kv_mut().0, k); + let v = mem::replace(self.parent.kv_mut().1, v); - match self.reborrow_mut().left_edge().descend().force() { + match self.left_child.reborrow_mut().force() { ForceResult::Leaf(mut leaf) => leaf.push(k, v), ForceResult::Internal(mut internal) => internal.push(k, v, edge.unwrap()), } + + Handle::new_edge(self.left_child, track_left_edge_idx) } } /// This does stealing similar to `steal_left` but steals multiple elements at once. 
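The balancing moves above reduce to two operations around a single parent separator: merge the separator and the right child into the left child when the result fits, or rotate one pair through the parent (`steal_left`/`steal_right`) when it does not. A minimal sketch of both moves on `Vec`-backed children; `CAPACITY` is an assumed constant for the model, and the parent-side cleanup that the real `merge` performs is elided:

```rust
// Simplified model of merging and stealing around one parent separator.
const CAPACITY: usize = 11; // assumed for this sketch

fn can_merge(left: &[i32], right: &[i32]) -> bool {
    // Room for both children plus the separator that moves down from the parent.
    left.len() + 1 + right.len() <= CAPACITY
}

/// Merge: the parent separator and the whole right child are appended to the
/// left child (the real code also removes the separator and the right edge
/// from the parent, which is elided here).
fn merge(parent_kv: i32, left: &mut Vec<i32>, right: Vec<i32>) {
    left.push(parent_kv);
    left.extend(right);
}

/// Steal from the left sibling: its last element replaces the parent
/// separator, and the old separator is pushed onto the front of the right child.
fn steal_left(parent_kv: &mut i32, left: &mut Vec<i32>, right: &mut Vec<i32>) {
    let stolen = left.pop().expect("left sibling must be non-empty");
    let old_separator = std::mem::replace(parent_kv, stolen);
    right.insert(0, old_separator);
}

fn main() {
    let mut parent_kv = 40;
    let mut left = vec![10, 20, 30];
    let mut right = vec![50];

    steal_left(&mut parent_kv, &mut left, &mut right);
    assert_eq!((parent_kv, &left[..], &right[..]), (30, &[10, 20][..], &[40, 50][..]));

    if can_merge(&left, &right) {
        merge(parent_kv, &mut left, right);
    }
    assert_eq!(left, [10, 20, 30, 40, 50]);
}
```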
pub fn bulk_steal_left(&mut self, count: usize) { unsafe { - let mut left_node = ptr::read(self).left_edge().descend(); + let left_node = &mut self.left_child; let left_len = left_node.len(); - let mut right_node = ptr::read(self).right_edge().descend(); + let right_node = &mut self.right_child; let right_len = right_node.len(); // Make sure that we may steal safely. @@ -1342,7 +1500,7 @@ impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Internal>, let left_kv = left_node.reborrow_mut().into_kv_pointers_mut(); let right_kv = right_node.reborrow_mut().into_kv_pointers_mut(); let parent_kv = { - let kv = self.kv_mut(); + let kv = self.parent.kv_mut(); (kv.0 as *mut K, kv.1 as *mut V) }; @@ -1360,13 +1518,14 @@ impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Internal>, move_kv(left_kv, new_left_len, parent_kv, 0, 1); } - left_node.as_leaf_mut().len -= count as u16; - right_node.as_leaf_mut().len += count as u16; + *left_node.reborrow_mut().into_len_mut() -= count as u16; + *right_node.reborrow_mut().into_len_mut() += count as u16; - match (left_node.force(), right_node.force()) { + match (left_node.reborrow_mut().force(), right_node.reborrow_mut().force()) { (ForceResult::Internal(left), ForceResult::Internal(mut right)) => { // Make room for stolen edges. - let right_edges = right.reborrow_mut().as_internal_mut().edges.as_mut_ptr(); + let left = left.reborrow(); + let right_edges = right.reborrow_mut().into_edge_area_slice().as_mut_ptr(); ptr::copy(right_edges, right_edges.add(count), right_len + 1); right.correct_childrens_parent_links(count..count + right_len + 1); @@ -1381,9 +1540,9 @@ impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Internal>, /// The symmetric clone of `bulk_steal_left`. pub fn bulk_steal_right(&mut self, count: usize) { unsafe { - let mut left_node = ptr::read(self).left_edge().descend(); + let left_node = &mut self.left_child; let left_len = left_node.len(); - let mut right_node = ptr::read(self).right_edge().descend(); + let right_node = &mut self.right_child; let right_len = right_node.len(); // Make sure that we may steal safely. @@ -1397,7 +1556,7 @@ impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Internal>, let left_kv = left_node.reborrow_mut().into_kv_pointers_mut(); let right_kv = right_node.reborrow_mut().into_kv_pointers_mut(); let parent_kv = { - let kv = self.kv_mut(); + let kv = self.parent.kv_mut(); (kv.0 as *mut K, kv.1 as *mut V) }; @@ -1415,15 +1574,15 @@ impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Internal>, ptr::copy(right_kv.1.add(count), right_kv.1, new_right_len); } - left_node.as_leaf_mut().len += count as u16; - right_node.as_leaf_mut().len -= count as u16; + *left_node.reborrow_mut().into_len_mut() += count as u16; + *right_node.reborrow_mut().into_len_mut() -= count as u16; - match (left_node.force(), right_node.force()) { + match (left_node.reborrow_mut().force(), right_node.reborrow_mut().force()) { (ForceResult::Internal(left), ForceResult::Internal(mut right)) => { - move_edges(right.reborrow_mut(), 0, left, left_len + 1, count); + move_edges(right.reborrow(), 0, left, left_len + 1, count); // Fix right indexing. - let right_edges = right.reborrow_mut().as_internal_mut().edges.as_mut_ptr(); + let right_edges = right.reborrow_mut().into_edge_area_slice().as_mut_ptr(); ptr::copy(right_edges.add(count), right_edges, new_right_len + 1); right.correct_childrens_parent_links(0..=new_right_len); } @@ -1448,16 +1607,16 @@ unsafe fn move_kv( } // Source and destination must have the same height. 
-unsafe fn move_edges( - mut source: NodeRef, K, V, marker::Internal>, +unsafe fn move_edges<'a, K: 'a, V: 'a>( + source: NodeRef, K, V, marker::Internal>, source_offset: usize, - mut dest: NodeRef, K, V, marker::Internal>, + mut dest: NodeRef, K, V, marker::Internal>, dest_offset: usize, count: usize, ) { - let source_ptr = source.as_internal().edges.as_ptr(); - let dest_ptr = dest.as_internal_mut().edges.as_mut_ptr(); unsafe { + let source_ptr = source.edge_area().as_ptr(); + let dest_ptr = dest.reborrow_mut().into_edge_area_slice().as_mut_ptr(); ptr::copy_nonoverlapping(source_ptr.add(source_offset), dest_ptr.add(dest_offset), count); dest.correct_childrens_parent_links(dest_offset..dest_offset + count); } @@ -1553,11 +1712,12 @@ impl<'a, K, V> Handle, K, V, marker::LeafOrInternal>, ma move_kv(left_kv, left_new_len, right_kv, 0, right_new_len); - left_node.as_leaf_mut().len = left_new_len as u16; - right_node.as_leaf_mut().len = right_new_len as u16; + *left_node.reborrow_mut().into_len_mut() = left_new_len as u16; + *right_node.reborrow_mut().into_len_mut() = right_new_len as u16; match (left_node.force(), right_node.force()) { (ForceResult::Internal(left), ForceResult::Internal(right)) => { + let left = left.reborrow(); move_edges(left, left_new_len + 1, right, 1, right_new_len); } (ForceResult::Leaf(_), ForceResult::Leaf(_)) => {} @@ -1574,20 +1734,30 @@ pub enum ForceResult { } /// Result of insertion, when a node needed to expand beyond its capacity. -/// Does not distinguish between `Leaf` and `Internal` because `Root` doesn't. -pub struct SplitResult<'a, K, V> { - // Altered node in existing tree with elements and edges that belong to the left of `k`. - pub left: NodeRef, K, V, marker::LeafOrInternal>, +pub struct SplitResult<'a, K, V, NodeType> { + // Altered node in existing tree with elements and edges that belong to the left of `kv`. + pub left: NodeRef, K, V, NodeType>, // Some key and value split off, to be inserted elsewhere. - pub k: K, - pub v: V, - // Owned, unattached, new node with elements and edges that belong to the right of `k`. - pub right: Root, + pub kv: (K, V), + // Owned, unattached, new node with elements and edges that belong to the right of `kv`. + pub right: NodeRef, +} + +impl<'a, K, V> SplitResult<'a, K, V, marker::Leaf> { + pub fn forget_node_type(self) -> SplitResult<'a, K, V, marker::LeafOrInternal> { + SplitResult { left: self.left.forget_type(), kv: self.kv, right: self.right.forget_type() } + } +} + +impl<'a, K, V> SplitResult<'a, K, V, marker::Internal> { + pub fn forget_node_type(self) -> SplitResult<'a, K, V, marker::LeafOrInternal> { + SplitResult { left: self.left.forget_type(), kv: self.kv, right: self.right.forget_type() } + } } -pub enum InsertResult<'a, K, V, Type> { - Fit(Handle, K, V, Type>, marker::KV>), - Split(SplitResult<'a, K, V>), +pub enum InsertResult<'a, K, V, NodeType> { + Fit(Handle, K, V, NodeType>, marker::KV>), + Split(SplitResult<'a, K, V, NodeType>), } pub mod marker { @@ -1606,20 +1776,33 @@ pub mod marker { pub enum Edge {} } -unsafe fn slice_insert(slice: &mut [T], idx: usize, val: T) { +/// Inserts a value into a slice of initialized elements followed by one uninitialized element. +/// +/// # Safety +/// The slice has more than `idx` elements. 
+unsafe fn slice_insert(slice: &mut [MaybeUninit], idx: usize, val: T) { unsafe { let len = slice.len(); + debug_assert!(len > idx); let slice_ptr = slice.as_mut_ptr(); - ptr::copy(slice_ptr.add(idx), slice_ptr.add(idx + 1), len - idx); - ptr::write(slice_ptr.add(idx), val); + if len > idx + 1 { + ptr::copy(slice_ptr.add(idx), slice_ptr.add(idx + 1), len - idx - 1); + } + (*slice_ptr.add(idx)).write(val); } } -unsafe fn slice_remove(slice: &mut [T], idx: usize) -> T { +/// Removes and returns a value from a slice of all initialized elements, leaving behind one +/// trailing uninitialized element. +/// +/// # Safety +/// The slice has more than `idx` elements. +unsafe fn slice_remove(slice: &mut [MaybeUninit], idx: usize) -> T { unsafe { let len = slice.len(); + debug_assert!(idx < len); let slice_ptr = slice.as_mut_ptr(); - let ret = ptr::read(slice_ptr.add(idx)); + let ret = (*slice_ptr.add(idx)).assume_init_read(); ptr::copy(slice_ptr.add(idx + 1), slice_ptr.add(idx), len - idx - 1); ret } diff --git a/library/alloc/src/collections/btree/node/tests.rs b/library/alloc/src/collections/btree/node/tests.rs index 38c75de34ee..6886962106b 100644 --- a/library/alloc/src/collections/btree/node/tests.rs +++ b/library/alloc/src/collections/btree/node/tests.rs @@ -5,7 +5,7 @@ use crate::string::String; use core::cmp::Ordering::*; impl<'a, K: 'a, V: 'a> NodeRef, K, V, marker::LeafOrInternal> { - /// Asserts that the back pointer in each reachable node points to its parent. + // Asserts that the back pointer in each reachable node points to its parent. pub fn assert_back_pointers(self) { if let ForceResult::Internal(node) = self.force() { for idx in 0..=node.len() { @@ -17,6 +17,9 @@ impl<'a, K: 'a, V: 'a> NodeRef, K, V, marker::LeafOrInternal> } } + // Renders a multi-line display of the keys in order and in tree hierarchy, + // picturing the tree growing sideways from its root on the left to its + // leaves on the right. 
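The two helpers just above shift elements inside a partially initialized buffer and read or write individual slots through `MaybeUninit`. The following self-contained snippet walks the same insert/remove pattern on a small local buffer; the buffer, length bookkeeping, and indices are illustrative only:

```rust
// Demonstrates insertion into and removal from a buffer whose trailing slots
// are uninitialized, mirroring the MaybeUninit-based helpers above.
use std::mem::MaybeUninit;
use std::ptr;

fn main() {
    // Room for 4 elements, 3 of them initialized: [10, 20, 30, _].
    let mut buf: [MaybeUninit<u32>; 4] = [
        MaybeUninit::new(10),
        MaybeUninit::new(20),
        MaybeUninit::new(30),
        MaybeUninit::uninit(),
    ];
    let mut len = 3;

    // Insert 15 at index 1: shift [20, 30] one slot to the right, then write.
    unsafe {
        let p = buf.as_mut_ptr();
        ptr::copy(p.add(1), p.add(2), len - 1);
        (*p.add(1)).write(15);
    }
    len += 1; // buffer is now [10, 15, 20, 30]

    // Remove the element at index 2: read it out, then shift the tail left.
    let removed = unsafe {
        let p = buf.as_mut_ptr();
        let ret = (*p.add(2)).assume_init_read();
        ptr::copy(p.add(3), p.add(2), len - 3);
        ret
    };
    len -= 1; // buffer is now [10, 15, 30, _]

    let init: Vec<u32> = buf[..len].iter().map(|m| unsafe { m.assume_init_read() }).collect();
    assert_eq!(removed, 20);
    assert_eq!(init, [10, 15, 30]);
}
```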
pub fn dump_keys(self) -> String where K: Debug, @@ -54,11 +57,11 @@ fn test_splitpoint() { let mut left_len = middle_kv_idx; let mut right_len = CAPACITY - middle_kv_idx - 1; match insertion { - InsertionPlace::Left(edge_idx) => { + LeftOrRight::Left(edge_idx) => { assert!(edge_idx <= left_len); left_len += 1; } - InsertionPlace::Right(edge_idx) => { + LeftOrRight::Right(edge_idx) => { assert!(edge_idx <= right_len); right_len += 1; } @@ -71,16 +74,19 @@ fn test_splitpoint() { #[test] fn test_partial_cmp_eq() { - let mut root1: Root = Root::new_leaf(); - let mut leaf1 = unsafe { root1.leaf_node_as_mut() }; + let mut root1 = NodeRef::new_leaf(); + let mut leaf1 = root1.borrow_mut(); leaf1.push(1, ()); + let mut root1 = root1.forget_type(); root1.push_internal_level(); - let root2: Root = Root::new_leaf(); + let root2 = Root::new(); + root1.reborrow().assert_back_pointers(); + root2.reborrow().assert_back_pointers(); - let leaf_edge_1a = root1.node_as_ref().first_leaf_edge().forget_node_type(); - let leaf_edge_1b = root1.node_as_ref().last_leaf_edge().forget_node_type(); - let top_edge_1 = root1.node_as_ref().first_edge(); - let top_edge_2 = root2.node_as_ref().first_edge(); + let leaf_edge_1a = root1.reborrow().first_leaf_edge().forget_node_type(); + let leaf_edge_1b = root1.reborrow().last_leaf_edge().forget_node_type(); + let top_edge_1 = root1.reborrow().first_edge(); + let top_edge_2 = root2.reborrow().first_edge(); assert!(leaf_edge_1a == leaf_edge_1a); assert!(leaf_edge_1a != leaf_edge_1b); @@ -97,8 +103,8 @@ fn test_partial_cmp_eq() { assert_eq!(top_edge_1.partial_cmp(&top_edge_2), None); root1.pop_internal_level(); - unsafe { root1.into_ref().deallocate_and_ascend() }; - unsafe { root2.into_ref().deallocate_and_ascend() }; + unsafe { root1.deallocate_and_ascend() }; + unsafe { root2.deallocate_and_ascend() }; } #[test] diff --git a/library/alloc/src/collections/btree/remove.rs b/library/alloc/src/collections/btree/remove.rs index 99655d3e2bf..c4253d4221b 100644 --- a/library/alloc/src/collections/btree/remove.rs +++ b/library/alloc/src/collections/btree/remove.rs @@ -1,133 +1,153 @@ use super::map::MIN_LEN; -use super::node::{marker, ForceResult, Handle, NodeRef}; +use super::node::{marker, ForceResult::*, Handle, LeftOrRight::*, NodeRef}; use super::unwrap_unchecked; use core::mem; -use core::ptr; impl<'a, K: 'a, V: 'a> Handle, K, V, marker::LeafOrInternal>, marker::KV> { - /// Removes a key/value-pair from the map, and returns that pair, as well as - /// the leaf edge corresponding to that former pair. + /// Removes a key/value-pair from the tree, and returns that pair, as well as + /// the leaf edge corresponding to that former pair. It's possible this empties + /// a root node that is internal, which the caller should pop from the map + /// holding the tree. The caller should also decrement the map's length. pub fn remove_kv_tracking( self, handle_emptied_internal_root: F, ) -> ((K, V), Handle, K, V, marker::Leaf>, marker::Edge>) { - let (old_kv, mut pos, was_internal) = match self.force() { - ForceResult::Leaf(leaf) => { - let (old_kv, pos) = leaf.remove(); - (old_kv, pos, false) - } - ForceResult::Internal(mut internal) => { - // Replace the location freed in the internal node with an - // adjacent KV, and remove that adjacent KV from its leaf. - // Always choose the adjacent KV on the left side because - // it is typically faster to pop an element from the end - // of the KV arrays without needing to shift other elements. 
- - let key_loc = internal.kv_mut().0 as *mut K; - let val_loc = internal.kv_mut().1 as *mut V; - - let to_remove = internal.left_edge().descend().last_leaf_edge().left_kv().ok(); - let to_remove = unsafe { unwrap_unchecked(to_remove) }; - - let (kv, pos) = to_remove.remove(); - - let old_key = unsafe { mem::replace(&mut *key_loc, kv.0) }; - let old_val = unsafe { mem::replace(&mut *val_loc, kv.1) }; - - ((old_key, old_val), pos, true) - } - }; - - // Handle underflow - let mut cur_node = unsafe { ptr::read(&pos).into_node().forget_type() }; - let mut at_leaf = true; - while cur_node.len() < MIN_LEN { - match handle_underfull_node(cur_node) { - UnderflowResult::AtRoot => break, - UnderflowResult::Merged(edge, merged_with_left, offset) => { - // If we merged with our right sibling then our tracked - // position has not changed. However if we merged with our - // left sibling then our tracked position is now dangling. - if at_leaf && merged_with_left { - let idx = pos.idx() + offset; - let node = match unsafe { ptr::read(&edge).descend().force() } { - ForceResult::Leaf(leaf) => leaf, - ForceResult::Internal(_) => unreachable!(), - }; - pos = unsafe { Handle::new_edge(node, idx) }; - } + match self.force() { + Leaf(node) => node.remove_leaf_kv(handle_emptied_internal_root), + Internal(node) => node.remove_internal_kv(handle_emptied_internal_root), + } + } +} - let parent = edge.into_node(); - if parent.len() == 0 { - // The parent that was just emptied must be the root, - // because nodes on a lower level would not have been - // left with a single child. - handle_emptied_internal_root(); - break; +impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Leaf>, marker::KV> { + fn remove_leaf_kv( + self, + handle_emptied_internal_root: F, + ) -> ((K, V), Handle, K, V, marker::Leaf>, marker::Edge>) { + let (old_kv, mut pos) = self.remove(); + let len = pos.reborrow().into_node().len(); + if len < MIN_LEN { + let idx = pos.idx(); + // We have to temporarily forget the child type, because there is no + // distinct node type for the immediate parents of a leaf. + let new_pos = match pos.into_node().forget_type().choose_parent_kv() { + Ok(Left(left_parent_kv)) => { + debug_assert!(left_parent_kv.right_child_len() == MIN_LEN - 1); + if left_parent_kv.can_merge() { + left_parent_kv.merge(Some(Right(idx))) } else { - cur_node = parent.forget_type(); - at_leaf = false; + debug_assert!(left_parent_kv.left_child_len() > MIN_LEN); + left_parent_kv.steal_left(idx) } } - UnderflowResult::Stole(stole_from_left) => { - // Adjust the tracked position if we stole from a left sibling - if stole_from_left && at_leaf { - // SAFETY: This is safe since we just added an element to our node. - unsafe { - pos.move_next_unchecked(); - } + Ok(Right(right_parent_kv)) => { + debug_assert!(right_parent_kv.left_child_len() == MIN_LEN - 1); + if right_parent_kv.can_merge() { + right_parent_kv.merge(Some(Left(idx))) + } else { + debug_assert!(right_parent_kv.right_child_len() > MIN_LEN); + right_parent_kv.steal_right(idx) } - break; } - } - } + Err(pos) => unsafe { Handle::new_edge(pos, idx) }, + }; + // SAFETY: `new_pos` is the leaf we started from or a sibling. + pos = unsafe { new_pos.cast_to_leaf_unchecked() }; - // If we deleted from an internal node then we need to compensate for - // the earlier swap and adjust the tracked position to point to the - // next element. 
- if was_internal { - pos = unsafe { unwrap_unchecked(pos.next_kv().ok()).next_leaf_edge() }; + // Only if we merged, the parent (if any) has shrunk, but skipping + // the following step does not pay off in benchmarks. + // + // SAFETY: We won't destroy or rearrange the leaf where `pos` is at + // by handling its parent recursively; at worst we will destroy or + // rearrange the parent through the grandparent, thus change the + // leaf's parent pointer. + if let Ok(parent) = unsafe { pos.reborrow_mut() }.into_node().ascend() { + parent.into_node().handle_shrunk_node_recursively(handle_emptied_internal_root); + } } - (old_kv, pos) } } -enum UnderflowResult<'a, K, V> { - AtRoot, - Merged(Handle, K, V, marker::Internal>, marker::Edge>, bool, usize), - Stole(bool), -} +impl<'a, K: 'a, V: 'a> Handle, K, V, marker::Internal>, marker::KV> { + fn remove_internal_kv( + self, + handle_emptied_internal_root: F, + ) -> ((K, V), Handle, K, V, marker::Leaf>, marker::Edge>) { + // Remove an adjacent KV from its leaf and then put it back in place of + // the element we were asked to remove. Prefer the left adjacent KV, + // for the reasons listed in `choose_parent_kv`. + let left_leaf_kv = self.left_edge().descend().last_leaf_edge().left_kv(); + let left_leaf_kv = unsafe { unwrap_unchecked(left_leaf_kv.ok()) }; + let (left_kv, left_hole) = left_leaf_kv.remove_leaf_kv(handle_emptied_internal_root); -fn handle_underfull_node<'a, K: 'a, V: 'a>( - node: NodeRef, K, V, marker::LeafOrInternal>, -) -> UnderflowResult<'_, K, V> { - let parent = match node.ascend() { - Ok(parent) => parent, - Err(_) => return UnderflowResult::AtRoot, - }; + // The internal node may have been stolen from or merged. Go back right + // to find where the original KV ended up. + let mut internal = unsafe { unwrap_unchecked(left_hole.next_kv().ok()) }; + let old_key = mem::replace(internal.kv_mut().0, left_kv.0); + let old_val = mem::replace(internal.kv_mut().1, left_kv.1); + let pos = internal.next_leaf_edge(); + ((old_key, old_val), pos) + } +} - // Prefer the left KV if it exists. Merging with the left side is faster, - // since merging happens towards the left and `node` has fewer elements. - // Stealing from the left side is faster, since we can pop from the end of - // the KV arrays. - let (is_left, mut handle) = match parent.left_kv() { - Ok(left) => (true, left), - Err(parent) => { - let right = unsafe { unwrap_unchecked(parent.right_kv().ok()) }; - (false, right) +impl<'a, K: 'a, V: 'a> NodeRef, K, V, marker::Internal> { + /// Stocks up a possibly underfull internal node, recursively. + /// Climbs up until it reaches an ancestor that has elements to spare or the root. + fn handle_shrunk_node_recursively(mut self, handle_emptied_internal_root: F) { + loop { + self = match self.len() { + 0 => { + // An empty node must be the root, because length is only + // reduced by one, and non-root underfull nodes are stocked up, + // so non-root nodes never have fewer than MIN_LEN - 1 elements. 
+ debug_assert!(self.ascend().is_err()); + handle_emptied_internal_root(); + return; + } + 1..MIN_LEN => { + if let Some(parent) = self.handle_underfull_node_locally() { + parent + } else { + return; + } + } + _ => return, + } } - }; + } - if handle.can_merge() { - let offset = if is_left { handle.reborrow().left_edge().descend().len() + 1 } else { 0 }; - UnderflowResult::Merged(handle.merge(), is_left, offset) - } else { - if is_left { - handle.steal_left(); - } else { - handle.steal_right(); + /// Stocks up an underfull internal node, possibly at the cost of shrinking + /// its parent instead, which is then returned. + fn handle_underfull_node_locally( + self, + ) -> Option, K, V, marker::Internal>> { + match self.forget_type().choose_parent_kv() { + Ok(Left(left_parent_kv)) => { + debug_assert!(left_parent_kv.right_child_len() == MIN_LEN - 1); + if left_parent_kv.can_merge() { + let pos = left_parent_kv.merge(None); + let parent_edge = unsafe { unwrap_unchecked(pos.into_node().ascend().ok()) }; + Some(parent_edge.into_node()) + } else { + debug_assert!(left_parent_kv.left_child_len() > MIN_LEN); + left_parent_kv.steal_left(0); + None + } + } + Ok(Right(right_parent_kv)) => { + debug_assert!(right_parent_kv.left_child_len() == MIN_LEN - 1); + if right_parent_kv.can_merge() { + let pos = right_parent_kv.merge(None); + let parent_edge = unsafe { unwrap_unchecked(pos.into_node().ascend().ok()) }; + Some(parent_edge.into_node()) + } else { + debug_assert!(right_parent_kv.right_child_len() > MIN_LEN); + right_parent_kv.steal_right(0); + None + } + } + Err(_) => None, } - UnderflowResult::Stole(is_left) } } diff --git a/library/alloc/src/collections/btree/search.rs b/library/alloc/src/collections/btree/search.rs index 1526c0673c6..93de2d829ac 100644 --- a/library/alloc/src/collections/btree/search.rs +++ b/library/alloc/src/collections/btree/search.rs @@ -50,7 +50,7 @@ where { match search_linear(&node, key) { (idx, true) => Found(unsafe { Handle::new_kv(node, idx) }), - (idx, false) => SearchResult::GoDown(unsafe { Handle::new_edge(node, idx) }), + (idx, false) => GoDown(unsafe { Handle::new_edge(node, idx) }), } } @@ -72,7 +72,7 @@ where // is an index -- not a reference. let len = node.len(); for i in 0..len { - let k = unsafe { node.key_at(i) }; + let k = unsafe { node.reborrow().key_at(i) }; match key.cmp(k.borrow()) { Ordering::Greater => {} Ordering::Equal => return (i, true), diff --git a/library/alloc/src/collections/btree/set.rs b/library/alloc/src/collections/btree/set.rs index 684019f8f5f..1a807100653 100644 --- a/library/alloc/src/collections/btree/set.rs +++ b/library/alloc/src/collections/btree/set.rs @@ -798,6 +798,30 @@ impl BTreeSet { Recover::take(&mut self.map, value) } + /// Retains only the elements specified by the predicate. + /// + /// In other words, remove all elements `e` such that `f(&e)` returns `false`. + /// + /// # Examples + /// + /// ``` + /// #![feature(btree_retain)] + /// use std::collections::BTreeSet; + /// + /// let xs = [1, 2, 3, 4, 5, 6]; + /// let mut set: BTreeSet = xs.iter().cloned().collect(); + /// // Keep only the even numbers. + /// set.retain(|&k| k % 2 == 0); + /// assert!(set.iter().eq([2, 4, 6].iter())); + /// ``` + #[unstable(feature = "btree_retain", issue = "79025")] + pub fn retain(&mut self, mut f: F) + where + F: FnMut(&T) -> bool, + { + self.drain_filter(|v| !f(v)); + } + /// Moves all elements from `other` into `Self`, leaving `other` empty. 
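The new `retain` above drives `drain_filter` with the negated predicate, so every element for which `f` returns `false` is removed. A stable-API sketch of the same semantics, which collects the doomed elements before removing them rather than draining in place (`retain_by_removal` is a hypothetical helper, less efficient and purely illustrative):

```rust
use std::collections::BTreeSet;

// Keeps only the elements for which `f` returns true, by explicit removal.
fn retain_by_removal<T: Ord + Clone>(set: &mut BTreeSet<T>, mut f: impl FnMut(&T) -> bool) {
    let doomed: Vec<T> = set.iter().filter(|&e| !f(e)).cloned().collect();
    for e in doomed {
        set.remove(&e);
    }
}

fn main() {
    let mut set: BTreeSet<i32> = (1..=6).collect();
    retain_by_removal(&mut set, |&k| k % 2 == 0); // keep only the even numbers
    assert!(set.iter().eq([2, 4, 6].iter()));
}
```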
/// /// # Examples diff --git a/library/alloc/src/collections/btree/set/tests.rs b/library/alloc/src/collections/btree/set/tests.rs index 52cde8299e4..4d05bc4ebfa 100644 --- a/library/alloc/src/collections/btree/set/tests.rs +++ b/library/alloc/src/collections/btree/set/tests.rs @@ -1,9 +1,10 @@ use super::super::DeterministicRng; use super::*; use crate::vec::Vec; +use std::cmp::Ordering; use std::iter::FromIterator; use std::panic::{catch_unwind, AssertUnwindSafe}; -use std::sync::atomic::{AtomicU32, Ordering}; +use std::sync::atomic::{AtomicU32, Ordering::SeqCst}; #[test] fn test_clone_eq() { @@ -324,6 +325,17 @@ fn test_is_subset() { assert_eq!(is_subset(&[99, 100], &large), false); } +#[test] +fn test_retain() { + let xs = [1, 2, 3, 4, 5, 6]; + let mut set: BTreeSet = xs.iter().cloned().collect(); + set.retain(|&k| k % 2 == 0); + assert_eq!(set.len(), 3); + assert!(set.contains(&2)); + assert!(set.contains(&4)); + assert!(set.contains(&6)); +} + #[test] fn test_drain_filter() { let mut x: BTreeSet<_> = [1].iter().copied().collect(); @@ -344,7 +356,7 @@ fn test_drain_filter_drop_panic_leak() { struct D(i32); impl Drop for D { fn drop(&mut self) { - if DROPS.fetch_add(1, Ordering::SeqCst) == 1 { + if DROPS.fetch_add(1, SeqCst) == 1 { panic!("panic in `drop`"); } } @@ -357,14 +369,14 @@ fn test_drain_filter_drop_panic_leak() { catch_unwind(move || { drop(set.drain_filter(|d| { - PREDS.fetch_add(1u32 << d.0, Ordering::SeqCst); + PREDS.fetch_add(1u32 << d.0, SeqCst); true })) }) .ok(); - assert_eq!(PREDS.load(Ordering::SeqCst), 0x011); - assert_eq!(DROPS.load(Ordering::SeqCst), 3); + assert_eq!(PREDS.load(SeqCst), 0x011); + assert_eq!(DROPS.load(SeqCst), 3); } #[test] @@ -376,7 +388,7 @@ fn test_drain_filter_pred_panic_leak() { struct D(i32); impl Drop for D { fn drop(&mut self) { - DROPS.fetch_add(1, Ordering::SeqCst); + DROPS.fetch_add(1, SeqCst); } } @@ -387,7 +399,7 @@ fn test_drain_filter_pred_panic_leak() { catch_unwind(AssertUnwindSafe(|| { drop(set.drain_filter(|d| { - PREDS.fetch_add(1u32 << d.0, Ordering::SeqCst); + PREDS.fetch_add(1u32 << d.0, SeqCst); match d.0 { 0 => true, _ => panic!(), @@ -396,8 +408,8 @@ fn test_drain_filter_pred_panic_leak() { })) .ok(); - assert_eq!(PREDS.load(Ordering::SeqCst), 0x011); - assert_eq!(DROPS.load(Ordering::SeqCst), 1); + assert_eq!(PREDS.load(SeqCst), 0x011); + assert_eq!(DROPS.load(SeqCst), 1); assert_eq!(set.len(), 2); assert_eq!(set.first().unwrap().0, 4); assert_eq!(set.last().unwrap().0, 8); @@ -487,8 +499,6 @@ fn test_extend_ref() { #[test] fn test_recovery() { - use std::cmp::Ordering; - #[derive(Debug)] struct Foo(&'static str, i32); diff --git a/library/alloc/src/collections/btree/split.rs b/library/alloc/src/collections/btree/split.rs index 5f00a5a25ab..701f36c37ee 100644 --- a/library/alloc/src/collections/btree/split.rs +++ b/library/alloc/src/collections/btree/split.rs @@ -9,7 +9,7 @@ impl Root { K: Borrow, { debug_assert!(right_root.height() == 0); - debug_assert!(right_root.node_as_ref().len() == 0); + debug_assert!(right_root.len() == 0); let left_root = self; for _ in 0..left_root.height() { @@ -17,8 +17,8 @@ impl Root { } { - let mut left_node = left_root.node_as_mut(); - let mut right_node = right_root.node_as_mut(); + let mut left_node = left_root.borrow_mut(); + let mut right_node = right_root.borrow_mut(); loop { let mut split_edge = match search_node(left_node, key) { @@ -48,7 +48,7 @@ impl Root { /// Removes empty levels on the top, but keeps an empty leaf if the entire tree is empty. 
fn fix_top(&mut self) { - while self.height() > 0 && self.node_as_ref().len() == 0 { + while self.height() > 0 && self.len() == 0 { self.pop_internal_level(); } } @@ -57,20 +57,20 @@ impl Root { self.fix_top(); { - let mut cur_node = self.node_as_mut(); + let mut cur_node = self.borrow_mut(); while let Internal(node) = cur_node.force() { - let mut last_kv = node.last_kv(); + let mut last_kv = node.last_kv().consider_for_balancing(); if last_kv.can_merge() { - cur_node = last_kv.merge().descend(); + cur_node = last_kv.merge(None).into_node(); } else { - let right_len = last_kv.reborrow().right_edge().descend().len(); + let right_len = last_kv.right_child_len(); // `MIN_LEN + 1` to avoid readjust if merge happens on the next level. if right_len < MIN_LEN + 1 { last_kv.bulk_steal_left(MIN_LEN + 1 - right_len); } - cur_node = last_kv.right_edge().descend(); + cur_node = last_kv.into_right_child(); } } } @@ -83,20 +83,20 @@ impl Root { self.fix_top(); { - let mut cur_node = self.node_as_mut(); + let mut cur_node = self.borrow_mut(); while let Internal(node) = cur_node.force() { - let mut first_kv = node.first_kv(); + let mut first_kv = node.first_kv().consider_for_balancing(); if first_kv.can_merge() { - cur_node = first_kv.merge().descend(); + cur_node = first_kv.merge(None).into_node(); } else { - let left_len = first_kv.reborrow().left_edge().descend().len(); + let left_len = first_kv.left_child_len(); // `MIN_LEN + 1` to avoid readjust if merge happens on the next level. if left_len < MIN_LEN + 1 { first_kv.bulk_steal_right(MIN_LEN + 1 - left_len); } - cur_node = first_kv.left_edge().descend(); + cur_node = first_kv.into_left_child(); } } } diff --git a/library/alloc/src/collections/mod.rs b/library/alloc/src/collections/mod.rs index 6b21e54f66a..8213e904fba 100644 --- a/library/alloc/src/collections/mod.rs +++ b/library/alloc/src/collections/mod.rs @@ -41,7 +41,7 @@ pub use linked_list::LinkedList; #[doc(no_inline)] pub use vec_deque::VecDeque; -use crate::alloc::{Layout, LayoutErr}; +use crate::alloc::{Layout, LayoutError}; use core::fmt::Display; /// The error type for `try_reserve` methods. @@ -71,9 +71,9 @@ pub enum TryReserveError { } #[unstable(feature = "try_reserve", reason = "new API", issue = "48043")] -impl From for TryReserveError { +impl From for TryReserveError { #[inline] - fn from(_: LayoutErr) -> Self { + fn from(_: LayoutError) -> Self { TryReserveError::CapacityOverflow } } diff --git a/library/alloc/src/collections/vec_deque/into_iter.rs b/library/alloc/src/collections/vec_deque/into_iter.rs new file mode 100644 index 00000000000..465b058cd98 --- /dev/null +++ b/library/alloc/src/collections/vec_deque/into_iter.rs @@ -0,0 +1,57 @@ +use core::fmt; +use core::iter::FusedIterator; + +use super::VecDeque; + +/// An owning iterator over the elements of a `VecDeque`. +/// +/// This `struct` is created by the [`into_iter`] method on [`VecDeque`] +/// (provided by the `IntoIterator` trait). See its documentation for more. 
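As a usage sketch over the public API: the owning iterator defined in this new module yields elements front to back and, being double-ended, can also be consumed from the back, matching the `pop_front`/`pop_back` based implementation below.

```rust
use std::collections::VecDeque;

fn main() {
    let deque: VecDeque<i32> = (1..=5).collect();

    // Forward: equivalent to repeatedly calling pop_front.
    let forward: Vec<i32> = deque.clone().into_iter().collect();
    assert_eq!(forward, [1, 2, 3, 4, 5]);

    // Backward: equivalent to repeatedly calling pop_back.
    let backward: Vec<i32> = deque.into_iter().rev().collect();
    assert_eq!(backward, [5, 4, 3, 2, 1]);
}
```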
+/// +/// [`into_iter`]: VecDeque::into_iter +#[derive(Clone)] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct IntoIter { + pub(crate) inner: VecDeque, +} + +#[stable(feature = "collection_debug", since = "1.17.0")] +impl fmt::Debug for IntoIter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("IntoIter").field(&self.inner).finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for IntoIter { + type Item = T; + + #[inline] + fn next(&mut self) -> Option { + self.inner.pop_front() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let len = self.inner.len(); + (len, Some(len)) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for IntoIter { + #[inline] + fn next_back(&mut self) -> Option { + self.inner.pop_back() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for IntoIter { + fn is_empty(&self) -> bool { + self.inner.is_empty() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for IntoIter {} diff --git a/library/alloc/src/collections/vec_deque/iter.rs b/library/alloc/src/collections/vec_deque/iter.rs new file mode 100644 index 00000000000..ad31b991cb6 --- /dev/null +++ b/library/alloc/src/collections/vec_deque/iter.rs @@ -0,0 +1,159 @@ +use core::fmt; +use core::iter::FusedIterator; +use core::ops::Try; + +use super::{count, wrap_index, RingSlices}; + +/// An iterator over the elements of a `VecDeque`. +/// +/// This `struct` is created by the [`iter`] method on [`super::VecDeque`]. See its +/// documentation for more. +/// +/// [`iter`]: super::VecDeque::iter +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Iter<'a, T: 'a> { + pub(crate) ring: &'a [T], + pub(crate) tail: usize, + pub(crate) head: usize, +} + +#[stable(feature = "collection_debug", since = "1.17.0")] +impl fmt::Debug for Iter<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); + f.debug_tuple("Iter").field(&front).field(&back).finish() + } +} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +#[stable(feature = "rust1", since = "1.0.0")] +impl Clone for Iter<'_, T> { + fn clone(&self) -> Self { + Iter { ring: self.ring, tail: self.tail, head: self.head } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<'a, T> Iterator for Iter<'a, T> { + type Item = &'a T; + + #[inline] + fn next(&mut self) -> Option<&'a T> { + if self.tail == self.head { + return None; + } + let tail = self.tail; + self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len()); + unsafe { Some(self.ring.get_unchecked(tail)) } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let len = count(self.tail, self.head, self.ring.len()); + (len, Some(len)) + } + + fn fold(self, mut accum: Acc, mut f: F) -> Acc + where + F: FnMut(Acc, Self::Item) -> Acc, + { + let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); + accum = front.iter().fold(accum, &mut f); + back.iter().fold(accum, &mut f) + } + + fn try_fold(&mut self, init: B, mut f: F) -> R + where + Self: Sized, + F: FnMut(B, Self::Item) -> R, + R: Try, + { + let (mut iter, final_res); + if self.tail <= self.head { + // single slice self.ring[self.tail..self.head] + iter = self.ring[self.tail..self.head].iter(); + final_res = iter.try_fold(init, &mut f); + } else { + // two slices: self.ring[self.tail..], self.ring[..self.head] + let (front, back) = 
self.ring.split_at(self.tail); + let mut back_iter = back.iter(); + let res = back_iter.try_fold(init, &mut f); + let len = self.ring.len(); + self.tail = (self.ring.len() - back_iter.len()) & (len - 1); + iter = front[..self.head].iter(); + final_res = iter.try_fold(res?, &mut f); + } + self.tail = self.head - iter.len(); + final_res + } + + fn nth(&mut self, n: usize) -> Option { + if n >= count(self.tail, self.head, self.ring.len()) { + self.tail = self.head; + None + } else { + self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len()); + self.next() + } + } + + #[inline] + fn last(mut self) -> Option<&'a T> { + self.next_back() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<'a, T> DoubleEndedIterator for Iter<'a, T> { + #[inline] + fn next_back(&mut self) -> Option<&'a T> { + if self.tail == self.head { + return None; + } + self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len()); + unsafe { Some(self.ring.get_unchecked(self.head)) } + } + + fn rfold(self, mut accum: Acc, mut f: F) -> Acc + where + F: FnMut(Acc, Self::Item) -> Acc, + { + let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); + accum = back.iter().rfold(accum, &mut f); + front.iter().rfold(accum, &mut f) + } + + fn try_rfold(&mut self, init: B, mut f: F) -> R + where + Self: Sized, + F: FnMut(B, Self::Item) -> R, + R: Try, + { + let (mut iter, final_res); + if self.tail <= self.head { + // single slice self.ring[self.tail..self.head] + iter = self.ring[self.tail..self.head].iter(); + final_res = iter.try_rfold(init, &mut f); + } else { + // two slices: self.ring[self.tail..], self.ring[..self.head] + let (front, back) = self.ring.split_at(self.tail); + let mut front_iter = front[..self.head].iter(); + let res = front_iter.try_rfold(init, &mut f); + self.head = front_iter.len(); + iter = back.iter(); + final_res = iter.try_rfold(res?, &mut f); + } + self.head = self.tail + iter.len(); + final_res + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Iter<'_, T> { + fn is_empty(&self) -> bool { + self.head == self.tail + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Iter<'_, T> {} diff --git a/library/alloc/src/collections/vec_deque/iter_mut.rs b/library/alloc/src/collections/vec_deque/iter_mut.rs new file mode 100644 index 00000000000..3d0c3094e26 --- /dev/null +++ b/library/alloc/src/collections/vec_deque/iter_mut.rs @@ -0,0 +1,128 @@ +use core::fmt; +use core::iter::FusedIterator; +use core::marker::PhantomData; + +use super::{count, wrap_index, RingSlices}; + +/// A mutable iterator over the elements of a `VecDeque`. +/// +/// This `struct` is created by the [`iter_mut`] method on [`super::VecDeque`]. See its +/// documentation for more. +/// +/// [`iter_mut`]: super::VecDeque::iter_mut +#[stable(feature = "rust1", since = "1.0.0")] +pub struct IterMut<'a, T: 'a> { + // Internal safety invariant: the entire slice is dereferencable. + pub(crate) ring: *mut [T], + pub(crate) tail: usize, + pub(crate) head: usize, + pub(crate) phantom: PhantomData<&'a mut [T]>, +} + +// SAFETY: we do nothing thread-local and there is no interior mutability, +// so the usual structural `Send`/`Sync` apply. 
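The `tail`/`head` bookkeeping above means the live elements occupy at most two contiguous runs of the ring. That layout is observable through the public `as_slices` API, which the following sketch uses to confirm that iteration visits the front run first and then the wrapped-around run; whether the buffer actually wraps depends on its capacity, which this example does not rely on.

```rust
use std::collections::VecDeque;

fn main() {
    let mut deque: VecDeque<i32> = VecDeque::with_capacity(4);
    deque.extend([1, 2, 3]);
    deque.pop_front();  // the logical front moves forward in the ring
    deque.push_back(4);
    deque.push_back(5); // may wrap past the end of the allocation

    // `front` and `back` are the two contiguous runs the iterators walk.
    let (front, back) = deque.as_slices();
    let via_slices: Vec<i32> = front.iter().chain(back).copied().collect();
    let via_iter: Vec<i32> = deque.iter().copied().collect();
    assert_eq!(via_slices, via_iter);
    assert_eq!(via_iter, [2, 3, 4, 5]);
}
```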
+#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl Send for IterMut<'_, T> {} +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl Sync for IterMut<'_, T> {} + +#[stable(feature = "collection_debug", since = "1.17.0")] +impl fmt::Debug for IterMut<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); + // SAFETY: these are the elements we have not handed out yet, so aliasing is fine. + // The `IterMut` invariant also ensures everything is dereferencable. + let (front, back) = unsafe { (&*front, &*back) }; + f.debug_tuple("IterMut").field(&front).field(&back).finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<'a, T> Iterator for IterMut<'a, T> { + type Item = &'a mut T; + + #[inline] + fn next(&mut self) -> Option<&'a mut T> { + if self.tail == self.head { + return None; + } + let tail = self.tail; + self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len()); + + unsafe { + let elem = self.ring.get_unchecked_mut(tail); + Some(&mut *elem) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let len = count(self.tail, self.head, self.ring.len()); + (len, Some(len)) + } + + fn fold(self, mut accum: Acc, mut f: F) -> Acc + where + F: FnMut(Acc, Self::Item) -> Acc, + { + let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); + // SAFETY: these are the elements we have not handed out yet, so aliasing is fine. + // The `IterMut` invariant also ensures everything is dereferencable. + let (front, back) = unsafe { (&mut *front, &mut *back) }; + accum = front.iter_mut().fold(accum, &mut f); + back.iter_mut().fold(accum, &mut f) + } + + fn nth(&mut self, n: usize) -> Option { + if n >= count(self.tail, self.head, self.ring.len()) { + self.tail = self.head; + None + } else { + self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len()); + self.next() + } + } + + #[inline] + fn last(mut self) -> Option<&'a mut T> { + self.next_back() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { + #[inline] + fn next_back(&mut self) -> Option<&'a mut T> { + if self.tail == self.head { + return None; + } + self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len()); + + unsafe { + let elem = self.ring.get_unchecked_mut(self.head); + Some(&mut *elem) + } + } + + fn rfold(self, mut accum: Acc, mut f: F) -> Acc + where + F: FnMut(Acc, Self::Item) -> Acc, + { + let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); + // SAFETY: these are the elements we have not handed out yet, so aliasing is fine. + // The `IterMut` invariant also ensures everything is dereferencable. + let (front, back) = unsafe { (&mut *front, &mut *back) }; + accum = back.iter_mut().rfold(accum, &mut f); + front.iter_mut().rfold(accum, &mut f) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for IterMut<'_, T> { + fn is_empty(&self) -> bool { + self.head == self.tail + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for IterMut<'_, T> {} diff --git a/library/alloc/src/collections/vec_deque/macros.rs b/library/alloc/src/collections/vec_deque/macros.rs new file mode 100644 index 00000000000..0d59d312cf4 --- /dev/null +++ b/library/alloc/src/collections/vec_deque/macros.rs @@ -0,0 +1,19 @@ +macro_rules! 
__impl_slice_eq1 { + ([$($vars:tt)*] $lhs:ty, $rhs:ty, $($constraints:tt)*) => { + #[stable(feature = "vec_deque_partial_eq_slice", since = "1.17.0")] + impl PartialEq<$rhs> for $lhs + where + A: PartialEq, + $($constraints)* + { + fn eq(&self, other: &$rhs) -> bool { + if self.len() != other.len() { + return false; + } + let (sa, sb) = self.as_slices(); + let (oa, ob) = other[..].split_at(sa.len()); + sa == oa && sb == ob + } + } + } +} diff --git a/library/alloc/src/collections/vec_deque.rs b/library/alloc/src/collections/vec_deque/mod.rs similarity index 86% rename from library/alloc/src/collections/vec_deque.rs rename to library/alloc/src/collections/vec_deque/mod.rs index 22b02a4f849..85c809e0d18 100644 --- a/library/alloc/src/collections/vec_deque.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -7,16 +7,13 @@ #![stable(feature = "rust1", since = "1.0.0")] -// ignore-tidy-filelength - -use core::array; use core::cmp::{self, Ordering}; use core::fmt; use core::hash::{Hash, Hasher}; -use core::iter::{repeat_with, FromIterator, FusedIterator}; +use core::iter::{repeat_with, FromIterator}; use core::marker::PhantomData; -use core::mem::{self, replace, ManuallyDrop}; -use core::ops::{Index, IndexMut, Range, RangeBounds, Try}; +use core::mem::{self, ManuallyDrop}; +use core::ops::{Index, IndexMut, Range, RangeBounds}; use core::ptr::{self, NonNull}; use core::slice; @@ -24,11 +21,37 @@ use crate::collections::TryReserveError; use crate::raw_vec::RawVec; use crate::vec::Vec; +#[macro_use] +mod macros; + #[stable(feature = "drain", since = "1.6.0")] pub use self::drain::Drain; mod drain; +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::iter_mut::IterMut; + +mod iter_mut; + +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::into_iter::IntoIter; + +mod into_iter; + +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::iter::Iter; + +mod iter; + +use self::pair_slices::PairSlices; + +mod pair_slices; + +use self::ring_slices::RingSlices; + +mod ring_slices; + #[cfg(test)] mod tests; @@ -68,67 +91,6 @@ pub struct VecDeque { buf: RawVec, } -/// PairSlices pairs up equal length slice parts of two deques -/// -/// For example, given deques "A" and "B" with the following division into slices: -/// -/// A: [0 1 2] [3 4 5] -/// B: [a b] [c d e] -/// -/// It produces the following sequence of matching slices: -/// -/// ([0 1], [a b]) -/// (\[2\], \[c\]) -/// ([3 4], [d e]) -/// -/// and the uneven remainder of either A or B is skipped. -struct PairSlices<'a, 'b, T> { - a0: &'a mut [T], - a1: &'a mut [T], - b0: &'b [T], - b1: &'b [T], -} - -impl<'a, 'b, T> PairSlices<'a, 'b, T> { - fn from(to: &'a mut VecDeque, from: &'b VecDeque) -> Self { - let (a0, a1) = to.as_mut_slices(); - let (b0, b1) = from.as_slices(); - PairSlices { a0, a1, b0, b1 } - } - - fn has_remainder(&self) -> bool { - !self.b0.is_empty() - } - - fn remainder(self) -> impl Iterator { - array::IntoIter::new([self.b0, self.b1]) - } -} - -impl<'a, 'b, T> Iterator for PairSlices<'a, 'b, T> { - type Item = (&'a mut [T], &'b [T]); - fn next(&mut self) -> Option { - // Get next part length - let part = cmp::min(self.a0.len(), self.b0.len()); - if part == 0 { - return None; - } - let (p0, p1) = replace(&mut self.a0, &mut []).split_at_mut(part); - let (q0, q1) = self.b0.split_at(part); - - // Move a1 into a0, if it's empty (and b1, b0 the same way). 
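The slice-equality macro above compares a deque against a contiguous slice by splitting that slice at the length of the deque's first internal run. The same strategy expressed over the public API; `deque_eq_slice` is a hypothetical helper, not part of the library:

```rust
use std::collections::VecDeque;

// Compares a deque's two internal runs against a single contiguous slice.
fn deque_eq_slice(deque: &VecDeque<i32>, other: &[i32]) -> bool {
    if deque.len() != other.len() {
        return false;
    }
    let (sa, sb) = deque.as_slices();
    let (oa, ob) = other.split_at(sa.len());
    sa == oa && sb == ob
}

fn main() {
    let mut deque: VecDeque<i32> = (1..=4).collect();
    deque.rotate_left(2); // likely leaves the storage split into two runs
    assert_eq!(deque.iter().copied().collect::<Vec<_>>(), [3, 4, 1, 2]);
    assert!(deque_eq_slice(&deque, &[3, 4, 1, 2]));
    assert!(!deque_eq_slice(&deque, &[3, 4, 1]));
}
```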
- self.a0 = p1; - self.b0 = q1; - if self.a0.is_empty() { - self.a0 = replace(&mut self.a1, &mut []); - } - if self.b0.is_empty() { - self.b0 = replace(&mut self.b1, &[]); - } - Some((p0, q0)) - } -} - #[stable(feature = "rust1", since = "1.0.0")] impl Clone for VecDeque { fn clone(&self) -> VecDeque { @@ -2000,7 +1962,7 @@ impl VecDeque { /// ``` /// use std::collections::VecDeque; /// - /// let mut buf: VecDeque<_> = vec![1,2,3].into_iter().collect(); + /// let mut buf: VecDeque<_> = vec![1, 2, 3].into_iter().collect(); /// let buf2 = buf.split_off(1); /// assert_eq!(buf, [1]); /// assert_eq!(buf2, [2, 3]); @@ -2552,10 +2514,10 @@ impl VecDeque { /// (3, 1), (1, 2), (2, 3), (4, 5), (5, 8), (3, 13), /// (1, 21), (2, 34), (4, 55)].into(); /// - /// assert_eq!(deque.binary_search_by_key(&13, |&(a,b)| b), Ok(9)); - /// assert_eq!(deque.binary_search_by_key(&4, |&(a,b)| b), Err(7)); - /// assert_eq!(deque.binary_search_by_key(&100, |&(a,b)| b), Err(13)); - /// let r = deque.binary_search_by_key(&1, |&(a,b)| b); + /// assert_eq!(deque.binary_search_by_key(&13, |&(a, b)| b), Ok(9)); + /// assert_eq!(deque.binary_search_by_key(&4, |&(a, b)| b), Err(7)); + /// assert_eq!(deque.binary_search_by_key(&100, |&(a, b)| b), Err(13)); + /// let r = deque.binary_search_by_key(&1, |&(a, b)| b); /// assert!(matches!(r, Ok(1..=4))); /// ``` #[unstable(feature = "vecdeque_binary_search", issue = "78021")] @@ -2605,61 +2567,6 @@ fn wrap_index(index: usize, size: usize) -> usize { index & (size - 1) } -/// Returns the two slices that cover the `VecDeque`'s valid range -trait RingSlices: Sized { - fn slice(self, from: usize, to: usize) -> Self; - fn split_at(self, i: usize) -> (Self, Self); - - fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) { - let contiguous = tail <= head; - if contiguous { - let (empty, buf) = buf.split_at(0); - (buf.slice(tail, head), empty) - } else { - let (mid, right) = buf.split_at(tail); - let (left, _) = mid.split_at(head); - (right, left) - } - } -} - -impl RingSlices for &[T] { - fn slice(self, from: usize, to: usize) -> Self { - &self[from..to] - } - fn split_at(self, i: usize) -> (Self, Self) { - (*self).split_at(i) - } -} - -impl RingSlices for &mut [T] { - fn slice(self, from: usize, to: usize) -> Self { - &mut self[from..to] - } - fn split_at(self, i: usize) -> (Self, Self) { - (*self).split_at_mut(i) - } -} - -impl RingSlices for *mut [T] { - fn slice(self, from: usize, to: usize) -> Self { - assert!(from <= to && to < self.len()); - // Not using `get_unchecked_mut` to keep this a safe operation. - let len = to - from; - ptr::slice_from_raw_parts_mut(self.as_mut_ptr().wrapping_add(from), len) - } - - fn split_at(self, mid: usize) -> (Self, Self) { - let len = self.len(); - let ptr = self.as_mut_ptr(); - assert!(mid <= len); - ( - ptr::slice_from_raw_parts_mut(ptr, mid), - ptr::slice_from_raw_parts_mut(ptr.wrapping_add(mid), len - mid), - ) - } -} - /// Calculate the number of elements left to be read in the buffer #[inline] fn count(tail: usize, head: usize, size: usize) -> usize { @@ -2667,336 +2574,6 @@ fn count(tail: usize, head: usize, size: usize) -> usize { (head.wrapping_sub(tail)) & (size - 1) } -/// An iterator over the elements of a `VecDeque`. -/// -/// This `struct` is created by the [`iter`] method on [`VecDeque`]. See its -/// documentation for more. 
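`wrap_index` and `count` above rely on the ring capacity being a power of two, so modular wrap-around reduces to a bit mask. Extracted into a runnable form with a few concrete values:

```rust
// Same arithmetic as the helpers above, exercised on concrete indices.
fn wrap_index(index: usize, size: usize) -> usize {
    debug_assert!(size.is_power_of_two());
    index & (size - 1)
}

/// Number of occupied slots between `tail` (inclusive) and `head` (exclusive).
fn count(tail: usize, head: usize, size: usize) -> usize {
    head.wrapping_sub(tail) & (size - 1)
}

fn main() {
    let size = 8;
    // Advancing past the end of the buffer wraps back to the start.
    assert_eq!(wrap_index(7 + 1, size), 0);
    assert_eq!(wrap_index(7 + 3, size), 2);

    // Works whether or not the occupied region wraps around.
    assert_eq!(count(2, 6, size), 4); // contiguous: slots 2..6
    assert_eq!(count(6, 2, size), 4); // wrapped: slots 6, 7, 0, 1
}
```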
-/// -/// [`iter`]: VecDeque::iter -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Iter<'a, T: 'a> { - ring: &'a [T], - tail: usize, - head: usize, -} - -#[stable(feature = "collection_debug", since = "1.17.0")] -impl fmt::Debug for Iter<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); - f.debug_tuple("Iter").field(&front).field(&back).finish() - } -} - -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` -#[stable(feature = "rust1", since = "1.0.0")] -impl Clone for Iter<'_, T> { - fn clone(&self) -> Self { - Iter { ring: self.ring, tail: self.tail, head: self.head } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> Iterator for Iter<'a, T> { - type Item = &'a T; - - #[inline] - fn next(&mut self) -> Option<&'a T> { - if self.tail == self.head { - return None; - } - let tail = self.tail; - self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len()); - unsafe { Some(self.ring.get_unchecked(tail)) } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let len = count(self.tail, self.head, self.ring.len()); - (len, Some(len)) - } - - fn fold(self, mut accum: Acc, mut f: F) -> Acc - where - F: FnMut(Acc, Self::Item) -> Acc, - { - let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); - accum = front.iter().fold(accum, &mut f); - back.iter().fold(accum, &mut f) - } - - fn try_fold(&mut self, init: B, mut f: F) -> R - where - Self: Sized, - F: FnMut(B, Self::Item) -> R, - R: Try, - { - let (mut iter, final_res); - if self.tail <= self.head { - // single slice self.ring[self.tail..self.head] - iter = self.ring[self.tail..self.head].iter(); - final_res = iter.try_fold(init, &mut f); - } else { - // two slices: self.ring[self.tail..], self.ring[..self.head] - let (front, back) = self.ring.split_at(self.tail); - let mut back_iter = back.iter(); - let res = back_iter.try_fold(init, &mut f); - let len = self.ring.len(); - self.tail = (self.ring.len() - back_iter.len()) & (len - 1); - iter = front[..self.head].iter(); - final_res = iter.try_fold(res?, &mut f); - } - self.tail = self.head - iter.len(); - final_res - } - - fn nth(&mut self, n: usize) -> Option { - if n >= count(self.tail, self.head, self.ring.len()) { - self.tail = self.head; - None - } else { - self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len()); - self.next() - } - } - - #[inline] - fn last(mut self) -> Option<&'a T> { - self.next_back() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> DoubleEndedIterator for Iter<'a, T> { - #[inline] - fn next_back(&mut self) -> Option<&'a T> { - if self.tail == self.head { - return None; - } - self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len()); - unsafe { Some(self.ring.get_unchecked(self.head)) } - } - - fn rfold(self, mut accum: Acc, mut f: F) -> Acc - where - F: FnMut(Acc, Self::Item) -> Acc, - { - let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); - accum = back.iter().rfold(accum, &mut f); - front.iter().rfold(accum, &mut f) - } - - fn try_rfold(&mut self, init: B, mut f: F) -> R - where - Self: Sized, - F: FnMut(B, Self::Item) -> R, - R: Try, - { - let (mut iter, final_res); - if self.tail <= self.head { - // single slice self.ring[self.tail..self.head] - iter = self.ring[self.tail..self.head].iter(); - final_res = iter.try_rfold(init, &mut f); - } else { - // two slices: self.ring[self.tail..], self.ring[..self.head] - let (front, back) = 
self.ring.split_at(self.tail); - let mut front_iter = front[..self.head].iter(); - let res = front_iter.try_rfold(init, &mut f); - self.head = front_iter.len(); - iter = back.iter(); - final_res = iter.try_rfold(res?, &mut f); - } - self.head = self.tail + iter.len(); - final_res - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Iter<'_, T> { - fn is_empty(&self) -> bool { - self.head == self.tail - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Iter<'_, T> {} - -/// A mutable iterator over the elements of a `VecDeque`. -/// -/// This `struct` is created by the [`iter_mut`] method on [`VecDeque`]. See its -/// documentation for more. -/// -/// [`iter_mut`]: VecDeque::iter_mut -#[stable(feature = "rust1", since = "1.0.0")] -pub struct IterMut<'a, T: 'a> { - // Internal safety invariant: the entire slice is dereferencable. - ring: *mut [T], - tail: usize, - head: usize, - phantom: PhantomData<&'a mut [T]>, -} - -// SAFETY: we do nothing thread-local and there is no interior mutability, -// so the usual structural `Send`/`Sync` apply. -#[stable(feature = "rust1", since = "1.0.0")] -unsafe impl Send for IterMut<'_, T> {} -#[stable(feature = "rust1", since = "1.0.0")] -unsafe impl Sync for IterMut<'_, T> {} - -#[stable(feature = "collection_debug", since = "1.17.0")] -impl fmt::Debug for IterMut<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); - // SAFETY: these are the elements we have not handed out yet, so aliasing is fine. - // The `IterMut` invariant also ensures everything is dereferencable. - let (front, back) = unsafe { (&*front, &*back) }; - f.debug_tuple("IterMut").field(&front).field(&back).finish() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> Iterator for IterMut<'a, T> { - type Item = &'a mut T; - - #[inline] - fn next(&mut self) -> Option<&'a mut T> { - if self.tail == self.head { - return None; - } - let tail = self.tail; - self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len()); - - unsafe { - let elem = self.ring.get_unchecked_mut(tail); - Some(&mut *elem) - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let len = count(self.tail, self.head, self.ring.len()); - (len, Some(len)) - } - - fn fold(self, mut accum: Acc, mut f: F) -> Acc - where - F: FnMut(Acc, Self::Item) -> Acc, - { - let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); - // SAFETY: these are the elements we have not handed out yet, so aliasing is fine. - // The `IterMut` invariant also ensures everything is dereferencable. 
- let (front, back) = unsafe { (&mut *front, &mut *back) }; - accum = front.iter_mut().fold(accum, &mut f); - back.iter_mut().fold(accum, &mut f) - } - - fn nth(&mut self, n: usize) -> Option { - if n >= count(self.tail, self.head, self.ring.len()) { - self.tail = self.head; - None - } else { - self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len()); - self.next() - } - } - - #[inline] - fn last(mut self) -> Option<&'a mut T> { - self.next_back() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { - #[inline] - fn next_back(&mut self) -> Option<&'a mut T> { - if self.tail == self.head { - return None; - } - self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len()); - - unsafe { - let elem = self.ring.get_unchecked_mut(self.head); - Some(&mut *elem) - } - } - - fn rfold(self, mut accum: Acc, mut f: F) -> Acc - where - F: FnMut(Acc, Self::Item) -> Acc, - { - let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); - // SAFETY: these are the elements we have not handed out yet, so aliasing is fine. - // The `IterMut` invariant also ensures everything is dereferencable. - let (front, back) = unsafe { (&mut *front, &mut *back) }; - accum = back.iter_mut().rfold(accum, &mut f); - front.iter_mut().rfold(accum, &mut f) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for IterMut<'_, T> { - fn is_empty(&self) -> bool { - self.head == self.tail - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for IterMut<'_, T> {} - -/// An owning iterator over the elements of a `VecDeque`. -/// -/// This `struct` is created by the [`into_iter`] method on [`VecDeque`] -/// (provided by the `IntoIterator` trait). See its documentation for more. -/// -/// [`into_iter`]: VecDeque::into_iter -#[derive(Clone)] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct IntoIter { - inner: VecDeque, -} - -#[stable(feature = "collection_debug", since = "1.17.0")] -impl fmt::Debug for IntoIter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("IntoIter").field(&self.inner).finish() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for IntoIter { - type Item = T; - - #[inline] - fn next(&mut self) -> Option { - self.inner.pop_front() - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let len = self.inner.len(); - (len, Some(len)) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for IntoIter { - #[inline] - fn next_back(&mut self) -> Option { - self.inner.pop_back() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for IntoIter { - fn is_empty(&self) -> bool { - self.inner.is_empty() - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for IntoIter {} - #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq for VecDeque { fn eq(&self, other: &VecDeque) -> bool { @@ -3039,26 +2616,6 @@ impl PartialEq for VecDeque { #[stable(feature = "rust1", since = "1.0.0")] impl Eq for VecDeque {} -macro_rules! 
__impl_slice_eq1 { - ([$($vars:tt)*] $lhs:ty, $rhs:ty, $($constraints:tt)*) => { - #[stable(feature = "vec_deque_partial_eq_slice", since = "1.17.0")] - impl PartialEq<$rhs> for $lhs - where - A: PartialEq, - $($constraints)* - { - fn eq(&self, other: &$rhs) -> bool { - if self.len() != other.len() { - return false; - } - let (sa, sb) = self.as_slices(); - let (oa, ob) = other[..].split_at(sa.len()); - sa == oa && sb == ob - } - } - } -} - __impl_slice_eq1! { [] VecDeque, Vec, } __impl_slice_eq1! { [] VecDeque, &[B], } __impl_slice_eq1! { [] VecDeque, &mut [B], } diff --git a/library/alloc/src/collections/vec_deque/pair_slices.rs b/library/alloc/src/collections/vec_deque/pair_slices.rs new file mode 100644 index 00000000000..812765d0b0d --- /dev/null +++ b/library/alloc/src/collections/vec_deque/pair_slices.rs @@ -0,0 +1,66 @@ +use core::array; +use core::cmp::{self}; +use core::mem::replace; + +use super::VecDeque; + +/// PairSlices pairs up equal length slice parts of two deques +/// +/// For example, given deques "A" and "B" with the following division into slices: +/// +/// A: [0 1 2] [3 4 5] +/// B: [a b] [c d e] +/// +/// It produces the following sequence of matching slices: +/// +/// ([0 1], [a b]) +/// (\[2\], \[c\]) +/// ([3 4], [d e]) +/// +/// and the uneven remainder of either A or B is skipped. +pub struct PairSlices<'a, 'b, T> { + pub(crate) a0: &'a mut [T], + pub(crate) a1: &'a mut [T], + pub(crate) b0: &'b [T], + pub(crate) b1: &'b [T], +} + +impl<'a, 'b, T> PairSlices<'a, 'b, T> { + pub fn from(to: &'a mut VecDeque, from: &'b VecDeque) -> Self { + let (a0, a1) = to.as_mut_slices(); + let (b0, b1) = from.as_slices(); + PairSlices { a0, a1, b0, b1 } + } + + pub fn has_remainder(&self) -> bool { + !self.b0.is_empty() + } + + pub fn remainder(self) -> impl Iterator { + array::IntoIter::new([self.b0, self.b1]) + } +} + +impl<'a, 'b, T> Iterator for PairSlices<'a, 'b, T> { + type Item = (&'a mut [T], &'b [T]); + fn next(&mut self) -> Option { + // Get next part length + let part = cmp::min(self.a0.len(), self.b0.len()); + if part == 0 { + return None; + } + let (p0, p1) = replace(&mut self.a0, &mut []).split_at_mut(part); + let (q0, q1) = self.b0.split_at(part); + + // Move a1 into a0, if it's empty (and b1, b0 the same way). 
+ self.a0 = p1; + self.b0 = q1; + if self.a0.is_empty() { + self.a0 = replace(&mut self.a1, &mut []); + } + if self.b0.is_empty() { + self.b0 = replace(&mut self.b1, &[]); + } + Some((p0, q0)) + } +} diff --git a/library/alloc/src/collections/vec_deque/ring_slices.rs b/library/alloc/src/collections/vec_deque/ring_slices.rs new file mode 100644 index 00000000000..dd0fa7d6074 --- /dev/null +++ b/library/alloc/src/collections/vec_deque/ring_slices.rs @@ -0,0 +1,56 @@ +use core::ptr::{self}; + +/// Returns the two slices that cover the `VecDeque`'s valid range +pub trait RingSlices: Sized { + fn slice(self, from: usize, to: usize) -> Self; + fn split_at(self, i: usize) -> (Self, Self); + + fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) { + let contiguous = tail <= head; + if contiguous { + let (empty, buf) = buf.split_at(0); + (buf.slice(tail, head), empty) + } else { + let (mid, right) = buf.split_at(tail); + let (left, _) = mid.split_at(head); + (right, left) + } + } +} + +impl RingSlices for &[T] { + fn slice(self, from: usize, to: usize) -> Self { + &self[from..to] + } + fn split_at(self, i: usize) -> (Self, Self) { + (*self).split_at(i) + } +} + +impl RingSlices for &mut [T] { + fn slice(self, from: usize, to: usize) -> Self { + &mut self[from..to] + } + fn split_at(self, i: usize) -> (Self, Self) { + (*self).split_at_mut(i) + } +} + +impl RingSlices for *mut [T] { + fn slice(self, from: usize, to: usize) -> Self { + assert!(from <= to && to < self.len()); + // Not using `get_unchecked_mut` to keep this a safe operation. + let len = to - from; + ptr::slice_from_raw_parts_mut(self.as_mut_ptr().wrapping_add(from), len) + } + + fn split_at(self, mid: usize) -> (Self, Self) { + let len = self.len(); + let ptr = self.as_mut_ptr(); + assert!(mid <= len); + ( + ptr::slice_from_raw_parts_mut(ptr, mid), + ptr::slice_from_raw_parts_mut(ptr.wrapping_add(mid), len - mid), + ) + } +} diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index 15092d463ec..dc5db49ee98 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -72,12 +72,13 @@ #![allow(explicit_outlives_requirements)] #![allow(incomplete_features)] #![deny(unsafe_op_in_unsafe_fn)] -#![cfg_attr(not(bootstrap), feature(rustc_allow_const_fn_unstable))] +#![feature(rustc_allow_const_fn_unstable)] #![cfg_attr(not(test), feature(generator_trait))] #![cfg_attr(test, feature(test))] #![cfg_attr(test, feature(new_uninit))] #![feature(allocator_api)] #![feature(array_chunks)] +#![feature(array_methods)] #![feature(array_value_iter)] #![feature(array_windows)] #![feature(allow_internal_unstable)] @@ -129,8 +130,7 @@ #![feature(unicode_internals)] #![feature(unsafe_block_in_unsafe_fn)] #![feature(unsize)] -#![cfg_attr(not(bootstrap), feature(unsized_fn_params))] -#![cfg_attr(bootstrap, feature(unsized_locals))] +#![feature(unsized_fn_params)] #![feature(allocator_internals)] #![feature(slice_partition_dedup)] #![feature(maybe_uninit_extra, maybe_uninit_slice, maybe_uninit_uninit_array)] diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs index a4240308bb3..edab576bea8 100644 --- a/library/alloc/src/raw_vec.rs +++ b/library/alloc/src/raw_vec.rs @@ -1,7 +1,7 @@ #![unstable(feature = "raw_vec_internals", reason = "implementation detail", issue = "none")] #![doc(hidden)] -use core::alloc::LayoutErr; +use core::alloc::LayoutError; use core::cmp; use core::intrinsics; use core::mem::{self, ManuallyDrop, MaybeUninit}; @@ -116,8 +116,7 @@ impl RawVec { impl RawVec { /// Like `new`, but 
parameterized over the choice of allocator for /// the returned `RawVec`. - #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn))] - #[cfg_attr(bootstrap, allow_internal_unstable(const_fn))] + #[rustc_allow_const_fn_unstable(const_fn)] pub const fn new_in(alloc: A) -> Self { // `cap: 0` means "unallocated". zero-sized types are ignored. Self { ptr: Unique::dangling(), cap: 0, alloc } @@ -233,15 +232,10 @@ impl RawVec { } /// Returns a shared reference to the allocator backing this `RawVec`. - pub fn alloc(&self) -> &A { + pub fn alloc_ref(&self) -> &A { &self.alloc } - /// Returns a mutable reference to the allocator backing this `RawVec`. - pub fn alloc_mut(&mut self) -> &mut A { - &mut self.alloc - } - fn current_memory(&self) -> Option<(NonNull, Layout)> { if mem::size_of::() == 0 || self.cap == 0 { None @@ -472,7 +466,7 @@ impl RawVec { // significant, because the number of different `A` types seen in practice is // much smaller than the number of `T` types.) fn finish_grow( - new_layout: Result, + new_layout: Result, current_memory: Option<(NonNull, Layout)>, alloc: &mut A, ) -> Result, TryReserveError> diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs index 3db66964941..41ebb1cf654 100644 --- a/library/alloc/src/slice.rs +++ b/library/alloc/src/slice.rs @@ -87,6 +87,7 @@ use core::cmp::Ordering::{self, Less}; use core::mem::{self, size_of}; use core::ptr; +use crate::alloc::{AllocRef, Global}; use crate::borrow::ToOwned; use crate::boxed::Box; use crate::vec::Vec; @@ -137,26 +138,28 @@ pub use hack::to_vec; // `core::slice::SliceExt` - we need to supply these functions for the // `test_permutations` test mod hack { + use core::alloc::AllocRef; + use crate::boxed::Box; use crate::vec::Vec; // We shouldn't add inline attribute to this since this is used in // `vec!` macro mostly and causes perf regression. See #71204 for // discussion and perf results. - pub fn into_vec(b: Box<[T]>) -> Vec { + pub fn into_vec(b: Box<[T], A>) -> Vec { unsafe { let len = b.len(); - let b = Box::into_raw(b); - Vec::from_raw_parts(b as *mut T, len, len) + let (b, alloc) = Box::into_raw_with_alloc(b); + Vec::from_raw_parts_in(b as *mut T, len, len, alloc) } } #[inline] - pub fn to_vec(s: &[T]) -> Vec + pub fn to_vec(s: &[T], alloc: A) -> Vec where T: Clone, { - let mut vec = Vec::with_capacity(s.len()); + let mut vec = Vec::with_capacity_in(s.len(), alloc); vec.extend_from_slice(s); vec } @@ -388,11 +391,33 @@ impl [T] { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn to_vec(&self) -> Vec + where + T: Clone, + { + self.to_vec_in(Global) + } + + /// Copies `self` into a new `Vec` with an allocator. + /// + /// # Examples + /// + /// ``` + /// #![feature(allocator_api)] + /// + /// use std::alloc::System; + /// + /// let s = [10, 40, 30]; + /// let x = s.to_vec_in(System); + /// // Here, `s` and `x` can be modified independently. + /// ``` + #[inline] + #[unstable(feature = "allocator_api", issue = "32838")] + pub fn to_vec_in(&self, alloc: A) -> Vec where T: Clone, { // N.B., see the `hack` module in this file for more details. - hack::to_vec(self) + hack::to_vec(self, alloc) } /// Converts `self` into a vector without clones or allocation. @@ -411,7 +436,7 @@ impl [T] { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] - pub fn into_vec(self: Box) -> Vec { + pub fn into_vec(self: Box) -> Vec { // N.B., see the `hack` module in this file for more details. 
hack::into_vec(self) } @@ -730,7 +755,7 @@ impl ToOwned for [T] { #[cfg(test)] fn to_owned(&self) -> Vec { - hack::to_vec(self) + hack::to_vec(self, Global) } fn clone_into(&self, target: &mut Vec) { diff --git a/library/alloc/src/vec.rs b/library/alloc/src/vec.rs index 202e3a83638..2225bf63e3c 100644 --- a/library/alloc/src/vec.rs +++ b/library/alloc/src/vec.rs @@ -68,6 +68,7 @@ use core::ops::{self, Index, IndexMut, Range, RangeBounds}; use core::ptr::{self, NonNull}; use core::slice::{self, SliceIndex}; +use crate::alloc::{AllocRef, Global}; use crate::borrow::{Cow, ToOwned}; use crate::boxed::Box; use crate::collections::TryReserveError; @@ -297,8 +298,8 @@ use crate::raw_vec::RawVec; /// [`&`]: ../../std/primitive.reference.html #[stable(feature = "rust1", since = "1.0.0")] #[cfg_attr(not(test), rustc_diagnostic_item = "vec_type")] -pub struct Vec { - buf: RawVec, +pub struct Vec { + buf: RawVec, len: usize, } @@ -320,7 +321,7 @@ impl Vec { #[inline] #[rustc_const_stable(feature = "const_vec_new", since = "1.39.0")] #[stable(feature = "rust1", since = "1.0.0")] - pub const fn new() -> Vec { + pub const fn new() -> Self { Vec { buf: RawVec::NEW, len: 0 } } @@ -359,49 +360,145 @@ impl Vec { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn with_capacity(capacity: usize) -> Vec { - Vec { buf: RawVec::with_capacity(capacity), len: 0 } + pub fn with_capacity(capacity: usize) -> Self { + Self::with_capacity_in(capacity, Global) } - /// Decomposes a `Vec` into its raw components. + /// Creates a `Vec` directly from the raw components of another vector. /// - /// Returns the raw pointer to the underlying data, the length of - /// the vector (in elements), and the allocated capacity of the - /// data (in elements). These are the same arguments in the same - /// order as the arguments to [`from_raw_parts`]. + /// # Safety /// - /// After calling this function, the caller is responsible for the - /// memory previously managed by the `Vec`. The only way to do - /// this is to convert the raw pointer, length, and capacity back - /// into a `Vec` with the [`from_raw_parts`] function, allowing - /// the destructor to perform the cleanup. + /// This is highly unsafe, due to the number of invariants that aren't + /// checked: /// - /// [`from_raw_parts`]: Vec::from_raw_parts + /// * `ptr` needs to have been previously allocated via [`String`]/`Vec` + /// (at least, it's highly likely to be incorrect if it wasn't). + /// * `T` needs to have the same size and alignment as what `ptr` was allocated with. + /// (`T` having a less strict alignment is not sufficient, the alignment really + /// needs to be equal to satisfy the [`dealloc`] requirement that memory must be + /// allocated and deallocated with the same layout.) + /// * `length` needs to be less than or equal to `capacity`. + /// * `capacity` needs to be the capacity that the pointer was allocated with. + /// + /// Violating these may cause problems like corrupting the allocator's + /// internal data structures. For example it is **not** safe + /// to build a `Vec` from a pointer to a C `char` array with length `size_t`. + /// It's also not safe to build one from a `Vec` and its length, because + /// the allocator cares about the alignment, and these two types have different + /// alignments. The buffer was allocated with alignment 2 (for `u16`), but after + /// turning it into a `Vec` it'll be deallocated with alignment 1. 
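
A quick way to see the alignment point made in the safety docs above, using only the stable `Layout` API (an illustration, not part of the patch):

use std::alloc::Layout;

fn main() {
    // The buffer behind a Vec<u16> of capacity 4, and the buffer a Vec<u8> of
    // capacity 8 would hand back to the allocator, have the same size...
    let as_u16 = Layout::array::<u16>(4).unwrap();
    let as_u8 = Layout::array::<u8>(8).unwrap();
    assert_eq!((as_u16.size(), as_u8.size()), (8, 8));
    // ...but different alignment, so the deallocation layout would not match
    // the allocation layout. That mismatch is what makes rebuilding a Vec<u8>
    // from a Vec<u16>'s raw parts undefined behaviour.
    assert_eq!((as_u16.align(), as_u8.align()), (2, 1));
}
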
+ /// + /// The ownership of `ptr` is effectively transferred to the + /// `Vec` which may then deallocate, reallocate or change the + /// contents of memory pointed to by the pointer at will. Ensure + /// that nothing else uses the pointer after calling this + /// function. + /// + /// [`String`]: crate::string::String + /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc /// /// # Examples /// /// ``` - /// #![feature(vec_into_raw_parts)] - /// let v: Vec = vec![-1, 0, 1]; + /// use std::ptr; + /// use std::mem; /// - /// let (ptr, len, cap) = v.into_raw_parts(); + /// let v = vec![1, 2, 3]; /// - /// let rebuilt = unsafe { - /// // We can now make changes to the components, such as - /// // transmuting the raw pointer to a compatible type. - /// let ptr = ptr as *mut u32; + // FIXME Update this when vec_into_raw_parts is stabilized + /// // Prevent running `v`'s destructor so we are in complete control + /// // of the allocation. + /// let mut v = mem::ManuallyDrop::new(v); /// - /// Vec::from_raw_parts(ptr, len, cap) - /// }; - /// assert_eq!(rebuilt, [4294967295, 0, 1]); + /// // Pull out the various important pieces of information about `v` + /// let p = v.as_mut_ptr(); + /// let len = v.len(); + /// let cap = v.capacity(); + /// + /// unsafe { + /// // Overwrite memory with 4, 5, 6 + /// for i in 0..len as isize { + /// ptr::write(p.offset(i), 4 + i); + /// } + /// + /// // Put everything back together into a Vec + /// let rebuilt = Vec::from_raw_parts(p, len, cap); + /// assert_eq!(rebuilt, [4, 5, 6]); + /// } /// ``` - #[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")] - pub fn into_raw_parts(self) -> (*mut T, usize, usize) { - let mut me = ManuallyDrop::new(self); - (me.as_mut_ptr(), me.len(), me.capacity()) + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Self { + unsafe { Self::from_raw_parts_in(ptr, length, capacity, Global) } } +} - /// Creates a `Vec` directly from the raw components of another vector. +impl Vec { + /// Constructs a new, empty `Vec`. + /// + /// The vector will not allocate until elements are pushed onto it. + /// + /// # Examples + /// + /// ``` + /// #![feature(allocator_api)] + /// + /// use std::alloc::System; + /// + /// # #[allow(unused_mut)] + /// let mut vec: Vec = Vec::new_in(System); + /// ``` + #[inline] + #[unstable(feature = "allocator_api", issue = "32838")] + pub const fn new_in(alloc: A) -> Self { + Vec { buf: RawVec::new_in(alloc), len: 0 } + } + + /// Constructs a new, empty `Vec` with the specified capacity with the provided + /// allocator. + /// + /// The vector will be able to hold exactly `capacity` elements without + /// reallocating. If `capacity` is 0, the vector will not allocate. + /// + /// It is important to note that although the returned vector has the + /// *capacity* specified, the vector will have a zero *length*. For an + /// explanation of the difference between length and capacity, see + /// *[Capacity and reallocation]*. + /// + /// [Capacity and reallocation]: #capacity-and-reallocation + /// + /// # Examples + /// + /// ``` + /// #![feature(allocator_api)] + /// + /// use std::alloc::System; + /// + /// let mut vec = Vec::with_capacity_in(10, System); + /// + /// // The vector contains no items, even though it has capacity for more + /// assert_eq!(vec.len(), 0); + /// assert_eq!(vec.capacity(), 10); + /// + /// // These are all done without reallocating... 
+ /// for i in 0..10 { + /// vec.push(i); + /// } + /// assert_eq!(vec.len(), 10); + /// assert_eq!(vec.capacity(), 10); + /// + /// // ...but this may make the vector reallocate + /// vec.push(11); + /// assert_eq!(vec.len(), 11); + /// assert!(vec.capacity() >= 11); + /// ``` + #[inline] + #[unstable(feature = "allocator_api", issue = "32838")] + pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { + Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 } + } + + /// Creates a `Vec` directly from the raw components of another vector. /// /// # Safety /// @@ -437,10 +534,17 @@ impl Vec { /// # Examples /// /// ``` + /// #![feature(allocator_api)] + /// + /// use std::alloc::System; + /// /// use std::ptr; /// use std::mem; /// - /// let v = vec![1, 2, 3]; + /// let mut v = Vec::with_capacity_in(3, System); + /// v.push(1); + /// v.push(2); + /// v.push(3); /// // FIXME Update this when vec_into_raw_parts is stabilized /// // Prevent running `v`'s destructor so we are in complete control @@ -451,6 +555,7 @@ impl Vec { /// let p = v.as_mut_ptr(); /// let len = v.len(); /// let cap = v.capacity(); + /// let alloc = v.alloc_ref(); /// /// unsafe { /// // Overwrite memory with 4, 5, 6 @@ -459,13 +564,100 @@ impl Vec { /// } /// /// // Put everything back together into a Vec - /// let rebuilt = Vec::from_raw_parts(p, len, cap); + /// let rebuilt = Vec::from_raw_parts_in(p, len, cap, alloc.clone()); /// assert_eq!(rebuilt, [4, 5, 6]); /// } /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Vec { - unsafe { Vec { buf: RawVec::from_raw_parts(ptr, capacity), len: length } } + #[inline] + #[unstable(feature = "allocator_api", issue = "32838")] + pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self { + unsafe { Vec { buf: RawVec::from_raw_parts_in(ptr, capacity, alloc), len: length } } + } + + /// Decomposes a `Vec` into its raw components. + /// + /// Returns the raw pointer to the underlying data, the length of + /// the vector (in elements), and the allocated capacity of the + /// data (in elements). These are the same arguments in the same + /// order as the arguments to [`from_raw_parts`]. + /// + /// After calling this function, the caller is responsible for the + /// memory previously managed by the `Vec`. The only way to do + /// this is to convert the raw pointer, length, and capacity back + /// into a `Vec` with the [`from_raw_parts`] function, allowing + /// the destructor to perform the cleanup. + /// + /// [`from_raw_parts`]: Vec::from_raw_parts + /// + /// # Examples + /// + /// ``` + /// #![feature(vec_into_raw_parts)] + /// let v: Vec = vec![-1, 0, 1]; + /// + /// let (ptr, len, cap) = v.into_raw_parts(); + /// + /// let rebuilt = unsafe { + /// // We can now make changes to the components, such as + /// // transmuting the raw pointer to a compatible type. + /// let ptr = ptr as *mut u32; + /// + /// Vec::from_raw_parts(ptr, len, cap) + /// }; + /// assert_eq!(rebuilt, [4294967295, 0, 1]); + /// ``` + #[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")] + pub fn into_raw_parts(self) -> (*mut T, usize, usize) { + let mut me = ManuallyDrop::new(self); + (me.as_mut_ptr(), me.len(), me.capacity()) + } + + /// Decomposes a `Vec` into its raw components. 
+ /// + /// Returns the raw pointer to the underlying data, the length of the vector (in elements), + /// the allocated capacity of the data (in elements), and the allocator. These are the same + /// arguments in the same order as the arguments to [`from_raw_parts_in`]. + /// + /// After calling this function, the caller is responsible for the + /// memory previously managed by the `Vec`. The only way to do + /// this is to convert the raw pointer, length, and capacity back + /// into a `Vec` with the [`from_raw_parts_in`] function, allowing + /// the destructor to perform the cleanup. + /// + /// [`from_raw_parts_in`]: Vec::from_raw_parts_in + /// + /// # Examples + /// + /// ``` + /// #![feature(allocator_api, vec_into_raw_parts)] + /// + /// use std::alloc::System; + /// + /// let mut v: Vec = Vec::new_in(System); + /// v.push(-1); + /// v.push(0); + /// v.push(1); + /// + /// let (ptr, len, cap, alloc) = v.into_raw_parts_with_alloc(); + /// + /// let rebuilt = unsafe { + /// // We can now make changes to the components, such as + /// // transmuting the raw pointer to a compatible type. + /// let ptr = ptr as *mut u32; + /// + /// Vec::from_raw_parts_in(ptr, len, cap, alloc) + /// }; + /// assert_eq!(rebuilt, [4294967295, 0, 1]); + /// ``` + #[unstable(feature = "allocator_api", issue = "32838")] + // #[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")] + pub fn into_raw_parts_with_alloc(self) -> (*mut T, usize, usize, A) { + let mut me = ManuallyDrop::new(self); + let len = me.len(); + let capacity = me.capacity(); + let ptr = me.as_mut_ptr(); + let alloc = unsafe { ptr::read(me.alloc_ref()) }; + (ptr, len, capacity, alloc) } /// Returns the number of elements the vector can hold without @@ -684,7 +876,7 @@ impl Vec { /// assert_eq!(slice.into_vec().capacity(), 3); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn into_boxed_slice(mut self) -> Box<[T]> { + pub fn into_boxed_slice(mut self) -> Box<[T], A> { unsafe { self.shrink_to_fit(); let me = ManuallyDrop::new(self); @@ -849,7 +1041,7 @@ impl Vec { /// } /// x.set_len(size); /// } - /// assert_eq!(&*x, &[0,1,2,3]); + /// assert_eq!(&*x, &[0, 1, 2, 3]); /// ``` #[stable(feature = "vec_as_ptr", since = "1.37.0")] #[inline] @@ -863,6 +1055,13 @@ impl Vec { ptr } + /// Returns a reference to the underlying allocator. + #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn alloc_ref(&self) -> &A { + self.buf.alloc_ref() + } + /// Forces the length of the vector to `new_len`. /// /// This is a low-level operation that maintains none of the normal @@ -1299,7 +1498,7 @@ impl Vec { /// assert_eq!(v, &[]); /// ``` #[stable(feature = "drain", since = "1.6.0")] - pub fn drain(&mut self, range: R) -> Drain<'_, T> + pub fn drain(&mut self, range: R) -> Drain<'_, T, A> where R: RangeBounds, { @@ -1395,7 +1594,7 @@ impl Vec { /// # Examples /// /// ``` - /// let mut vec = vec![1,2,3]; + /// let mut vec = vec![1, 2, 3]; /// let vec2 = vec.split_off(1); /// assert_eq!(vec, [1]); /// assert_eq!(vec2, [2, 3]); @@ -1403,7 +1602,10 @@ impl Vec { #[inline] #[must_use = "use `.truncate()` if you don't need the other half"] #[stable(feature = "split_off", since = "1.4.0")] - pub fn split_off(&mut self, at: usize) -> Self { + pub fn split_off(&mut self, at: usize) -> Self + where + A: Clone, + { #[cold] #[inline(never)] fn assert_failed(at: usize, len: usize) -> ! 
{ @@ -1416,11 +1618,14 @@ impl Vec { if at == 0 { // the new vector can take over the original buffer and avoid the copy - return mem::replace(self, Vec::with_capacity(self.capacity())); + return mem::replace( + self, + Vec::with_capacity_in(self.capacity(), self.alloc_ref().clone()), + ); } let other_len = self.len - at; - let mut other = Vec::with_capacity(other_len); + let mut other = Vec::with_capacity_in(other_len, self.alloc_ref().clone()); // Unsafely `set_len` and copy items to `other`. unsafe { @@ -1497,7 +1702,7 @@ impl Vec { #[inline] pub fn leak<'a>(self) -> &'a mut [T] where - T: 'a, // Technically not needed, but kept to be explicit. + A: 'a, { Box::leak(self.into_boxed_slice()) } @@ -1544,7 +1749,7 @@ impl Vec { } } -impl Vec { +impl Vec { /// Resizes the `Vec` in-place so that `len` is equal to `new_len`. /// /// If `new_len` is greater than `len`, the `Vec` is extended by the @@ -1639,7 +1844,7 @@ impl T> ExtendWith for ExtendFunc { } } -impl Vec { +impl Vec { /// Extend the vector by `n` values, using the given generator. fn extend_with>(&mut self, n: usize, mut value: E) { self.reserve(n); @@ -1699,7 +1904,7 @@ impl Drop for SetLenOnDrop<'_> { } } -impl Vec { +impl Vec { /// Removes consecutive repeated elements in the vector according to the /// [`PartialEq`] trait implementation. /// @@ -1721,7 +1926,7 @@ impl Vec { } } -impl Vec { +impl Vec { /// Removes the first instance of `item` from the vector if the item exists. /// /// This method will be removed soon. @@ -1749,17 +1954,23 @@ impl Vec { #[doc(hidden)] #[stable(feature = "rust1", since = "1.0.0")] pub fn from_elem(elem: T, n: usize) -> Vec { - ::from_elem(elem, n) + ::from_elem(elem, n, Global) +} + +#[doc(hidden)] +#[unstable(feature = "allocator_api", issue = "32838")] +pub fn from_elem_in(elem: T, n: usize, alloc: A) -> Vec { + ::from_elem(elem, n, alloc) } // Specialization trait used for Vec::from_elem trait SpecFromElem: Sized { - fn from_elem(elem: Self, n: usize) -> Vec; + fn from_elem(elem: Self, n: usize, alloc: A) -> Vec; } impl SpecFromElem for T { - default fn from_elem(elem: Self, n: usize) -> Vec { - let mut v = Vec::with_capacity(n); + default fn from_elem(elem: Self, n: usize, alloc: A) -> Vec { + let mut v = Vec::with_capacity_in(n, alloc); v.extend_with(n, ExtendElement(elem)); v } @@ -1767,12 +1978,12 @@ impl SpecFromElem for T { impl SpecFromElem for i8 { #[inline] - fn from_elem(elem: i8, n: usize) -> Vec { + fn from_elem(elem: i8, n: usize, alloc: A) -> Vec { if elem == 0 { - return Vec { buf: RawVec::with_capacity_zeroed(n), len: n }; + return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n }; } unsafe { - let mut v = Vec::with_capacity(n); + let mut v = Vec::with_capacity_in(n, alloc); ptr::write_bytes(v.as_mut_ptr(), elem as u8, n); v.set_len(n); v @@ -1782,12 +1993,12 @@ impl SpecFromElem for i8 { impl SpecFromElem for u8 { #[inline] - fn from_elem(elem: u8, n: usize) -> Vec { + fn from_elem(elem: u8, n: usize, alloc: A) -> Vec { if elem == 0 { - return Vec { buf: RawVec::with_capacity_zeroed(n), len: n }; + return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n }; } unsafe { - let mut v = Vec::with_capacity(n); + let mut v = Vec::with_capacity_in(n, alloc); ptr::write_bytes(v.as_mut_ptr(), elem, n); v.set_len(n); v @@ -1797,11 +2008,11 @@ impl SpecFromElem for u8 { impl SpecFromElem for T { #[inline] - fn from_elem(elem: T, n: usize) -> Vec { + fn from_elem(elem: T, n: usize, alloc: A) -> Vec { if elem.is_zero() { - return Vec { buf: 
RawVec::with_capacity_zeroed(n), len: n }; + return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n }; } - let mut v = Vec::with_capacity(n); + let mut v = Vec::with_capacity_in(n, alloc); v.extend_with(n, ExtendElement(elem)); v } @@ -1882,7 +2093,7 @@ unsafe impl IsZero for Option> { //////////////////////////////////////////////////////////////////////////////// #[stable(feature = "rust1", since = "1.0.0")] -impl ops::Deref for Vec { +impl ops::Deref for Vec { type Target = [T]; fn deref(&self) -> &[T] { @@ -1891,17 +2102,18 @@ impl ops::Deref for Vec { } #[stable(feature = "rust1", since = "1.0.0")] -impl ops::DerefMut for Vec { +impl ops::DerefMut for Vec { fn deref_mut(&mut self) -> &mut [T] { unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) } } } #[stable(feature = "rust1", since = "1.0.0")] -impl Clone for Vec { +impl Clone for Vec { #[cfg(not(test))] - fn clone(&self) -> Vec { - <[T]>::to_vec(&**self) + fn clone(&self) -> Self { + let alloc = self.alloc_ref().clone(); + <[T]>::to_vec_in(&**self, alloc) } // HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is @@ -1909,17 +2121,27 @@ impl Clone for Vec { // `slice::to_vec` function which is only available with cfg(test) // NB see the slice::hack module in slice.rs for more information #[cfg(test)] - fn clone(&self) -> Vec { - crate::slice::to_vec(&**self) + fn clone(&self) -> Self { + let alloc = self.alloc_ref().clone(); + crate::slice::to_vec(&**self, alloc) } - fn clone_from(&mut self, other: &Vec) { - other.as_slice().clone_into(self); + fn clone_from(&mut self, other: &Self) { + // drop anything that will not be overwritten + self.truncate(other.len()); + + // self.len <= other.len due to the truncate above, so the + // slices here are always in-bounds. + let (init, tail) = other.split_at(self.len()); + + // reuse the contained values' allocations/resources. + self.clone_from_slice(init); + self.extend_from_slice(tail); } } #[stable(feature = "rust1", since = "1.0.0")] -impl Hash for Vec { +impl Hash for Vec { #[inline] fn hash(&self, state: &mut H) { Hash::hash(&**self, state) @@ -1931,7 +2153,7 @@ impl Hash for Vec { message = "vector indices are of type `usize` or ranges of `usize`", label = "vector indices are of type `usize` or ranges of `usize`" )] -impl> Index for Vec { +impl, A: AllocRef> Index for Vec { type Output = I::Output; #[inline] @@ -1945,7 +2167,7 @@ impl> Index for Vec { message = "vector indices are of type `usize` or ranges of `usize`", label = "vector indices are of type `usize` or ranges of `usize`" )] -impl> IndexMut for Vec { +impl, A: AllocRef> IndexMut for Vec { #[inline] fn index_mut(&mut self, index: I) -> &mut Self::Output { IndexMut::index_mut(&mut **self, index) @@ -1961,9 +2183,9 @@ impl FromIterator for Vec { } #[stable(feature = "rust1", since = "1.0.0")] -impl IntoIterator for Vec { +impl IntoIterator for Vec { type Item = T; - type IntoIter = IntoIter; + type IntoIter = IntoIter; /// Creates a consuming iterator, that is, one that moves each value out of /// the vector (from start to end). 
The vector cannot be used after calling @@ -1979,9 +2201,10 @@ impl IntoIterator for Vec { /// } /// ``` #[inline] - fn into_iter(self) -> IntoIter { + fn into_iter(self) -> IntoIter { unsafe { let mut me = ManuallyDrop::new(self); + let alloc = ptr::read(me.alloc_ref()); let begin = me.as_mut_ptr(); let end = if mem::size_of::() == 0 { arith_offset(begin as *const i8, me.len() as isize) as *const T @@ -1993,6 +2216,7 @@ impl IntoIterator for Vec { buf: NonNull::new_unchecked(begin), phantom: PhantomData, cap, + alloc, ptr: begin, end, } @@ -2001,7 +2225,7 @@ impl IntoIterator for Vec { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> IntoIterator for &'a Vec { +impl<'a, T, A: AllocRef> IntoIterator for &'a Vec { type Item = &'a T; type IntoIter = slice::Iter<'a, T>; @@ -2011,7 +2235,7 @@ impl<'a, T> IntoIterator for &'a Vec { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> IntoIterator for &'a mut Vec { +impl<'a, T, A: AllocRef> IntoIterator for &'a mut Vec { type Item = &'a mut T; type IntoIter = slice::IterMut<'a, T>; @@ -2021,7 +2245,7 @@ impl<'a, T> IntoIterator for &'a mut Vec { } #[stable(feature = "rust1", since = "1.0.0")] -impl Extend for Vec { +impl Extend for Vec { #[inline] fn extend>(&mut self, iter: I) { >::spec_extend(self, iter.into_iter()) @@ -2136,10 +2360,8 @@ impl InPlaceDrop { impl Drop for InPlaceDrop { #[inline] fn drop(&mut self) { - if mem::needs_drop::() { - unsafe { - ptr::drop_in_place(slice::from_raw_parts_mut(self.inner, self.len())); - } + unsafe { + ptr::drop_in_place(slice::from_raw_parts_mut(self.inner, self.len())); } } } @@ -2305,7 +2527,7 @@ trait SpecExtend { fn spec_extend(&mut self, iter: I); } -impl SpecExtend for Vec +impl SpecExtend for Vec where I: Iterator, { @@ -2314,7 +2536,7 @@ where } } -impl SpecExtend for Vec +impl SpecExtend for Vec where I: TrustedLen, { @@ -2347,7 +2569,7 @@ where } } -impl SpecExtend> for Vec { +impl SpecExtend> for Vec { fn spec_extend(&mut self, mut iterator: IntoIter) { unsafe { self.append_elements(iterator.as_slice() as _); @@ -2356,7 +2578,7 @@ impl SpecExtend> for Vec { } } -impl<'a, T: 'a, I> SpecExtend<&'a T, I> for Vec +impl<'a, T: 'a, I, A: AllocRef + 'a> SpecExtend<&'a T, I> for Vec where I: Iterator, T: Clone, @@ -2366,7 +2588,7 @@ where } } -impl<'a, T: 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec +impl<'a, T: 'a, A: AllocRef + 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec where T: Copy, { @@ -2376,7 +2598,7 @@ where } } -impl Vec { +impl Vec { // leaf method to which various SpecFrom/SpecExtend implementations delegate when // they have no further optimizations to apply fn extend_desugared>(&mut self, mut iterator: I) { @@ -2436,7 +2658,7 @@ impl Vec { /// ``` #[inline] #[stable(feature = "vec_splice", since = "1.21.0")] - pub fn splice(&mut self, range: R, replace_with: I) -> Splice<'_, I::IntoIter> + pub fn splice(&mut self, range: R, replace_with: I) -> Splice<'_, I::IntoIter, A> where R: RangeBounds, I: IntoIterator, @@ -2489,7 +2711,7 @@ impl Vec { /// assert_eq!(odds, vec![1, 3, 5, 9, 11, 13, 15]); /// ``` #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] - pub fn drain_filter(&mut self, filter: F) -> DrainFilter<'_, T, F> + pub fn drain_filter(&mut self, filter: F) -> DrainFilter<'_, T, F, A> where F: FnMut(&mut T) -> bool, { @@ -2511,7 +2733,7 @@ impl Vec { /// /// [`copy_from_slice`]: ../../std/primitive.slice.html#method.copy_from_slice #[stable(feature = "extend_ref", since = "1.2.0")] -impl<'a, T: 'a + Copy> Extend<&'a T> for 
Vec { +impl<'a, T: Copy + 'a, A: AllocRef + 'a> Extend<&'a T> for Vec { fn extend>(&mut self, iter: I) { self.spec_extend(iter.into_iter()) } @@ -2530,9 +2752,9 @@ impl<'a, T: 'a + Copy> Extend<&'a T> for Vec { macro_rules! __impl_slice_eq1 { ([$($vars:tt)*] $lhs:ty, $rhs:ty $(where $ty:ty: $bound:ident)?, #[$stability:meta]) => { #[$stability] - impl PartialEq<$rhs> for $lhs + impl PartialEq<$rhs> for $lhs where - A: PartialEq, + T: PartialEq, $($ty: $bound)? { #[inline] @@ -2543,18 +2765,18 @@ macro_rules! __impl_slice_eq1 { } } -__impl_slice_eq1! { [] Vec, Vec, #[stable(feature = "rust1", since = "1.0.0")] } -__impl_slice_eq1! { [] Vec, &[B], #[stable(feature = "rust1", since = "1.0.0")] } -__impl_slice_eq1! { [] Vec, &mut [B], #[stable(feature = "rust1", since = "1.0.0")] } -__impl_slice_eq1! { [] &[A], Vec, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] } -__impl_slice_eq1! { [] &mut [A], Vec, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] } -__impl_slice_eq1! { [] Vec, [B], #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] } -__impl_slice_eq1! { [] [A], Vec, #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] } -__impl_slice_eq1! { [] Cow<'_, [A]>, Vec where A: Clone, #[stable(feature = "rust1", since = "1.0.0")] } -__impl_slice_eq1! { [] Cow<'_, [A]>, &[B] where A: Clone, #[stable(feature = "rust1", since = "1.0.0")] } -__impl_slice_eq1! { [] Cow<'_, [A]>, &mut [B] where A: Clone, #[stable(feature = "rust1", since = "1.0.0")] } -__impl_slice_eq1! { [const N: usize] Vec, [B; N], #[stable(feature = "rust1", since = "1.0.0")] } -__impl_slice_eq1! { [const N: usize] Vec, &[B; N], #[stable(feature = "rust1", since = "1.0.0")] } +__impl_slice_eq1! { [A: AllocRef] Vec, Vec, #[stable(feature = "rust1", since = "1.0.0")] } +__impl_slice_eq1! { [A: AllocRef] Vec, &[U], #[stable(feature = "rust1", since = "1.0.0")] } +__impl_slice_eq1! { [A: AllocRef] Vec, &mut [U], #[stable(feature = "rust1", since = "1.0.0")] } +__impl_slice_eq1! { [A: AllocRef] &[T], Vec, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] } +__impl_slice_eq1! { [A: AllocRef] &mut [T], Vec, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] } +__impl_slice_eq1! { [A: AllocRef] Vec, [U], #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] } +__impl_slice_eq1! { [A: AllocRef] [T], Vec, #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] } +__impl_slice_eq1! { [A: AllocRef] Cow<'_, [T]>, Vec where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] } +__impl_slice_eq1! { [] Cow<'_, [T]>, &[U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] } +__impl_slice_eq1! { [] Cow<'_, [T]>, &mut [U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] } +__impl_slice_eq1! { [A: AllocRef, const N: usize] Vec, [U; N], #[stable(feature = "rust1", since = "1.0.0")] } +__impl_slice_eq1! { [A: AllocRef, const N: usize] Vec, &[U; N], #[stable(feature = "rust1", since = "1.0.0")] } // NOTE: some less important impls are omitted to reduce code bloat // FIXME(Centril): Reconsider this? @@ -2568,27 +2790,27 @@ __impl_slice_eq1! { [const N: usize] Vec, &[B; N], #[stable(feature = "rust1" /// Implements comparison of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison). 
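
The rewritten `__impl_slice_eq1!` invocations above keep the same user-visible comparisons and only generalize them over the allocator parameter. A small sanity check of the kind of code they make compile (sketch on stable APIs, not taken from the patch):

fn main() {
    let v = vec![1, 2, 3];
    // Vec compares element-wise against other vectors, slices and fixed-size
    // arrays, regardless of which allocator parameter it carries.
    assert_eq!(v, vec![1, 2, 3]);
    assert_eq!(v, [1, 2, 3]);
    assert_eq!(v, &[1, 2, 3][..]);
    // Ordering is lexicographic, as the PartialOrd/Ord impls below document.
    assert!(v < vec![1, 2, 4]);
}
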
#[stable(feature = "rust1", since = "1.0.0")] -impl PartialOrd for Vec { +impl PartialOrd for Vec { #[inline] - fn partial_cmp(&self, other: &Vec) -> Option { + fn partial_cmp(&self, other: &Self) -> Option { PartialOrd::partial_cmp(&**self, &**other) } } #[stable(feature = "rust1", since = "1.0.0")] -impl Eq for Vec {} +impl Eq for Vec {} /// Implements ordering of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison). #[stable(feature = "rust1", since = "1.0.0")] -impl Ord for Vec { +impl Ord for Vec { #[inline] - fn cmp(&self, other: &Vec) -> Ordering { + fn cmp(&self, other: &Self) -> Ordering { Ord::cmp(&**self, &**other) } } #[stable(feature = "rust1", since = "1.0.0")] -unsafe impl<#[may_dangle] T> Drop for Vec { +unsafe impl<#[may_dangle] T, A: AllocRef> Drop for Vec { fn drop(&mut self) { unsafe { // use drop for [T] @@ -2609,35 +2831,35 @@ impl Default for Vec { } #[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Debug for Vec { +impl fmt::Debug for Vec { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } #[stable(feature = "rust1", since = "1.0.0")] -impl AsRef> for Vec { - fn as_ref(&self) -> &Vec { +impl AsRef> for Vec { + fn as_ref(&self) -> &Vec { self } } #[stable(feature = "vec_as_mut", since = "1.5.0")] -impl AsMut> for Vec { - fn as_mut(&mut self) -> &mut Vec { +impl AsMut> for Vec { + fn as_mut(&mut self) -> &mut Vec { self } } #[stable(feature = "rust1", since = "1.0.0")] -impl AsRef<[T]> for Vec { +impl AsRef<[T]> for Vec { fn as_ref(&self) -> &[T] { self } } #[stable(feature = "vec_as_mut", since = "1.5.0")] -impl AsMut<[T]> for Vec { +impl AsMut<[T]> for Vec { fn as_mut(&mut self) -> &mut [T] { self } @@ -2651,7 +2873,7 @@ impl From<&[T]> for Vec { } #[cfg(test)] fn from(s: &[T]) -> Vec { - crate::slice::to_vec(s) + crate::slice::to_vec(s, Global) } } @@ -2663,7 +2885,7 @@ impl From<&mut [T]> for Vec { } #[cfg(test)] fn from(s: &mut [T]) -> Vec { - crate::slice::to_vec(s) + crate::slice::to_vec(s, Global) } } @@ -2692,17 +2914,18 @@ where // note: test pulls in libstd, which causes errors here #[cfg(not(test))] #[stable(feature = "vec_from_box", since = "1.18.0")] -impl From> for Vec { - fn from(s: Box<[T]>) -> Vec { - s.into_vec() +impl From> for Vec { + fn from(s: Box<[T], A>) -> Self { + let len = s.len(); + Self { buf: RawVec::from_box(s), len } } } // note: test pulls in libstd, which causes errors here #[cfg(not(test))] #[stable(feature = "box_from_vec", since = "1.20.0")] -impl From> for Box<[T]> { - fn from(v: Vec) -> Box<[T]> { +impl From> for Box<[T], A> { + fn from(v: Vec) -> Self { v.into_boxed_slice() } } @@ -2715,8 +2938,8 @@ impl From<&str> for Vec { } #[stable(feature = "array_try_from_vec", since = "1.48.0")] -impl TryFrom> for [T; N] { - type Error = Vec; +impl TryFrom> for [T; N] { + type Error = Vec; /// Gets the entire contents of the `Vec` as an array, /// if its size exactly matches that of the requested array. 
@@ -2747,7 +2970,7 @@ impl TryFrom> for [T; N] { /// assert_eq!(a, b' '); /// assert_eq!(b, b'd'); /// ``` - fn try_from(mut vec: Vec) -> Result<[T; N], Vec> { + fn try_from(mut vec: Vec) -> Result<[T; N], Vec> { if vec.len() != N { return Err(vec); } @@ -2816,22 +3039,24 @@ where /// let iter: std::vec::IntoIter<_> = v.into_iter(); /// ``` #[stable(feature = "rust1", since = "1.0.0")] -pub struct IntoIter { +pub struct IntoIter +{ buf: NonNull, phantom: PhantomData, cap: usize, + alloc: A, ptr: *const T, end: *const T, } #[stable(feature = "vec_intoiter_debug", since = "1.13.0")] -impl fmt::Debug for IntoIter { +impl fmt::Debug for IntoIter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("IntoIter").field(&self.as_slice()).finish() } } -impl IntoIter { +impl IntoIter { /// Returns the remaining items of this iterator as a slice. /// /// # Examples @@ -2866,15 +3091,20 @@ impl IntoIter { unsafe { &mut *self.as_raw_mut_slice() } } + /// Returns a reference to the underlying allocator. + #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn alloc_ref(&self) -> &A { + &self.alloc + } + fn as_raw_mut_slice(&mut self) -> *mut [T] { ptr::slice_from_raw_parts_mut(self.ptr as *mut T, self.len()) } fn drop_remaining(&mut self) { - if mem::needs_drop::() { - unsafe { - ptr::drop_in_place(self.as_mut_slice()); - } + unsafe { + ptr::drop_in_place(self.as_mut_slice()); } self.ptr = self.end; } @@ -2890,19 +3120,19 @@ impl IntoIter { } #[stable(feature = "vec_intoiter_as_ref", since = "1.46.0")] -impl AsRef<[T]> for IntoIter { +impl AsRef<[T]> for IntoIter { fn as_ref(&self) -> &[T] { self.as_slice() } } #[stable(feature = "rust1", since = "1.0.0")] -unsafe impl Send for IntoIter {} +unsafe impl Send for IntoIter {} #[stable(feature = "rust1", since = "1.0.0")] -unsafe impl Sync for IntoIter {} +unsafe impl Sync for IntoIter {} #[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for IntoIter { +impl Iterator for IntoIter { type Item = T; #[inline] @@ -2959,7 +3189,7 @@ impl Iterator for IntoIter { } #[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for IntoIter { +impl DoubleEndedIterator for IntoIter { #[inline] fn next_back(&mut self) -> Option { if self.end == self.ptr { @@ -2979,23 +3209,23 @@ impl DoubleEndedIterator for IntoIter { } #[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for IntoIter { +impl ExactSizeIterator for IntoIter { fn is_empty(&self) -> bool { self.ptr == self.end } } #[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for IntoIter {} +impl FusedIterator for IntoIter {} #[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for IntoIter {} +unsafe impl TrustedLen for IntoIter {} #[doc(hidden)] #[unstable(issue = "none", feature = "std_internals")] // T: Copy as approximation for !Drop since get_unchecked does not advance self.ptr // and thus we can't implement drop-handling -unsafe impl TrustedRandomAccess for IntoIter +unsafe impl TrustedRandomAccess for IntoIter where T: Copy, { @@ -3005,21 +3235,30 @@ where } #[stable(feature = "vec_into_iter_clone", since = "1.8.0")] -impl Clone for IntoIter { - fn clone(&self) -> IntoIter { - self.as_slice().to_owned().into_iter() +impl Clone for IntoIter { + #[cfg(not(test))] + fn clone(&self) -> Self { + self.as_slice().to_vec_in(self.alloc.clone()).into_iter() + } + #[cfg(test)] + fn clone(&self) -> Self { + crate::slice::to_vec(self.as_slice(), self.alloc.clone()).into_iter() } } #[stable(feature = 
"rust1", since = "1.0.0")] -unsafe impl<#[may_dangle] T> Drop for IntoIter { +unsafe impl<#[may_dangle] T, A: AllocRef> Drop for IntoIter { fn drop(&mut self) { - struct DropGuard<'a, T>(&'a mut IntoIter); + struct DropGuard<'a, T, A: AllocRef>(&'a mut IntoIter); - impl Drop for DropGuard<'_, T> { + impl Drop for DropGuard<'_, T, A> { fn drop(&mut self) { - // RawVec handles deallocation - let _ = unsafe { RawVec::from_raw_parts(self.0.buf.as_ptr(), self.0.cap) }; + unsafe { + // `IntoIter::alloc` is not used anymore after this + let alloc = ptr::read(&self.0.alloc); + // RawVec handles deallocation + let _ = RawVec::from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc); + } } } @@ -3033,11 +3272,11 @@ unsafe impl<#[may_dangle] T> Drop for IntoIter { } #[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl InPlaceIterable for IntoIter {} +unsafe impl InPlaceIterable for IntoIter {} #[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl SourceIter for IntoIter { - type Source = IntoIter; +unsafe impl SourceIter for IntoIter { + type Source = Self; #[inline] unsafe fn as_inner(&mut self) -> &mut Self::Source { @@ -3072,24 +3311,28 @@ impl AsIntoIter for IntoIter { /// let iter: std::vec::Drain<_> = v.drain(..); /// ``` #[stable(feature = "drain", since = "1.6.0")] -pub struct Drain<'a, T: 'a> { +pub struct Drain< + 'a, + T: 'a, + #[unstable(feature = "allocator_api", issue = "32838")] A: AllocRef + 'a = Global, +> { /// Index of tail to preserve tail_start: usize, /// Length of tail tail_len: usize, /// Current remaining range to remove iter: slice::Iter<'a, T>, - vec: NonNull>, + vec: NonNull>, } #[stable(feature = "collection_debug", since = "1.17.0")] -impl fmt::Debug for Drain<'_, T> { +impl fmt::Debug for Drain<'_, T, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("Drain").field(&self.iter.as_slice()).finish() } } -impl<'a, T> Drain<'a, T> { +impl<'a, T, A: AllocRef> Drain<'a, T, A> { /// Returns the remaining items of this iterator as a slice. /// /// # Examples @@ -3105,22 +3348,29 @@ impl<'a, T> Drain<'a, T> { pub fn as_slice(&self) -> &[T] { self.iter.as_slice() } + + /// Returns a reference to the underlying allocator. 
+ #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn alloc_ref(&self) -> &A { + unsafe { self.vec.as_ref().alloc_ref() } + } } #[stable(feature = "vec_drain_as_slice", since = "1.46.0")] -impl<'a, T> AsRef<[T]> for Drain<'a, T> { +impl<'a, T, A: AllocRef> AsRef<[T]> for Drain<'a, T, A> { fn as_ref(&self) -> &[T] { self.as_slice() } } #[stable(feature = "drain", since = "1.6.0")] -unsafe impl Sync for Drain<'_, T> {} +unsafe impl Sync for Drain<'_, T, A> {} #[stable(feature = "drain", since = "1.6.0")] -unsafe impl Send for Drain<'_, T> {} +unsafe impl Send for Drain<'_, T, A> {} #[stable(feature = "drain", since = "1.6.0")] -impl Iterator for Drain<'_, T> { +impl Iterator for Drain<'_, T, A> { type Item = T; #[inline] @@ -3134,7 +3384,7 @@ impl Iterator for Drain<'_, T> { } #[stable(feature = "drain", since = "1.6.0")] -impl DoubleEndedIterator for Drain<'_, T> { +impl DoubleEndedIterator for Drain<'_, T, A> { #[inline] fn next_back(&mut self) -> Option { self.iter.next_back().map(|elt| unsafe { ptr::read(elt as *const _) }) @@ -3142,13 +3392,13 @@ impl DoubleEndedIterator for Drain<'_, T> { } #[stable(feature = "drain", since = "1.6.0")] -impl Drop for Drain<'_, T> { +impl Drop for Drain<'_, T, A> { fn drop(&mut self) { /// Continues dropping the remaining elements in the `Drain`, then moves back the /// un-`Drain`ed elements to restore the original `Vec`. - struct DropGuard<'r, 'a, T>(&'r mut Drain<'a, T>); + struct DropGuard<'r, 'a, T, A: AllocRef>(&'r mut Drain<'a, T, A>); - impl<'r, 'a, T> Drop for DropGuard<'r, 'a, T> { + impl<'r, 'a, T, A: AllocRef> Drop for DropGuard<'r, 'a, T, A> { fn drop(&mut self) { // Continue the same loop we have below. If the loop already finished, this does // nothing. @@ -3184,17 +3434,17 @@ impl Drop for Drain<'_, T> { } #[stable(feature = "drain", since = "1.6.0")] -impl ExactSizeIterator for Drain<'_, T> { +impl ExactSizeIterator for Drain<'_, T, A> { fn is_empty(&self) -> bool { self.iter.is_empty() } } #[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for Drain<'_, T> {} +unsafe impl TrustedLen for Drain<'_, T, A> {} #[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Drain<'_, T> {} +impl FusedIterator for Drain<'_, T, A> {} /// A splicing iterator for `Vec`. 
/// @@ -3210,13 +3460,17 @@ impl FusedIterator for Drain<'_, T> {} /// ``` #[derive(Debug)] #[stable(feature = "vec_splice", since = "1.21.0")] -pub struct Splice<'a, I: Iterator + 'a> { - drain: Drain<'a, I::Item>, +pub struct Splice< + 'a, + I: Iterator + 'a, + #[unstable(feature = "allocator_api", issue = "32838")] A: AllocRef + 'a = Global, +> { + drain: Drain<'a, I::Item, A>, replace_with: I, } #[stable(feature = "vec_splice", since = "1.21.0")] -impl Iterator for Splice<'_, I> { +impl Iterator for Splice<'_, I, A> { type Item = I::Item; fn next(&mut self) -> Option { @@ -3229,17 +3483,17 @@ impl Iterator for Splice<'_, I> { } #[stable(feature = "vec_splice", since = "1.21.0")] -impl DoubleEndedIterator for Splice<'_, I> { +impl DoubleEndedIterator for Splice<'_, I, A> { fn next_back(&mut self) -> Option { self.drain.next_back() } } #[stable(feature = "vec_splice", since = "1.21.0")] -impl ExactSizeIterator for Splice<'_, I> {} +impl ExactSizeIterator for Splice<'_, I, A> {} #[stable(feature = "vec_splice", since = "1.21.0")] -impl Drop for Splice<'_, I> { +impl Drop for Splice<'_, I, A> { fn drop(&mut self) { self.drain.by_ref().for_each(drop); @@ -3280,7 +3534,7 @@ impl Drop for Splice<'_, I> { } /// Private helper methods for `Splice::drop` -impl Drain<'_, T> { +impl Drain<'_, T, A> { /// The range from `self.vec.len` to `self.tail_start` contains elements /// that have been moved out. /// Fill that range as much as possible with new elements from the `replace_with` iterator. @@ -3335,11 +3589,15 @@ impl Drain<'_, T> { /// ``` #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] #[derive(Debug)] -pub struct DrainFilter<'a, T, F> -where +pub struct DrainFilter< + 'a, + T, + F, + #[unstable(feature = "allocator_api", issue = "32838")] A: AllocRef = Global, +> where F: FnMut(&mut T) -> bool, { - vec: &'a mut Vec, + vec: &'a mut Vec, /// The index of the item that will be inspected by the next call to `next`. idx: usize, /// The number of items that have been drained (removed) thus far. @@ -3356,8 +3614,20 @@ where panic_flag: bool, } +impl DrainFilter<'_, T, F, A> +where + F: FnMut(&mut T) -> bool, +{ + /// Returns a reference to the underlying allocator. 
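
Like `Drain`, the `Splice` and `DrainFilter` adapters below only gain the allocator parameter; their behaviour does not change. A minimal `splice` example on the stable API (illustration only):

fn main() {
    let mut v = vec![1, 2, 3, 4];
    // splice removes 1..3 and lazily inserts the replacement in its place.
    let removed: Vec<i32> = v.splice(1..3, [7, 8, 9].iter().copied()).collect();
    assert_eq!(removed, [2, 3]);
    assert_eq!(v, [1, 7, 8, 9, 4]);
}
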
+ #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn alloc_ref(&self) -> &A { + self.vec.alloc_ref() + } +} + #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] -impl Iterator for DrainFilter<'_, T, F> +impl Iterator for DrainFilter<'_, T, F, A> where F: FnMut(&mut T) -> bool, { @@ -3395,19 +3665,19 @@ where } #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] -impl Drop for DrainFilter<'_, T, F> +impl Drop for DrainFilter<'_, T, F, A> where F: FnMut(&mut T) -> bool, { fn drop(&mut self) { - struct BackshiftOnDrop<'a, 'b, T, F> + struct BackshiftOnDrop<'a, 'b, T, F, A: AllocRef> where F: FnMut(&mut T) -> bool, { - drain: &'b mut DrainFilter<'a, T, F>, + drain: &'b mut DrainFilter<'a, T, F, A>, } - impl<'a, 'b, T, F> Drop for BackshiftOnDrop<'a, 'b, T, F> + impl<'a, 'b, T, F, A: AllocRef> Drop for BackshiftOnDrop<'a, 'b, T, F, A> where F: FnMut(&mut T) -> bool, { diff --git a/library/backtrace b/library/backtrace index 8b8ea53b56f..af078ecc0b0 160000 --- a/library/backtrace +++ b/library/backtrace @@ -1 +1 @@ -Subproject commit 8b8ea53b56f519dd7780defdd4254daaec892584 +Subproject commit af078ecc0b069ec594982f92d4c6c58af99efbb5 diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs index a3fbed2ec12..339d85902b8 100644 --- a/library/core/src/alloc/layout.rs +++ b/library/core/src/alloc/layout.rs @@ -39,7 +39,7 @@ pub struct Layout { impl Layout { /// Constructs a `Layout` from a given `size` and `align`, - /// or returns `LayoutErr` if any of the following conditions + /// or returns `LayoutError` if any of the following conditions /// are not met: /// /// * `align` must not be zero, @@ -50,11 +50,11 @@ impl Layout { /// must not overflow (i.e., the rounded value must be less than /// or equal to `usize::MAX`). #[stable(feature = "alloc_layout", since = "1.28.0")] - #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")] + #[rustc_const_stable(feature = "const_alloc_layout", since = "1.50.0")] #[inline] - pub const fn from_size_align(size: usize, align: usize) -> Result { + pub const fn from_size_align(size: usize, align: usize) -> Result { if !align.is_power_of_two() { - return Err(LayoutErr { private: () }); + return Err(LayoutError { private: () }); } // (power-of-two implies align != 0.) @@ -72,7 +72,7 @@ impl Layout { // Above implies that checking for summation overflow is both // necessary and sufficient. if size > usize::MAX - (align - 1) { - return Err(LayoutErr { private: () }); + return Err(LayoutError { private: () }); } // SAFETY: the conditions for `from_size_align_unchecked` have been @@ -96,7 +96,7 @@ impl Layout { /// The minimum size in bytes for a memory block of this layout. #[stable(feature = "alloc_layout", since = "1.28.0")] - #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")] + #[rustc_const_stable(feature = "const_alloc_layout", since = "1.50.0")] #[inline] pub const fn size(&self) -> usize { self.size_ @@ -104,7 +104,7 @@ impl Layout { /// The minimum byte alignment for a memory block of this layout. #[stable(feature = "alloc_layout", since = "1.28.0")] - #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")] + #[rustc_const_stable(feature = "const_alloc_layout", since = "1.50.0")] #[inline] pub const fn align(&self) -> usize { self.align_.get() @@ -200,7 +200,7 @@ impl Layout { /// `align` violates the conditions listed in [`Layout::from_size_align`]. 
#[stable(feature = "alloc_layout_manipulation", since = "1.44.0")] #[inline] - pub fn align_to(&self, align: usize) -> Result { + pub fn align_to(&self, align: usize) -> Result { Layout::from_size_align(self.size(), cmp::max(self.align(), align)) } @@ -274,16 +274,16 @@ impl Layout { /// layout of the array and `offs` is the distance between the start /// of each element in the array. /// - /// On arithmetic overflow, returns `LayoutErr`. + /// On arithmetic overflow, returns `LayoutError`. #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] - pub fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutErr> { + pub fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutError> { // This cannot overflow. Quoting from the invariant of Layout: // > `size`, when rounded up to the nearest multiple of `align`, // > must not overflow (i.e., the rounded value must be less than // > `usize::MAX`) let padded_size = self.size() + self.padding_needed_for(self.align()); - let alloc_size = padded_size.checked_mul(n).ok_or(LayoutErr { private: () })?; + let alloc_size = padded_size.checked_mul(n).ok_or(LayoutError { private: () })?; // SAFETY: self.align is already known to be valid and alloc_size has been // padded already. @@ -307,7 +307,7 @@ impl Layout { /// start of the `next` embedded within the concatenated record /// (assuming that the record itself starts at offset 0). /// - /// On arithmetic overflow, returns `LayoutErr`. + /// On arithmetic overflow, returns `LayoutError`. /// /// # Examples /// @@ -315,8 +315,8 @@ impl Layout { /// the fields from its fields' layouts: /// /// ```rust - /// # use std::alloc::{Layout, LayoutErr}; - /// pub fn repr_c(fields: &[Layout]) -> Result<(Layout, Vec), LayoutErr> { + /// # use std::alloc::{Layout, LayoutError}; + /// pub fn repr_c(fields: &[Layout]) -> Result<(Layout, Vec), LayoutError> { /// let mut offsets = Vec::new(); /// let mut layout = Layout::from_size_align(0, 1)?; /// for &field in fields { @@ -337,12 +337,12 @@ impl Layout { /// ``` #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")] #[inline] - pub fn extend(&self, next: Self) -> Result<(Self, usize), LayoutErr> { + pub fn extend(&self, next: Self) -> Result<(Self, usize), LayoutError> { let new_align = cmp::max(self.align(), next.align()); let pad = self.padding_needed_for(next.align()); - let offset = self.size().checked_add(pad).ok_or(LayoutErr { private: () })?; - let new_size = offset.checked_add(next.size()).ok_or(LayoutErr { private: () })?; + let offset = self.size().checked_add(pad).ok_or(LayoutError { private: () })?; + let new_size = offset.checked_add(next.size()).ok_or(LayoutError { private: () })?; let layout = Layout::from_size_align(new_size, new_align)?; Ok((layout, offset)) @@ -359,11 +359,11 @@ impl Layout { /// guaranteed that all elements in the array will be properly /// aligned. /// - /// On arithmetic overflow, returns `LayoutErr`. + /// On arithmetic overflow, returns `LayoutError`. #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] - pub fn repeat_packed(&self, n: usize) -> Result { - let size = self.size().checked_mul(n).ok_or(LayoutErr { private: () })?; + pub fn repeat_packed(&self, n: usize) -> Result { + let size = self.size().checked_mul(n).ok_or(LayoutError { private: () })?; Layout::from_size_align(size, self.align()) } @@ -372,38 +372,46 @@ impl Layout { /// padding is inserted, the alignment of `next` is irrelevant, /// and is not incorporated *at all* into the resulting layout. 
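// Contrasting `extend` and `extend_packed` as described above. `extend_packed` is
// nightly-only (feature `alloc_layout_extra`, issue #55724); the gate is a
// toolchain assumption rather than part of the diff.
#![feature(alloc_layout_extra)]
use std::alloc::Layout;

fn main() {
    let a = Layout::new::<u8>();  // size 1, align 1
    let b = Layout::new::<u32>(); // size 4, align 4
    // `extend` inserts padding so `b` starts at its required alignment.
    let (ab, offset) = a.extend(b).unwrap();
    assert_eq!((ab.size(), ab.align(), offset), (8, 4, 4));
    // `extend_packed` appends with no padding and ignores `b`'s alignment.
    let packed = a.extend_packed(b).unwrap();
    assert_eq!((packed.size(), packed.align()), (5, 1));
}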
/// - /// On arithmetic overflow, returns `LayoutErr`. + /// On arithmetic overflow, returns `LayoutError`. #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] - pub fn extend_packed(&self, next: Self) -> Result { - let new_size = self.size().checked_add(next.size()).ok_or(LayoutErr { private: () })?; + pub fn extend_packed(&self, next: Self) -> Result { + let new_size = self.size().checked_add(next.size()).ok_or(LayoutError { private: () })?; Layout::from_size_align(new_size, self.align()) } /// Creates a layout describing the record for a `[T; n]`. /// - /// On arithmetic overflow, returns `LayoutErr`. + /// On arithmetic overflow, returns `LayoutError`. #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")] #[inline] - pub fn array(n: usize) -> Result { + pub fn array(n: usize) -> Result { let (layout, offset) = Layout::new::().repeat(n)?; debug_assert_eq!(offset, mem::size_of::()); Ok(layout.pad_to_align()) } } +#[stable(feature = "alloc_layout", since = "1.28.0")] +#[rustc_deprecated( + since = "1.51.0", + reason = "Name does not follow std convention, use LayoutError", + suggestion = "LayoutError" +)] +pub type LayoutErr = LayoutError; + /// The parameters given to `Layout::from_size_align` /// or some other `Layout` constructor /// do not satisfy its documented constraints. -#[stable(feature = "alloc_layout", since = "1.28.0")] +#[stable(feature = "alloc_layout_error", since = "1.49.0")] #[derive(Clone, PartialEq, Eq, Debug)] -pub struct LayoutErr { +pub struct LayoutError { private: (), } // (we need this for downstream impl of trait Error) #[stable(feature = "alloc_layout", since = "1.28.0")] -impl fmt::Display for LayoutErr { +impl fmt::Display for LayoutError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("invalid parameters to Layout::from_size_align") } diff --git a/library/core/src/alloc/mod.rs b/library/core/src/alloc/mod.rs index c61c19cc7d1..bc874e2e522 100644 --- a/library/core/src/alloc/mod.rs +++ b/library/core/src/alloc/mod.rs @@ -8,7 +8,18 @@ mod layout; #[stable(feature = "global_alloc", since = "1.28.0")] pub use self::global::GlobalAlloc; #[stable(feature = "alloc_layout", since = "1.28.0")] -pub use self::layout::{Layout, LayoutErr}; +pub use self::layout::Layout; +#[stable(feature = "alloc_layout", since = "1.28.0")] +#[rustc_deprecated( + since = "1.51.0", + reason = "Name does not follow std convention, use LayoutError", + suggestion = "LayoutError" +)] +#[allow(deprecated, deprecated_in_future)] +pub use self::layout::LayoutErr; + +#[stable(feature = "alloc_layout_error", since = "1.49.0")] +pub use self::layout::LayoutError; use crate::fmt; use crate::ptr::{self, NonNull}; diff --git a/library/core/src/array/iter.rs b/library/core/src/array/iter.rs index cafb002c01a..706f865b4d1 100644 --- a/library/core/src/array/iter.rs +++ b/library/core/src/array/iter.rs @@ -69,7 +69,8 @@ impl IntoIter { /// Returns an immutable slice of all elements that have not been yielded /// yet. - fn as_slice(&self) -> &[T] { + #[unstable(feature = "array_value_iter_slice", issue = "65798")] + pub fn as_slice(&self) -> &[T] { // SAFETY: We know that all elements within `alive` are properly initialized. unsafe { let slice = self.data.get_unchecked(self.alive.clone()); @@ -78,7 +79,8 @@ impl IntoIter { } /// Returns a mutable slice of all elements that have not been yielded yet. 
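// Sketch of the now-public `as_slice`/`as_mut_slice` on the by-value array
// iterator. Both the constructor and these methods are nightly-only here
// (features `array_value_iter` and `array_value_iter_slice`); the gates are
// toolchain assumptions, not part of the diff.
#![feature(array_value_iter, array_value_iter_slice)]
use std::array::IntoIter;

fn main() {
    let mut iter = IntoIter::new([1, 2, 3, 4]);
    assert_eq!(iter.next(), Some(1));
    // The slice views cover only the elements that have not been yielded yet.
    assert_eq!(iter.as_slice(), &[2, 3, 4]);
    iter.as_mut_slice()[0] = 20;
    assert_eq!(iter.next(), Some(20));
}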
- fn as_mut_slice(&mut self) -> &mut [T] { + #[unstable(feature = "array_value_iter_slice", issue = "65798")] + pub fn as_mut_slice(&mut self) -> &mut [T] { // SAFETY: We know that all elements within `alive` are properly initialized. unsafe { let slice = self.data.get_unchecked_mut(self.alive.clone()); diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs index 123a191dd2c..a7cb1023229 100644 --- a/library/core/src/array/mod.rs +++ b/library/core/src/array/mod.rs @@ -12,6 +12,7 @@ use crate::convert::{Infallible, TryFrom}; use crate::fmt; use crate::hash::{self, Hash}; use crate::marker::Unsize; +use crate::ops::{Index, IndexMut}; use crate::slice::{Iter, IterMut}; mod iter; @@ -208,6 +209,30 @@ impl<'a, T, const N: usize> IntoIterator for &'a mut [T; N] { } } +#[stable(feature = "index_trait_on_arrays", since = "1.50.0")] +impl Index for [T; N] +where + [T]: Index, +{ + type Output = <[T] as Index>::Output; + + #[inline] + fn index(&self, index: I) -> &Self::Output { + Index::index(self as &[T], index) + } +} + +#[stable(feature = "index_trait_on_arrays", since = "1.50.0")] +impl IndexMut for [T; N] +where + [T]: IndexMut, +{ + #[inline] + fn index_mut(&mut self, index: I) -> &mut Self::Output { + IndexMut::index_mut(self as &mut [T], index) + } +} + #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq<[B; N]> for [A; N] where @@ -254,22 +279,22 @@ where } #[stable(feature = "rust1", since = "1.0.0")] -impl<'b, A, B, const N: usize> PartialEq<&'b [B]> for [A; N] +impl PartialEq<&[B]> for [A; N] where A: PartialEq, { #[inline] - fn eq(&self, other: &&'b [B]) -> bool { + fn eq(&self, other: &&[B]) -> bool { self[..] == other[..] } #[inline] - fn ne(&self, other: &&'b [B]) -> bool { + fn ne(&self, other: &&[B]) -> bool { self[..] != other[..] } } #[stable(feature = "rust1", since = "1.0.0")] -impl<'b, A, B, const N: usize> PartialEq<[A; N]> for &'b [B] +impl PartialEq<[A; N]> for &[B] where B: PartialEq, { @@ -284,22 +309,22 @@ where } #[stable(feature = "rust1", since = "1.0.0")] -impl<'b, A, B, const N: usize> PartialEq<&'b mut [B]> for [A; N] +impl PartialEq<&mut [B]> for [A; N] where A: PartialEq, { #[inline] - fn eq(&self, other: &&'b mut [B]) -> bool { + fn eq(&self, other: &&mut [B]) -> bool { self[..] == other[..] } #[inline] - fn ne(&self, other: &&'b mut [B]) -> bool { + fn ne(&self, other: &&mut [B]) -> bool { self[..] != other[..] 
} } #[stable(feature = "rust1", since = "1.0.0")] -impl<'b, A, B, const N: usize> PartialEq<[A; N]> for &'b mut [B] +impl PartialEq<[A; N]> for &mut [B] where B: PartialEq, { diff --git a/library/core/src/bool.rs b/library/core/src/bool.rs index 6e0865e8653..00164c631b3 100644 --- a/library/core/src/bool.rs +++ b/library/core/src/bool.rs @@ -23,12 +23,10 @@ impl bool { /// # Examples /// /// ``` - /// #![feature(bool_to_option)] - /// /// assert_eq!(false.then(|| 0), None); /// assert_eq!(true.then(|| 0), Some(0)); /// ``` - #[unstable(feature = "bool_to_option", issue = "64260")] + #[stable(feature = "lazy_bool_to_option", since = "1.50.0")] #[inline] pub fn then T>(self, f: F) -> Option { if self { Some(f()) } else { None } diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs index b2afb702eeb..e1b6307613b 100644 --- a/library/core/src/cell.rs +++ b/library/core/src/cell.rs @@ -1027,7 +1027,6 @@ impl RefCell { /// # Examples /// /// ``` - /// #![feature(refcell_take)] /// use std::cell::RefCell; /// /// let c = RefCell::new(5); @@ -1036,7 +1035,7 @@ impl RefCell { /// assert_eq!(five, 5); /// assert_eq!(c.into_inner(), 0); /// ``` - #[unstable(feature = "refcell_take", issue = "71395")] + #[stable(feature = "refcell_take", since = "1.50.0")] pub fn take(&self) -> T { self.replace(Default::default()) } diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs index bbb3a3dea43..47ae1a64190 100644 --- a/library/core/src/cmp.rs +++ b/library/core/src/cmp.rs @@ -641,14 +641,12 @@ pub trait Ord: Eq + PartialOrd { /// # Examples /// /// ``` - /// #![feature(clamp)] - /// /// assert!((-3).clamp(-2, 1) == -2); /// assert!(0.clamp(-2, 1) == 0); /// assert!(2.clamp(-2, 1) == 1); /// ``` #[must_use] - #[unstable(feature = "clamp", issue = "44095")] + #[stable(feature = "clamp", since = "1.50.0")] fn clamp(self, min: Self, max: Self) -> Self where Self: Sized, diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs index 0e3129607a6..433f0129306 100644 --- a/library/core/src/intrinsics.rs +++ b/library/core/src/intrinsics.rs @@ -788,7 +788,7 @@ extern "rust-intrinsic" { /// The size of the referenced value in bytes. /// - /// The stabilized version of this intrinsic is [`size_of_val`]. + /// The stabilized version of this intrinsic is [`mem::size_of_val`]. #[rustc_const_unstable(feature = "const_size_of_val", issue = "46571")] pub fn size_of_val(_: *const T) -> usize; /// The required alignment of the referenced value. @@ -1704,7 +1704,7 @@ extern "rust-intrinsic" { /// Returns the number of variants of the type `T` cast to a `usize`; /// if `T` has no variants, returns 0. Uninhabited variants will be counted. /// - /// The to-be-stabilized version of this intrinsic is [`variant_count`]. + /// The to-be-stabilized version of this intrinsic is [`mem::variant_count`]. #[rustc_const_unstable(feature = "variant_count", issue = "73662")] pub fn variant_count() -> usize; diff --git a/library/core/src/iter/adapters/chain.rs b/library/core/src/iter/adapters/chain.rs index 2e070d71224..9753e1b43ba 100644 --- a/library/core/src/iter/adapters/chain.rs +++ b/library/core/src/iter/adapters/chain.rs @@ -1,6 +1,5 @@ use crate::iter::{DoubleEndedIterator, FusedIterator, Iterator, TrustedLen}; -use crate::ops::Try; -use crate::usize; +use crate::{ops::Try, usize}; /// An iterator that links two iterators together, in a chain. 
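// Illustration of the new `Index`/`IndexMut` forwarding impls for `[T; N]` added
// earlier in this diff: generic code bounded on `Index` can now accept arrays
// directly instead of requiring a prior coercion to a slice. The helper below is
// illustrative, not part of std.
use std::ops::{Index, Range};

fn middle<C>(collection: &C) -> &[i32]
where
    C: Index<Range<usize>, Output = [i32]> + ?Sized,
{
    &collection[1..3]
}

fn main() {
    let array = [10, 20, 30, 40];
    // `[i32; 4]` itself satisfies the bound through the forwarding impl.
    assert_eq!(middle(&array), &[20, 30]);
    // Plain slices keep working as before.
    assert_eq!(middle(&array[..]), &[20, 30]);
}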
/// diff --git a/library/core/src/iter/adapters/cloned.rs b/library/core/src/iter/adapters/cloned.rs new file mode 100644 index 00000000000..7da47dcd2d1 --- /dev/null +++ b/library/core/src/iter/adapters/cloned.rs @@ -0,0 +1,139 @@ +use crate::iter::adapters::{zip::try_get_unchecked, TrustedRandomAccess}; +use crate::iter::{FusedIterator, TrustedLen}; +use crate::ops::Try; + +/// An iterator that clones the elements of an underlying iterator. +/// +/// This `struct` is created by the [`cloned`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`cloned`]: Iterator::cloned +/// [`Iterator`]: trait.Iterator.html +#[stable(feature = "iter_cloned", since = "1.1.0")] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[derive(Clone, Debug)] +pub struct Cloned { + it: I, +} + +impl Cloned { + pub(in crate::iter) fn new(it: I) -> Cloned { + Cloned { it } + } +} + +fn clone_try_fold(mut f: impl FnMut(Acc, T) -> R) -> impl FnMut(Acc, &T) -> R { + move |acc, elt| f(acc, elt.clone()) +} + +#[stable(feature = "iter_cloned", since = "1.1.0")] +impl<'a, I, T: 'a> Iterator for Cloned +where + I: Iterator, + T: Clone, +{ + type Item = T; + + fn next(&mut self) -> Option { + self.it.next().cloned() + } + + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } + + fn try_fold(&mut self, init: B, f: F) -> R + where + Self: Sized, + F: FnMut(B, Self::Item) -> R, + R: Try, + { + self.it.try_fold(init, clone_try_fold(f)) + } + + fn fold(self, init: Acc, f: F) -> Acc + where + F: FnMut(Acc, Self::Item) -> Acc, + { + self.it.map(T::clone).fold(init, f) + } + + unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> T + where + Self: TrustedRandomAccess, + { + // SAFETY: the caller must uphold the contract for + // `Iterator::__iterator_get_unchecked`. 
+ unsafe { try_get_unchecked(&mut self.it, idx).clone() } + } +} + +#[stable(feature = "iter_cloned", since = "1.1.0")] +impl<'a, I, T: 'a> DoubleEndedIterator for Cloned +where + I: DoubleEndedIterator, + T: Clone, +{ + fn next_back(&mut self) -> Option { + self.it.next_back().cloned() + } + + fn try_rfold(&mut self, init: B, f: F) -> R + where + Self: Sized, + F: FnMut(B, Self::Item) -> R, + R: Try, + { + self.it.try_rfold(init, clone_try_fold(f)) + } + + fn rfold(self, init: Acc, f: F) -> Acc + where + F: FnMut(Acc, Self::Item) -> Acc, + { + self.it.map(T::clone).rfold(init, f) + } +} + +#[stable(feature = "iter_cloned", since = "1.1.0")] +impl<'a, I, T: 'a> ExactSizeIterator for Cloned +where + I: ExactSizeIterator, + T: Clone, +{ + fn len(&self) -> usize { + self.it.len() + } + + fn is_empty(&self) -> bool { + self.it.is_empty() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl<'a, I, T: 'a> FusedIterator for Cloned +where + I: FusedIterator, + T: Clone, +{ +} + +#[doc(hidden)] +#[unstable(feature = "trusted_random_access", issue = "none")] +unsafe impl TrustedRandomAccess for Cloned +where + I: TrustedRandomAccess, +{ + #[inline] + fn may_have_side_effect() -> bool { + true + } +} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, I, T: 'a> TrustedLen for Cloned +where + I: TrustedLen, + T: Clone, +{ +} diff --git a/library/core/src/iter/adapters/copied.rs b/library/core/src/iter/adapters/copied.rs new file mode 100644 index 00000000000..46f22354111 --- /dev/null +++ b/library/core/src/iter/adapters/copied.rs @@ -0,0 +1,155 @@ +use crate::iter::adapters::{zip::try_get_unchecked, TrustedRandomAccess}; +use crate::iter::{FusedIterator, TrustedLen}; +use crate::ops::Try; + +/// An iterator that copies the elements of an underlying iterator. +/// +/// This `struct` is created by the [`copied`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`copied`]: Iterator::copied +/// [`Iterator`]: trait.Iterator.html +#[stable(feature = "iter_copied", since = "1.36.0")] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[derive(Clone, Debug)] +pub struct Copied { + it: I, +} + +impl Copied { + pub(in crate::iter) fn new(it: I) -> Copied { + Copied { it } + } +} + +fn copy_fold(mut f: impl FnMut(Acc, T) -> Acc) -> impl FnMut(Acc, &T) -> Acc { + move |acc, &elt| f(acc, elt) +} + +fn copy_try_fold(mut f: impl FnMut(Acc, T) -> R) -> impl FnMut(Acc, &T) -> R { + move |acc, &elt| f(acc, elt) +} + +#[stable(feature = "iter_copied", since = "1.36.0")] +impl<'a, I, T: 'a> Iterator for Copied +where + I: Iterator, + T: Copy, +{ + type Item = T; + + fn next(&mut self) -> Option { + self.it.next().copied() + } + + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } + + fn try_fold(&mut self, init: B, f: F) -> R + where + Self: Sized, + F: FnMut(B, Self::Item) -> R, + R: Try, + { + self.it.try_fold(init, copy_try_fold(f)) + } + + fn fold(self, init: Acc, f: F) -> Acc + where + F: FnMut(Acc, Self::Item) -> Acc, + { + self.it.fold(init, copy_fold(f)) + } + + fn nth(&mut self, n: usize) -> Option { + self.it.nth(n).copied() + } + + fn last(self) -> Option { + self.it.last().copied() + } + + fn count(self) -> usize { + self.it.count() + } + + unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> T + where + Self: TrustedRandomAccess, + { + // SAFETY: the caller must uphold the contract for + // `Iterator::__iterator_get_unchecked`. 
+ *unsafe { try_get_unchecked(&mut self.it, idx) } + } +} + +#[stable(feature = "iter_copied", since = "1.36.0")] +impl<'a, I, T: 'a> DoubleEndedIterator for Copied +where + I: DoubleEndedIterator, + T: Copy, +{ + fn next_back(&mut self) -> Option { + self.it.next_back().copied() + } + + fn try_rfold(&mut self, init: B, f: F) -> R + where + Self: Sized, + F: FnMut(B, Self::Item) -> R, + R: Try, + { + self.it.try_rfold(init, copy_try_fold(f)) + } + + fn rfold(self, init: Acc, f: F) -> Acc + where + F: FnMut(Acc, Self::Item) -> Acc, + { + self.it.rfold(init, copy_fold(f)) + } +} + +#[stable(feature = "iter_copied", since = "1.36.0")] +impl<'a, I, T: 'a> ExactSizeIterator for Copied +where + I: ExactSizeIterator, + T: Copy, +{ + fn len(&self) -> usize { + self.it.len() + } + + fn is_empty(&self) -> bool { + self.it.is_empty() + } +} + +#[stable(feature = "iter_copied", since = "1.36.0")] +impl<'a, I, T: 'a> FusedIterator for Copied +where + I: FusedIterator, + T: Copy, +{ +} + +#[doc(hidden)] +#[unstable(feature = "trusted_random_access", issue = "none")] +unsafe impl TrustedRandomAccess for Copied +where + I: TrustedRandomAccess, +{ + #[inline] + fn may_have_side_effect() -> bool { + I::may_have_side_effect() + } +} + +#[stable(feature = "iter_copied", since = "1.36.0")] +unsafe impl<'a, I, T: 'a> TrustedLen for Copied +where + I: TrustedLen, + T: Copy, +{ +} diff --git a/library/core/src/iter/adapters/cycle.rs b/library/core/src/iter/adapters/cycle.rs new file mode 100644 index 00000000000..6e9a011f819 --- /dev/null +++ b/library/core/src/iter/adapters/cycle.rs @@ -0,0 +1,87 @@ +use crate::{iter::FusedIterator, ops::Try}; + +/// An iterator that repeats endlessly. +/// +/// This `struct` is created by the [`cycle`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`cycle`]: Iterator::cycle +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Cycle { + orig: I, + iter: I, +} + +impl Cycle { + pub(in crate::iter) fn new(iter: I) -> Cycle { + Cycle { orig: iter.clone(), iter } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Cycle +where + I: Clone + Iterator, +{ + type Item = ::Item; + + #[inline] + fn next(&mut self) -> Option<::Item> { + match self.iter.next() { + None => { + self.iter = self.orig.clone(); + self.iter.next() + } + y => y, + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + // the cycle iterator is either empty or infinite + match self.orig.size_hint() { + sz @ (0, Some(0)) => sz, + (0, _) => (0, None), + _ => (usize::MAX, None), + } + } + + #[inline] + fn try_fold(&mut self, mut acc: Acc, mut f: F) -> R + where + F: FnMut(Acc, Self::Item) -> R, + R: Try, + { + // fully iterate the current iterator. this is necessary because + // `self.iter` may be empty even when `self.orig` isn't + acc = self.iter.try_fold(acc, &mut f)?; + self.iter = self.orig.clone(); + + // complete a full cycle, keeping track of whether the cycled + // iterator is empty or not. 
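// Usage sketch for the `Cloned` and `Copied` adapters that were moved into their
// own modules above; the move does not change their behaviour.
fn main() {
    let words = vec![String::from("a"), String::from("b")];
    // `cloned()` turns an iterator over `&String` into owned `String`s.
    let owned: Vec<String> = words.iter().cloned().collect();
    assert_eq!(owned, words);

    let nums = [1, 2, 3];
    // `copied()` is the equivalent for `Copy` element types.
    let sum: i32 = nums.iter().copied().sum();
    assert_eq!(sum, 6);
}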
we need to return early in case + // of an empty iterator to prevent an infinite loop + let mut is_empty = true; + acc = self.iter.try_fold(acc, |acc, x| { + is_empty = false; + f(acc, x) + })?; + + if is_empty { + return try { acc }; + } + + loop { + self.iter = self.orig.clone(); + acc = self.iter.try_fold(acc, &mut f)?; + } + } + + // No `fold` override, because `fold` doesn't make much sense for `Cycle`, + // and we can't do anything better than the default. +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Cycle where I: Clone + Iterator {} diff --git a/library/core/src/iter/adapters/enumerate.rs b/library/core/src/iter/adapters/enumerate.rs new file mode 100644 index 00000000000..5978c2da98c --- /dev/null +++ b/library/core/src/iter/adapters/enumerate.rs @@ -0,0 +1,238 @@ +use crate::iter::adapters::{zip::try_get_unchecked, SourceIter, TrustedRandomAccess}; +use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen}; +use crate::ops::{Add, AddAssign, Try}; + +/// An iterator that yields the current count and the element during iteration. +/// +/// This `struct` is created by the [`enumerate`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`enumerate`]: Iterator::enumerate +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Enumerate { + iter: I, + count: usize, +} +impl Enumerate { + pub(in crate::iter) fn new(iter: I) -> Enumerate { + Enumerate { iter, count: 0 } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Enumerate +where + I: Iterator, +{ + type Item = (usize, ::Item); + + /// # Overflow Behavior + /// + /// The method does no guarding against overflows, so enumerating more than + /// `usize::MAX` elements either produces the wrong result or panics. If + /// debug assertions are enabled, a panic is guaranteed. + /// + /// # Panics + /// + /// Might panic if the index of the element overflows a `usize`. + #[inline] + fn next(&mut self) -> Option<(usize, ::Item)> { + let a = self.iter.next()?; + let i = self.count; + // Possible undefined overflow. + AddAssign::add_assign(&mut self.count, 1); + Some((i, a)) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + #[inline] + fn nth(&mut self, n: usize) -> Option<(usize, I::Item)> { + let a = self.iter.nth(n)?; + // Possible undefined overflow. + let i = Add::add(self.count, n); + self.count = Add::add(i, 1); + Some((i, a)) + } + + #[inline] + fn count(self) -> usize { + self.iter.count() + } + + #[inline] + fn try_fold(&mut self, init: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + #[inline] + fn enumerate<'a, T, Acc, R>( + count: &'a mut usize, + mut fold: impl FnMut(Acc, (usize, T)) -> R + 'a, + ) -> impl FnMut(Acc, T) -> R + 'a { + move |acc, item| { + let acc = fold(acc, (*count, item)); + // Possible undefined overflow. + AddAssign::add_assign(count, 1); + acc + } + } + + self.iter.try_fold(init, enumerate(&mut self.count, fold)) + } + + #[inline] + fn fold(self, init: Acc, fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + #[inline] + fn enumerate( + mut count: usize, + mut fold: impl FnMut(Acc, (usize, T)) -> Acc, + ) -> impl FnMut(Acc, T) -> Acc { + move |acc, item| { + let acc = fold(acc, (count, item)); + // Possible undefined overflow. 
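// Usage sketch for the relocated `Cycle` adapter: it repeats the underlying
// iterator forever, so it is normally bounded with something like `take`.
fn main() {
    let pattern = [1, 2, 3];
    let repeated: Vec<i32> = pattern.iter().copied().cycle().take(7).collect();
    assert_eq!(repeated, [1, 2, 3, 1, 2, 3, 1]);

    // As in the `size_hint` above: an empty source stays empty, anything else is unbounded.
    assert_eq!(std::iter::empty::<i32>().cycle().size_hint(), (0, Some(0)));
    assert_eq!(pattern.iter().cycle().size_hint(), (usize::MAX, None));
}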
+ AddAssign::add_assign(&mut count, 1); + acc + } + } + + self.iter.fold(init, enumerate(self.count, fold)) + } + + unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> ::Item + where + Self: TrustedRandomAccess, + { + // SAFETY: the caller must uphold the contract for + // `Iterator::__iterator_get_unchecked`. + let value = unsafe { try_get_unchecked(&mut self.iter, idx) }; + (Add::add(self.count, idx), value) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Enumerate +where + I: ExactSizeIterator + DoubleEndedIterator, +{ + #[inline] + fn next_back(&mut self) -> Option<(usize, ::Item)> { + let a = self.iter.next_back()?; + let len = self.iter.len(); + // Can safely add, `ExactSizeIterator` promises that the number of + // elements fits into a `usize`. + Some((self.count + len, a)) + } + + #[inline] + fn nth_back(&mut self, n: usize) -> Option<(usize, ::Item)> { + let a = self.iter.nth_back(n)?; + let len = self.iter.len(); + // Can safely add, `ExactSizeIterator` promises that the number of + // elements fits into a `usize`. + Some((self.count + len, a)) + } + + #[inline] + fn try_rfold(&mut self, init: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + // Can safely add and subtract the count, as `ExactSizeIterator` promises + // that the number of elements fits into a `usize`. + fn enumerate( + mut count: usize, + mut fold: impl FnMut(Acc, (usize, T)) -> R, + ) -> impl FnMut(Acc, T) -> R { + move |acc, item| { + count -= 1; + fold(acc, (count, item)) + } + } + + let count = self.count + self.iter.len(); + self.iter.try_rfold(init, enumerate(count, fold)) + } + + #[inline] + fn rfold(self, init: Acc, fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + // Can safely add and subtract the count, as `ExactSizeIterator` promises + // that the number of elements fits into a `usize`. 
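// Usage sketch for the relocated `Enumerate` adapter, matching the behaviour
// documented above: the count is not overflow-checked in release builds, and
// double-ended iteration still reports indices counted from the front.
fn main() {
    let letters = ['a', 'b', 'c'];
    let indexed: Vec<(usize, char)> = letters.iter().copied().enumerate().collect();
    assert_eq!(indexed, [(0, 'a'), (1, 'b'), (2, 'c')]);

    // `next_back` computes `count + len`, so indices stay anchored to the front.
    let mut iter = letters.iter().copied().enumerate();
    assert_eq!(iter.next_back(), Some((2, 'c')));
    assert_eq!(iter.next(), Some((0, 'a')));
}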
+ fn enumerate( + mut count: usize, + mut fold: impl FnMut(Acc, (usize, T)) -> Acc, + ) -> impl FnMut(Acc, T) -> Acc { + move |acc, item| { + count -= 1; + fold(acc, (count, item)) + } + } + + let count = self.count + self.iter.len(); + self.iter.rfold(init, enumerate(count, fold)) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Enumerate +where + I: ExactSizeIterator, +{ + fn len(&self) -> usize { + self.iter.len() + } + + fn is_empty(&self) -> bool { + self.iter.is_empty() + } +} + +#[doc(hidden)] +#[unstable(feature = "trusted_random_access", issue = "none")] +unsafe impl TrustedRandomAccess for Enumerate +where + I: TrustedRandomAccess, +{ + fn may_have_side_effect() -> bool { + I::may_have_side_effect() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Enumerate where I: FusedIterator {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Enumerate where I: TrustedLen {} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl SourceIter for Enumerate +where + I: SourceIter, +{ + type Source = S; + + #[inline] + unsafe fn as_inner(&mut self) -> &mut S { + // SAFETY: unsafe function forwarding to unsafe function with the same requirements + unsafe { SourceIter::as_inner(&mut self.iter) } + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl InPlaceIterable for Enumerate {} diff --git a/library/core/src/iter/adapters/filter.rs b/library/core/src/iter/adapters/filter.rs new file mode 100644 index 00000000000..f8d684fcdda --- /dev/null +++ b/library/core/src/iter/adapters/filter.rs @@ -0,0 +1,152 @@ +use crate::fmt; +use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable}; +use crate::ops::Try; + +/// An iterator that filters the elements of `iter` with `predicate`. +/// +/// This `struct` is created by the [`filter`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`filter`]: Iterator::filter +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct Filter { + iter: I, + predicate: P, +} +impl Filter { + pub(in crate::iter) fn new(iter: I, predicate: P) -> Filter { + Filter { iter, predicate } + } +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for Filter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Filter").field("iter", &self.iter).finish() + } +} + +fn filter_fold( + mut predicate: impl FnMut(&T) -> bool, + mut fold: impl FnMut(Acc, T) -> Acc, +) -> impl FnMut(Acc, T) -> Acc { + move |acc, item| if predicate(&item) { fold(acc, item) } else { acc } +} + +fn filter_try_fold<'a, T, Acc, R: Try>( + predicate: &'a mut impl FnMut(&T) -> bool, + mut fold: impl FnMut(Acc, T) -> R + 'a, +) -> impl FnMut(Acc, T) -> R + 'a { + move |acc, item| if predicate(&item) { fold(acc, item) } else { try { acc } } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Filter +where + P: FnMut(&I::Item) -> bool, +{ + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + self.iter.find(&mut self.predicate) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) // can't know a lower bound, due to the predicate + } + + // this special case allows the compiler to make `.filter(_).count()` + // branchless. 
Barring perfect branch prediction (which is unattainable in + // the general case), this will be much faster in >90% of cases (containing + // virtually all real workloads) and only a tiny bit slower in the rest. + // + // Having this specialization thus allows us to write `.filter(p).count()` + // where we would otherwise write `.map(|x| p(x) as usize).sum()`, which is + // less readable and also less backwards-compatible to Rust before 1.10. + // + // Using the branchless version will also simplify the LLVM byte code, thus + // leaving more budget for LLVM optimizations. + #[inline] + fn count(self) -> usize { + #[inline] + fn to_usize(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut(T) -> usize { + move |x| predicate(&x) as usize + } + + self.iter.map(to_usize(self.predicate)).sum() + } + + #[inline] + fn try_fold(&mut self, init: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + self.iter.try_fold(init, filter_try_fold(&mut self.predicate, fold)) + } + + #[inline] + fn fold(self, init: Acc, fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.fold(init, filter_fold(self.predicate, fold)) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Filter +where + P: FnMut(&I::Item) -> bool, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.iter.rfind(&mut self.predicate) + } + + #[inline] + fn try_rfold(&mut self, init: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + self.iter.try_rfold(init, filter_try_fold(&mut self.predicate, fold)) + } + + #[inline] + fn rfold(self, init: Acc, fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.rfold(init, filter_fold(self.predicate, fold)) + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Filter where P: FnMut(&I::Item) -> bool {} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl SourceIter for Filter +where + P: FnMut(&I::Item) -> bool, + I: SourceIter, +{ + type Source = S; + + #[inline] + unsafe fn as_inner(&mut self) -> &mut S { + // SAFETY: unsafe function forwarding to unsafe function with the same requirements + unsafe { SourceIter::as_inner(&mut self.iter) } + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl InPlaceIterable for Filter where P: FnMut(&I::Item) -> bool {} diff --git a/library/core/src/iter/adapters/filter_map.rs b/library/core/src/iter/adapters/filter_map.rs new file mode 100644 index 00000000000..0dccf2c533b --- /dev/null +++ b/library/core/src/iter/adapters/filter_map.rs @@ -0,0 +1,150 @@ +use crate::fmt; +use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable}; +use crate::ops::{ControlFlow, Try}; + +/// An iterator that uses `f` to both filter and map elements from `iter`. +/// +/// This `struct` is created by the [`filter_map`] method on [`Iterator`]. See its +/// documentation for more. 
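// Usage sketch for the relocated `Filter` and `FilterMap` adapters. As explained
// above, the `count` override turns each predicate result into 0 or 1 and sums,
// instead of branching; both forms below count the same matches.
fn main() {
    let nums = [1, 2, 3, 4, 5, 6];
    assert_eq!(nums.iter().filter(|n| *n % 2 == 0).count(), 3);
    assert_eq!(nums.iter().map(|n| (n % 2 == 0) as usize).sum::<usize>(), 3);

    // `filter_map` filters and maps in a single pass.
    let inputs = ["1", "two", "3"];
    let parsed: Vec<i32> = inputs.iter().filter_map(|s| s.parse().ok()).collect();
    assert_eq!(parsed, [1, 3]);
}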
+/// +/// [`filter_map`]: Iterator::filter_map +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct FilterMap { + iter: I, + f: F, +} +impl FilterMap { + pub(in crate::iter) fn new(iter: I, f: F) -> FilterMap { + FilterMap { iter, f } + } +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for FilterMap { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("FilterMap").field("iter", &self.iter).finish() + } +} + +fn filter_map_fold( + mut f: impl FnMut(T) -> Option, + mut fold: impl FnMut(Acc, B) -> Acc, +) -> impl FnMut(Acc, T) -> Acc { + move |acc, item| match f(item) { + Some(x) => fold(acc, x), + None => acc, + } +} + +fn filter_map_try_fold<'a, T, B, Acc, R: Try>( + f: &'a mut impl FnMut(T) -> Option, + mut fold: impl FnMut(Acc, B) -> R + 'a, +) -> impl FnMut(Acc, T) -> R + 'a { + move |acc, item| match f(item) { + Some(x) => fold(acc, x), + None => try { acc }, + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for FilterMap +where + F: FnMut(I::Item) -> Option, +{ + type Item = B; + + #[inline] + fn next(&mut self) -> Option { + self.iter.find_map(&mut self.f) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) // can't know a lower bound, due to the predicate + } + + #[inline] + fn try_fold(&mut self, init: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + self.iter.try_fold(init, filter_map_try_fold(&mut self.f, fold)) + } + + #[inline] + fn fold(self, init: Acc, fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.fold(init, filter_map_fold(self.f, fold)) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for FilterMap +where + F: FnMut(I::Item) -> Option, +{ + #[inline] + fn next_back(&mut self) -> Option { + #[inline] + fn find( + f: &mut impl FnMut(T) -> Option, + ) -> impl FnMut((), T) -> ControlFlow + '_ { + move |(), x| match f(x) { + Some(x) => ControlFlow::Break(x), + None => ControlFlow::CONTINUE, + } + } + + self.iter.try_rfold((), find(&mut self.f)).break_value() + } + + #[inline] + fn try_rfold(&mut self, init: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + self.iter.try_rfold(init, filter_map_try_fold(&mut self.f, fold)) + } + + #[inline] + fn rfold(self, init: Acc, fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.rfold(init, filter_map_fold(self.f, fold)) + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for FilterMap where F: FnMut(I::Item) -> Option {} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl SourceIter for FilterMap +where + F: FnMut(I::Item) -> Option, + I: SourceIter, +{ + type Source = S; + + #[inline] + unsafe fn as_inner(&mut self) -> &mut S { + // SAFETY: unsafe function forwarding to unsafe function with the same requirements + unsafe { SourceIter::as_inner(&mut self.iter) } + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl InPlaceIterable for FilterMap where + F: FnMut(I::Item) -> Option +{ +} diff --git a/library/core/src/iter/adapters/flatten.rs b/library/core/src/iter/adapters/flatten.rs index 96d0a60a327..ff85e114dc9 100644 --- a/library/core/src/iter/adapters/flatten.rs +++ 
b/library/core/src/iter/adapters/flatten.rs @@ -1,9 +1,7 @@ use crate::fmt; +use crate::iter::{DoubleEndedIterator, Fuse, FusedIterator, Iterator, Map}; use crate::ops::Try; -use super::super::{DoubleEndedIterator, Fuse, FusedIterator, Iterator}; -use super::Map; - /// An iterator that maps each element to an iterator, and yields the elements /// of the produced iterators. /// @@ -14,8 +12,9 @@ use super::Map; pub struct FlatMap { inner: FlattenCompat, ::IntoIter>, } + impl U> FlatMap { - pub(in super::super) fn new(iter: I, f: F) -> FlatMap { + pub(in crate::iter) fn new(iter: I, f: F) -> FlatMap { FlatMap { inner: FlattenCompat::new(iter.map(f)) } } } diff --git a/library/core/src/iter/adapters/fuse.rs b/library/core/src/iter/adapters/fuse.rs index 60ac3524e66..ae074065315 100644 --- a/library/core/src/iter/adapters/fuse.rs +++ b/library/core/src/iter/adapters/fuse.rs @@ -1,9 +1,6 @@ -use super::InPlaceIterable; use crate::intrinsics; -use crate::iter::adapters::zip::try_get_unchecked; -use crate::iter::adapters::SourceIter; -use crate::iter::TrustedRandomAccess; -use crate::iter::{DoubleEndedIterator, ExactSizeIterator, FusedIterator, Iterator}; +use crate::iter::adapters::{zip::try_get_unchecked, InPlaceIterable, SourceIter}; +use crate::iter::{DoubleEndedIterator, ExactSizeIterator, FusedIterator, TrustedRandomAccess}; use crate::ops::Try; /// An iterator that yields `None` forever after the underlying iterator diff --git a/library/core/src/iter/adapters/inspect.rs b/library/core/src/iter/adapters/inspect.rs new file mode 100644 index 00000000000..88f5ee61b6b --- /dev/null +++ b/library/core/src/iter/adapters/inspect.rs @@ -0,0 +1,167 @@ +use crate::fmt; +use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable}; +use crate::ops::Try; + +/// An iterator that calls a function with a reference to each element before +/// yielding it. +/// +/// This `struct` is created by the [`inspect`] method on [`Iterator`]. See its +/// documentation for more. 
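// Usage sketch for the relocated `Inspect` adapter: it passes each element
// through unchanged while letting a closure observe it, which is handy for
// debugging the middle of an iterator pipeline.
fn main() {
    let sum: i32 = [1, 2, 3]
        .iter()
        .inspect(|n| println!("about to add {}", n))
        .sum();
    assert_eq!(sum, 6);
}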
+/// +/// [`inspect`]: Iterator::inspect +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct Inspect { + iter: I, + f: F, +} +impl Inspect { + pub(in crate::iter) fn new(iter: I, f: F) -> Inspect { + Inspect { iter, f } + } +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for Inspect { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Inspect").field("iter", &self.iter).finish() + } +} + +impl Inspect +where + F: FnMut(&I::Item), +{ + #[inline] + fn do_inspect(&mut self, elt: Option) -> Option { + if let Some(ref a) = elt { + (self.f)(a); + } + + elt + } +} + +fn inspect_fold( + mut f: impl FnMut(&T), + mut fold: impl FnMut(Acc, T) -> Acc, +) -> impl FnMut(Acc, T) -> Acc { + move |acc, item| { + f(&item); + fold(acc, item) + } +} + +fn inspect_try_fold<'a, T, Acc, R>( + f: &'a mut impl FnMut(&T), + mut fold: impl FnMut(Acc, T) -> R + 'a, +) -> impl FnMut(Acc, T) -> R + 'a { + move |acc, item| { + f(&item); + fold(acc, item) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Inspect +where + F: FnMut(&I::Item), +{ + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + let next = self.iter.next(); + self.do_inspect(next) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + #[inline] + fn try_fold(&mut self, init: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + self.iter.try_fold(init, inspect_try_fold(&mut self.f, fold)) + } + + #[inline] + fn fold(self, init: Acc, fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.fold(init, inspect_fold(self.f, fold)) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Inspect +where + F: FnMut(&I::Item), +{ + #[inline] + fn next_back(&mut self) -> Option { + let next = self.iter.next_back(); + self.do_inspect(next) + } + + #[inline] + fn try_rfold(&mut self, init: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + self.iter.try_rfold(init, inspect_try_fold(&mut self.f, fold)) + } + + #[inline] + fn rfold(self, init: Acc, fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.rfold(init, inspect_fold(self.f, fold)) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Inspect +where + F: FnMut(&I::Item), +{ + fn len(&self) -> usize { + self.iter.len() + } + + fn is_empty(&self) -> bool { + self.iter.is_empty() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Inspect where F: FnMut(&I::Item) {} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl SourceIter for Inspect +where + F: FnMut(&I::Item), + I: SourceIter, +{ + type Source = S; + + #[inline] + unsafe fn as_inner(&mut self) -> &mut S { + // SAFETY: unsafe function forwarding to unsafe function with the same requirements + unsafe { SourceIter::as_inner(&mut self.iter) } + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl InPlaceIterable for Inspect where F: FnMut(&I::Item) {} diff --git a/library/core/src/iter/adapters/map.rs b/library/core/src/iter/adapters/map.rs new file mode 100644 index 00000000000..12673806ec4 --- /dev/null +++ b/library/core/src/iter/adapters/map.rs @@ -0,0 +1,213 @@ +use crate::fmt; +use 
crate::iter::adapters::{zip::try_get_unchecked, SourceIter, TrustedRandomAccess}; +use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen}; +use crate::ops::Try; + +/// An iterator that maps the values of `iter` with `f`. +/// +/// This `struct` is created by the [`map`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`map`]: Iterator::map +/// [`Iterator`]: trait.Iterator.html +/// +/// # Notes about side effects +/// +/// The [`map`] iterator implements [`DoubleEndedIterator`], meaning that +/// you can also [`map`] backwards: +/// +/// ```rust +/// let v: Vec = vec![1, 2, 3].into_iter().map(|x| x + 1).rev().collect(); +/// +/// assert_eq!(v, [4, 3, 2]); +/// ``` +/// +/// [`DoubleEndedIterator`]: trait.DoubleEndedIterator.html +/// +/// But if your closure has state, iterating backwards may act in a way you do +/// not expect. Let's go through an example. First, in the forward direction: +/// +/// ```rust +/// let mut c = 0; +/// +/// for pair in vec!['a', 'b', 'c'].into_iter() +/// .map(|letter| { c += 1; (letter, c) }) { +/// println!("{:?}", pair); +/// } +/// ``` +/// +/// This will print "('a', 1), ('b', 2), ('c', 3)". +/// +/// Now consider this twist where we add a call to `rev`. This version will +/// print `('c', 1), ('b', 2), ('a', 3)`. Note that the letters are reversed, +/// but the values of the counter still go in order. This is because `map()` is +/// still being called lazily on each item, but we are popping items off the +/// back of the vector now, instead of shifting them from the front. +/// +/// ```rust +/// let mut c = 0; +/// +/// for pair in vec!['a', 'b', 'c'].into_iter() +/// .map(|letter| { c += 1; (letter, c) }) +/// .rev() { +/// println!("{:?}", pair); +/// } +/// ``` +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct Map { + iter: I, + f: F, +} +impl Map { + pub(in crate::iter) fn new(iter: I, f: F) -> Map { + Map { iter, f } + } +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for Map { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Map").field("iter", &self.iter).finish() + } +} + +fn map_fold( + mut f: impl FnMut(T) -> B, + mut g: impl FnMut(Acc, B) -> Acc, +) -> impl FnMut(Acc, T) -> Acc { + move |acc, elt| g(acc, f(elt)) +} + +fn map_try_fold<'a, T, B, Acc, R>( + f: &'a mut impl FnMut(T) -> B, + mut g: impl FnMut(Acc, B) -> R + 'a, +) -> impl FnMut(Acc, T) -> R + 'a { + move |acc, elt| g(acc, f(elt)) +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Map +where + F: FnMut(I::Item) -> B, +{ + type Item = B; + + #[inline] + fn next(&mut self) -> Option { + self.iter.next().map(&mut self.f) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + fn try_fold(&mut self, init: Acc, g: G) -> R + where + Self: Sized, + G: FnMut(Acc, Self::Item) -> R, + R: Try, + { + self.iter.try_fold(init, map_try_fold(&mut self.f, g)) + } + + fn fold(self, init: Acc, g: G) -> Acc + where + G: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.fold(init, map_fold(self.f, g)) + } + + unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> B + where + Self: TrustedRandomAccess, + { + // SAFETY: the caller must uphold the contract for + // `Iterator::__iterator_get_unchecked`. 
+ unsafe { (self.f)(try_get_unchecked(&mut self.iter, idx)) } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Map +where + F: FnMut(I::Item) -> B, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.iter.next_back().map(&mut self.f) + } + + fn try_rfold(&mut self, init: Acc, g: G) -> R + where + Self: Sized, + G: FnMut(Acc, Self::Item) -> R, + R: Try, + { + self.iter.try_rfold(init, map_try_fold(&mut self.f, g)) + } + + fn rfold(self, init: Acc, g: G) -> Acc + where + G: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.rfold(init, map_fold(self.f, g)) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Map +where + F: FnMut(I::Item) -> B, +{ + fn len(&self) -> usize { + self.iter.len() + } + + fn is_empty(&self) -> bool { + self.iter.is_empty() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Map where F: FnMut(I::Item) -> B {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Map +where + I: TrustedLen, + F: FnMut(I::Item) -> B, +{ +} + +#[doc(hidden)] +#[unstable(feature = "trusted_random_access", issue = "none")] +unsafe impl TrustedRandomAccess for Map +where + I: TrustedRandomAccess, +{ + #[inline] + fn may_have_side_effect() -> bool { + true + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl SourceIter for Map +where + F: FnMut(I::Item) -> B, + I: SourceIter, +{ + type Source = S; + + #[inline] + unsafe fn as_inner(&mut self) -> &mut S { + // SAFETY: unsafe function forwarding to unsafe function with the same requirements + unsafe { SourceIter::as_inner(&mut self.iter) } + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl InPlaceIterable for Map where F: FnMut(I::Item) -> B {} diff --git a/library/core/src/iter/adapters/map_while.rs b/library/core/src/iter/adapters/map_while.rs new file mode 100644 index 00000000000..26114d53284 --- /dev/null +++ b/library/core/src/iter/adapters/map_while.rs @@ -0,0 +1,101 @@ +use crate::fmt; +use crate::iter::{adapters::SourceIter, InPlaceIterable}; +use crate::ops::{ControlFlow, Try}; + +/// An iterator that only accepts elements while `predicate` returns `Some(_)`. +/// +/// This `struct` is created by the [`map_while`] method on [`Iterator`]. See its +/// documentation for more. 
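// Usage sketch for the `MapWhile` adapter introduced above; `map_while` is
// nightly-only here (feature `iter_map_while`, issue #68537), which is a
// toolchain assumption rather than part of the diff.
#![feature(iter_map_while)]

fn main() {
    let inputs = ["1", "2", "nope", "4"];
    // Mapping stops at the first `None`, so "4" is never reached.
    let parsed: Vec<i32> = inputs.iter().map_while(|s| s.parse().ok()).collect();
    assert_eq!(parsed, [1, 2]);
}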
+/// +/// [`map_while`]: Iterator::map_while +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[unstable(feature = "iter_map_while", reason = "recently added", issue = "68537")] +#[derive(Clone)] +pub struct MapWhile { + iter: I, + predicate: P, +} + +impl MapWhile { + pub(in crate::iter) fn new(iter: I, predicate: P) -> MapWhile { + MapWhile { iter, predicate } + } +} + +#[unstable(feature = "iter_map_while", reason = "recently added", issue = "68537")] +impl fmt::Debug for MapWhile { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapWhile").field("iter", &self.iter).finish() + } +} + +#[unstable(feature = "iter_map_while", reason = "recently added", issue = "68537")] +impl Iterator for MapWhile +where + P: FnMut(I::Item) -> Option, +{ + type Item = B; + + #[inline] + fn next(&mut self) -> Option { + let x = self.iter.next()?; + (self.predicate)(x) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) // can't know a lower bound, due to the predicate + } + + #[inline] + fn try_fold(&mut self, init: Acc, mut fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + let Self { iter, predicate } = self; + iter.try_fold(init, |acc, x| match predicate(x) { + Some(item) => ControlFlow::from_try(fold(acc, item)), + None => ControlFlow::Break(try { acc }), + }) + .into_try() + } + + #[inline] + fn fold(mut self, init: Acc, fold: Fold) -> Acc + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> Acc, + { + #[inline] + fn ok(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result { + move |acc, x| Ok(f(acc, x)) + } + + self.try_fold(init, ok(fold)).unwrap() + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl SourceIter for MapWhile +where + P: FnMut(I::Item) -> Option, + I: SourceIter, +{ + type Source = S; + + #[inline] + unsafe fn as_inner(&mut self) -> &mut S { + // SAFETY: unsafe function forwarding to unsafe function with the same requirements + unsafe { SourceIter::as_inner(&mut self.iter) } + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl InPlaceIterable for MapWhile where + P: FnMut(I::Item) -> Option +{ +} diff --git a/library/core/src/iter/adapters/mod.rs b/library/core/src/iter/adapters/mod.rs index 9586284e1d7..5ef5717085e 100644 --- a/library/core/src/iter/adapters/mod.rs +++ b/library/core/src/iter/adapters/mod.rs @@ -1,26 +1,51 @@ -use crate::cmp; -use crate::fmt; -use crate::intrinsics; -use crate::ops::{Add, AddAssign, ControlFlow, Try}; - -use super::from_fn; -use super::{ - DoubleEndedIterator, ExactSizeIterator, FusedIterator, InPlaceIterable, Iterator, TrustedLen, -}; +use crate::iter::{InPlaceIterable, Iterator}; +use crate::ops::{ControlFlow, Try}; mod chain; +mod cloned; +mod copied; +mod cycle; +mod enumerate; +mod filter; +mod filter_map; mod flatten; mod fuse; +mod inspect; +mod map; +mod map_while; +mod peekable; +mod rev; +mod scan; +mod skip; +mod skip_while; +mod step_by; +mod take; +mod take_while; mod zip; -pub use self::chain::Chain; -#[stable(feature = "rust1", since = "1.0.0")] -pub use self::flatten::{FlatMap, Flatten}; -pub use self::fuse::Fuse; -use self::zip::try_get_unchecked; +pub use self::{ + chain::Chain, cycle::Cycle, enumerate::Enumerate, filter::Filter, filter_map::FilterMap, + flatten::FlatMap, fuse::Fuse, inspect::Inspect, map::Map, peekable::Peekable, rev::Rev, + scan::Scan, skip::Skip, 
skip_while::SkipWhile, take::Take, take_while::TakeWhile, zip::Zip, +}; + +#[stable(feature = "iter_cloned", since = "1.1.0")] +pub use self::cloned::Cloned; + +#[stable(feature = "iterator_step_by", since = "1.28.0")] +pub use self::step_by::StepBy; + +#[stable(feature = "iterator_flatten", since = "1.29.0")] +pub use self::flatten::Flatten; + +#[stable(feature = "iter_copied", since = "1.36.0")] +pub use self::copied::Copied; + +#[unstable(feature = "iter_map_while", reason = "recently added", issue = "68537")] +pub use self::map_while::MapWhile; + #[unstable(feature = "trusted_random_access", issue = "none")] pub use self::zip::TrustedRandomAccess; -pub use self::zip::Zip; /// This trait provides transitive access to source-stage in an interator-adapter pipeline /// under the conditions that @@ -89,2810 +114,6 @@ pub unsafe trait SourceIter { unsafe fn as_inner(&mut self) -> &mut Self::Source; } -/// A double-ended iterator with the direction inverted. -/// -/// This `struct` is created by the [`rev`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`rev`]: Iterator::rev -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone, Debug)] -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Rev { - iter: T, -} -impl Rev { - pub(super) fn new(iter: T) -> Rev { - Rev { iter } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Rev -where - I: DoubleEndedIterator, -{ - type Item = ::Item; - - #[inline] - fn next(&mut self) -> Option<::Item> { - self.iter.next_back() - } - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - #[inline] - fn advance_by(&mut self, n: usize) -> Result<(), usize> { - self.iter.advance_back_by(n) - } - - #[inline] - fn nth(&mut self, n: usize) -> Option<::Item> { - self.iter.nth_back(n) - } - - fn try_fold(&mut self, init: B, f: F) -> R - where - Self: Sized, - F: FnMut(B, Self::Item) -> R, - R: Try, - { - self.iter.try_rfold(init, f) - } - - fn fold(self, init: Acc, f: F) -> Acc - where - F: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.rfold(init, f) - } - - #[inline] - fn find
<P>
(&mut self, predicate: P) -> Option - where - P: FnMut(&Self::Item) -> bool, - { - self.iter.rfind(predicate) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Rev -where - I: DoubleEndedIterator, -{ - #[inline] - fn next_back(&mut self) -> Option<::Item> { - self.iter.next() - } - - #[inline] - fn advance_back_by(&mut self, n: usize) -> Result<(), usize> { - self.iter.advance_by(n) - } - - #[inline] - fn nth_back(&mut self, n: usize) -> Option<::Item> { - self.iter.nth(n) - } - - fn try_rfold(&mut self, init: B, f: F) -> R - where - Self: Sized, - F: FnMut(B, Self::Item) -> R, - R: Try, - { - self.iter.try_fold(init, f) - } - - fn rfold(self, init: Acc, f: F) -> Acc - where - F: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.fold(init, f) - } - - fn rfind
<P>
(&mut self, predicate: P) -> Option - where - P: FnMut(&Self::Item) -> bool, - { - self.iter.find(predicate) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Rev -where - I: ExactSizeIterator + DoubleEndedIterator, -{ - fn len(&self) -> usize { - self.iter.len() - } - - fn is_empty(&self) -> bool { - self.iter.is_empty() - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Rev where I: FusedIterator + DoubleEndedIterator {} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for Rev where I: TrustedLen + DoubleEndedIterator {} - -/// An iterator that copies the elements of an underlying iterator. -/// -/// This `struct` is created by the [`copied`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`copied`]: Iterator::copied -/// [`Iterator`]: trait.Iterator.html -#[stable(feature = "iter_copied", since = "1.36.0")] -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[derive(Clone, Debug)] -pub struct Copied { - it: I, -} - -impl Copied { - pub(super) fn new(it: I) -> Copied { - Copied { it } - } -} - -fn copy_fold(mut f: impl FnMut(Acc, T) -> Acc) -> impl FnMut(Acc, &T) -> Acc { - move |acc, &elt| f(acc, elt) -} - -fn copy_try_fold(mut f: impl FnMut(Acc, T) -> R) -> impl FnMut(Acc, &T) -> R { - move |acc, &elt| f(acc, elt) -} - -#[stable(feature = "iter_copied", since = "1.36.0")] -impl<'a, I, T: 'a> Iterator for Copied -where - I: Iterator, - T: Copy, -{ - type Item = T; - - fn next(&mut self) -> Option { - self.it.next().copied() - } - - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } - - fn try_fold(&mut self, init: B, f: F) -> R - where - Self: Sized, - F: FnMut(B, Self::Item) -> R, - R: Try, - { - self.it.try_fold(init, copy_try_fold(f)) - } - - fn fold(self, init: Acc, f: F) -> Acc - where - F: FnMut(Acc, Self::Item) -> Acc, - { - self.it.fold(init, copy_fold(f)) - } - - fn nth(&mut self, n: usize) -> Option { - self.it.nth(n).copied() - } - - fn last(self) -> Option { - self.it.last().copied() - } - - fn count(self) -> usize { - self.it.count() - } - - unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> T - where - Self: TrustedRandomAccess, - { - // SAFETY: the caller must uphold the contract for - // `Iterator::__iterator_get_unchecked`. 
- *unsafe { try_get_unchecked(&mut self.it, idx) } - } -} - -#[stable(feature = "iter_copied", since = "1.36.0")] -impl<'a, I, T: 'a> DoubleEndedIterator for Copied -where - I: DoubleEndedIterator, - T: Copy, -{ - fn next_back(&mut self) -> Option { - self.it.next_back().copied() - } - - fn try_rfold(&mut self, init: B, f: F) -> R - where - Self: Sized, - F: FnMut(B, Self::Item) -> R, - R: Try, - { - self.it.try_rfold(init, copy_try_fold(f)) - } - - fn rfold(self, init: Acc, f: F) -> Acc - where - F: FnMut(Acc, Self::Item) -> Acc, - { - self.it.rfold(init, copy_fold(f)) - } -} - -#[stable(feature = "iter_copied", since = "1.36.0")] -impl<'a, I, T: 'a> ExactSizeIterator for Copied -where - I: ExactSizeIterator, - T: Copy, -{ - fn len(&self) -> usize { - self.it.len() - } - - fn is_empty(&self) -> bool { - self.it.is_empty() - } -} - -#[stable(feature = "iter_copied", since = "1.36.0")] -impl<'a, I, T: 'a> FusedIterator for Copied -where - I: FusedIterator, - T: Copy, -{ -} - -#[doc(hidden)] -#[unstable(feature = "trusted_random_access", issue = "none")] -unsafe impl TrustedRandomAccess for Copied -where - I: TrustedRandomAccess, -{ - #[inline] - fn may_have_side_effect() -> bool { - I::may_have_side_effect() - } -} - -#[stable(feature = "iter_copied", since = "1.36.0")] -unsafe impl<'a, I, T: 'a> TrustedLen for Copied -where - I: TrustedLen, - T: Copy, -{ -} - -/// An iterator that clones the elements of an underlying iterator. -/// -/// This `struct` is created by the [`cloned`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`cloned`]: Iterator::cloned -/// [`Iterator`]: trait.Iterator.html -#[stable(feature = "iter_cloned", since = "1.1.0")] -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[derive(Clone, Debug)] -pub struct Cloned { - it: I, -} -impl Cloned { - pub(super) fn new(it: I) -> Cloned { - Cloned { it } - } -} - -fn clone_try_fold(mut f: impl FnMut(Acc, T) -> R) -> impl FnMut(Acc, &T) -> R { - move |acc, elt| f(acc, elt.clone()) -} - -#[stable(feature = "iter_cloned", since = "1.1.0")] -impl<'a, I, T: 'a> Iterator for Cloned -where - I: Iterator, - T: Clone, -{ - type Item = T; - - fn next(&mut self) -> Option { - self.it.next().cloned() - } - - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } - - fn try_fold(&mut self, init: B, f: F) -> R - where - Self: Sized, - F: FnMut(B, Self::Item) -> R, - R: Try, - { - self.it.try_fold(init, clone_try_fold(f)) - } - - fn fold(self, init: Acc, f: F) -> Acc - where - F: FnMut(Acc, Self::Item) -> Acc, - { - self.it.map(T::clone).fold(init, f) - } - - unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> T - where - Self: TrustedRandomAccess, - { - // SAFETY: the caller must uphold the contract for - // `Iterator::__iterator_get_unchecked`. 
- unsafe { try_get_unchecked(&mut self.it, idx).clone() } - } -} - -#[stable(feature = "iter_cloned", since = "1.1.0")] -impl<'a, I, T: 'a> DoubleEndedIterator for Cloned -where - I: DoubleEndedIterator, - T: Clone, -{ - fn next_back(&mut self) -> Option { - self.it.next_back().cloned() - } - - fn try_rfold(&mut self, init: B, f: F) -> R - where - Self: Sized, - F: FnMut(B, Self::Item) -> R, - R: Try, - { - self.it.try_rfold(init, clone_try_fold(f)) - } - - fn rfold(self, init: Acc, f: F) -> Acc - where - F: FnMut(Acc, Self::Item) -> Acc, - { - self.it.map(T::clone).rfold(init, f) - } -} - -#[stable(feature = "iter_cloned", since = "1.1.0")] -impl<'a, I, T: 'a> ExactSizeIterator for Cloned -where - I: ExactSizeIterator, - T: Clone, -{ - fn len(&self) -> usize { - self.it.len() - } - - fn is_empty(&self) -> bool { - self.it.is_empty() - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl<'a, I, T: 'a> FusedIterator for Cloned -where - I: FusedIterator, - T: Clone, -{ -} - -#[doc(hidden)] -#[unstable(feature = "trusted_random_access", issue = "none")] -unsafe impl TrustedRandomAccess for Cloned -where - I: TrustedRandomAccess, -{ - #[inline] - fn may_have_side_effect() -> bool { - true - } -} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl<'a, I, T: 'a> TrustedLen for Cloned -where - I: TrustedLen, - T: Clone, -{ -} - -/// An iterator that repeats endlessly. -/// -/// This `struct` is created by the [`cycle`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`cycle`]: Iterator::cycle -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone, Debug)] -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Cycle { - orig: I, - iter: I, -} -impl Cycle { - pub(super) fn new(iter: I) -> Cycle { - Cycle { orig: iter.clone(), iter } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Cycle -where - I: Clone + Iterator, -{ - type Item = ::Item; - - #[inline] - fn next(&mut self) -> Option<::Item> { - match self.iter.next() { - None => { - self.iter = self.orig.clone(); - self.iter.next() - } - y => y, - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - // the cycle iterator is either empty or infinite - match self.orig.size_hint() { - sz @ (0, Some(0)) => sz, - (0, _) => (0, None), - _ => (usize::MAX, None), - } - } - - #[inline] - fn try_fold(&mut self, mut acc: Acc, mut f: F) -> R - where - F: FnMut(Acc, Self::Item) -> R, - R: Try, - { - // fully iterate the current iterator. this is necessary because - // `self.iter` may be empty even when `self.orig` isn't - acc = self.iter.try_fold(acc, &mut f)?; - self.iter = self.orig.clone(); - - // complete a full cycle, keeping track of whether the cycled - // iterator is empty or not. we need to return early in case - // of an empty iterator to prevent an infinite loop - let mut is_empty = true; - acc = self.iter.try_fold(acc, |acc, x| { - is_empty = false; - f(acc, x) - })?; - - if is_empty { - return try { acc }; - } - - loop { - self.iter = self.orig.clone(); - acc = self.iter.try_fold(acc, &mut f)?; - } - } - - // No `fold` override, because `fold` doesn't make much sense for `Cycle`, - // and we can't do anything better than the default. -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Cycle where I: Clone + Iterator {} - -/// An iterator for stepping iterators by a custom amount. -/// -/// This `struct` is created by the [`step_by`] method on [`Iterator`]. 
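As a quick illustration of the `StepBy` behaviour in this hunk (the adapter stores `step - 1` and uses a `first_take` flag so the first element is always yielded before any stepping), here is a minimal sketch; it is not part of the diff and uses only stable `Iterator` methods:

```rust
fn main() {
    // `step_by(3)` yields the first element and then every third element
    // after it, i.e. the items at indices 0, 3, 6, ... of the source.
    let taken: Vec<u32> = (0..10).step_by(3).collect();
    assert_eq!(taken, [0, 3, 6, 9]);

    // The length matches the `first_size` closure used in `size_hint`:
    // for `n > 0` remaining items and a step of `s`, it is 1 + (n - 1) / s.
    let (n, s) = (10usize, 3usize);
    assert_eq!(1 + (n - 1) / s, (0..n).step_by(s).count());
}
```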
See -/// its documentation for more. -/// -/// [`step_by`]: Iterator::step_by -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[stable(feature = "iterator_step_by", since = "1.28.0")] -#[derive(Clone, Debug)] -pub struct StepBy { - iter: I, - step: usize, - first_take: bool, -} -impl StepBy { - pub(super) fn new(iter: I, step: usize) -> StepBy { - assert!(step != 0); - StepBy { iter, step: step - 1, first_take: true } - } -} - -#[stable(feature = "iterator_step_by", since = "1.28.0")] -impl Iterator for StepBy -where - I: Iterator, -{ - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - if self.first_take { - self.first_take = false; - self.iter.next() - } else { - self.iter.nth(self.step) - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - #[inline] - fn first_size(step: usize) -> impl Fn(usize) -> usize { - move |n| if n == 0 { 0 } else { 1 + (n - 1) / (step + 1) } - } - - #[inline] - fn other_size(step: usize) -> impl Fn(usize) -> usize { - move |n| n / (step + 1) - } - - let (low, high) = self.iter.size_hint(); - - if self.first_take { - let f = first_size(self.step); - (f(low), high.map(f)) - } else { - let f = other_size(self.step); - (f(low), high.map(f)) - } - } - - #[inline] - fn nth(&mut self, mut n: usize) -> Option { - if self.first_take { - self.first_take = false; - let first = self.iter.next(); - if n == 0 { - return first; - } - n -= 1; - } - // n and self.step are indices, we need to add 1 to get the amount of elements - // When calling `.nth`, we need to subtract 1 again to convert back to an index - // step + 1 can't overflow because `.step_by` sets `self.step` to `step - 1` - let mut step = self.step + 1; - // n + 1 could overflow - // thus, if n is usize::MAX, instead of adding one, we call .nth(step) - if n == usize::MAX { - self.iter.nth(step - 1); - } else { - n += 1; - } - - // overflow handling - loop { - let mul = n.checked_mul(step); - { - if intrinsics::likely(mul.is_some()) { - return self.iter.nth(mul.unwrap() - 1); - } - } - let div_n = usize::MAX / n; - let div_step = usize::MAX / step; - let nth_n = div_n * n; - let nth_step = div_step * step; - let nth = if nth_n > nth_step { - step -= div_n; - nth_n - } else { - n -= div_step; - nth_step - }; - self.iter.nth(nth - 1); - } - } - - fn try_fold(&mut self, mut acc: Acc, mut f: F) -> R - where - F: FnMut(Acc, Self::Item) -> R, - R: Try, - { - #[inline] - fn nth(iter: &mut I, step: usize) -> impl FnMut() -> Option + '_ { - move || iter.nth(step) - } - - if self.first_take { - self.first_take = false; - match self.iter.next() { - None => return try { acc }, - Some(x) => acc = f(acc, x)?, - } - } - from_fn(nth(&mut self.iter, self.step)).try_fold(acc, f) - } - - fn fold(mut self, mut acc: Acc, mut f: F) -> Acc - where - F: FnMut(Acc, Self::Item) -> Acc, - { - #[inline] - fn nth(iter: &mut I, step: usize) -> impl FnMut() -> Option + '_ { - move || iter.nth(step) - } - - if self.first_take { - self.first_take = false; - match self.iter.next() { - None => return acc, - Some(x) => acc = f(acc, x), - } - } - from_fn(nth(&mut self.iter, self.step)).fold(acc, f) - } -} - -impl StepBy -where - I: ExactSizeIterator, -{ - // The zero-based index starting from the end of the iterator of the - // last element. Used in the `DoubleEndedIterator` implementation. 
- fn next_back_index(&self) -> usize { - let rem = self.iter.len() % (self.step + 1); - if self.first_take { - if rem == 0 { self.step } else { rem - 1 } - } else { - rem - } - } -} - -#[stable(feature = "double_ended_step_by_iterator", since = "1.38.0")] -impl DoubleEndedIterator for StepBy -where - I: DoubleEndedIterator + ExactSizeIterator, -{ - #[inline] - fn next_back(&mut self) -> Option { - self.iter.nth_back(self.next_back_index()) - } - - #[inline] - fn nth_back(&mut self, n: usize) -> Option { - // `self.iter.nth_back(usize::MAX)` does the right thing here when `n` - // is out of bounds because the length of `self.iter` does not exceed - // `usize::MAX` (because `I: ExactSizeIterator`) and `nth_back` is - // zero-indexed - let n = n.saturating_mul(self.step + 1).saturating_add(self.next_back_index()); - self.iter.nth_back(n) - } - - fn try_rfold(&mut self, init: Acc, mut f: F) -> R - where - F: FnMut(Acc, Self::Item) -> R, - R: Try, - { - #[inline] - fn nth_back( - iter: &mut I, - step: usize, - ) -> impl FnMut() -> Option + '_ { - move || iter.nth_back(step) - } - - match self.next_back() { - None => try { init }, - Some(x) => { - let acc = f(init, x)?; - from_fn(nth_back(&mut self.iter, self.step)).try_fold(acc, f) - } - } - } - - #[inline] - fn rfold(mut self, init: Acc, mut f: F) -> Acc - where - Self: Sized, - F: FnMut(Acc, Self::Item) -> Acc, - { - #[inline] - fn nth_back( - iter: &mut I, - step: usize, - ) -> impl FnMut() -> Option + '_ { - move || iter.nth_back(step) - } - - match self.next_back() { - None => init, - Some(x) => { - let acc = f(init, x); - from_fn(nth_back(&mut self.iter, self.step)).fold(acc, f) - } - } - } -} - -// StepBy can only make the iterator shorter, so the len will still fit. -#[stable(feature = "iterator_step_by", since = "1.28.0")] -impl ExactSizeIterator for StepBy where I: ExactSizeIterator {} - -/// An iterator that maps the values of `iter` with `f`. -/// -/// This `struct` is created by the [`map`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`map`]: Iterator::map -/// [`Iterator`]: trait.Iterator.html -/// -/// # Notes about side effects -/// -/// The [`map`] iterator implements [`DoubleEndedIterator`], meaning that -/// you can also [`map`] backwards: -/// -/// ```rust -/// let v: Vec = vec![1, 2, 3].into_iter().map(|x| x + 1).rev().collect(); -/// -/// assert_eq!(v, [4, 3, 2]); -/// ``` -/// -/// [`DoubleEndedIterator`]: trait.DoubleEndedIterator.html -/// -/// But if your closure has state, iterating backwards may act in a way you do -/// not expect. Let's go through an example. First, in the forward direction: -/// -/// ```rust -/// let mut c = 0; -/// -/// for pair in vec!['a', 'b', 'c'].into_iter() -/// .map(|letter| { c += 1; (letter, c) }) { -/// println!("{:?}", pair); -/// } -/// ``` -/// -/// This will print "('a', 1), ('b', 2), ('c', 3)". -/// -/// Now consider this twist where we add a call to `rev`. This version will -/// print `('c', 1), ('b', 2), ('a', 3)`. Note that the letters are reversed, -/// but the values of the counter still go in order. This is because `map()` is -/// still being called lazily on each item, but we are popping items off the -/// back of the vector now, instead of shifting them from the front. 
-/// -/// ```rust -/// let mut c = 0; -/// -/// for pair in vec!['a', 'b', 'c'].into_iter() -/// .map(|letter| { c += 1; (letter, c) }) -/// .rev() { -/// println!("{:?}", pair); -/// } -/// ``` -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct Map { - iter: I, - f: F, -} -impl Map { - pub(super) fn new(iter: I, f: F) -> Map { - Map { iter, f } - } -} - -#[stable(feature = "core_impl_debug", since = "1.9.0")] -impl fmt::Debug for Map { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Map").field("iter", &self.iter).finish() - } -} - -fn map_fold( - mut f: impl FnMut(T) -> B, - mut g: impl FnMut(Acc, B) -> Acc, -) -> impl FnMut(Acc, T) -> Acc { - move |acc, elt| g(acc, f(elt)) -} - -fn map_try_fold<'a, T, B, Acc, R>( - f: &'a mut impl FnMut(T) -> B, - mut g: impl FnMut(Acc, B) -> R + 'a, -) -> impl FnMut(Acc, T) -> R + 'a { - move |acc, elt| g(acc, f(elt)) -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Map -where - F: FnMut(I::Item) -> B, -{ - type Item = B; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(&mut self.f) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - fn try_fold(&mut self, init: Acc, g: G) -> R - where - Self: Sized, - G: FnMut(Acc, Self::Item) -> R, - R: Try, - { - self.iter.try_fold(init, map_try_fold(&mut self.f, g)) - } - - fn fold(self, init: Acc, g: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.fold(init, map_fold(self.f, g)) - } - - unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> B - where - Self: TrustedRandomAccess, - { - // SAFETY: the caller must uphold the contract for - // `Iterator::__iterator_get_unchecked`. 
- unsafe { (self.f)(try_get_unchecked(&mut self.iter, idx)) } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Map -where - F: FnMut(I::Item) -> B, -{ - #[inline] - fn next_back(&mut self) -> Option { - self.iter.next_back().map(&mut self.f) - } - - fn try_rfold(&mut self, init: Acc, g: G) -> R - where - Self: Sized, - G: FnMut(Acc, Self::Item) -> R, - R: Try, - { - self.iter.try_rfold(init, map_try_fold(&mut self.f, g)) - } - - fn rfold(self, init: Acc, g: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.rfold(init, map_fold(self.f, g)) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Map -where - F: FnMut(I::Item) -> B, -{ - fn len(&self) -> usize { - self.iter.len() - } - - fn is_empty(&self) -> bool { - self.iter.is_empty() - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Map where F: FnMut(I::Item) -> B {} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for Map -where - I: TrustedLen, - F: FnMut(I::Item) -> B, -{ -} - -#[doc(hidden)] -#[unstable(feature = "trusted_random_access", issue = "none")] -unsafe impl TrustedRandomAccess for Map -where - I: TrustedRandomAccess, -{ - #[inline] - fn may_have_side_effect() -> bool { - true - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl SourceIter for Map -where - F: FnMut(I::Item) -> B, - I: SourceIter, -{ - type Source = S; - - #[inline] - unsafe fn as_inner(&mut self) -> &mut S { - // SAFETY: unsafe function forwarding to unsafe function with the same requirements - unsafe { SourceIter::as_inner(&mut self.iter) } - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl InPlaceIterable for Map where F: FnMut(I::Item) -> B {} - -/// An iterator that filters the elements of `iter` with `predicate`. -/// -/// This `struct` is created by the [`filter`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`filter`]: Iterator::filter -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct Filter { - iter: I, - predicate: P, -} -impl Filter { - pub(super) fn new(iter: I, predicate: P) -> Filter { - Filter { iter, predicate } - } -} - -#[stable(feature = "core_impl_debug", since = "1.9.0")] -impl fmt::Debug for Filter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Filter").field("iter", &self.iter).finish() - } -} - -fn filter_fold( - mut predicate: impl FnMut(&T) -> bool, - mut fold: impl FnMut(Acc, T) -> Acc, -) -> impl FnMut(Acc, T) -> Acc { - move |acc, item| if predicate(&item) { fold(acc, item) } else { acc } -} - -fn filter_try_fold<'a, T, Acc, R: Try>( - predicate: &'a mut impl FnMut(&T) -> bool, - mut fold: impl FnMut(Acc, T) -> R + 'a, -) -> impl FnMut(Acc, T) -> R + 'a { - move |acc, item| if predicate(&item) { fold(acc, item) } else { try { acc } } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Filter -where - P: FnMut(&I::Item) -> bool, -{ - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - self.iter.find(&mut self.predicate) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (_, upper) = self.iter.size_hint(); - (0, upper) // can't know a lower bound, due to the predicate - } - - // this special case allows the compiler to make `.filter(_).count()` - // branchless. 
Barring perfect branch prediction (which is unattainable in - // the general case), this will be much faster in >90% of cases (containing - // virtually all real workloads) and only a tiny bit slower in the rest. - // - // Having this specialization thus allows us to write `.filter(p).count()` - // where we would otherwise write `.map(|x| p(x) as usize).sum()`, which is - // less readable and also less backwards-compatible to Rust before 1.10. - // - // Using the branchless version will also simplify the LLVM byte code, thus - // leaving more budget for LLVM optimizations. - #[inline] - fn count(self) -> usize { - #[inline] - fn to_usize(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut(T) -> usize { - move |x| predicate(&x) as usize - } - - self.iter.map(to_usize(self.predicate)).sum() - } - - #[inline] - fn try_fold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - self.iter.try_fold(init, filter_try_fold(&mut self.predicate, fold)) - } - - #[inline] - fn fold(self, init: Acc, fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.fold(init, filter_fold(self.predicate, fold)) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Filter -where - P: FnMut(&I::Item) -> bool, -{ - #[inline] - fn next_back(&mut self) -> Option { - self.iter.rfind(&mut self.predicate) - } - - #[inline] - fn try_rfold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - self.iter.try_rfold(init, filter_try_fold(&mut self.predicate, fold)) - } - - #[inline] - fn rfold(self, init: Acc, fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.rfold(init, filter_fold(self.predicate, fold)) - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Filter where P: FnMut(&I::Item) -> bool {} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl SourceIter for Filter -where - P: FnMut(&I::Item) -> bool, - I: SourceIter, -{ - type Source = S; - - #[inline] - unsafe fn as_inner(&mut self) -> &mut S { - // SAFETY: unsafe function forwarding to unsafe function with the same requirements - unsafe { SourceIter::as_inner(&mut self.iter) } - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl InPlaceIterable for Filter where P: FnMut(&I::Item) -> bool {} - -/// An iterator that uses `f` to both filter and map elements from `iter`. -/// -/// This `struct` is created by the [`filter_map`] method on [`Iterator`]. See its -/// documentation for more. 
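Since `FilterMap::next` above is simply `self.iter.find_map(&mut self.f)`, a short usage sketch may help; it is not part of the diff and also shows the usual equivalence with a `filter().map()` chain:

```rust
fn main() {
    let words = ["1", "two", "3", "four"];

    // `filter_map` keeps only the items for which the closure returns `Some`.
    let numbers: Vec<i32> = words.iter().filter_map(|w| w.parse().ok()).collect();
    assert_eq!(numbers, [1, 3]);

    // The same result via `filter` + `map`, at the cost of parsing twice.
    let numbers2: Vec<i32> = words
        .iter()
        .filter(|w| w.parse::<i32>().is_ok())
        .map(|w| w.parse().unwrap())
        .collect();
    assert_eq!(numbers, numbers2);
}
```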
-/// -/// [`filter_map`]: Iterator::filter_map -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct FilterMap { - iter: I, - f: F, -} -impl FilterMap { - pub(super) fn new(iter: I, f: F) -> FilterMap { - FilterMap { iter, f } - } -} - -#[stable(feature = "core_impl_debug", since = "1.9.0")] -impl fmt::Debug for FilterMap { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("FilterMap").field("iter", &self.iter).finish() - } -} - -fn filter_map_fold( - mut f: impl FnMut(T) -> Option, - mut fold: impl FnMut(Acc, B) -> Acc, -) -> impl FnMut(Acc, T) -> Acc { - move |acc, item| match f(item) { - Some(x) => fold(acc, x), - None => acc, - } -} - -fn filter_map_try_fold<'a, T, B, Acc, R: Try>( - f: &'a mut impl FnMut(T) -> Option, - mut fold: impl FnMut(Acc, B) -> R + 'a, -) -> impl FnMut(Acc, T) -> R + 'a { - move |acc, item| match f(item) { - Some(x) => fold(acc, x), - None => try { acc }, - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for FilterMap -where - F: FnMut(I::Item) -> Option, -{ - type Item = B; - - #[inline] - fn next(&mut self) -> Option { - self.iter.find_map(&mut self.f) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (_, upper) = self.iter.size_hint(); - (0, upper) // can't know a lower bound, due to the predicate - } - - #[inline] - fn try_fold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - self.iter.try_fold(init, filter_map_try_fold(&mut self.f, fold)) - } - - #[inline] - fn fold(self, init: Acc, fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.fold(init, filter_map_fold(self.f, fold)) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for FilterMap -where - F: FnMut(I::Item) -> Option, -{ - #[inline] - fn next_back(&mut self) -> Option { - #[inline] - fn find( - f: &mut impl FnMut(T) -> Option, - ) -> impl FnMut((), T) -> ControlFlow + '_ { - move |(), x| match f(x) { - Some(x) => ControlFlow::Break(x), - None => ControlFlow::CONTINUE, - } - } - - self.iter.try_rfold((), find(&mut self.f)).break_value() - } - - #[inline] - fn try_rfold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - self.iter.try_rfold(init, filter_map_try_fold(&mut self.f, fold)) - } - - #[inline] - fn rfold(self, init: Acc, fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.rfold(init, filter_map_fold(self.f, fold)) - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for FilterMap where F: FnMut(I::Item) -> Option {} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl SourceIter for FilterMap -where - F: FnMut(I::Item) -> Option, - I: SourceIter, -{ - type Source = S; - - #[inline] - unsafe fn as_inner(&mut self) -> &mut S { - // SAFETY: unsafe function forwarding to unsafe function with the same requirements - unsafe { SourceIter::as_inner(&mut self.iter) } - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl InPlaceIterable for FilterMap where - F: FnMut(I::Item) -> Option -{ -} - -/// An iterator that yields the current count and the element during iteration. -/// -/// This `struct` is created by the [`enumerate`] method on [`Iterator`]. See its -/// documentation for more. 
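A brief sketch of the `Enumerate` adapter introduced just above; note the overflow caveat documented on `next` below (the index is a plain `usize` counter with no guard). This example is not part of the diff:

```rust
fn main() {
    let letters = ['a', 'b', 'c'];

    // `enumerate` pairs each item with a running `usize` count, starting at 0.
    let indexed: Vec<(usize, char)> = letters.iter().copied().enumerate().collect();
    assert_eq!(indexed, [(0, 'a'), (1, 'b'), (2, 'c')]);

    // From the back the index is reconstructed as `count + remaining_len`,
    // so reversing still reports the original positions.
    let reversed: Vec<(usize, char)> = letters.iter().copied().enumerate().rev().collect();
    assert_eq!(reversed, [(2, 'c'), (1, 'b'), (0, 'a')]);
}
```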
-/// -/// [`enumerate`]: Iterator::enumerate -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone, Debug)] -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Enumerate { - iter: I, - count: usize, -} -impl Enumerate { - pub(super) fn new(iter: I) -> Enumerate { - Enumerate { iter, count: 0 } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Enumerate -where - I: Iterator, -{ - type Item = (usize, ::Item); - - /// # Overflow Behavior - /// - /// The method does no guarding against overflows, so enumerating more than - /// `usize::MAX` elements either produces the wrong result or panics. If - /// debug assertions are enabled, a panic is guaranteed. - /// - /// # Panics - /// - /// Might panic if the index of the element overflows a `usize`. - #[inline] - fn next(&mut self) -> Option<(usize, ::Item)> { - let a = self.iter.next()?; - let i = self.count; - // Possible undefined overflow. - AddAssign::add_assign(&mut self.count, 1); - Some((i, a)) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - #[inline] - fn nth(&mut self, n: usize) -> Option<(usize, I::Item)> { - let a = self.iter.nth(n)?; - // Possible undefined overflow. - let i = Add::add(self.count, n); - self.count = Add::add(i, 1); - Some((i, a)) - } - - #[inline] - fn count(self) -> usize { - self.iter.count() - } - - #[inline] - fn try_fold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - #[inline] - fn enumerate<'a, T, Acc, R>( - count: &'a mut usize, - mut fold: impl FnMut(Acc, (usize, T)) -> R + 'a, - ) -> impl FnMut(Acc, T) -> R + 'a { - move |acc, item| { - let acc = fold(acc, (*count, item)); - // Possible undefined overflow. - AddAssign::add_assign(count, 1); - acc - } - } - - self.iter.try_fold(init, enumerate(&mut self.count, fold)) - } - - #[inline] - fn fold(self, init: Acc, fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - #[inline] - fn enumerate( - mut count: usize, - mut fold: impl FnMut(Acc, (usize, T)) -> Acc, - ) -> impl FnMut(Acc, T) -> Acc { - move |acc, item| { - let acc = fold(acc, (count, item)); - // Possible undefined overflow. - AddAssign::add_assign(&mut count, 1); - acc - } - } - - self.iter.fold(init, enumerate(self.count, fold)) - } - - unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> ::Item - where - Self: TrustedRandomAccess, - { - // SAFETY: the caller must uphold the contract for - // `Iterator::__iterator_get_unchecked`. - let value = unsafe { try_get_unchecked(&mut self.iter, idx) }; - (Add::add(self.count, idx), value) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Enumerate -where - I: ExactSizeIterator + DoubleEndedIterator, -{ - #[inline] - fn next_back(&mut self) -> Option<(usize, ::Item)> { - let a = self.iter.next_back()?; - let len = self.iter.len(); - // Can safely add, `ExactSizeIterator` promises that the number of - // elements fits into a `usize`. - Some((self.count + len, a)) - } - - #[inline] - fn nth_back(&mut self, n: usize) -> Option<(usize, ::Item)> { - let a = self.iter.nth_back(n)?; - let len = self.iter.len(); - // Can safely add, `ExactSizeIterator` promises that the number of - // elements fits into a `usize`. 
- Some((self.count + len, a)) - } - - #[inline] - fn try_rfold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - // Can safely add and subtract the count, as `ExactSizeIterator` promises - // that the number of elements fits into a `usize`. - fn enumerate( - mut count: usize, - mut fold: impl FnMut(Acc, (usize, T)) -> R, - ) -> impl FnMut(Acc, T) -> R { - move |acc, item| { - count -= 1; - fold(acc, (count, item)) - } - } - - let count = self.count + self.iter.len(); - self.iter.try_rfold(init, enumerate(count, fold)) - } - - #[inline] - fn rfold(self, init: Acc, fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - // Can safely add and subtract the count, as `ExactSizeIterator` promises - // that the number of elements fits into a `usize`. - fn enumerate( - mut count: usize, - mut fold: impl FnMut(Acc, (usize, T)) -> Acc, - ) -> impl FnMut(Acc, T) -> Acc { - move |acc, item| { - count -= 1; - fold(acc, (count, item)) - } - } - - let count = self.count + self.iter.len(); - self.iter.rfold(init, enumerate(count, fold)) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Enumerate -where - I: ExactSizeIterator, -{ - fn len(&self) -> usize { - self.iter.len() - } - - fn is_empty(&self) -> bool { - self.iter.is_empty() - } -} - -#[doc(hidden)] -#[unstable(feature = "trusted_random_access", issue = "none")] -unsafe impl TrustedRandomAccess for Enumerate -where - I: TrustedRandomAccess, -{ - fn may_have_side_effect() -> bool { - I::may_have_side_effect() - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Enumerate where I: FusedIterator {} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for Enumerate where I: TrustedLen {} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl SourceIter for Enumerate -where - I: SourceIter, -{ - type Source = S; - - #[inline] - unsafe fn as_inner(&mut self) -> &mut S { - // SAFETY: unsafe function forwarding to unsafe function with the same requirements - unsafe { SourceIter::as_inner(&mut self.iter) } - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl InPlaceIterable for Enumerate {} - -/// An iterator with a `peek()` that returns an optional reference to the next -/// element. -/// -/// This `struct` is created by the [`peekable`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`peekable`]: Iterator::peekable -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone, Debug)] -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Peekable { - iter: I, - /// Remember a peeked value, even if it was None. - peeked: Option>, -} -impl Peekable { - pub(super) fn new(iter: I) -> Peekable { - Peekable { iter, peeked: None } - } -} - -// Peekable must remember if a None has been seen in the `.peek()` method. -// It ensures that `.peek(); .peek();` or `.peek(); .next();` only advances the -// underlying iterator at most once. This does not by itself make the iterator -// fused. 
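The comment above states the key invariant; a minimal sketch (not part of the diff) of what it guarantees in practice:

```rust
fn main() {
    let mut iter = [1, 2, 3].iter().peekable();

    // Several `peek` calls in a row look at the same buffered element and
    // advance the underlying iterator only once.
    assert_eq!(iter.peek(), Some(&&1));
    assert_eq!(iter.peek(), Some(&&1));
    assert_eq!(iter.next(), Some(&1));

    // Peeking past the end buffers the `None` as well...
    let mut empty = std::iter::empty::<i32>().peekable();
    assert_eq!(empty.peek(), None);
    // ...but that alone does not make the iterator fused; `Peekable` simply
    // reports the remembered `None` on the next call.
    assert_eq!(empty.next(), None);
}
```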
-#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Peekable { - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - match self.peeked.take() { - Some(v) => v, - None => self.iter.next(), - } - } - - #[inline] - #[rustc_inherit_overflow_checks] - fn count(mut self) -> usize { - match self.peeked.take() { - Some(None) => 0, - Some(Some(_)) => 1 + self.iter.count(), - None => self.iter.count(), - } - } - - #[inline] - fn nth(&mut self, n: usize) -> Option { - match self.peeked.take() { - Some(None) => None, - Some(v @ Some(_)) if n == 0 => v, - Some(Some(_)) => self.iter.nth(n - 1), - None => self.iter.nth(n), - } - } - - #[inline] - fn last(mut self) -> Option { - let peek_opt = match self.peeked.take() { - Some(None) => return None, - Some(v) => v, - None => None, - }; - self.iter.last().or(peek_opt) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let peek_len = match self.peeked { - Some(None) => return (0, Some(0)), - Some(Some(_)) => 1, - None => 0, - }; - let (lo, hi) = self.iter.size_hint(); - let lo = lo.saturating_add(peek_len); - let hi = match hi { - Some(x) => x.checked_add(peek_len), - None => None, - }; - (lo, hi) - } - - #[inline] - fn try_fold(&mut self, init: B, mut f: F) -> R - where - Self: Sized, - F: FnMut(B, Self::Item) -> R, - R: Try, - { - let acc = match self.peeked.take() { - Some(None) => return try { init }, - Some(Some(v)) => f(init, v)?, - None => init, - }; - self.iter.try_fold(acc, f) - } - - #[inline] - fn fold(self, init: Acc, mut fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - let acc = match self.peeked { - Some(None) => return init, - Some(Some(v)) => fold(init, v), - None => init, - }; - self.iter.fold(acc, fold) - } -} - -#[stable(feature = "double_ended_peek_iterator", since = "1.38.0")] -impl DoubleEndedIterator for Peekable -where - I: DoubleEndedIterator, -{ - #[inline] - fn next_back(&mut self) -> Option { - match self.peeked.as_mut() { - Some(v @ Some(_)) => self.iter.next_back().or_else(|| v.take()), - Some(None) => None, - None => self.iter.next_back(), - } - } - - #[inline] - fn try_rfold(&mut self, init: B, mut f: F) -> R - where - Self: Sized, - F: FnMut(B, Self::Item) -> R, - R: Try, - { - match self.peeked.take() { - Some(None) => try { init }, - Some(Some(v)) => match self.iter.try_rfold(init, &mut f).into_result() { - Ok(acc) => f(acc, v), - Err(e) => { - self.peeked = Some(Some(v)); - Try::from_error(e) - } - }, - None => self.iter.try_rfold(init, f), - } - } - - #[inline] - fn rfold(self, init: Acc, mut fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - match self.peeked { - Some(None) => init, - Some(Some(v)) => { - let acc = self.iter.rfold(init, &mut fold); - fold(acc, v) - } - None => self.iter.rfold(init, fold), - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Peekable {} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Peekable {} - -impl Peekable { - /// Returns a reference to the next() value without advancing the iterator. - /// - /// Like [`next`], if there is a value, it is wrapped in a `Some(T)`. - /// But if the iteration is over, `None` is returned. - /// - /// [`next`]: Iterator::next - /// - /// Because `peek()` returns a reference, and many iterators iterate over - /// references, there can be a possibly confusing situation where the - /// return value is a double reference. You can see this effect in the - /// examples below. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let xs = [1, 2, 3]; - /// - /// let mut iter = xs.iter().peekable(); - /// - /// // peek() lets us see into the future - /// assert_eq!(iter.peek(), Some(&&1)); - /// assert_eq!(iter.next(), Some(&1)); - /// - /// assert_eq!(iter.next(), Some(&2)); - /// - /// // The iterator does not advance even if we `peek` multiple times - /// assert_eq!(iter.peek(), Some(&&3)); - /// assert_eq!(iter.peek(), Some(&&3)); - /// - /// assert_eq!(iter.next(), Some(&3)); - /// - /// // After the iterator is finished, so is `peek()` - /// assert_eq!(iter.peek(), None); - /// assert_eq!(iter.next(), None); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn peek(&mut self) -> Option<&I::Item> { - let iter = &mut self.iter; - self.peeked.get_or_insert_with(|| iter.next()).as_ref() - } - - /// Consume and return the next value of this iterator if a condition is true. - /// - /// If `func` returns `true` for the next value of this iterator, consume and return it. - /// Otherwise, return `None`. - /// - /// # Examples - /// Consume a number if it's equal to 0. - /// ``` - /// #![feature(peekable_next_if)] - /// let mut iter = (0..5).peekable(); - /// // The first item of the iterator is 0; consume it. - /// assert_eq!(iter.next_if(|&x| x == 0), Some(0)); - /// // The next item returned is now 1, so `consume` will return `false`. - /// assert_eq!(iter.next_if(|&x| x == 0), None); - /// // `next_if` saves the value of the next item if it was not equal to `expected`. - /// assert_eq!(iter.next(), Some(1)); - /// ``` - /// - /// Consume any number less than 10. - /// ``` - /// #![feature(peekable_next_if)] - /// let mut iter = (1..20).peekable(); - /// // Consume all numbers less than 10 - /// while iter.next_if(|&x| x < 10).is_some() {} - /// // The next value returned will be 10 - /// assert_eq!(iter.next(), Some(10)); - /// ``` - #[unstable(feature = "peekable_next_if", issue = "72480")] - pub fn next_if(&mut self, func: impl FnOnce(&I::Item) -> bool) -> Option { - match self.next() { - Some(matched) if func(&matched) => Some(matched), - other => { - // Since we called `self.next()`, we consumed `self.peeked`. - assert!(self.peeked.is_none()); - self.peeked = Some(other); - None - } - } - } - - /// Consume and return the next item if it is equal to `expected`. - /// - /// # Example - /// Consume a number if it's equal to 0. - /// ``` - /// #![feature(peekable_next_if)] - /// let mut iter = (0..5).peekable(); - /// // The first item of the iterator is 0; consume it. - /// assert_eq!(iter.next_if_eq(&0), Some(0)); - /// // The next item returned is now 1, so `consume` will return `false`. - /// assert_eq!(iter.next_if_eq(&0), None); - /// // `next_if_eq` saves the value of the next item if it was not equal to `expected`. 
- /// assert_eq!(iter.next(), Some(1)); - /// ``` - #[unstable(feature = "peekable_next_if", issue = "72480")] - pub fn next_if_eq(&mut self, expected: &T) -> Option - where - T: ?Sized, - I::Item: PartialEq, - { - self.next_if(|next| next == expected) - } -} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for Peekable where I: TrustedLen {} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl SourceIter for Peekable -where - I: SourceIter, -{ - type Source = S; - - #[inline] - unsafe fn as_inner(&mut self) -> &mut S { - // SAFETY: unsafe function forwarding to unsafe function with the same requirements - unsafe { SourceIter::as_inner(&mut self.iter) } - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl InPlaceIterable for Peekable {} - -/// An iterator that rejects elements while `predicate` returns `true`. -/// -/// This `struct` is created by the [`skip_while`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`skip_while`]: Iterator::skip_while -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct SkipWhile { - iter: I, - flag: bool, - predicate: P, -} -impl SkipWhile { - pub(super) fn new(iter: I, predicate: P) -> SkipWhile { - SkipWhile { iter, flag: false, predicate } - } -} - -#[stable(feature = "core_impl_debug", since = "1.9.0")] -impl fmt::Debug for SkipWhile { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SkipWhile").field("iter", &self.iter).field("flag", &self.flag).finish() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for SkipWhile -where - P: FnMut(&I::Item) -> bool, -{ - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - fn check<'a, T>( - flag: &'a mut bool, - pred: &'a mut impl FnMut(&T) -> bool, - ) -> impl FnMut(&T) -> bool + 'a { - move |x| { - if *flag || !pred(x) { - *flag = true; - true - } else { - false - } - } - } - - let flag = &mut self.flag; - let pred = &mut self.predicate; - self.iter.find(check(flag, pred)) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (_, upper) = self.iter.size_hint(); - (0, upper) // can't know a lower bound, due to the predicate - } - - #[inline] - fn try_fold(&mut self, mut init: Acc, mut fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - if !self.flag { - match self.next() { - Some(v) => init = fold(init, v)?, - None => return try { init }, - } - } - self.iter.try_fold(init, fold) - } - - #[inline] - fn fold(mut self, mut init: Acc, mut fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - if !self.flag { - match self.next() { - Some(v) => init = fold(init, v), - None => return init, - } - } - self.iter.fold(init, fold) - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for SkipWhile -where - I: FusedIterator, - P: FnMut(&I::Item) -> bool, -{ -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl SourceIter for SkipWhile -where - P: FnMut(&I::Item) -> bool, - I: SourceIter, -{ - type Source = S; - - #[inline] - unsafe fn as_inner(&mut self) -> &mut S { - // SAFETY: unsafe function forwarding to unsafe function with the same requirements - unsafe { SourceIter::as_inner(&mut self.iter) } - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl InPlaceIterable for SkipWhile 
where - F: FnMut(&I::Item) -> bool -{ -} - -/// An iterator that only accepts elements while `predicate` returns `true`. -/// -/// This `struct` is created by the [`take_while`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`take_while`]: Iterator::take_while -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct TakeWhile { - iter: I, - flag: bool, - predicate: P, -} -impl TakeWhile { - pub(super) fn new(iter: I, predicate: P) -> TakeWhile { - TakeWhile { iter, flag: false, predicate } - } -} - -#[stable(feature = "core_impl_debug", since = "1.9.0")] -impl fmt::Debug for TakeWhile { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TakeWhile").field("iter", &self.iter).field("flag", &self.flag).finish() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for TakeWhile -where - P: FnMut(&I::Item) -> bool, -{ - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - if self.flag { - None - } else { - let x = self.iter.next()?; - if (self.predicate)(&x) { - Some(x) - } else { - self.flag = true; - None - } - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - if self.flag { - (0, Some(0)) - } else { - let (_, upper) = self.iter.size_hint(); - (0, upper) // can't know a lower bound, due to the predicate - } - } - - #[inline] - fn try_fold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - fn check<'a, T, Acc, R: Try>( - flag: &'a mut bool, - p: &'a mut impl FnMut(&T) -> bool, - mut fold: impl FnMut(Acc, T) -> R + 'a, - ) -> impl FnMut(Acc, T) -> ControlFlow + 'a { - move |acc, x| { - if p(&x) { - ControlFlow::from_try(fold(acc, x)) - } else { - *flag = true; - ControlFlow::Break(try { acc }) - } - } - } - - if self.flag { - try { init } - } else { - let flag = &mut self.flag; - let p = &mut self.predicate; - self.iter.try_fold(init, check(flag, p, fold)).into_try() - } - } - - #[inline] - fn fold(mut self, init: Acc, fold: Fold) -> Acc - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> Acc, - { - #[inline] - fn ok(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result { - move |acc, x| Ok(f(acc, x)) - } - - self.try_fold(init, ok(fold)).unwrap() - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for TakeWhile -where - I: FusedIterator, - P: FnMut(&I::Item) -> bool, -{ -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl SourceIter for TakeWhile -where - P: FnMut(&I::Item) -> bool, - I: SourceIter, -{ - type Source = S; - - #[inline] - unsafe fn as_inner(&mut self) -> &mut S { - // SAFETY: unsafe function forwarding to unsafe function with the same requirements - unsafe { SourceIter::as_inner(&mut self.iter) } - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl InPlaceIterable for TakeWhile where - F: FnMut(&I::Item) -> bool -{ -} - -/// An iterator that only accepts elements while `predicate` returns `Some(_)`. -/// -/// This `struct` is created by the [`map_while`] method on [`Iterator`]. See its -/// documentation for more. 
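Unlike `MapWhile` below, the `SkipWhile` and `TakeWhile` adapters above carry a `flag`: once the predicate has stopped matching (for `skip_while`) or has failed once (for `take_while`), it is never consulted again. A small sketch of that behaviour, not part of the diff:

```rust
fn main() {
    let data = [1, 2, 10, 3, 4];

    // `skip_while` drops the leading run that matches, then yields everything
    // else, even elements that would have matched the predicate again.
    let rest: Vec<i32> = data.iter().copied().skip_while(|&x| x < 5).collect();
    assert_eq!(rest, [10, 3, 4]);

    // `take_while` stops at the first non-matching element and stays stopped,
    // even though later elements (3, 4) satisfy the predicate.
    let prefix: Vec<i32> = data.iter().copied().take_while(|&x| x < 5).collect();
    assert_eq!(prefix, [1, 2]);
}
```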
-/// -/// [`map_while`]: Iterator::map_while -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[unstable(feature = "iter_map_while", reason = "recently added", issue = "68537")] -#[derive(Clone)] -pub struct MapWhile { - iter: I, - predicate: P, -} - -impl MapWhile { - pub(super) fn new(iter: I, predicate: P) -> MapWhile { - MapWhile { iter, predicate } - } -} - -#[unstable(feature = "iter_map_while", reason = "recently added", issue = "68537")] -impl fmt::Debug for MapWhile { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("MapWhile").field("iter", &self.iter).finish() - } -} - -#[unstable(feature = "iter_map_while", reason = "recently added", issue = "68537")] -impl Iterator for MapWhile -where - P: FnMut(I::Item) -> Option, -{ - type Item = B; - - #[inline] - fn next(&mut self) -> Option { - let x = self.iter.next()?; - (self.predicate)(x) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (_, upper) = self.iter.size_hint(); - (0, upper) // can't know a lower bound, due to the predicate - } - - #[inline] - fn try_fold(&mut self, init: Acc, mut fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - let Self { iter, predicate } = self; - iter.try_fold(init, |acc, x| match predicate(x) { - Some(item) => ControlFlow::from_try(fold(acc, item)), - None => ControlFlow::Break(try { acc }), - }) - .into_try() - } - - #[inline] - fn fold(mut self, init: Acc, fold: Fold) -> Acc - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> Acc, - { - #[inline] - fn ok(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result { - move |acc, x| Ok(f(acc, x)) - } - - self.try_fold(init, ok(fold)).unwrap() - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl SourceIter for MapWhile -where - P: FnMut(I::Item) -> Option, - I: SourceIter, -{ - type Source = S; - - #[inline] - unsafe fn as_inner(&mut self) -> &mut S { - // SAFETY: unsafe function forwarding to unsafe function with the same requirements - unsafe { SourceIter::as_inner(&mut self.iter) } - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl InPlaceIterable for MapWhile where - P: FnMut(I::Item) -> Option -{ -} - -/// An iterator that skips over `n` elements of `iter`. -/// -/// This `struct` is created by the [`skip`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`skip`]: Iterator::skip -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone, Debug)] -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Skip { - iter: I, - n: usize, -} -impl Skip { - pub(super) fn new(iter: I, n: usize) -> Skip { - Skip { iter, n } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Skip -where - I: Iterator, -{ - type Item = ::Item; - - #[inline] - fn next(&mut self) -> Option { - if self.n == 0 { - self.iter.next() - } else { - let old_n = self.n; - self.n = 0; - self.iter.nth(old_n) - } - } - - #[inline] - fn nth(&mut self, n: usize) -> Option { - // Can't just add n + self.n due to overflow. 
- if self.n > 0 { - let to_skip = self.n; - self.n = 0; - // nth(n) skips n+1 - self.iter.nth(to_skip - 1)?; - } - self.iter.nth(n) - } - - #[inline] - fn count(mut self) -> usize { - if self.n > 0 { - // nth(n) skips n+1 - if self.iter.nth(self.n - 1).is_none() { - return 0; - } - } - self.iter.count() - } - - #[inline] - fn last(mut self) -> Option { - if self.n > 0 { - // nth(n) skips n+1 - self.iter.nth(self.n - 1)?; - } - self.iter.last() - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (lower, upper) = self.iter.size_hint(); - - let lower = lower.saturating_sub(self.n); - let upper = match upper { - Some(x) => Some(x.saturating_sub(self.n)), - None => None, - }; - - (lower, upper) - } - - #[inline] - fn try_fold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - let n = self.n; - self.n = 0; - if n > 0 { - // nth(n) skips n+1 - if self.iter.nth(n - 1).is_none() { - return try { init }; - } - } - self.iter.try_fold(init, fold) - } - - #[inline] - fn fold(mut self, init: Acc, fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - if self.n > 0 { - // nth(n) skips n+1 - if self.iter.nth(self.n - 1).is_none() { - return init; - } - } - self.iter.fold(init, fold) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Skip where I: ExactSizeIterator {} - -#[stable(feature = "double_ended_skip_iterator", since = "1.9.0")] -impl DoubleEndedIterator for Skip -where - I: DoubleEndedIterator + ExactSizeIterator, -{ - fn next_back(&mut self) -> Option { - if self.len() > 0 { self.iter.next_back() } else { None } - } - - #[inline] - fn nth_back(&mut self, n: usize) -> Option { - let len = self.len(); - if n < len { - self.iter.nth_back(n) - } else { - if len > 0 { - // consume the original iterator - self.iter.nth_back(len - 1); - } - None - } - } - - fn try_rfold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - fn check>( - mut n: usize, - mut fold: impl FnMut(Acc, T) -> R, - ) -> impl FnMut(Acc, T) -> ControlFlow { - move |acc, x| { - n -= 1; - let r = fold(acc, x); - if n == 0 { ControlFlow::Break(r) } else { ControlFlow::from_try(r) } - } - } - - let n = self.len(); - if n == 0 { try { init } } else { self.iter.try_rfold(init, check(n, fold)).into_try() } - } - - fn rfold(mut self, init: Acc, fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - #[inline] - fn ok(mut f: impl FnMut(Acc, T) -> Acc) -> impl FnMut(Acc, T) -> Result { - move |acc, x| Ok(f(acc, x)) - } - - self.try_rfold(init, ok(fold)).unwrap() - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Skip where I: FusedIterator {} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl SourceIter for Skip -where - I: SourceIter, -{ - type Source = S; - - #[inline] - unsafe fn as_inner(&mut self) -> &mut S { - // SAFETY: unsafe function forwarding to unsafe function with the same requirements - unsafe { SourceIter::as_inner(&mut self.iter) } - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl InPlaceIterable for Skip {} - -/// An iterator that only iterates over the first `n` iterations of `iter`. -/// -/// This `struct` is created by the [`take`] method on [`Iterator`]. See its -/// documentation for more. 
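Before the `Take` adapter, a quick sketch of the `Skip` double-ended behaviour implemented above: iterating a `skip(n)` from the back yields the inner iterator's elements until only the skipped prefix remains. This example is not part of the diff:

```rust
fn main() {
    // `skip(2)` removes the first two elements regardless of direction...
    let forward: Vec<u32> = (0..5).skip(2).collect();
    assert_eq!(forward, [2, 3, 4]);

    // ...so from the back we still see 4, 3, 2 and then stop: `next_back`
    // returns `None` as soon as only the skipped prefix (0, 1) is left.
    let backward: Vec<u32> = (0..5).skip(2).rev().collect();
    assert_eq!(backward, [4, 3, 2]);
}
```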
-/// -/// [`take`]: Iterator::take -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone, Debug)] -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Take { - pub(super) iter: I, - pub(super) n: usize, -} -impl Take { - pub(super) fn new(iter: I, n: usize) -> Take { - Take { iter, n } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Take -where - I: Iterator, -{ - type Item = ::Item; - - #[inline] - fn next(&mut self) -> Option<::Item> { - if self.n != 0 { - self.n -= 1; - self.iter.next() - } else { - None - } - } - - #[inline] - fn nth(&mut self, n: usize) -> Option { - if self.n > n { - self.n -= n + 1; - self.iter.nth(n) - } else { - if self.n > 0 { - self.iter.nth(self.n - 1); - self.n = 0; - } - None - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - if self.n == 0 { - return (0, Some(0)); - } - - let (lower, upper) = self.iter.size_hint(); - - let lower = cmp::min(lower, self.n); - - let upper = match upper { - Some(x) if x < self.n => Some(x), - _ => Some(self.n), - }; - - (lower, upper) - } - - #[inline] - fn try_fold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - fn check<'a, T, Acc, R: Try>( - n: &'a mut usize, - mut fold: impl FnMut(Acc, T) -> R + 'a, - ) -> impl FnMut(Acc, T) -> ControlFlow + 'a { - move |acc, x| { - *n -= 1; - let r = fold(acc, x); - if *n == 0 { ControlFlow::Break(r) } else { ControlFlow::from_try(r) } - } - } - - if self.n == 0 { - try { init } - } else { - let n = &mut self.n; - self.iter.try_fold(init, check(n, fold)).into_try() - } - } - - #[inline] - fn fold(mut self, init: Acc, fold: Fold) -> Acc - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> Acc, - { - #[inline] - fn ok(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result { - move |acc, x| Ok(f(acc, x)) - } - - self.try_fold(init, ok(fold)).unwrap() - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl SourceIter for Take -where - I: SourceIter, -{ - type Source = S; - - #[inline] - unsafe fn as_inner(&mut self) -> &mut S { - // SAFETY: unsafe function forwarding to unsafe function with the same requirements - unsafe { SourceIter::as_inner(&mut self.iter) } - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl InPlaceIterable for Take {} - -#[stable(feature = "double_ended_take_iterator", since = "1.38.0")] -impl DoubleEndedIterator for Take -where - I: DoubleEndedIterator + ExactSizeIterator, -{ - #[inline] - fn next_back(&mut self) -> Option { - if self.n == 0 { - None - } else { - let n = self.n; - self.n -= 1; - self.iter.nth_back(self.iter.len().saturating_sub(n)) - } - } - - #[inline] - fn nth_back(&mut self, n: usize) -> Option { - let len = self.iter.len(); - if self.n > n { - let m = len.saturating_sub(self.n) + n; - self.n -= n + 1; - self.iter.nth_back(m) - } else { - if len > 0 { - self.iter.nth_back(len - 1); - } - None - } - } - - #[inline] - fn try_rfold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - if self.n == 0 { - try { init } - } else { - let len = self.iter.len(); - if len > self.n && self.iter.nth_back(len - self.n - 1).is_none() { - try { init } - } else { - self.iter.try_rfold(init, fold) - } - } - } - - #[inline] - fn rfold(mut self, init: Acc, fold: Fold) -> Acc - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> Acc, - { - if self.n == 0 { - init - 
} else { - let len = self.iter.len(); - if len > self.n && self.iter.nth_back(len - self.n - 1).is_none() { - init - } else { - self.iter.rfold(init, fold) - } - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Take where I: ExactSizeIterator {} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Take where I: FusedIterator {} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for Take {} - -/// An iterator to maintain state while iterating another iterator. -/// -/// This `struct` is created by the [`scan`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`scan`]: Iterator::scan -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct Scan { - iter: I, - f: F, - state: St, -} -impl Scan { - pub(super) fn new(iter: I, state: St, f: F) -> Scan { - Scan { iter, state, f } - } -} - -#[stable(feature = "core_impl_debug", since = "1.9.0")] -impl fmt::Debug for Scan { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Scan").field("iter", &self.iter).field("state", &self.state).finish() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Scan -where - I: Iterator, - F: FnMut(&mut St, I::Item) -> Option, -{ - type Item = B; - - #[inline] - fn next(&mut self) -> Option { - let a = self.iter.next()?; - (self.f)(&mut self.state, a) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (_, upper) = self.iter.size_hint(); - (0, upper) // can't know a lower bound, due to the scan function - } - - #[inline] - fn try_fold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - fn scan<'a, T, St, B, Acc, R: Try>( - state: &'a mut St, - f: &'a mut impl FnMut(&mut St, T) -> Option, - mut fold: impl FnMut(Acc, B) -> R + 'a, - ) -> impl FnMut(Acc, T) -> ControlFlow + 'a { - move |acc, x| match f(state, x) { - None => ControlFlow::Break(try { acc }), - Some(x) => ControlFlow::from_try(fold(acc, x)), - } - } - - let state = &mut self.state; - let f = &mut self.f; - self.iter.try_fold(init, scan(state, f, fold)).into_try() - } - - #[inline] - fn fold(mut self, init: Acc, fold: Fold) -> Acc - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> Acc, - { - #[inline] - fn ok(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result { - move |acc, x| Ok(f(acc, x)) - } - - self.try_fold(init, ok(fold)).unwrap() - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl SourceIter for Scan -where - I: SourceIter, - F: FnMut(&mut St, I::Item) -> Option, -{ - type Source = S; - - #[inline] - unsafe fn as_inner(&mut self) -> &mut S { - // SAFETY: unsafe function forwarding to unsafe function with the same requirements - unsafe { SourceIter::as_inner(&mut self.iter) } - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl InPlaceIterable for Scan where - F: FnMut(&mut St, I::Item) -> Option -{ -} - -/// An iterator that calls a function with a reference to each element before -/// yielding it. -/// -/// This `struct` is created by the [`inspect`] method on [`Iterator`]. See its -/// documentation for more. 
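Before `Inspect`, a short usage sketch for the `Scan` adapter above: the closure threads mutable state through the iteration and ends it by returning `None`, much like the `ControlFlow::Break` arm in the `try_fold` implementation. Not part of the diff:

```rust
fn main() {
    // Running sum that cuts off once the total exceeds 6: `scan` stops at the
    // first `None` the closure returns.
    let partial_sums: Vec<i32> = (1..=5)
        .scan(0, |sum, x| {
            *sum += x;
            if *sum > 6 { None } else { Some(*sum) }
        })
        .collect();
    assert_eq!(partial_sums, [1, 3, 6]);
}
```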
-/// -/// [`inspect`]: Iterator::inspect -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct Inspect { - iter: I, - f: F, -} -impl Inspect { - pub(super) fn new(iter: I, f: F) -> Inspect { - Inspect { iter, f } - } -} - -#[stable(feature = "core_impl_debug", since = "1.9.0")] -impl fmt::Debug for Inspect { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Inspect").field("iter", &self.iter).finish() - } -} - -impl Inspect -where - F: FnMut(&I::Item), -{ - #[inline] - fn do_inspect(&mut self, elt: Option) -> Option { - if let Some(ref a) = elt { - (self.f)(a); - } - - elt - } -} - -fn inspect_fold( - mut f: impl FnMut(&T), - mut fold: impl FnMut(Acc, T) -> Acc, -) -> impl FnMut(Acc, T) -> Acc { - move |acc, item| { - f(&item); - fold(acc, item) - } -} - -fn inspect_try_fold<'a, T, Acc, R>( - f: &'a mut impl FnMut(&T), - mut fold: impl FnMut(Acc, T) -> R + 'a, -) -> impl FnMut(Acc, T) -> R + 'a { - move |acc, item| { - f(&item); - fold(acc, item) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Inspect -where - F: FnMut(&I::Item), -{ - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - let next = self.iter.next(); - self.do_inspect(next) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - #[inline] - fn try_fold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - self.iter.try_fold(init, inspect_try_fold(&mut self.f, fold)) - } - - #[inline] - fn fold(self, init: Acc, fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.fold(init, inspect_fold(self.f, fold)) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Inspect -where - F: FnMut(&I::Item), -{ - #[inline] - fn next_back(&mut self) -> Option { - let next = self.iter.next_back(); - self.do_inspect(next) - } - - #[inline] - fn try_rfold(&mut self, init: Acc, fold: Fold) -> R - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> R, - R: Try, - { - self.iter.try_rfold(init, inspect_try_fold(&mut self.f, fold)) - } - - #[inline] - fn rfold(self, init: Acc, fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.rfold(init, inspect_fold(self.f, fold)) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Inspect -where - F: FnMut(&I::Item), -{ - fn len(&self) -> usize { - self.iter.len() - } - - fn is_empty(&self) -> bool { - self.iter.is_empty() - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Inspect where F: FnMut(&I::Item) {} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl SourceIter for Inspect -where - F: FnMut(&I::Item), - I: SourceIter, -{ - type Source = S; - - #[inline] - unsafe fn as_inner(&mut self) -> &mut S { - // SAFETY: unsafe function forwarding to unsafe function with the same requirements - unsafe { SourceIter::as_inner(&mut self.iter) } - } -} - -#[unstable(issue = "none", feature = "inplace_iteration")] -unsafe impl InPlaceIterable for Inspect where F: FnMut(&I::Item) {} - /// An iterator adapter that produces output as long as the underlying /// iterator produces `Result::Ok` values. 
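Finally, a small usage sketch for the `Inspect` adapter removed above; it only observes each element (via `do_inspect`) and passes it through unchanged. This example is not part of the diff:

```rust
fn main() {
    let mut seen = Vec::new();

    // `inspect` lets us record (or log) each element as it flows through the
    // chain without affecting what the next adapter receives.
    let doubled: Vec<i32> = [1, 2, 3]
        .iter()
        .inspect(|&&x| seen.push(x))
        .map(|&x| x * 2)
        .collect();

    assert_eq!(seen, [1, 2, 3]);
    assert_eq!(doubled, [2, 4, 6]);
}
```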
/// diff --git a/library/core/src/iter/adapters/peekable.rs b/library/core/src/iter/adapters/peekable.rs new file mode 100644 index 00000000000..e7fb3abc942 --- /dev/null +++ b/library/core/src/iter/adapters/peekable.rs @@ -0,0 +1,301 @@ +use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedLen}; +use crate::ops::Try; + +/// An iterator with a `peek()` that returns an optional reference to the next +/// element. +/// +/// This `struct` is created by the [`peekable`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`peekable`]: Iterator::peekable +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Peekable { + iter: I, + /// Remember a peeked value, even if it was None. + peeked: Option>, +} + +impl Peekable { + pub(in crate::iter) fn new(iter: I) -> Peekable { + Peekable { iter, peeked: None } + } +} + +// Peekable must remember if a None has been seen in the `.peek()` method. +// It ensures that `.peek(); .peek();` or `.peek(); .next();` only advances the +// underlying iterator at most once. This does not by itself make the iterator +// fused. +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Peekable { + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + match self.peeked.take() { + Some(v) => v, + None => self.iter.next(), + } + } + + #[inline] + #[rustc_inherit_overflow_checks] + fn count(mut self) -> usize { + match self.peeked.take() { + Some(None) => 0, + Some(Some(_)) => 1 + self.iter.count(), + None => self.iter.count(), + } + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + match self.peeked.take() { + Some(None) => None, + Some(v @ Some(_)) if n == 0 => v, + Some(Some(_)) => self.iter.nth(n - 1), + None => self.iter.nth(n), + } + } + + #[inline] + fn last(mut self) -> Option { + let peek_opt = match self.peeked.take() { + Some(None) => return None, + Some(v) => v, + None => None, + }; + self.iter.last().or(peek_opt) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let peek_len = match self.peeked { + Some(None) => return (0, Some(0)), + Some(Some(_)) => 1, + None => 0, + }; + let (lo, hi) = self.iter.size_hint(); + let lo = lo.saturating_add(peek_len); + let hi = match hi { + Some(x) => x.checked_add(peek_len), + None => None, + }; + (lo, hi) + } + + #[inline] + fn try_fold(&mut self, init: B, mut f: F) -> R + where + Self: Sized, + F: FnMut(B, Self::Item) -> R, + R: Try, + { + let acc = match self.peeked.take() { + Some(None) => return try { init }, + Some(Some(v)) => f(init, v)?, + None => init, + }; + self.iter.try_fold(acc, f) + } + + #[inline] + fn fold(self, init: Acc, mut fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + let acc = match self.peeked { + Some(None) => return init, + Some(Some(v)) => fold(init, v), + None => init, + }; + self.iter.fold(acc, fold) + } +} + +#[stable(feature = "double_ended_peek_iterator", since = "1.38.0")] +impl DoubleEndedIterator for Peekable +where + I: DoubleEndedIterator, +{ + #[inline] + fn next_back(&mut self) -> Option { + match self.peeked.as_mut() { + Some(v @ Some(_)) => self.iter.next_back().or_else(|| v.take()), + Some(None) => None, + None => self.iter.next_back(), + } + } + + #[inline] + fn try_rfold(&mut self, init: B, mut f: F) -> R + where + Self: Sized, + F: FnMut(B, Self::Item) -> R, + R: Try, + { + match self.peeked.take() { + Some(None) => try { init }, + 
Some(Some(v)) => match self.iter.try_rfold(init, &mut f).into_result() { + Ok(acc) => f(acc, v), + Err(e) => { + self.peeked = Some(Some(v)); + Try::from_error(e) + } + }, + None => self.iter.try_rfold(init, f), + } + } + + #[inline] + fn rfold(self, init: Acc, mut fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + match self.peeked { + Some(None) => init, + Some(Some(v)) => { + let acc = self.iter.rfold(init, &mut fold); + fold(acc, v) + } + None => self.iter.rfold(init, fold), + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Peekable {} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Peekable {} + +impl Peekable { + /// Returns a reference to the next() value without advancing the iterator. + /// + /// Like [`next`], if there is a value, it is wrapped in a `Some(T)`. + /// But if the iteration is over, `None` is returned. + /// + /// [`next`]: Iterator::next + /// + /// Because `peek()` returns a reference, and many iterators iterate over + /// references, there can be a possibly confusing situation where the + /// return value is a double reference. You can see this effect in the + /// examples below. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let xs = [1, 2, 3]; + /// + /// let mut iter = xs.iter().peekable(); + /// + /// // peek() lets us see into the future + /// assert_eq!(iter.peek(), Some(&&1)); + /// assert_eq!(iter.next(), Some(&1)); + /// + /// assert_eq!(iter.next(), Some(&2)); + /// + /// // The iterator does not advance even if we `peek` multiple times + /// assert_eq!(iter.peek(), Some(&&3)); + /// assert_eq!(iter.peek(), Some(&&3)); + /// + /// assert_eq!(iter.next(), Some(&3)); + /// + /// // After the iterator is finished, so is `peek()` + /// assert_eq!(iter.peek(), None); + /// assert_eq!(iter.next(), None); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + pub fn peek(&mut self) -> Option<&I::Item> { + let iter = &mut self.iter; + self.peeked.get_or_insert_with(|| iter.next()).as_ref() + } + + /// Consume and return the next value of this iterator if a condition is true. + /// + /// If `func` returns `true` for the next value of this iterator, consume and return it. + /// Otherwise, return `None`. + /// + /// # Examples + /// Consume a number if it's equal to 0. + /// ``` + /// #![feature(peekable_next_if)] + /// let mut iter = (0..5).peekable(); + /// // The first item of the iterator is 0; consume it. + /// assert_eq!(iter.next_if(|&x| x == 0), Some(0)); + /// // The next item returned is now 1, so `consume` will return `false`. + /// assert_eq!(iter.next_if(|&x| x == 0), None); + /// // `next_if` saves the value of the next item if it was not equal to `expected`. + /// assert_eq!(iter.next(), Some(1)); + /// ``` + /// + /// Consume any number less than 10. + /// ``` + /// #![feature(peekable_next_if)] + /// let mut iter = (1..20).peekable(); + /// // Consume all numbers less than 10 + /// while iter.next_if(|&x| x < 10).is_some() {} + /// // The next value returned will be 10 + /// assert_eq!(iter.next(), Some(10)); + /// ``` + #[unstable(feature = "peekable_next_if", issue = "72480")] + pub fn next_if(&mut self, func: impl FnOnce(&I::Item) -> bool) -> Option { + match self.next() { + Some(matched) if func(&matched) => Some(matched), + other => { + // Since we called `self.next()`, we consumed `self.peeked`. 
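+                // `other` is either the `Some(item)` that failed `func`, or the `None`
+                // of an exhausted underlying iterator; storing it back into `self.peeked`
+                // below lets a later `peek()` or `next()` still observe that value.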
+ assert!(self.peeked.is_none()); + self.peeked = Some(other); + None + } + } + } + + /// Consume and return the next item if it is equal to `expected`. + /// + /// # Example + /// Consume a number if it's equal to 0. + /// ``` + /// #![feature(peekable_next_if)] + /// let mut iter = (0..5).peekable(); + /// // The first item of the iterator is 0; consume it. + /// assert_eq!(iter.next_if_eq(&0), Some(0)); + /// // The next item returned is now 1, so `consume` will return `false`. + /// assert_eq!(iter.next_if_eq(&0), None); + /// // `next_if_eq` saves the value of the next item if it was not equal to `expected`. + /// assert_eq!(iter.next(), Some(1)); + /// ``` + #[unstable(feature = "peekable_next_if", issue = "72480")] + pub fn next_if_eq(&mut self, expected: &T) -> Option + where + T: ?Sized, + I::Item: PartialEq, + { + self.next_if(|next| next == expected) + } +} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Peekable where I: TrustedLen {} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl SourceIter for Peekable +where + I: SourceIter, +{ + type Source = S; + + #[inline] + unsafe fn as_inner(&mut self) -> &mut S { + // SAFETY: unsafe function forwarding to unsafe function with the same requirements + unsafe { SourceIter::as_inner(&mut self.iter) } + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl InPlaceIterable for Peekable {} diff --git a/library/core/src/iter/adapters/rev.rs b/library/core/src/iter/adapters/rev.rs new file mode 100644 index 00000000000..105ed40a3ed --- /dev/null +++ b/library/core/src/iter/adapters/rev.rs @@ -0,0 +1,137 @@ +use crate::iter::{FusedIterator, TrustedLen}; +use crate::ops::Try; + +/// A double-ended iterator with the direction inverted. +/// +/// This `struct` is created by the [`rev`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`rev`]: Iterator::rev +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Rev { + iter: T, +} + +impl Rev { + pub(in crate::iter) fn new(iter: T) -> Rev { + Rev { iter } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Rev +where + I: DoubleEndedIterator, +{ + type Item = ::Item; + + #[inline] + fn next(&mut self) -> Option<::Item> { + self.iter.next_back() + } + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + #[inline] + fn advance_by(&mut self, n: usize) -> Result<(), usize> { + self.iter.advance_back_by(n) + } + + #[inline] + fn nth(&mut self, n: usize) -> Option<::Item> { + self.iter.nth_back(n) + } + + fn try_fold(&mut self, init: B, f: F) -> R + where + Self: Sized, + F: FnMut(B, Self::Item) -> R, + R: Try, + { + self.iter.try_rfold(init, f) + } + + fn fold(self, init: Acc, f: F) -> Acc + where + F: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.rfold(init, f) + } + + #[inline] + fn find

(&mut self, predicate: P) -> Option + where + P: FnMut(&Self::Item) -> bool, + { + self.iter.rfind(predicate) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Rev +where + I: DoubleEndedIterator, +{ + #[inline] + fn next_back(&mut self) -> Option<::Item> { + self.iter.next() + } + + #[inline] + fn advance_back_by(&mut self, n: usize) -> Result<(), usize> { + self.iter.advance_by(n) + } + + #[inline] + fn nth_back(&mut self, n: usize) -> Option<::Item> { + self.iter.nth(n) + } + + fn try_rfold(&mut self, init: B, f: F) -> R + where + Self: Sized, + F: FnMut(B, Self::Item) -> R, + R: Try, + { + self.iter.try_fold(init, f) + } + + fn rfold(self, init: Acc, f: F) -> Acc + where + F: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.fold(init, f) + } + + fn rfind

{}
\n", STYLE, out) - }; - expect_file!["fixtures/sample.html"].assert_eq(&html); -} - const STYLE: &str = r#" "#; + +#[test] +fn test_html_highlighting() { + let src = include_str!("fixtures/sample.rs"); + let html = { + let mut out = String::new(); + write_code(&mut out, src); + format!("{}
{}
\n", STYLE, out) + }; + expect_file!["fixtures/sample.html"].assert_eq(&html); +} + +#[test] +fn test_dos_backline() { + let src = "pub fn foo() {\r\n\ + println!(\"foo\");\r\n\ +}\r\n"; + let mut html = String::new(); + write_code(&mut html, src); + expect_file!["fixtures/dos_line.html"].assert_eq(&html); +} diff --git a/src/librustdoc/html/layout.rs b/src/librustdoc/html/layout.rs index db73af7ec16..e8039942f4f 100644 --- a/src/librustdoc/html/layout.rs +++ b/src/librustdoc/html/layout.rs @@ -7,33 +7,33 @@ use crate::html::format::{Buffer, Print}; use crate::html::render::{ensure_trailing_slash, StylePath}; #[derive(Clone)] -pub struct Layout { - pub logo: String, - pub favicon: String, - pub external_html: ExternalHtml, - pub default_settings: HashMap, - pub krate: String, +crate struct Layout { + crate logo: String, + crate favicon: String, + crate external_html: ExternalHtml, + crate default_settings: HashMap, + crate krate: String, /// The given user css file which allow to customize the generated /// documentation theme. - pub css_file_extension: Option, + crate css_file_extension: Option, /// If false, the `select` element to have search filtering by crates on rendered docs /// won't be generated. - pub generate_search_filter: bool, + crate generate_search_filter: bool, } -pub struct Page<'a> { - pub title: &'a str, - pub css_class: &'a str, - pub root_path: &'a str, - pub static_root_path: Option<&'a str>, - pub description: &'a str, - pub keywords: &'a str, - pub resource_suffix: &'a str, - pub extra_scripts: &'a [&'a str], - pub static_extra_scripts: &'a [&'a str], +crate struct Page<'a> { + crate title: &'a str, + crate css_class: &'a str, + crate root_path: &'a str, + crate static_root_path: Option<&'a str>, + crate description: &'a str, + crate keywords: &'a str, + crate resource_suffix: &'a str, + crate extra_scripts: &'a [&'a str], + crate static_extra_scripts: &'a [&'a str], } -pub fn render( +crate fn render( layout: &Layout, page: &Page<'_>, sidebar: S, @@ -228,7 +228,7 @@ pub fn render( ) } -pub fn redirect(url: &str) -> String { +crate fn redirect(url: &str) -> String { //

(&mut self, predicate: P) -> Option + where + P: FnMut(&Self::Item) -> bool, + { + self.iter.find(predicate) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Rev +where + I: ExactSizeIterator + DoubleEndedIterator, +{ + fn len(&self) -> usize { + self.iter.len() + } + + fn is_empty(&self) -> bool { + self.iter.is_empty() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Rev where I: FusedIterator + DoubleEndedIterator {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Rev where I: TrustedLen + DoubleEndedIterator {} diff --git a/library/core/src/iter/adapters/scan.rs b/library/core/src/iter/adapters/scan.rs new file mode 100644 index 00000000000..0214899295e --- /dev/null +++ b/library/core/src/iter/adapters/scan.rs @@ -0,0 +1,111 @@ +use crate::fmt; +use crate::iter::{adapters::SourceIter, InPlaceIterable}; +use crate::ops::{ControlFlow, Try}; + +/// An iterator to maintain state while iterating another iterator. +/// +/// This `struct` is created by the [`scan`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`scan`]: Iterator::scan +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct Scan { + iter: I, + f: F, + state: St, +} + +impl Scan { + pub(in crate::iter) fn new(iter: I, state: St, f: F) -> Scan { + Scan { iter, state, f } + } +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for Scan { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Scan").field("iter", &self.iter).field("state", &self.state).finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Scan +where + I: Iterator, + F: FnMut(&mut St, I::Item) -> Option, +{ + type Item = B; + + #[inline] + fn next(&mut self) -> Option { + let a = self.iter.next()?; + (self.f)(&mut self.state, a) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) // can't know a lower bound, due to the scan function + } + + #[inline] + fn try_fold(&mut self, init: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + fn scan<'a, T, St, B, Acc, R: Try>( + state: &'a mut St, + f: &'a mut impl FnMut(&mut St, T) -> Option, + mut fold: impl FnMut(Acc, B) -> R + 'a, + ) -> impl FnMut(Acc, T) -> ControlFlow + 'a { + move |acc, x| match f(state, x) { + None => ControlFlow::Break(try { acc }), + Some(x) => ControlFlow::from_try(fold(acc, x)), + } + } + + let state = &mut self.state; + let f = &mut self.f; + self.iter.try_fold(init, scan(state, f, fold)).into_try() + } + + #[inline] + fn fold(mut self, init: Acc, fold: Fold) -> Acc + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> Acc, + { + #[inline] + fn ok(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result { + move |acc, x| Ok(f(acc, x)) + } + + self.try_fold(init, ok(fold)).unwrap() + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl SourceIter for Scan +where + I: SourceIter, + F: FnMut(&mut St, I::Item) -> Option, +{ + type Source = S; + + #[inline] + unsafe fn as_inner(&mut self) -> &mut S { + // SAFETY: unsafe function forwarding to unsafe function with the same requirements + unsafe { SourceIter::as_inner(&mut self.iter) } + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl 
InPlaceIterable for Scan where + F: FnMut(&mut St, I::Item) -> Option +{ +} diff --git a/library/core/src/iter/adapters/skip.rs b/library/core/src/iter/adapters/skip.rs new file mode 100644 index 00000000000..dd5325660c3 --- /dev/null +++ b/library/core/src/iter/adapters/skip.rs @@ -0,0 +1,199 @@ +use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable}; +use crate::ops::{ControlFlow, Try}; + +/// An iterator that skips over `n` elements of `iter`. +/// +/// This `struct` is created by the [`skip`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`skip`]: Iterator::skip +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Skip { + iter: I, + n: usize, +} + +impl Skip { + pub(in crate::iter) fn new(iter: I, n: usize) -> Skip { + Skip { iter, n } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Skip +where + I: Iterator, +{ + type Item = ::Item; + + #[inline] + fn next(&mut self) -> Option { + if self.n == 0 { + self.iter.next() + } else { + let old_n = self.n; + self.n = 0; + self.iter.nth(old_n) + } + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + // Can't just add n + self.n due to overflow. + if self.n > 0 { + let to_skip = self.n; + self.n = 0; + // nth(n) skips n+1 + self.iter.nth(to_skip - 1)?; + } + self.iter.nth(n) + } + + #[inline] + fn count(mut self) -> usize { + if self.n > 0 { + // nth(n) skips n+1 + if self.iter.nth(self.n - 1).is_none() { + return 0; + } + } + self.iter.count() + } + + #[inline] + fn last(mut self) -> Option { + if self.n > 0 { + // nth(n) skips n+1 + self.iter.nth(self.n - 1)?; + } + self.iter.last() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (lower, upper) = self.iter.size_hint(); + + let lower = lower.saturating_sub(self.n); + let upper = match upper { + Some(x) => Some(x.saturating_sub(self.n)), + None => None, + }; + + (lower, upper) + } + + #[inline] + fn try_fold(&mut self, init: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + let n = self.n; + self.n = 0; + if n > 0 { + // nth(n) skips n+1 + if self.iter.nth(n - 1).is_none() { + return try { init }; + } + } + self.iter.try_fold(init, fold) + } + + #[inline] + fn fold(mut self, init: Acc, fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + if self.n > 0 { + // nth(n) skips n+1 + if self.iter.nth(self.n - 1).is_none() { + return init; + } + } + self.iter.fold(init, fold) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Skip where I: ExactSizeIterator {} + +#[stable(feature = "double_ended_skip_iterator", since = "1.9.0")] +impl DoubleEndedIterator for Skip +where + I: DoubleEndedIterator + ExactSizeIterator, +{ + fn next_back(&mut self) -> Option { + if self.len() > 0 { self.iter.next_back() } else { None } + } + + #[inline] + fn nth_back(&mut self, n: usize) -> Option { + let len = self.len(); + if n < len { + self.iter.nth_back(n) + } else { + if len > 0 { + // consume the original iterator + self.iter.nth_back(len - 1); + } + None + } + } + + fn try_rfold(&mut self, init: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + fn check>( + mut n: usize, + mut fold: impl FnMut(Acc, T) -> R, + ) -> impl FnMut(Acc, T) -> ControlFlow { + move |acc, x| { + n -= 1; + let r = fold(acc, x); + if n == 0 { ControlFlow::Break(r) } 
else { ControlFlow::from_try(r) } + } + } + + let n = self.len(); + if n == 0 { try { init } } else { self.iter.try_rfold(init, check(n, fold)).into_try() } + } + + fn rfold(mut self, init: Acc, fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + #[inline] + fn ok(mut f: impl FnMut(Acc, T) -> Acc) -> impl FnMut(Acc, T) -> Result { + move |acc, x| Ok(f(acc, x)) + } + + self.try_rfold(init, ok(fold)).unwrap() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Skip where I: FusedIterator {} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl SourceIter for Skip +where + I: SourceIter, +{ + type Source = S; + + #[inline] + unsafe fn as_inner(&mut self) -> &mut S { + // SAFETY: unsafe function forwarding to unsafe function with the same requirements + unsafe { SourceIter::as_inner(&mut self.iter) } + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl InPlaceIterable for Skip {} diff --git a/library/core/src/iter/adapters/skip_while.rs b/library/core/src/iter/adapters/skip_while.rs new file mode 100644 index 00000000000..efcb469fc95 --- /dev/null +++ b/library/core/src/iter/adapters/skip_while.rs @@ -0,0 +1,126 @@ +use crate::fmt; +use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable}; +use crate::ops::Try; + +/// An iterator that rejects elements while `predicate` returns `true`. +/// +/// This `struct` is created by the [`skip_while`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`skip_while`]: Iterator::skip_while +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct SkipWhile { + iter: I, + flag: bool, + predicate: P, +} + +impl SkipWhile { + pub(in crate::iter) fn new(iter: I, predicate: P) -> SkipWhile { + SkipWhile { iter, flag: false, predicate } + } +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for SkipWhile { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SkipWhile").field("iter", &self.iter).field("flag", &self.flag).finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for SkipWhile +where + P: FnMut(&I::Item) -> bool, +{ + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + fn check<'a, T>( + flag: &'a mut bool, + pred: &'a mut impl FnMut(&T) -> bool, + ) -> impl FnMut(&T) -> bool + 'a { + move |x| { + if *flag || !pred(x) { + *flag = true; + true + } else { + false + } + } + } + + let flag = &mut self.flag; + let pred = &mut self.predicate; + self.iter.find(check(flag, pred)) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) // can't know a lower bound, due to the predicate + } + + #[inline] + fn try_fold(&mut self, mut init: Acc, mut fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + if !self.flag { + match self.next() { + Some(v) => init = fold(init, v)?, + None => return try { init }, + } + } + self.iter.try_fold(init, fold) + } + + #[inline] + fn fold(mut self, mut init: Acc, mut fold: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + if !self.flag { + match self.next() { + Some(v) => init = fold(init, v), + None => return init, + } + } + self.iter.fold(init, fold) + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for SkipWhile +where + I: 
FusedIterator, + P: FnMut(&I::Item) -> bool, +{ +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl SourceIter for SkipWhile +where + P: FnMut(&I::Item) -> bool, + I: SourceIter, +{ + type Source = S; + + #[inline] + unsafe fn as_inner(&mut self) -> &mut S { + // SAFETY: unsafe function forwarding to unsafe function with the same requirements + unsafe { SourceIter::as_inner(&mut self.iter) } + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl InPlaceIterable for SkipWhile where + F: FnMut(&I::Item) -> bool +{ +} diff --git a/library/core/src/iter/adapters/step_by.rs b/library/core/src/iter/adapters/step_by.rs new file mode 100644 index 00000000000..2ba56eeccba --- /dev/null +++ b/library/core/src/iter/adapters/step_by.rs @@ -0,0 +1,235 @@ +use crate::{intrinsics, iter::from_fn, ops::Try}; + +/// An iterator for stepping iterators by a custom amount. +/// +/// This `struct` is created by the [`step_by`] method on [`Iterator`]. See +/// its documentation for more. +/// +/// [`step_by`]: Iterator::step_by +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "iterator_step_by", since = "1.28.0")] +#[derive(Clone, Debug)] +pub struct StepBy { + iter: I, + step: usize, + first_take: bool, +} + +impl StepBy { + pub(in crate::iter) fn new(iter: I, step: usize) -> StepBy { + assert!(step != 0); + StepBy { iter, step: step - 1, first_take: true } + } +} + +#[stable(feature = "iterator_step_by", since = "1.28.0")] +impl Iterator for StepBy +where + I: Iterator, +{ + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + if self.first_take { + self.first_take = false; + self.iter.next() + } else { + self.iter.nth(self.step) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + #[inline] + fn first_size(step: usize) -> impl Fn(usize) -> usize { + move |n| if n == 0 { 0 } else { 1 + (n - 1) / (step + 1) } + } + + #[inline] + fn other_size(step: usize) -> impl Fn(usize) -> usize { + move |n| n / (step + 1) + } + + let (low, high) = self.iter.size_hint(); + + if self.first_take { + let f = first_size(self.step); + (f(low), high.map(f)) + } else { + let f = other_size(self.step); + (f(low), high.map(f)) + } + } + + #[inline] + fn nth(&mut self, mut n: usize) -> Option { + if self.first_take { + self.first_take = false; + let first = self.iter.next(); + if n == 0 { + return first; + } + n -= 1; + } + // n and self.step are indices, we need to add 1 to get the amount of elements + // When calling `.nth`, we need to subtract 1 again to convert back to an index + // step + 1 can't overflow because `.step_by` sets `self.step` to `step - 1` + let mut step = self.step + 1; + // n + 1 could overflow + // thus, if n is usize::MAX, instead of adding one, we call .nth(step) + if n == usize::MAX { + self.iter.nth(step - 1); + } else { + n += 1; + } + + // overflow handling + loop { + let mul = n.checked_mul(step); + { + if intrinsics::likely(mul.is_some()) { + return self.iter.nth(mul.unwrap() - 1); + } + } + let div_n = usize::MAX / n; + let div_step = usize::MAX / step; + let nth_n = div_n * n; + let nth_step = div_step * step; + let nth = if nth_n > nth_step { + step -= div_n; + nth_n + } else { + n -= div_step; + nth_step + }; + self.iter.nth(nth - 1); + } + } + + fn try_fold(&mut self, mut acc: Acc, mut f: F) -> R + where + F: FnMut(Acc, Self::Item) -> R, + R: Try, + { + #[inline] + fn nth(iter: &mut I, step: usize) -> impl FnMut() -> Option + '_ { + move || 
iter.nth(step) + } + + if self.first_take { + self.first_take = false; + match self.iter.next() { + None => return try { acc }, + Some(x) => acc = f(acc, x)?, + } + } + from_fn(nth(&mut self.iter, self.step)).try_fold(acc, f) + } + + fn fold(mut self, mut acc: Acc, mut f: F) -> Acc + where + F: FnMut(Acc, Self::Item) -> Acc, + { + #[inline] + fn nth(iter: &mut I, step: usize) -> impl FnMut() -> Option + '_ { + move || iter.nth(step) + } + + if self.first_take { + self.first_take = false; + match self.iter.next() { + None => return acc, + Some(x) => acc = f(acc, x), + } + } + from_fn(nth(&mut self.iter, self.step)).fold(acc, f) + } +} + +impl StepBy +where + I: ExactSizeIterator, +{ + // The zero-based index starting from the end of the iterator of the + // last element. Used in the `DoubleEndedIterator` implementation. + fn next_back_index(&self) -> usize { + let rem = self.iter.len() % (self.step + 1); + if self.first_take { + if rem == 0 { self.step } else { rem - 1 } + } else { + rem + } + } +} + +#[stable(feature = "double_ended_step_by_iterator", since = "1.38.0")] +impl DoubleEndedIterator for StepBy +where + I: DoubleEndedIterator + ExactSizeIterator, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.iter.nth_back(self.next_back_index()) + } + + #[inline] + fn nth_back(&mut self, n: usize) -> Option { + // `self.iter.nth_back(usize::MAX)` does the right thing here when `n` + // is out of bounds because the length of `self.iter` does not exceed + // `usize::MAX` (because `I: ExactSizeIterator`) and `nth_back` is + // zero-indexed + let n = n.saturating_mul(self.step + 1).saturating_add(self.next_back_index()); + self.iter.nth_back(n) + } + + fn try_rfold(&mut self, init: Acc, mut f: F) -> R + where + F: FnMut(Acc, Self::Item) -> R, + R: Try, + { + #[inline] + fn nth_back( + iter: &mut I, + step: usize, + ) -> impl FnMut() -> Option + '_ { + move || iter.nth_back(step) + } + + match self.next_back() { + None => try { init }, + Some(x) => { + let acc = f(init, x)?; + from_fn(nth_back(&mut self.iter, self.step)).try_fold(acc, f) + } + } + } + + #[inline] + fn rfold(mut self, init: Acc, mut f: F) -> Acc + where + Self: Sized, + F: FnMut(Acc, Self::Item) -> Acc, + { + #[inline] + fn nth_back( + iter: &mut I, + step: usize, + ) -> impl FnMut() -> Option + '_ { + move || iter.nth_back(step) + } + + match self.next_back() { + None => init, + Some(x) => { + let acc = f(init, x); + from_fn(nth_back(&mut self.iter, self.step)).fold(acc, f) + } + } + } +} + +// StepBy can only make the iterator shorter, so the len will still fit. +#[stable(feature = "iterator_step_by", since = "1.28.0")] +impl ExactSizeIterator for StepBy where I: ExactSizeIterator {} diff --git a/library/core/src/iter/adapters/take.rs b/library/core/src/iter/adapters/take.rs new file mode 100644 index 00000000000..9efc7a480ae --- /dev/null +++ b/library/core/src/iter/adapters/take.rs @@ -0,0 +1,209 @@ +use crate::cmp; +use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedLen}; +use crate::ops::{ControlFlow, Try}; + +/// An iterator that only iterates over the first `n` iterations of `iter`. +/// +/// This `struct` is created by the [`take`] method on [`Iterator`]. See its +/// documentation for more. 
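Both the `StepBy` arithmetic above and the `Take` adapter introduced here narrow `size_hint` to the number of elements that can still be yielded. A small sketch with the stable `step_by` and `take` methods:

```
let stepped = (0..10).step_by(3);          // yields 0, 3, 6, 9
assert_eq!(stepped.size_hint(), (4, Some(4)));

let taken = (1..).take(3);                 // yields 1, 2, 3
assert_eq!(taken.size_hint(), (3, Some(3)));
assert_eq!(taken.last(), Some(3));
```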
+/// +/// [`take`]: Iterator::take +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Take { + iter: I, + n: usize, +} + +impl Take { + pub(in crate::iter) fn new(iter: I, n: usize) -> Take { + Take { iter, n } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Take +where + I: Iterator, +{ + type Item = ::Item; + + #[inline] + fn next(&mut self) -> Option<::Item> { + if self.n != 0 { + self.n -= 1; + self.iter.next() + } else { + None + } + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + if self.n > n { + self.n -= n + 1; + self.iter.nth(n) + } else { + if self.n > 0 { + self.iter.nth(self.n - 1); + self.n = 0; + } + None + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + if self.n == 0 { + return (0, Some(0)); + } + + let (lower, upper) = self.iter.size_hint(); + + let lower = cmp::min(lower, self.n); + + let upper = match upper { + Some(x) if x < self.n => Some(x), + _ => Some(self.n), + }; + + (lower, upper) + } + + #[inline] + fn try_fold(&mut self, init: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + fn check<'a, T, Acc, R: Try>( + n: &'a mut usize, + mut fold: impl FnMut(Acc, T) -> R + 'a, + ) -> impl FnMut(Acc, T) -> ControlFlow + 'a { + move |acc, x| { + *n -= 1; + let r = fold(acc, x); + if *n == 0 { ControlFlow::Break(r) } else { ControlFlow::from_try(r) } + } + } + + if self.n == 0 { + try { init } + } else { + let n = &mut self.n; + self.iter.try_fold(init, check(n, fold)).into_try() + } + } + + #[inline] + fn fold(mut self, init: Acc, fold: Fold) -> Acc + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> Acc, + { + #[inline] + fn ok(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result { + move |acc, x| Ok(f(acc, x)) + } + + self.try_fold(init, ok(fold)).unwrap() + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl SourceIter for Take +where + I: SourceIter, +{ + type Source = S; + + #[inline] + unsafe fn as_inner(&mut self) -> &mut S { + // SAFETY: unsafe function forwarding to unsafe function with the same requirements + unsafe { SourceIter::as_inner(&mut self.iter) } + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl InPlaceIterable for Take {} + +#[stable(feature = "double_ended_take_iterator", since = "1.38.0")] +impl DoubleEndedIterator for Take +where + I: DoubleEndedIterator + ExactSizeIterator, +{ + #[inline] + fn next_back(&mut self) -> Option { + if self.n == 0 { + None + } else { + let n = self.n; + self.n -= 1; + self.iter.nth_back(self.iter.len().saturating_sub(n)) + } + } + + #[inline] + fn nth_back(&mut self, n: usize) -> Option { + let len = self.iter.len(); + if self.n > n { + let m = len.saturating_sub(self.n) + n; + self.n -= n + 1; + self.iter.nth_back(m) + } else { + if len > 0 { + self.iter.nth_back(len - 1); + } + None + } + } + + #[inline] + fn try_rfold(&mut self, init: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + if self.n == 0 { + try { init } + } else { + let len = self.iter.len(); + if len > self.n && self.iter.nth_back(len - self.n - 1).is_none() { + try { init } + } else { + self.iter.try_rfold(init, fold) + } + } + } + + #[inline] + fn rfold(mut self, init: Acc, fold: Fold) -> Acc + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> Acc, + { + if self.n == 0 { + init + } else { + 
let len = self.iter.len(); + if len > self.n && self.iter.nth_back(len - self.n - 1).is_none() { + init + } else { + self.iter.rfold(init, fold) + } + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Take where I: ExactSizeIterator {} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Take where I: FusedIterator {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Take {} diff --git a/library/core/src/iter/adapters/take_while.rs b/library/core/src/iter/adapters/take_while.rs new file mode 100644 index 00000000000..746eb41f4c3 --- /dev/null +++ b/library/core/src/iter/adapters/take_while.rs @@ -0,0 +1,139 @@ +use crate::fmt; +use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable}; +use crate::ops::{ControlFlow, Try}; + +/// An iterator that only accepts elements while `predicate` returns `true`. +/// +/// This `struct` is created by the [`take_while`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`take_while`]: Iterator::take_while +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct TakeWhile { + iter: I, + flag: bool, + predicate: P, +} + +impl TakeWhile { + pub(in crate::iter) fn new(iter: I, predicate: P) -> TakeWhile { + TakeWhile { iter, flag: false, predicate } + } +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for TakeWhile { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TakeWhile").field("iter", &self.iter).field("flag", &self.flag).finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for TakeWhile +where + P: FnMut(&I::Item) -> bool, +{ + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + if self.flag { + None + } else { + let x = self.iter.next()?; + if (self.predicate)(&x) { + Some(x) + } else { + self.flag = true; + None + } + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + if self.flag { + (0, Some(0)) + } else { + let (_, upper) = self.iter.size_hint(); + (0, upper) // can't know a lower bound, due to the predicate + } + } + + #[inline] + fn try_fold(&mut self, init: Acc, fold: Fold) -> R + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> R, + R: Try, + { + fn check<'a, T, Acc, R: Try>( + flag: &'a mut bool, + p: &'a mut impl FnMut(&T) -> bool, + mut fold: impl FnMut(Acc, T) -> R + 'a, + ) -> impl FnMut(Acc, T) -> ControlFlow + 'a { + move |acc, x| { + if p(&x) { + ControlFlow::from_try(fold(acc, x)) + } else { + *flag = true; + ControlFlow::Break(try { acc }) + } + } + } + + if self.flag { + try { init } + } else { + let flag = &mut self.flag; + let p = &mut self.predicate; + self.iter.try_fold(init, check(flag, p, fold)).into_try() + } + } + + #[inline] + fn fold(mut self, init: Acc, fold: Fold) -> Acc + where + Self: Sized, + Fold: FnMut(Acc, Self::Item) -> Acc, + { + #[inline] + fn ok(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result { + move |acc, x| Ok(f(acc, x)) + } + + self.try_fold(init, ok(fold)).unwrap() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for TakeWhile +where + I: FusedIterator, + P: FnMut(&I::Item) -> bool, +{ +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl SourceIter for TakeWhile +where + P: FnMut(&I::Item) -> bool, + I: SourceIter, +{ + type Source = S; + + #[inline] + unsafe fn 
as_inner(&mut self) -> &mut S { + // SAFETY: unsafe function forwarding to unsafe function with the same requirements + unsafe { SourceIter::as_inner(&mut self.iter) } + } +} + +#[unstable(issue = "none", feature = "inplace_iteration")] +unsafe impl InPlaceIterable for TakeWhile where + F: FnMut(&I::Item) -> bool +{ +} diff --git a/library/core/src/iter/adapters/zip.rs b/library/core/src/iter/adapters/zip.rs index 78712988eae..8cd4c775231 100644 --- a/library/core/src/iter/adapters/zip.rs +++ b/library/core/src/iter/adapters/zip.rs @@ -1,10 +1,7 @@ use crate::cmp; use crate::fmt::{self, Debug}; - -use super::super::{ - DoubleEndedIterator, ExactSizeIterator, FusedIterator, InPlaceIterable, Iterator, SourceIter, - TrustedLen, -}; +use crate::iter::{DoubleEndedIterator, ExactSizeIterator, FusedIterator, Iterator}; +use crate::iter::{InPlaceIterable, SourceIter, TrustedLen}; /// An iterator that iterates two other iterators simultaneously. /// @@ -21,7 +18,7 @@ pub struct Zip { len: usize, } impl Zip { - pub(in super::super) fn new(a: A, b: B) -> Zip { + pub(in crate::iter) fn new(a: A, b: B) -> Zip { ZipImpl::new(a, b) } fn super_nth(&mut self, mut n: usize) -> Option<(A::Item, B::Item)> { diff --git a/library/core/src/iter/mod.rs b/library/core/src/iter/mod.rs index 59f333e888b..072373c00f6 100644 --- a/library/core/src/iter/mod.rs +++ b/library/core/src/iter/mod.rs @@ -335,15 +335,14 @@ pub use self::sources::{successors, Successors}; #[stable(feature = "fused", since = "1.26.0")] pub use self::traits::FusedIterator; +#[unstable(issue = "none", feature = "inplace_iteration")] +pub use self::traits::InPlaceIterable; #[unstable(feature = "trusted_len", issue = "37572")] pub use self::traits::TrustedLen; #[stable(feature = "rust1", since = "1.0.0")] -pub use self::traits::{DoubleEndedIterator, Extend, FromIterator, IntoIterator}; -#[stable(feature = "rust1", since = "1.0.0")] -pub use self::traits::{ExactSizeIterator, Product, Sum}; - -#[unstable(issue = "none", feature = "inplace_iteration")] -pub use self::traits::InPlaceIterable; +pub use self::traits::{ + DoubleEndedIterator, ExactSizeIterator, Extend, FromIterator, IntoIterator, Product, Sum, +}; #[stable(feature = "iter_cloned", since = "1.1.0")] pub use self::adapters::Cloned; @@ -351,21 +350,19 @@ pub use self::adapters::Cloned; pub use self::adapters::Copied; #[stable(feature = "iterator_flatten", since = "1.29.0")] pub use self::adapters::Flatten; - #[unstable(feature = "iter_map_while", reason = "recently added", issue = "68537")] pub use self::adapters::MapWhile; -#[unstable(issue = "none", feature = "inplace_iteration")] +#[unstable(feature = "inplace_iteration", issue = "none")] pub use self::adapters::SourceIter; #[stable(feature = "iterator_step_by", since = "1.28.0")] pub use self::adapters::StepBy; #[unstable(feature = "trusted_random_access", issue = "none")] pub use self::adapters::TrustedRandomAccess; #[stable(feature = "rust1", since = "1.0.0")] -pub use self::adapters::{Chain, Cycle, Enumerate, Filter, FilterMap, Map, Rev, Zip}; -#[stable(feature = "rust1", since = "1.0.0")] -pub use self::adapters::{FlatMap, Peekable, Scan, Skip, SkipWhile, Take, TakeWhile}; -#[stable(feature = "rust1", since = "1.0.0")] -pub use self::adapters::{Fuse, Inspect}; +pub use self::adapters::{ + Chain, Cycle, Enumerate, Filter, FilterMap, FlatMap, Fuse, Inspect, Map, Peekable, Rev, Scan, + Skip, SkipWhile, Take, TakeWhile, Zip, +}; pub(crate) use self::adapters::process_results; diff --git a/library/core/src/iter/sources.rs 
b/library/core/src/iter/sources.rs index 44da8f4715c..de0663141e2 100644 --- a/library/core/src/iter/sources.rs +++ b/library/core/src/iter/sources.rs @@ -1,625 +1,27 @@ -use crate::fmt; -use crate::marker; +mod empty; +mod from_fn; +mod once; +mod once_with; +mod repeat; +mod repeat_with; +mod successors; -use super::{FusedIterator, TrustedLen}; +pub use self::repeat::{repeat, Repeat}; -/// An iterator that repeats an element endlessly. -/// -/// This `struct` is created by the [`repeat()`] function. See its documentation for more. -#[derive(Clone, Debug)] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Repeat { - element: A, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Repeat { - type Item = A; - - #[inline] - fn next(&mut self) -> Option { - Some(self.element.clone()) - } - #[inline] - fn size_hint(&self) -> (usize, Option) { - (usize::MAX, None) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Repeat { - #[inline] - fn next_back(&mut self) -> Option { - Some(self.element.clone()) - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Repeat {} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for Repeat {} - -/// Creates a new iterator that endlessly repeats a single element. -/// -/// The `repeat()` function repeats a single value over and over again. -/// -/// Infinite iterators like `repeat()` are often used with adapters like -/// [`Iterator::take()`], in order to make them finite. -/// -/// If the element type of the iterator you need does not implement `Clone`, -/// or if you do not want to keep the repeated element in memory, you can -/// instead use the [`repeat_with()`] function. -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use std::iter; -/// -/// // the number four 4ever: -/// let mut fours = iter::repeat(4); -/// -/// assert_eq!(Some(4), fours.next()); -/// assert_eq!(Some(4), fours.next()); -/// assert_eq!(Some(4), fours.next()); -/// assert_eq!(Some(4), fours.next()); -/// assert_eq!(Some(4), fours.next()); -/// -/// // yup, still four -/// assert_eq!(Some(4), fours.next()); -/// ``` -/// -/// Going finite with [`Iterator::take()`]: -/// -/// ``` -/// use std::iter; -/// -/// // that last example was too many fours. Let's only have four fours. -/// let mut four_fours = iter::repeat(4).take(4); -/// -/// assert_eq!(Some(4), four_fours.next()); -/// assert_eq!(Some(4), four_fours.next()); -/// assert_eq!(Some(4), four_fours.next()); -/// assert_eq!(Some(4), four_fours.next()); -/// -/// // ... and now we're done -/// assert_eq!(None, four_fours.next()); -/// ``` -#[inline] -#[stable(feature = "rust1", since = "1.0.0")] -pub fn repeat(elt: T) -> Repeat { - Repeat { element: elt } -} - -/// An iterator that repeats elements of type `A` endlessly by -/// applying the provided closure `F: FnMut() -> A`. -/// -/// This `struct` is created by the [`repeat_with()`] function. -/// See its documentation for more. 
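The monolithic `sources.rs` is split into per-source submodules whose items are re-exported flat, so the public paths under `core::iter`/`std::iter` stay the same. A quick sanity check under that assumption:

```
use std::iter::{empty, once, repeat, successors};

// Same call sites as before the split.
let xs: Vec<i32> = empty()
    .chain(once(1))
    .chain(repeat(2).take(2))
    .chain(successors(Some(4), |&n| if n < 6 { Some(n + 1) } else { None }))
    .collect();
assert_eq!(xs, [1, 2, 2, 4, 5, 6]);
```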
-#[derive(Copy, Clone, Debug)] -#[stable(feature = "iterator_repeat_with", since = "1.28.0")] -pub struct RepeatWith { - repeater: F, -} - -#[stable(feature = "iterator_repeat_with", since = "1.28.0")] -impl A> Iterator for RepeatWith { - type Item = A; - - #[inline] - fn next(&mut self) -> Option { - Some((self.repeater)()) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - (usize::MAX, None) - } -} - -#[stable(feature = "iterator_repeat_with", since = "1.28.0")] -impl A> FusedIterator for RepeatWith {} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl A> TrustedLen for RepeatWith {} - -/// Creates a new iterator that repeats elements of type `A` endlessly by -/// applying the provided closure, the repeater, `F: FnMut() -> A`. -/// -/// The `repeat_with()` function calls the repeater over and over again. -/// -/// Infinite iterators like `repeat_with()` are often used with adapters like -/// [`Iterator::take()`], in order to make them finite. -/// -/// If the element type of the iterator you need implements [`Clone`], and -/// it is OK to keep the source element in memory, you should instead use -/// the [`repeat()`] function. -/// -/// An iterator produced by `repeat_with()` is not a [`DoubleEndedIterator`]. -/// If you need `repeat_with()` to return a [`DoubleEndedIterator`], -/// please open a GitHub issue explaining your use case. -/// -/// [`DoubleEndedIterator`]: crate::iter::DoubleEndedIterator -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use std::iter; -/// -/// // let's assume we have some value of a type that is not `Clone` -/// // or which don't want to have in memory just yet because it is expensive: -/// #[derive(PartialEq, Debug)] -/// struct Expensive; -/// -/// // a particular value forever: -/// let mut things = iter::repeat_with(|| Expensive); -/// -/// assert_eq!(Some(Expensive), things.next()); -/// assert_eq!(Some(Expensive), things.next()); -/// assert_eq!(Some(Expensive), things.next()); -/// assert_eq!(Some(Expensive), things.next()); -/// assert_eq!(Some(Expensive), things.next()); -/// ``` -/// -/// Using mutation and going finite: -/// -/// ```rust -/// use std::iter; -/// -/// // From the zeroth to the third power of two: -/// let mut curr = 1; -/// let mut pow2 = iter::repeat_with(|| { let tmp = curr; curr *= 2; tmp }) -/// .take(4); -/// -/// assert_eq!(Some(1), pow2.next()); -/// assert_eq!(Some(2), pow2.next()); -/// assert_eq!(Some(4), pow2.next()); -/// assert_eq!(Some(8), pow2.next()); -/// -/// // ... and now we're done -/// assert_eq!(None, pow2.next()); -/// ``` -#[inline] -#[stable(feature = "iterator_repeat_with", since = "1.28.0")] -pub fn repeat_with A>(repeater: F) -> RepeatWith { - RepeatWith { repeater } -} - -/// An iterator that yields nothing. -/// -/// This `struct` is created by the [`empty()`] function. See its documentation for more. 
#[stable(feature = "iter_empty", since = "1.2.0")] -pub struct Empty(marker::PhantomData); - -#[stable(feature = "iter_empty_send_sync", since = "1.42.0")] -unsafe impl Send for Empty {} -#[stable(feature = "iter_empty_send_sync", since = "1.42.0")] -unsafe impl Sync for Empty {} +pub use self::empty::{empty, Empty}; -#[stable(feature = "core_impl_debug", since = "1.9.0")] -impl fmt::Debug for Empty { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Empty") - } -} - -#[stable(feature = "iter_empty", since = "1.2.0")] -impl Iterator for Empty { - type Item = T; - - fn next(&mut self) -> Option { - None - } - - fn size_hint(&self) -> (usize, Option) { - (0, Some(0)) - } -} - -#[stable(feature = "iter_empty", since = "1.2.0")] -impl DoubleEndedIterator for Empty { - fn next_back(&mut self) -> Option { - None - } -} - -#[stable(feature = "iter_empty", since = "1.2.0")] -impl ExactSizeIterator for Empty { - fn len(&self) -> usize { - 0 - } -} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for Empty {} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Empty {} - -// not #[derive] because that adds a Clone bound on T, -// which isn't necessary. -#[stable(feature = "iter_empty", since = "1.2.0")] -impl Clone for Empty { - fn clone(&self) -> Empty { - Empty(marker::PhantomData) - } -} - -// not #[derive] because that adds a Default bound on T, -// which isn't necessary. -#[stable(feature = "iter_empty", since = "1.2.0")] -impl Default for Empty { - fn default() -> Empty { - Empty(marker::PhantomData) - } -} - -/// Creates an iterator that yields nothing. -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use std::iter; -/// -/// // this could have been an iterator over i32, but alas, it's just not. -/// let mut nope = iter::empty::(); -/// -/// assert_eq!(None, nope.next()); -/// ``` -#[stable(feature = "iter_empty", since = "1.2.0")] -#[rustc_const_stable(feature = "const_iter_empty", since = "1.32.0")] -pub const fn empty() -> Empty { - Empty(marker::PhantomData) -} - -/// An iterator that yields an element exactly once. -/// -/// This `struct` is created by the [`once()`] function. See its documentation for more. -#[derive(Clone, Debug)] #[stable(feature = "iter_once", since = "1.2.0")] -pub struct Once { - inner: crate::option::IntoIter, -} +pub use self::once::{once, Once}; -#[stable(feature = "iter_once", since = "1.2.0")] -impl Iterator for Once { - type Item = T; - - fn next(&mut self) -> Option { - self.inner.next() - } - - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} - -#[stable(feature = "iter_once", since = "1.2.0")] -impl DoubleEndedIterator for Once { - fn next_back(&mut self) -> Option { - self.inner.next_back() - } -} - -#[stable(feature = "iter_once", since = "1.2.0")] -impl ExactSizeIterator for Once { - fn len(&self) -> usize { - self.inner.len() - } -} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for Once {} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Once {} - -/// Creates an iterator that yields an element exactly once. -/// -/// This is commonly used to adapt a single value into a [`chain()`] of other -/// kinds of iteration. Maybe you have an iterator that covers almost -/// everything, but you need an extra special case. Maybe you have a function -/// which works on iterators, but you only need to process one value. 
-/// -/// [`chain()`]: Iterator::chain -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use std::iter; -/// -/// // one is the loneliest number -/// let mut one = iter::once(1); -/// -/// assert_eq!(Some(1), one.next()); -/// -/// // just one, that's all we get -/// assert_eq!(None, one.next()); -/// ``` -/// -/// Chaining together with another iterator. Let's say that we want to iterate -/// over each file of the `.foo` directory, but also a configuration file, -/// `.foorc`: -/// -/// ```no_run -/// use std::iter; -/// use std::fs; -/// use std::path::PathBuf; -/// -/// let dirs = fs::read_dir(".foo").unwrap(); -/// -/// // we need to convert from an iterator of DirEntry-s to an iterator of -/// // PathBufs, so we use map -/// let dirs = dirs.map(|file| file.unwrap().path()); -/// -/// // now, our iterator just for our config file -/// let config = iter::once(PathBuf::from(".foorc")); -/// -/// // chain the two iterators together into one big iterator -/// let files = dirs.chain(config); -/// -/// // this will give us all of the files in .foo as well as .foorc -/// for f in files { -/// println!("{:?}", f); -/// } -/// ``` -#[stable(feature = "iter_once", since = "1.2.0")] -pub fn once(value: T) -> Once { - Once { inner: Some(value).into_iter() } -} - -/// An iterator that yields a single element of type `A` by -/// applying the provided closure `F: FnOnce() -> A`. -/// -/// This `struct` is created by the [`once_with()`] function. -/// See its documentation for more. -#[derive(Clone, Debug)] -#[stable(feature = "iter_once_with", since = "1.43.0")] -pub struct OnceWith { - gen: Option, -} - -#[stable(feature = "iter_once_with", since = "1.43.0")] -impl A> Iterator for OnceWith { - type Item = A; - - #[inline] - fn next(&mut self) -> Option { - let f = self.gen.take()?; - Some(f()) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.gen.iter().size_hint() - } -} - -#[stable(feature = "iter_once_with", since = "1.43.0")] -impl A> DoubleEndedIterator for OnceWith { - fn next_back(&mut self) -> Option { - self.next() - } -} - -#[stable(feature = "iter_once_with", since = "1.43.0")] -impl A> ExactSizeIterator for OnceWith { - fn len(&self) -> usize { - self.gen.iter().len() - } -} - -#[stable(feature = "iter_once_with", since = "1.43.0")] -impl A> FusedIterator for OnceWith {} - -#[stable(feature = "iter_once_with", since = "1.43.0")] -unsafe impl A> TrustedLen for OnceWith {} - -/// Creates an iterator that lazily generates a value exactly once by invoking -/// the provided closure. -/// -/// This is commonly used to adapt a single value generator into a [`chain()`] of -/// other kinds of iteration. Maybe you have an iterator that covers almost -/// everything, but you need an extra special case. Maybe you have a function -/// which works on iterators, but you only need to process one value. -/// -/// Unlike [`once()`], this function will lazily generate the value on request. -/// -/// [`chain()`]: Iterator::chain -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use std::iter; -/// -/// // one is the loneliest number -/// let mut one = iter::once_with(|| 1); -/// -/// assert_eq!(Some(1), one.next()); -/// -/// // just one, that's all we get -/// assert_eq!(None, one.next()); -/// ``` -/// -/// Chaining together with another iterator. 
Let's say that we want to iterate -/// over each file of the `.foo` directory, but also a configuration file, -/// `.foorc`: -/// -/// ```no_run -/// use std::iter; -/// use std::fs; -/// use std::path::PathBuf; -/// -/// let dirs = fs::read_dir(".foo").unwrap(); -/// -/// // we need to convert from an iterator of DirEntry-s to an iterator of -/// // PathBufs, so we use map -/// let dirs = dirs.map(|file| file.unwrap().path()); -/// -/// // now, our iterator just for our config file -/// let config = iter::once_with(|| PathBuf::from(".foorc")); -/// -/// // chain the two iterators together into one big iterator -/// let files = dirs.chain(config); -/// -/// // this will give us all of the files in .foo as well as .foorc -/// for f in files { -/// println!("{:?}", f); -/// } -/// ``` -#[inline] -#[stable(feature = "iter_once_with", since = "1.43.0")] -pub fn once_with A>(gen: F) -> OnceWith { - OnceWith { gen: Some(gen) } -} - -/// Creates a new iterator where each iteration calls the provided closure -/// `F: FnMut() -> Option`. -/// -/// This allows creating a custom iterator with any behavior -/// without using the more verbose syntax of creating a dedicated type -/// and implementing the [`Iterator`] trait for it. -/// -/// Note that the `FromFn` iterator doesn’t make assumptions about the behavior of the closure, -/// and therefore conservatively does not implement [`FusedIterator`], -/// or override [`Iterator::size_hint()`] from its default `(0, None)`. -/// -/// The closure can use captures and its environment to track state across iterations. Depending on -/// how the iterator is used, this may require specifying the [`move`] keyword on the closure. -/// -/// [`move`]: ../../std/keyword.move.html -/// -/// # Examples -/// -/// Let’s re-implement the counter iterator from the [module-level documentation]: -/// -/// [module-level documentation]: super -/// -/// ``` -/// let mut count = 0; -/// let counter = std::iter::from_fn(move || { -/// // Increment our count. This is why we started at zero. -/// count += 1; -/// -/// // Check to see if we've finished counting or not. -/// if count < 6 { -/// Some(count) -/// } else { -/// None -/// } -/// }); -/// assert_eq!(counter.collect::>(), &[1, 2, 3, 4, 5]); -/// ``` -#[inline] -#[stable(feature = "iter_from_fn", since = "1.34.0")] -pub fn from_fn(f: F) -> FromFn -where - F: FnMut() -> Option, -{ - FromFn(f) -} - -/// An iterator where each iteration calls the provided closure `F: FnMut() -> Option`. -/// -/// This `struct` is created by the [`iter::from_fn()`] function. -/// See its documentation for more. -/// -/// [`iter::from_fn()`]: from_fn -#[derive(Clone)] -#[stable(feature = "iter_from_fn", since = "1.34.0")] -pub struct FromFn(F); - -#[stable(feature = "iter_from_fn", since = "1.34.0")] -impl Iterator for FromFn -where - F: FnMut() -> Option, -{ - type Item = T; - - #[inline] - fn next(&mut self) -> Option { - (self.0)() - } -} +#[stable(feature = "iterator_repeat_with", since = "1.28.0")] +pub use self::repeat_with::{repeat_with, RepeatWith}; #[stable(feature = "iter_from_fn", since = "1.34.0")] -impl fmt::Debug for FromFn { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("FromFn").finish() - } -} +pub use self::from_fn::{from_fn, FromFn}; -/// Creates a new iterator where each successive item is computed based on the preceding one. -/// -/// The iterator starts with the given first item (if any) -/// and calls the given `FnMut(&T) -> Option` closure to compute each item’s successor. 
-/// -/// ``` -/// use std::iter::successors; -/// -/// let powers_of_10 = successors(Some(1_u16), |n| n.checked_mul(10)); -/// assert_eq!(powers_of_10.collect::>(), &[1, 10, 100, 1_000, 10_000]); -/// ``` #[stable(feature = "iter_successors", since = "1.34.0")] -pub fn successors(first: Option, succ: F) -> Successors -where - F: FnMut(&T) -> Option, -{ - // If this function returned `impl Iterator` - // it could be based on `unfold` and not need a dedicated type. - // However having a named `Successors` type allows it to be `Clone` when `T` and `F` are. - Successors { next: first, succ } -} +pub use self::successors::{successors, Successors}; -/// An new iterator where each successive item is computed based on the preceding one. -/// -/// This `struct` is created by the [`iter::successors()`] function. -/// See its documentation for more. -/// -/// [`iter::successors()`]: successors -#[derive(Clone)] -#[stable(feature = "iter_successors", since = "1.34.0")] -pub struct Successors { - next: Option, - succ: F, -} - -#[stable(feature = "iter_successors", since = "1.34.0")] -impl Iterator for Successors -where - F: FnMut(&T) -> Option, -{ - type Item = T; - - #[inline] - fn next(&mut self) -> Option { - let item = self.next.take()?; - self.next = (self.succ)(&item); - Some(item) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - if self.next.is_some() { (1, None) } else { (0, Some(0)) } - } -} - -#[stable(feature = "iter_successors", since = "1.34.0")] -impl FusedIterator for Successors where F: FnMut(&T) -> Option {} - -#[stable(feature = "iter_successors", since = "1.34.0")] -impl fmt::Debug for Successors { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Successors").field("next", &self.next).finish() - } -} +#[stable(feature = "iter_once_with", since = "1.43.0")] +pub use self::once_with::{once_with, OnceWith}; diff --git a/library/core/src/iter/sources/empty.rs b/library/core/src/iter/sources/empty.rs new file mode 100644 index 00000000000..5d4a9fe8c6c --- /dev/null +++ b/library/core/src/iter/sources/empty.rs @@ -0,0 +1,92 @@ +use crate::fmt; +use crate::iter::{FusedIterator, TrustedLen}; +use crate::marker; + +/// Creates an iterator that yields nothing. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::iter; +/// +/// // this could have been an iterator over i32, but alas, it's just not. +/// let mut nope = iter::empty::(); +/// +/// assert_eq!(None, nope.next()); +/// ``` +#[stable(feature = "iter_empty", since = "1.2.0")] +#[rustc_const_stable(feature = "const_iter_empty", since = "1.32.0")] +pub const fn empty() -> Empty { + Empty(marker::PhantomData) +} + +/// An iterator that yields nothing. +/// +/// This `struct` is created by the [`empty()`] function. See its documentation for more. 
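As an aside on `empty()`: it is handy as the "no elements" arm when a function must return some iterator either way. A minimal sketch, assuming a hypothetical `digits` helper that is not part of this change:

```
use std::iter;

// Hypothetical helper: yield the decimal digits of `n` (least significant first),
// or nothing at all when no number is given.
fn digits(n: Option<u32>) -> Box<dyn Iterator<Item = u32>> {
    match n {
        Some(mut n) => Box::new(iter::from_fn(move || {
            if n == 0 {
                None
            } else {
                let d = n % 10;
                n /= 10;
                Some(d)
            }
        })),
        // `empty()` gives a zero-cost "nothing to yield" iterator for the other arm.
        None => Box::new(iter::empty::<u32>()),
    }
}

fn main() {
    assert_eq!(digits(Some(123)).collect::<Vec<_>>(), vec![3, 2, 1]);
    assert_eq!(digits(None).count(), 0);
}
```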
+#[stable(feature = "iter_empty", since = "1.2.0")] +pub struct Empty(marker::PhantomData); + +#[stable(feature = "iter_empty_send_sync", since = "1.42.0")] +unsafe impl Send for Empty {} +#[stable(feature = "iter_empty_send_sync", since = "1.42.0")] +unsafe impl Sync for Empty {} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for Empty { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("Empty") + } +} + +#[stable(feature = "iter_empty", since = "1.2.0")] +impl Iterator for Empty { + type Item = T; + + fn next(&mut self) -> Option { + None + } + + fn size_hint(&self) -> (usize, Option) { + (0, Some(0)) + } +} + +#[stable(feature = "iter_empty", since = "1.2.0")] +impl DoubleEndedIterator for Empty { + fn next_back(&mut self) -> Option { + None + } +} + +#[stable(feature = "iter_empty", since = "1.2.0")] +impl ExactSizeIterator for Empty { + fn len(&self) -> usize { + 0 + } +} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Empty {} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Empty {} + +// not #[derive] because that adds a Clone bound on T, +// which isn't necessary. +#[stable(feature = "iter_empty", since = "1.2.0")] +impl Clone for Empty { + fn clone(&self) -> Empty { + Empty(marker::PhantomData) + } +} + +// not #[derive] because that adds a Default bound on T, +// which isn't necessary. +#[stable(feature = "iter_empty", since = "1.2.0")] +impl Default for Empty { + fn default() -> Empty { + Empty(marker::PhantomData) + } +} diff --git a/library/core/src/iter/sources/from_fn.rs b/library/core/src/iter/sources/from_fn.rs new file mode 100644 index 00000000000..3cd3830471c --- /dev/null +++ b/library/core/src/iter/sources/from_fn.rs @@ -0,0 +1,78 @@ +use crate::fmt; + +/// Creates a new iterator where each iteration calls the provided closure +/// `F: FnMut() -> Option`. +/// +/// This allows creating a custom iterator with any behavior +/// without using the more verbose syntax of creating a dedicated type +/// and implementing the [`Iterator`] trait for it. +/// +/// Note that the `FromFn` iterator doesn’t make assumptions about the behavior of the closure, +/// and therefore conservatively does not implement [`FusedIterator`], +/// or override [`Iterator::size_hint()`] from its default `(0, None)`. +/// +/// The closure can use captures and its environment to track state across iterations. Depending on +/// how the iterator is used, this may require specifying the [`move`] keyword on the closure. +/// +/// [`move`]: ../../std/keyword.move.html +/// [`FusedIterator`]: crate::iter::FusedIterator +/// +/// # Examples +/// +/// Let’s re-implement the counter iterator from [module-level documentation]: +/// +/// [module-level documentation]: crate::iter +/// +/// ``` +/// let mut count = 0; +/// let counter = std::iter::from_fn(move || { +/// // Increment our count. This is why we started at zero. +/// count += 1; +/// +/// // Check to see if we've finished counting or not. +/// if count < 6 { +/// Some(count) +/// } else { +/// None +/// } +/// }); +/// assert_eq!(counter.collect::>(), &[1, 2, 3, 4, 5]); +/// ``` +#[inline] +#[stable(feature = "iter_from_fn", since = "1.34.0")] +pub fn from_fn(f: F) -> FromFn +where + F: FnMut() -> Option, +{ + FromFn(f) +} + +/// An iterator where each iteration calls the provided closure `F: FnMut() -> Option`. +/// +/// This `struct` is created by the [`iter::from_fn()`] function. +/// See its documentation for more. 
+/// +/// [`iter::from_fn()`]: from_fn +#[derive(Clone)] +#[stable(feature = "iter_from_fn", since = "1.34.0")] +pub struct FromFn(F); + +#[stable(feature = "iter_from_fn", since = "1.34.0")] +impl Iterator for FromFn +where + F: FnMut() -> Option, +{ + type Item = T; + + #[inline] + fn next(&mut self) -> Option { + (self.0)() + } +} + +#[stable(feature = "iter_from_fn", since = "1.34.0")] +impl fmt::Debug for FromFn { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("FromFn").finish() + } +} diff --git a/library/core/src/iter/sources/once.rs b/library/core/src/iter/sources/once.rs new file mode 100644 index 00000000000..27bc3dcfd79 --- /dev/null +++ b/library/core/src/iter/sources/once.rs @@ -0,0 +1,99 @@ +use crate::iter::{FusedIterator, TrustedLen}; + +/// Creates an iterator that yields an element exactly once. +/// +/// This is commonly used to adapt a single value into a [`chain()`] of other +/// kinds of iteration. Maybe you have an iterator that covers almost +/// everything, but you need an extra special case. Maybe you have a function +/// which works on iterators, but you only need to process one value. +/// +/// [`chain()`]: Iterator::chain +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::iter; +/// +/// // one is the loneliest number +/// let mut one = iter::once(1); +/// +/// assert_eq!(Some(1), one.next()); +/// +/// // just one, that's all we get +/// assert_eq!(None, one.next()); +/// ``` +/// +/// Chaining together with another iterator. Let's say that we want to iterate +/// over each file of the `.foo` directory, but also a configuration file, +/// `.foorc`: +/// +/// ```no_run +/// use std::iter; +/// use std::fs; +/// use std::path::PathBuf; +/// +/// let dirs = fs::read_dir(".foo").unwrap(); +/// +/// // we need to convert from an iterator of DirEntry-s to an iterator of +/// // PathBufs, so we use map +/// let dirs = dirs.map(|file| file.unwrap().path()); +/// +/// // now, our iterator just for our config file +/// let config = iter::once(PathBuf::from(".foorc")); +/// +/// // chain the two iterators together into one big iterator +/// let files = dirs.chain(config); +/// +/// // this will give us all of the files in .foo as well as .foorc +/// for f in files { +/// println!("{:?}", f); +/// } +/// ``` +#[stable(feature = "iter_once", since = "1.2.0")] +pub fn once(value: T) -> Once { + Once { inner: Some(value).into_iter() } +} + +/// An iterator that yields an element exactly once. +/// +/// This `struct` is created by the [`once()`] function. See its documentation for more. 
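A minimal sketch of the equivalence the implementation relies on: `Once` is a thin wrapper around `Option`'s `IntoIter`, so it behaves exactly like iterating `Some(value)` and additionally reports an exact length.

```
use std::iter;

fn main() {
    let mut a = iter::once(10);
    let mut b = Some(10).into_iter();

    // Both yield the value once, then are exhausted.
    assert_eq!(a.next(), b.next()); // Some(10)
    assert_eq!(a.next(), b.next()); // None

    // `Once` is an `ExactSizeIterator` with length 1.
    assert_eq!(iter::once(10).len(), 1);
}
```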
+#[derive(Clone, Debug)] +#[stable(feature = "iter_once", since = "1.2.0")] +pub struct Once { + inner: crate::option::IntoIter, +} + +#[stable(feature = "iter_once", since = "1.2.0")] +impl Iterator for Once { + type Item = T; + + fn next(&mut self) -> Option { + self.inner.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +#[stable(feature = "iter_once", since = "1.2.0")] +impl DoubleEndedIterator for Once { + fn next_back(&mut self) -> Option { + self.inner.next_back() + } +} + +#[stable(feature = "iter_once", since = "1.2.0")] +impl ExactSizeIterator for Once { + fn len(&self) -> usize { + self.inner.len() + } +} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Once {} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Once {} diff --git a/library/core/src/iter/sources/once_with.rs b/library/core/src/iter/sources/once_with.rs new file mode 100644 index 00000000000..cf6a3c11524 --- /dev/null +++ b/library/core/src/iter/sources/once_with.rs @@ -0,0 +1,109 @@ +use crate::iter::{FusedIterator, TrustedLen}; + +/// Creates an iterator that lazily generates a value exactly once by invoking +/// the provided closure. +/// +/// This is commonly used to adapt a single value generator into a [`chain()`] of +/// other kinds of iteration. Maybe you have an iterator that covers almost +/// everything, but you need an extra special case. Maybe you have a function +/// which works on iterators, but you only need to process one value. +/// +/// Unlike [`once()`], this function will lazily generate the value on request. +/// +/// [`chain()`]: Iterator::chain +/// [`once()`]: crate::iter::once +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::iter; +/// +/// // one is the loneliest number +/// let mut one = iter::once_with(|| 1); +/// +/// assert_eq!(Some(1), one.next()); +/// +/// // just one, that's all we get +/// assert_eq!(None, one.next()); +/// ``` +/// +/// Chaining together with another iterator. Let's say that we want to iterate +/// over each file of the `.foo` directory, but also a configuration file, +/// `.foorc`: +/// +/// ```no_run +/// use std::iter; +/// use std::fs; +/// use std::path::PathBuf; +/// +/// let dirs = fs::read_dir(".foo").unwrap(); +/// +/// // we need to convert from an iterator of DirEntry-s to an iterator of +/// // PathBufs, so we use map +/// let dirs = dirs.map(|file| file.unwrap().path()); +/// +/// // now, our iterator just for our config file +/// let config = iter::once_with(|| PathBuf::from(".foorc")); +/// +/// // chain the two iterators together into one big iterator +/// let files = dirs.chain(config); +/// +/// // this will give us all of the files in .foo as well as .foorc +/// for f in files { +/// println!("{:?}", f); +/// } +/// ``` +#[inline] +#[stable(feature = "iter_once_with", since = "1.43.0")] +pub fn once_with A>(gen: F) -> OnceWith { + OnceWith { gen: Some(gen) } +} + +/// An iterator that yields a single element of type `A` by +/// applying the provided closure `F: FnOnce() -> A`. +/// +/// This `struct` is created by the [`once_with()`] function. +/// See its documentation for more. 
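To make the laziness of `once_with()` concrete, a small sketch: the closure only runs when the element is actually requested, so dropping the iterator unused has no side effects.

```
use std::iter;

fn main() {
    let mut calls = 0;

    {
        // Dropped without ever calling `next()`: the closure never runs.
        let _lazy = iter::once_with(|| {
            calls += 1;
            "expensive value"
        });
    }
    assert_eq!(calls, 0);

    let mut it = iter::once_with(|| {
        calls += 1;
        "expensive value"
    });
    assert_eq!(it.next(), Some("expensive value"));
    assert_eq!(it.next(), None);
    assert_eq!(calls, 1);
}
```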
+#[derive(Clone, Debug)] +#[stable(feature = "iter_once_with", since = "1.43.0")] +pub struct OnceWith { + gen: Option, +} + +#[stable(feature = "iter_once_with", since = "1.43.0")] +impl A> Iterator for OnceWith { + type Item = A; + + #[inline] + fn next(&mut self) -> Option { + let f = self.gen.take()?; + Some(f()) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.gen.iter().size_hint() + } +} + +#[stable(feature = "iter_once_with", since = "1.43.0")] +impl A> DoubleEndedIterator for OnceWith { + fn next_back(&mut self) -> Option { + self.next() + } +} + +#[stable(feature = "iter_once_with", since = "1.43.0")] +impl A> ExactSizeIterator for OnceWith { + fn len(&self) -> usize { + self.gen.iter().len() + } +} + +#[stable(feature = "iter_once_with", since = "1.43.0")] +impl A> FusedIterator for OnceWith {} + +#[stable(feature = "iter_once_with", since = "1.43.0")] +unsafe impl A> TrustedLen for OnceWith {} diff --git a/library/core/src/iter/sources/repeat.rs b/library/core/src/iter/sources/repeat.rs new file mode 100644 index 00000000000..d1f2879235f --- /dev/null +++ b/library/core/src/iter/sources/repeat.rs @@ -0,0 +1,93 @@ +use crate::iter::{FusedIterator, TrustedLen}; + +/// Creates a new iterator that endlessly repeats a single element. +/// +/// The `repeat()` function repeats a single value over and over again. +/// +/// Infinite iterators like `repeat()` are often used with adapters like +/// [`Iterator::take()`], in order to make them finite. +/// +/// If the element type of the iterator you need does not implement `Clone`, +/// or if you do not want to keep the repeated element in memory, you can +/// instead use the [`repeat_with()`] function. +/// +/// [`repeat_with()`]: crate::iter::repeat_with +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::iter; +/// +/// // the number four 4ever: +/// let mut fours = iter::repeat(4); +/// +/// assert_eq!(Some(4), fours.next()); +/// assert_eq!(Some(4), fours.next()); +/// assert_eq!(Some(4), fours.next()); +/// assert_eq!(Some(4), fours.next()); +/// assert_eq!(Some(4), fours.next()); +/// +/// // yup, still four +/// assert_eq!(Some(4), fours.next()); +/// ``` +/// +/// Going finite with [`Iterator::take()`]: +/// +/// ``` +/// use std::iter; +/// +/// // that last example was too many fours. Let's only have four fours. +/// let mut four_fours = iter::repeat(4).take(4); +/// +/// assert_eq!(Some(4), four_fours.next()); +/// assert_eq!(Some(4), four_fours.next()); +/// assert_eq!(Some(4), four_fours.next()); +/// assert_eq!(Some(4), four_fours.next()); +/// +/// // ... and now we're done +/// assert_eq!(None, four_fours.next()); +/// ``` +#[inline] +#[stable(feature = "rust1", since = "1.0.0")] +pub fn repeat(elt: T) -> Repeat { + Repeat { element: elt } +} + +/// An iterator that repeats an element endlessly. +/// +/// This `struct` is created by the [`repeat()`] function. See its documentation for more. 
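Besides `take()`, another common way to bound `repeat()` is to `zip` it against a finite iterator; the shorter side ends the iteration. A small illustrative sketch:

```
use std::iter;

fn main() {
    // Pair every key with the same starting score; `zip` stops when `keys` does,
    // so the infinite `repeat` never runs away.
    let keys = ["a", "b", "c"];
    let scored: Vec<(&str, u32)> = keys.iter().copied().zip(iter::repeat(0)).collect();
    assert_eq!(scored, vec![("a", 0), ("b", 0), ("c", 0)]);
}
```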
+#[derive(Clone, Debug)] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Repeat { + element: A, +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Repeat { + type Item = A; + + #[inline] + fn next(&mut self) -> Option { + Some(self.element.clone()) + } + #[inline] + fn size_hint(&self) -> (usize, Option) { + (usize::MAX, None) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Repeat { + #[inline] + fn next_back(&mut self) -> Option { + Some(self.element.clone()) + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Repeat {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Repeat {} diff --git a/library/core/src/iter/sources/repeat_with.rs b/library/core/src/iter/sources/repeat_with.rs new file mode 100644 index 00000000000..44bc6890c55 --- /dev/null +++ b/library/core/src/iter/sources/repeat_with.rs @@ -0,0 +1,98 @@ +use crate::iter::{FusedIterator, TrustedLen}; + +/// Creates a new iterator that repeats elements of type `A` endlessly by +/// applying the provided closure, the repeater, `F: FnMut() -> A`. +/// +/// The `repeat_with()` function calls the repeater over and over again. +/// +/// Infinite iterators like `repeat_with()` are often used with adapters like +/// [`Iterator::take()`], in order to make them finite. +/// +/// If the element type of the iterator you need implements [`Clone`], and +/// it is OK to keep the source element in memory, you should instead use +/// the [`repeat()`] function. +/// +/// An iterator produced by `repeat_with()` is not a [`DoubleEndedIterator`]. +/// If you need `repeat_with()` to return a [`DoubleEndedIterator`], +/// please open a GitHub issue explaining your use case. +/// +/// [`repeat()`]: crate::iter::repeat +/// [`DoubleEndedIterator`]: crate::iter::DoubleEndedIterator +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::iter; +/// +/// // let's assume we have some value of a type that is not `Clone` +/// // or which don't want to have in memory just yet because it is expensive: +/// #[derive(PartialEq, Debug)] +/// struct Expensive; +/// +/// // a particular value forever: +/// let mut things = iter::repeat_with(|| Expensive); +/// +/// assert_eq!(Some(Expensive), things.next()); +/// assert_eq!(Some(Expensive), things.next()); +/// assert_eq!(Some(Expensive), things.next()); +/// assert_eq!(Some(Expensive), things.next()); +/// assert_eq!(Some(Expensive), things.next()); +/// ``` +/// +/// Using mutation and going finite: +/// +/// ```rust +/// use std::iter; +/// +/// // From the zeroth to the third power of two: +/// let mut curr = 1; +/// let mut pow2 = iter::repeat_with(|| { let tmp = curr; curr *= 2; tmp }) +/// .take(4); +/// +/// assert_eq!(Some(1), pow2.next()); +/// assert_eq!(Some(2), pow2.next()); +/// assert_eq!(Some(4), pow2.next()); +/// assert_eq!(Some(8), pow2.next()); +/// +/// // ... and now we're done +/// assert_eq!(None, pow2.next()); +/// ``` +#[inline] +#[stable(feature = "iterator_repeat_with", since = "1.28.0")] +pub fn repeat_with A>(repeater: F) -> RepeatWith { + RepeatWith { repeater } +} + +/// An iterator that repeats elements of type `A` endlessly by +/// applying the provided closure `F: FnMut() -> A`. +/// +/// This `struct` is created by the [`repeat_with()`] function. +/// See its documentation for more. 
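A small sketch of the non-`Clone` use case `repeat_with()` is meant for: every call to the repeater produces a fresh value, so nothing has to be kept in memory or cloned between iterations.

```
use std::iter;

fn main() {
    // Three brand-new buffers, allocated on demand rather than cloned from a prototype.
    let buffers: Vec<Vec<u8>> = iter::repeat_with(|| Vec::with_capacity(1024)).take(3).collect();

    assert_eq!(buffers.len(), 3);
    assert!(buffers.iter().all(|b| b.capacity() >= 1024));
}
```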
+#[derive(Copy, Clone, Debug)] +#[stable(feature = "iterator_repeat_with", since = "1.28.0")] +pub struct RepeatWith { + repeater: F, +} + +#[stable(feature = "iterator_repeat_with", since = "1.28.0")] +impl A> Iterator for RepeatWith { + type Item = A; + + #[inline] + fn next(&mut self) -> Option { + Some((self.repeater)()) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + (usize::MAX, None) + } +} + +#[stable(feature = "iterator_repeat_with", since = "1.28.0")] +impl A> FusedIterator for RepeatWith {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl A> TrustedLen for RepeatWith {} diff --git a/library/core/src/iter/sources/successors.rs b/library/core/src/iter/sources/successors.rs new file mode 100644 index 00000000000..99f058a901a --- /dev/null +++ b/library/core/src/iter/sources/successors.rs @@ -0,0 +1,66 @@ +use crate::{fmt, iter::FusedIterator}; + +/// Creates a new iterator where each successive item is computed based on the preceding one. +/// +/// The iterator starts with the given first item (if any) +/// and calls the given `FnMut(&T) -> Option` closure to compute each item’s successor. +/// +/// ``` +/// use std::iter::successors; +/// +/// let powers_of_10 = successors(Some(1_u16), |n| n.checked_mul(10)); +/// assert_eq!(powers_of_10.collect::>(), &[1, 10, 100, 1_000, 10_000]); +/// ``` +#[stable(feature = "iter_successors", since = "1.34.0")] +pub fn successors(first: Option, succ: F) -> Successors +where + F: FnMut(&T) -> Option, +{ + // If this function returned `impl Iterator` + // it could be based on `unfold` and not need a dedicated type. + // However having a named `Successors` type allows it to be `Clone` when `T` and `F` are. + Successors { next: first, succ } +} + +/// An new iterator where each successive item is computed based on the preceding one. +/// +/// This `struct` is created by the [`iter::successors()`] function. +/// See its documentation for more. 
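For a slightly richer example than the powers of ten above, `successors()` also expresses sequences whose end depends on the values themselves, such as a Collatz trajectory; a short sketch:

```
use std::iter::successors;

fn main() {
    // Walk the Collatz sequence starting at 6 and stop once it reaches 1.
    let collatz: Vec<u64> = successors(Some(6_u64), |&n| {
        if n == 1 {
            None
        } else if n % 2 == 0 {
            Some(n / 2)
        } else {
            Some(3 * n + 1)
        }
    })
    .collect();

    assert_eq!(collatz, vec![6, 3, 10, 5, 16, 8, 4, 2, 1]);
}
```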
+/// +/// [`iter::successors()`]: successors +#[derive(Clone)] +#[stable(feature = "iter_successors", since = "1.34.0")] +pub struct Successors { + next: Option, + succ: F, +} + +#[stable(feature = "iter_successors", since = "1.34.0")] +impl Iterator for Successors +where + F: FnMut(&T) -> Option, +{ + type Item = T; + + #[inline] + fn next(&mut self) -> Option { + let item = self.next.take()?; + self.next = (self.succ)(&item); + Some(item) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + if self.next.is_some() { (1, None) } else { (0, Some(0)) } + } +} + +#[stable(feature = "iter_successors", since = "1.34.0")] +impl FusedIterator for Successors where F: FnMut(&T) -> Option {} + +#[stable(feature = "iter_successors", since = "1.34.0")] +impl fmt::Debug for Successors { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Successors").field("next", &self.next).finish() + } +} diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs index 41202546566..0f91ff418e3 100644 --- a/library/core/src/lib.rs +++ b/library/core/src/lib.rs @@ -63,7 +63,7 @@ #![warn(missing_debug_implementations)] #![allow(explicit_outlives_requirements)] #![allow(incomplete_features)] -#![cfg_attr(not(bootstrap), feature(rustc_allow_const_fn_unstable))] +#![feature(rustc_allow_const_fn_unstable)] #![feature(allow_internal_unstable)] #![feature(arbitrary_self_types)] #![feature(asm)] @@ -80,11 +80,12 @@ #![feature(const_mut_refs)] #![feature(const_int_pow)] #![feature(constctlz)] +#![feature(const_cttz)] #![feature(const_panic)] #![feature(const_pin)] #![feature(const_fn)] #![feature(const_fn_union)] -#![cfg_attr(not(bootstrap), feature(const_impl_trait))] +#![feature(const_impl_trait)] #![feature(const_fn_floating_point_arithmetic)] #![feature(const_fn_fn_ptr_basics)] #![feature(const_generics)] @@ -133,9 +134,7 @@ #![feature(transparent_unions)] #![feature(try_blocks)] #![feature(unboxed_closures)] -#![cfg_attr(not(bootstrap), feature(unsized_fn_params))] -#![cfg_attr(bootstrap, feature(unsized_locals))] -#![cfg_attr(bootstrap, feature(untagged_unions))] +#![feature(unsized_fn_params)] #![feature(unwind_attributes)] #![feature(variant_count)] #![feature(tbm_target_feature)] @@ -289,7 +288,7 @@ pub mod primitive; unused_imports, unsafe_op_in_unsafe_fn )] -#[cfg_attr(not(bootstrap), allow(non_autolinks))] +#[allow(non_autolinks)] // FIXME: This annotation should be moved into rust-lang/stdarch after clashing_extern_declarations is // merged. It currently cannot because bootstrap fails as the lint hasn't been defined yet. #[allow(clashing_extern_declarations)] diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs index 079d9f6006a..0416a7614a3 100644 --- a/library/core/src/macros/mod.rs +++ b/library/core/src/macros/mod.rs @@ -2,6 +2,7 @@ #[macro_export] #[allow_internal_unstable(core_panic, const_caller_location)] #[stable(feature = "core", since = "1.6.0")] +#[cfg_attr(not(bootstrap), rustc_diagnostic_item = "core_panic_macro")] macro_rules! panic { () => ( $crate::panic!("explicit panic") @@ -162,6 +163,7 @@ macro_rules! assert_ne { /// ``` #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] +#[cfg_attr(not(bootstrap), rustc_diagnostic_item = "debug_assert_macro")] macro_rules! debug_assert { ($($arg:tt)*) => (if $crate::cfg!(debug_assertions) { $crate::assert!($($arg)*); }) } @@ -318,7 +320,7 @@ macro_rules! r#try { /// Writes formatted data into a buffer. /// -/// This macro accepts a format string, a list of arguments, and a 'writer'. 
Arguments will be +/// This macro accepts a 'writer', a format string, and a list of arguments. Arguments will be /// formatted according to the specified format string and the result will be passed to the writer. /// The writer may be any value with a `write_fmt` method; generally this comes from an /// implementation of either the [`fmt::Write`] or the [`io::Write`] trait. The macro @@ -1215,6 +1217,8 @@ pub(crate) mod builtin { #[stable(feature = "rust1", since = "1.0.0")] #[rustc_builtin_macro] #[macro_export] + #[cfg_attr(not(bootstrap), rustc_diagnostic_item = "assert_macro")] + #[allow_internal_unstable(core_panic)] macro_rules! assert { ($cond:expr $(,)?) => {{ /* compiler built-in */ }}; ($cond:expr, $($arg:tt)+) => {{ /* compiler built-in */ }}; diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs index cdf742057b7..85e0e720087 100644 --- a/library/core/src/marker.rs +++ b/library/core/src/marker.rs @@ -156,18 +156,18 @@ pub trait StructuralPartialEq { /// Required trait for constants used in pattern matches. /// /// Any type that derives `Eq` automatically implements this trait, *regardless* -/// of whether its type-parameters implement `Eq`. +/// of whether its type parameters implement `Eq`. /// -/// This is a hack to workaround a limitation in our type-system. +/// This is a hack to work around a limitation in our type system. /// -/// Background: +/// # Background /// /// We want to require that types of consts used in pattern matches /// have the attribute `#[derive(PartialEq, Eq)]`. /// /// In a more ideal world, we could check that requirement by just checking that -/// the given type implements both (1.) the `StructuralPartialEq` trait *and* -/// (2.) the `Eq` trait. However, you can have ADTs that *do* `derive(PartialEq, Eq)`, +/// the given type implements both the `StructuralPartialEq` trait *and* +/// the `Eq` trait. However, you can have ADTs that *do* `derive(PartialEq, Eq)`, /// and be a case that we want the compiler to accept, and yet the constant's /// type fails to implement `Eq`. /// @@ -176,8 +176,11 @@ pub trait StructuralPartialEq { /// ```rust /// #[derive(PartialEq, Eq)] /// struct Wrap(X); +/// /// fn higher_order(_: &()) { } +/// /// const CFN: Wrap = Wrap(higher_order); +/// /// fn main() { /// match CFN { /// CFN => {} @@ -772,7 +775,7 @@ pub auto trait Unpin {} /// /// If a type contains a `PhantomPinned`, it will not implement `Unpin` by default. #[stable(feature = "pin", since = "1.33.0")] -#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] pub struct PhantomPinned; #[stable(feature = "pin", since = "1.33.0")] diff --git a/library/core/src/mem/maybe_uninit.rs b/library/core/src/mem/maybe_uninit.rs index 660b7db70be..94ac16954a7 100644 --- a/library/core/src/mem/maybe_uninit.rs +++ b/library/core/src/mem/maybe_uninit.rs @@ -392,7 +392,7 @@ impl MaybeUninit { /// use std::mem::MaybeUninit; /// /// let mut x = MaybeUninit::>::uninit(); - /// unsafe { x.as_mut_ptr().write(vec![0,1,2]); } + /// unsafe { x.as_mut_ptr().write(vec![0, 1, 2]); } /// // Create a reference into the `MaybeUninit`. This is okay because we initialized it. 
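To illustrate the corrected argument order in the `write!` documentation above (writer first, then the format string and its arguments), a minimal sketch:

```
use std::fmt::Write;

fn main() {
    // The writer comes first; here it is a `String`, which implements `fmt::Write`.
    let mut out = String::new();
    write!(out, "{}-{}", 1, 2).unwrap();
    assert_eq!(out, "1-2");
}
```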
/// let x_vec = unsafe { &*x.as_ptr() }; /// assert_eq!(x_vec.len(), 3); @@ -429,7 +429,7 @@ impl MaybeUninit { /// use std::mem::MaybeUninit; /// /// let mut x = MaybeUninit::>::uninit(); - /// unsafe { x.as_mut_ptr().write(vec![0,1,2]); } + /// unsafe { x.as_mut_ptr().write(vec![0, 1, 2]); } /// // Create a reference into the `MaybeUninit>`. /// // This is okay because we initialized it. /// let x_vec = unsafe { &mut *x.as_mut_ptr() }; @@ -565,7 +565,7 @@ impl MaybeUninit { /// use std::mem::MaybeUninit; /// /// let mut x = MaybeUninit::>>::uninit(); - /// x.write(Some(vec![0,1,2])); + /// x.write(Some(vec![0, 1, 2])); /// let x1 = unsafe { x.assume_init_read() }; /// let x2 = unsafe { x.assume_init_read() }; /// // We now created two copies of the same vector, leading to a double-free ⚠️ when diff --git a/library/core/src/num/f32.rs b/library/core/src/num/f32.rs index 86e6352d132..33df175bfc5 100644 --- a/library/core/src/num/f32.rs +++ b/library/core/src/num/f32.rs @@ -441,6 +441,32 @@ impl f32 { self.abs_private() < Self::INFINITY } + /// Returns `true` if the number is [subnormal]. + /// + /// ``` + /// #![feature(is_subnormal)] + /// let min = f32::MIN_POSITIVE; // 1.17549435e-38f32 + /// let max = f32::MAX; + /// let lower_than_min = 1.0e-40_f32; + /// let zero = 0.0_f32; + /// + /// assert!(!min.is_subnormal()); + /// assert!(!max.is_subnormal()); + /// + /// assert!(!zero.is_subnormal()); + /// assert!(!f32::NAN.is_subnormal()); + /// assert!(!f32::INFINITY.is_subnormal()); + /// // Values between `0` and `min` are Subnormal. + /// assert!(lower_than_min.is_subnormal()); + /// ``` + /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number + #[unstable(feature = "is_subnormal", issue = "79288")] + #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")] + #[inline] + pub const fn is_subnormal(self) -> bool { + matches!(self.classify(), FpCategory::Subnormal) + } + /// Returns `true` if the number is neither zero, infinite, /// [subnormal], or `NaN`. /// diff --git a/library/core/src/num/f64.rs b/library/core/src/num/f64.rs index 9b1405b479f..b85e8deb6d2 100644 --- a/library/core/src/num/f64.rs +++ b/library/core/src/num/f64.rs @@ -440,6 +440,32 @@ impl f64 { self.abs_private() < Self::INFINITY } + /// Returns `true` if the number is [subnormal]. + /// + /// ``` + /// #![feature(is_subnormal)] + /// let min = f64::MIN_POSITIVE; // 2.2250738585072014e-308_f64 + /// let max = f64::MAX; + /// let lower_than_min = 1.0e-308_f64; + /// let zero = 0.0_f64; + /// + /// assert!(!min.is_subnormal()); + /// assert!(!max.is_subnormal()); + /// + /// assert!(!zero.is_subnormal()); + /// assert!(!f64::NAN.is_subnormal()); + /// assert!(!f64::INFINITY.is_subnormal()); + /// // Values between `0` and `min` are Subnormal. + /// assert!(lower_than_min.is_subnormal()); + /// ``` + /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number + #[unstable(feature = "is_subnormal", issue = "79288")] + #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")] + #[inline] + pub const fn is_subnormal(self) -> bool { + matches!(self.classify(), FpCategory::Subnormal) + } + /// Returns `true` if the number is neither zero, infinite, /// [subnormal], or `NaN`. 
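The new `is_subnormal()` checks amount to asking whether a value is finite, non-zero, and smaller in magnitude than the smallest positive normal value. A sketch of that equivalence, using a hypothetical free function rather than the new method:

```
fn is_subnormal_f32(x: f32) -> bool {
    // Finite, non-zero, and strictly below the smallest normal positive magnitude.
    x != 0.0 && x.is_finite() && x.abs() < f32::MIN_POSITIVE
}

fn main() {
    assert!(is_subnormal_f32(1.0e-40_f32));
    assert!(!is_subnormal_f32(0.0));
    assert!(!is_subnormal_f32(f32::MIN_POSITIVE));
    assert!(!is_subnormal_f32(f32::NAN));
    assert!(!is_subnormal_f32(f32::INFINITY));
}
```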
/// diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs index 728381b658f..289f14a360a 100644 --- a/library/core/src/num/int_macros.rs +++ b/library/core/src/num/int_macros.rs @@ -2047,8 +2047,7 @@ assert_eq!( #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")] // SAFETY: const sound because integers are plain old datatypes so we can always // transmute them to arrays of bytes - #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_transmute))] - #[cfg_attr(bootstrap, allow_internal_unstable(const_fn_transmute))] + #[rustc_allow_const_fn_unstable(const_fn_transmute)] #[inline] pub const fn to_ne_bytes(self) -> [u8; mem::size_of::()] { // SAFETY: integers are plain old datatypes so we can always transmute them to @@ -2196,8 +2195,7 @@ fn read_ne_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")] // SAFETY: const sound because integers are plain old datatypes so we can always // transmute to them - #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_transmute))] - #[cfg_attr(bootstrap, allow_internal_unstable(const_fn_transmute))] + #[rustc_allow_const_fn_unstable(const_fn_transmute)] #[inline] pub const fn from_ne_bytes(bytes: [u8; mem::size_of::()]) -> Self { // SAFETY: integers are plain old datatypes so we can always transmute to them diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs index 5a9fd902c9c..716b4a90e5e 100644 --- a/library/core/src/num/nonzero.rs +++ b/library/core/src/num/nonzero.rs @@ -6,6 +6,7 @@ use crate::str::FromStr; use super::from_str_radix; use super::{IntErrorKind, ParseIntError}; +use crate::intrinsics; macro_rules! doc_comment { ($x:expr, $($tt:tt)*) => { @@ -189,3 +190,76 @@ macro_rules! from_str_radix_nzint_impl { from_str_radix_nzint_impl! { NonZeroU8 NonZeroU16 NonZeroU32 NonZeroU64 NonZeroU128 NonZeroUsize NonZeroI8 NonZeroI16 NonZeroI32 NonZeroI64 NonZeroI128 NonZeroIsize } + +macro_rules! nonzero_leading_trailing_zeros { + ( $( $Ty: ident($Uint: ty) , $LeadingTestExpr:expr ;)+ ) => { + $( + impl $Ty { + doc_comment! { + concat!("Returns the number of leading zeros in the binary representation of `self`. + +On many architectures, this function can perform better than `leading_zeros()` on the underlying integer type, as special handling of zero can be avoided. + +# Examples + +Basic usage: + +``` +#![feature(nonzero_leading_trailing_zeros)] +let n = std::num::", stringify!($Ty), "::new(", stringify!($LeadingTestExpr), ").unwrap(); + +assert_eq!(n.leading_zeros(), 0); +```"), + #[unstable(feature = "nonzero_leading_trailing_zeros", issue = "79143")] + #[rustc_const_unstable(feature = "nonzero_leading_trailing_zeros", issue = "79143")] + #[inline] + pub const fn leading_zeros(self) -> u32 { + // SAFETY: since `self` can not be zero it is safe to call ctlz_nonzero + unsafe { intrinsics::ctlz_nonzero(self.0 as $Uint) as u32 } + } + } + + doc_comment! { + concat!("Returns the number of trailing zeros in the binary representation +of `self`. + +On many architectures, this function can perform better than `trailing_zeros()` on the underlying integer type, as special handling of zero can be avoided. 
+ +# Examples + +Basic usage: + +``` +#![feature(nonzero_leading_trailing_zeros)] +let n = std::num::", stringify!($Ty), "::new(0b0101000).unwrap(); + +assert_eq!(n.trailing_zeros(), 3); +```"), + #[unstable(feature = "nonzero_leading_trailing_zeros", issue = "79143")] + #[rustc_const_unstable(feature = "nonzero_leading_trailing_zeros", issue = "79143")] + #[inline] + pub const fn trailing_zeros(self) -> u32 { + // SAFETY: since `self` can not be zero it is safe to call cttz_nonzero + unsafe { intrinsics::cttz_nonzero(self.0 as $Uint) as u32 } + } + } + + } + )+ + } +} + +nonzero_leading_trailing_zeros! { + NonZeroU8(u8), u8::MAX; + NonZeroU16(u16), u16::MAX; + NonZeroU32(u32), u32::MAX; + NonZeroU64(u64), u64::MAX; + NonZeroU128(u128), u128::MAX; + NonZeroUsize(usize), usize::MAX; + NonZeroI8(u8), -1i8; + NonZeroI16(u16), -1i16; + NonZeroI32(u32), -1i32; + NonZeroI64(u64), -1i64; + NonZeroI128(u128), -1i128; + NonZeroIsize(usize), -1isize; +} diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs index adcbbf91433..dbdc9c0fb5f 100644 --- a/library/core/src/num/uint_macros.rs +++ b/library/core/src/num/uint_macros.rs @@ -1805,8 +1805,7 @@ assert_eq!( #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")] // SAFETY: const sound because integers are plain old datatypes so we can always // transmute them to arrays of bytes - #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_transmute))] - #[cfg_attr(bootstrap, allow_internal_unstable(const_fn_transmute))] + #[rustc_allow_const_fn_unstable(const_fn_transmute)] #[inline] pub const fn to_ne_bytes(self) -> [u8; mem::size_of::()] { // SAFETY: integers are plain old datatypes so we can always transmute them to @@ -1954,8 +1953,7 @@ fn read_ne_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")] // SAFETY: const sound because integers are plain old datatypes so we can always // transmute to them - #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_transmute))] - #[cfg_attr(bootstrap, allow_internal_unstable(const_fn_transmute))] + #[rustc_allow_const_fn_unstable(const_fn_transmute)] #[inline] pub const fn from_ne_bytes(bytes: [u8; mem::size_of::()]) -> Self { // SAFETY: integers are plain old datatypes so we can always transmute to them diff --git a/library/core/src/ops/index.rs b/library/core/src/ops/index.rs index 3c2ada57612..a8dea4e9b4e 100644 --- a/library/core/src/ops/index.rs +++ b/library/core/src/ops/index.rs @@ -79,7 +79,7 @@ pub trait Index { /// each can be indexed mutably and immutably. /// /// ``` -/// use std::ops::{Index,IndexMut}; +/// use std::ops::{Index, IndexMut}; /// /// #[derive(Debug)] /// enum Side { diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs index 09dd19b8f5f..25651502510 100644 --- a/library/core/src/panicking.rs +++ b/library/core/src/panicking.rs @@ -52,7 +52,7 @@ pub fn panic(expr: &'static str) -> ! { #[inline] #[track_caller] -#[cfg_attr(not(bootstrap), lang = "panic_str")] // needed for const-evaluated panics +#[lang = "panic_str"] // needed for const-evaluated panics pub fn panic_str(expr: &str) -> ! { panic_fmt(format_args!("{}", expr)); } diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs index 9de2758767e..27d49529a5e 100644 --- a/library/core/src/ptr/mod.rs +++ b/library/core/src/ptr/mod.rs @@ -16,12 +16,16 @@ //! provided at this point are very minimal: //! //! 
* A [null] pointer is *never* valid, not even for accesses of [size zero][zst]. -//! * All pointers (except for the null pointer) are valid for all operations of -//! [size zero][zst]. //! * For a pointer to be valid, it is necessary, but not always sufficient, that the pointer //! be *dereferenceable*: the memory range of the given size starting at the pointer must all be //! within the bounds of a single allocated object. Note that in Rust, //! every (stack-allocated) variable is considered a separate allocated object. +//! * Even for operations of [size zero][zst], the pointer must not be pointing to deallocated +//! memory, i.e., deallocation makes pointers invalid even for zero-sized operations. However, +//! casting any non-zero integer *literal* to a pointer is valid for zero-sized accesses, even if +//! some memory happens to exist at that address and gets deallocated. This corresponds to writing +//! your own allocator: allocating zero-sized objects is not very hard. The canonical way to +//! obtain a pointer that is valid for zero-sized accesses is [`NonNull::dangling`]. //! * All accesses performed by functions in this module are *non-atomic* in the sense //! of [atomic operations] used to synchronize between threads. This means it is //! undefined behavior to perform two concurrent accesses to the same location from different diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs index 79ae1d5829a..44fe2ca8859 100644 --- a/library/core/src/slice/mod.rs +++ b/library/core/src/slice/mod.rs @@ -88,8 +88,7 @@ impl [T] { #[rustc_const_stable(feature = "const_slice_len", since = "1.32.0")] #[inline] // SAFETY: const sound because we transmute out the length field as a usize (which it must be) - #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_union))] - #[cfg_attr(bootstrap, allow_internal_unstable(const_fn_union))] + #[rustc_allow_const_fn_unstable(const_fn_union)] pub const fn len(&self) -> usize { // SAFETY: this is safe because `&[T]` and `FatPtr` have the same layout. // Only `std` can make this guarantee. @@ -605,8 +604,9 @@ impl [T] { // many bytes away from the end of `self`. // - Any initialized memory is valid `usize`. unsafe { - let pa: *mut T = self.get_unchecked_mut(i); - let pb: *mut T = self.get_unchecked_mut(ln - i - chunk); + let ptr = self.as_mut_ptr(); + let pa = ptr.add(i); + let pb = ptr.add(ln - i - chunk); let va = ptr::read_unaligned(pa as *mut usize); let vb = ptr::read_unaligned(pb as *mut usize); ptr::write_unaligned(pa as *mut usize, vb.swap_bytes()); @@ -635,8 +635,9 @@ impl [T] { // always respected, ensuring the `pb` pointer can be used // safely. unsafe { - let pa: *mut T = self.get_unchecked_mut(i); - let pb: *mut T = self.get_unchecked_mut(ln - i - chunk); + let ptr = self.as_mut_ptr(); + let pa = ptr.add(i); + let pb = ptr.add(ln - i - chunk); let va = ptr::read_unaligned(pa as *mut u32); let vb = ptr::read_unaligned(pb as *mut u32); ptr::write_unaligned(pa as *mut u32, vb.rotate_left(16)); @@ -654,8 +655,9 @@ impl [T] { // aligned, and can be read from and written to. unsafe { // Unsafe swap to avoid the bounds check in safe swap. 
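The `reverse()` hunks here derive both pointers from a single `as_mut_ptr()` call instead of calling `get_unchecked_mut` twice, presumably to avoid holding two `&mut` borrows into the same slice at once. A minimal sketch of that pattern, using a hypothetical `swap_ends` helper:

```
fn swap_ends<T>(s: &mut [T]) {
    let ln = s.len();
    if ln < 2 {
        return;
    }
    // One raw pointer into the slice; offsets are computed from it rather than
    // creating a second `&mut` reference.
    let ptr = s.as_mut_ptr();
    // SAFETY: `0` and `ln - 1` are in bounds and distinct because `ln >= 2`.
    unsafe { std::ptr::swap(ptr, ptr.add(ln - 1)) };
}

fn main() {
    let mut v = [1, 2, 3, 4];
    swap_ends(&mut v);
    assert_eq!(v, [4, 2, 3, 1]);
}
```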
- let pa: *mut T = self.get_unchecked_mut(i); - let pb: *mut T = self.get_unchecked_mut(ln - i - 1); + let ptr = self.as_mut_ptr(); + let pa = ptr.add(i); + let pb = ptr.add(ln - i - 1); ptr::swap(pa, pb); } i += 1; @@ -1958,10 +1960,10 @@ impl [T] { /// (1, 2), (2, 3), (4, 5), (5, 8), (3, 13), /// (1, 21), (2, 34), (4, 55)]; /// - /// assert_eq!(s.binary_search_by_key(&13, |&(a,b)| b), Ok(9)); - /// assert_eq!(s.binary_search_by_key(&4, |&(a,b)| b), Err(7)); - /// assert_eq!(s.binary_search_by_key(&100, |&(a,b)| b), Err(13)); - /// let r = s.binary_search_by_key(&1, |&(a,b)| b); + /// assert_eq!(s.binary_search_by_key(&13, |&(a, b)| b), Ok(9)); + /// assert_eq!(s.binary_search_by_key(&4, |&(a, b)| b), Err(7)); + /// assert_eq!(s.binary_search_by_key(&100, |&(a, b)| b), Err(13)); + /// let r = s.binary_search_by_key(&1, |&(a, b)| b); /// assert!(match r { Ok(1..=4) => true, _ => false, }); /// ``` #[stable(feature = "slice_binary_search_by_key", since = "1.10.0")] @@ -2585,6 +2587,7 @@ impl [T] { /// buf.fill(1); /// assert_eq!(buf, vec![1; 10]); /// ``` + #[doc(alias = "memset")] #[unstable(feature = "slice_fill", issue = "70758")] pub fn fill(&mut self, value: T) where @@ -2599,6 +2602,34 @@ impl [T] { } } + /// Fills `self` with elements returned by calling a closure repeatedly. + /// + /// This method uses a closure to create new values. If you'd rather + /// [`Clone`] a given value, use [`fill`]. If you want to use the [`Default`] + /// trait to generate values, you can pass [`Default::default`] as the + /// argument. + /// + /// [`fill`]: #method.fill + /// + /// # Examples + /// + /// ``` + /// #![feature(slice_fill_with)] + /// + /// let mut buf = vec![1; 10]; + /// buf.fill_with(Default::default); + /// assert_eq!(buf, vec![0; 10]); + /// ``` + #[unstable(feature = "slice_fill_with", issue = "79221")] + pub fn fill_with(&mut self, mut f: F) + where + F: FnMut() -> T, + { + for el in self { + *el = f(); + } + } + /// Copies the elements from `src` into `self`. /// /// The length of `src` must be the same as `self`. @@ -2724,6 +2755,7 @@ impl [T] { /// /// [`clone_from_slice`]: #method.clone_from_slice /// [`split_at_mut`]: #method.split_at_mut + #[doc(alias = "memcpy")] #[stable(feature = "copy_from_slice", since = "1.9.0")] pub fn copy_from_slice(&mut self, src: &[T]) where diff --git a/library/core/src/str/converts.rs b/library/core/src/str/converts.rs index 952d0598a7c..73316433e09 100644 --- a/library/core/src/str/converts.rs +++ b/library/core/src/str/converts.rs @@ -157,8 +157,7 @@ pub fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> { #[inline] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_const_unstable(feature = "const_str_from_utf8_unchecked", issue = "75196")] -#[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_transmute))] -#[cfg_attr(bootstrap, allow_internal_unstable(const_fn_transmute))] +#[rustc_allow_const_fn_unstable(const_fn_transmute)] pub const unsafe fn from_utf8_unchecked(v: &[u8]) -> &str { // SAFETY: the caller must guarantee that the bytes `v` are valid UTF-8. // Also relies on `&str` and `&[u8]` having the same layout. 
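The new `fill_with` is not limited to `Default::default`; the closure may carry state between calls. A short sketch, assuming a nightly toolchain with the `slice_fill_with` gate introduced here:

```
#![feature(slice_fill_with)]

fn main() {
    let mut next = 0u32;
    let mut buf = [0u32; 5];

    // Each call advances the captured counter, filling the slice with 0..5.
    buf.fill_with(|| {
        let v = next;
        next += 1;
        v
    });

    assert_eq!(buf, [0, 1, 2, 3, 4]);
}
```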
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs index 23d63a4787e..604e317110c 100644 --- a/library/core/src/str/mod.rs +++ b/library/core/src/str/mod.rs @@ -219,8 +219,7 @@ impl str { #[rustc_const_stable(feature = "str_as_bytes", since = "1.32.0")] #[inline(always)] #[allow(unused_attributes)] - #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_transmute))] - #[cfg_attr(bootstrap, allow_internal_unstable(const_fn_transmute))] + #[rustc_allow_const_fn_unstable(const_fn_transmute)] pub const fn as_bytes(&self) -> &[u8] { // SAFETY: const sound because we transmute two types with the same layout unsafe { mem::transmute(self) } diff --git a/library/core/src/str/validations.rs b/library/core/src/str/validations.rs index 10cf1e172e6..373a8212425 100644 --- a/library/core/src/str/validations.rs +++ b/library/core/src/str/validations.rs @@ -125,7 +125,7 @@ pub(super) fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> { let old_offset = index; macro_rules! err { ($error_len: expr) => { - return Err(Utf8Error { valid_up_to: old_offset, error_len: $error_len }); + return Err(Utf8Error { valid_up_to: old_offset, error_len: $error_len }) }; } diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs index d48c02bf59c..9d204599057 100644 --- a/library/core/src/sync/atomic.rs +++ b/library/core/src/sync/atomic.rs @@ -47,9 +47,16 @@ //! //! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or //! `AtomicI64` types. -//! * ARM platforms like `armv5te` that aren't for Linux do not have any atomics -//! at all. -//! * ARM targets with `thumbv6m` do not have atomic operations at all. +//! * ARM platforms like `armv5te` that aren't for Linux only provide `load` +//! and `store` operations, and do not support Compare and Swap (CAS) +//! operations, such as `swap`, `fetch_add`, etc. Additionally on Linux, +//! these CAS operations are implemented via [operating system support], which +//! may come with a performance penalty. +//! * ARM targets with `thumbv6m` only provide `load` and `store` operations, +//! and do not support Compare and Swap (CAS) operations, such as `swap`, +//! `fetch_add`, etc. +//! +//! [operating system support]: https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt //! //! Note that future platforms may be added that also do not have support for //! some atomic operations. Maximally portable code will want to be careful diff --git a/library/core/src/task/wake.rs b/library/core/src/task/wake.rs index d3c0d9b7841..b775e022a54 100644 --- a/library/core/src/task/wake.rs +++ b/library/core/src/task/wake.rs @@ -130,8 +130,7 @@ impl RawWakerVTable { #[rustc_promotable] #[stable(feature = "futures_api", since = "1.36.0")] #[rustc_const_stable(feature = "futures_api", since = "1.36.0")] - #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_fn_ptr_basics))] - #[cfg_attr(bootstrap, allow_internal_unstable(const_fn_fn_ptr_basics))] + #[rustc_allow_const_fn_unstable(const_fn_fn_ptr_basics)] pub const fn new( clone: unsafe fn(*const ()) -> RawWaker, wake: unsafe fn(*const ()), diff --git a/library/core/src/time.rs b/library/core/src/time.rs index 6dc542dee58..88b4e2a2436 100644 --- a/library/core/src/time.rs +++ b/library/core/src/time.rs @@ -108,18 +108,20 @@ impl Duration { #[unstable(feature = "duration_constants", issue = "57391")] pub const NANOSECOND: Duration = Duration::from_nanos(1); - /// The minimum duration. + /// A duration of zero time. 
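A small usage sketch for the new `Duration::ZERO` constant, which replaces both the `zero()` constructor and the short-lived `MIN` constant in this diff: it acts as the natural identity value, for example as a fold seed. Gated here behind the same `duration_zero` and `duration_constants` features used elsewhere in this change.

```
#![feature(duration_zero)]
#![feature(duration_constants)]

use std::time::Duration;

fn main() {
    let parts = [Duration::from_millis(5), Duration::from_micros(300), Duration::SECOND];

    // `ZERO` is the identity for addition, so it is the obvious fold seed.
    let total = parts.iter().fold(Duration::ZERO, |acc, &d| acc + d);
    assert_eq!(total, Duration::new(1, 5_300_000));
}
```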
/// /// # Examples /// /// ``` - /// #![feature(duration_constants)] + /// #![feature(duration_zero)] /// use std::time::Duration; /// - /// assert_eq!(Duration::MIN, Duration::new(0, 0)); + /// let duration = Duration::ZERO; + /// assert!(duration.is_zero()); + /// assert_eq!(duration.as_nanos(), 0); /// ``` - #[unstable(feature = "duration_constants", issue = "57391")] - pub const MIN: Duration = Duration::from_nanos(0); + #[unstable(feature = "duration_zero", issue = "73544")] + pub const ZERO: Duration = Duration::from_nanos(0); /// The maximum duration. /// @@ -166,24 +168,6 @@ impl Duration { Duration { secs, nanos } } - /// Creates a new `Duration` that spans no time. - /// - /// # Examples - /// - /// ``` - /// #![feature(duration_zero)] - /// use std::time::Duration; - /// - /// let duration = Duration::zero(); - /// assert!(duration.is_zero()); - /// assert_eq!(duration.as_nanos(), 0); - /// ``` - #[unstable(feature = "duration_zero", issue = "73544")] - #[inline] - pub const fn zero() -> Duration { - Duration { secs: 0, nanos: 0 } - } - /// Creates a new `Duration` from the specified number of whole seconds. /// /// # Examples @@ -277,7 +261,7 @@ impl Duration { /// #![feature(duration_zero)] /// use std::time::Duration; /// - /// assert!(Duration::zero().is_zero()); + /// assert!(Duration::ZERO.is_zero()); /// assert!(Duration::new(0, 0).is_zero()); /// assert!(Duration::from_nanos(0).is_zero()); /// assert!(Duration::from_secs(0).is_zero()); @@ -536,18 +520,18 @@ impl Duration { } } - /// Saturating `Duration` subtraction. Computes `self - other`, returning [`Duration::MIN`] + /// Saturating `Duration` subtraction. Computes `self - other`, returning [`Duration::ZERO`] /// if the result would be negative or if overflow occurred. /// /// # Examples /// /// ``` /// #![feature(duration_saturating_ops)] - /// #![feature(duration_constants)] + /// #![feature(duration_zero)] /// use std::time::Duration; /// /// assert_eq!(Duration::new(0, 1).saturating_sub(Duration::new(0, 0)), Duration::new(0, 1)); - /// assert_eq!(Duration::new(0, 0).saturating_sub(Duration::new(0, 1)), Duration::MIN); + /// assert_eq!(Duration::new(0, 0).saturating_sub(Duration::new(0, 1)), Duration::ZERO); /// ``` #[unstable(feature = "duration_saturating_ops", issue = "76416")] #[inline] @@ -555,7 +539,7 @@ impl Duration { pub const fn saturating_sub(self, rhs: Duration) -> Duration { match self.checked_sub(rhs) { Some(res) => res, - None => Duration::MIN, + None => Duration::ZERO, } } diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs index c9f9b890c39..14ef03fd53e 100644 --- a/library/core/tests/lib.rs +++ b/library/core/tests/lib.rs @@ -60,6 +60,8 @@ #![feature(once_cell)] #![feature(unsafe_block_in_unsafe_fn)] #![feature(int_bits_const)] +#![feature(nonzero_leading_trailing_zeros)] +#![feature(const_option)] #![deny(unsafe_op_in_unsafe_fn)] extern crate test; diff --git a/library/core/tests/nonzero.rs b/library/core/tests/nonzero.rs index fb1293c99bb..b66c482c5e5 100644 --- a/library/core/tests/nonzero.rs +++ b/library/core/tests/nonzero.rs @@ -1,5 +1,8 @@ use core::convert::TryFrom; -use core::num::{IntErrorKind, NonZeroI32, NonZeroI8, NonZeroU32, NonZeroU8}; +use core::num::{ + IntErrorKind, NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, + NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, +}; use core::option::Option::{self, None, Some}; use std::mem::size_of; @@ -82,7 +85,7 @@ fn test_match_option_string() { let five = "Five".to_string(); 
match Some(five) { Some(s) => assert_eq!(s, "Five"), - None => panic!("unexpected None while matching on Some(String { ... })"), + None => panic!("{}", "unexpected None while matching on Some(String { ... })"), } } @@ -212,3 +215,100 @@ fn nonzero_const() { const ONE: Option = NonZeroU8::new(1); assert!(ONE.is_some()); } + +#[test] +fn nonzero_leading_zeros() { + assert_eq!(NonZeroU8::new(1).unwrap().leading_zeros(), 7); + assert_eq!(NonZeroI8::new(1).unwrap().leading_zeros(), 7); + assert_eq!(NonZeroU16::new(1).unwrap().leading_zeros(), 15); + assert_eq!(NonZeroI16::new(1).unwrap().leading_zeros(), 15); + assert_eq!(NonZeroU32::new(1).unwrap().leading_zeros(), 31); + assert_eq!(NonZeroI32::new(1).unwrap().leading_zeros(), 31); + assert_eq!(NonZeroU64::new(1).unwrap().leading_zeros(), 63); + assert_eq!(NonZeroI64::new(1).unwrap().leading_zeros(), 63); + assert_eq!(NonZeroU128::new(1).unwrap().leading_zeros(), 127); + assert_eq!(NonZeroI128::new(1).unwrap().leading_zeros(), 127); + assert_eq!(NonZeroUsize::new(1).unwrap().leading_zeros(), usize::BITS - 1); + assert_eq!(NonZeroIsize::new(1).unwrap().leading_zeros(), usize::BITS - 1); + + assert_eq!(NonZeroU8::new(u8::MAX >> 2).unwrap().leading_zeros(), 2); + assert_eq!(NonZeroI8::new((u8::MAX >> 2) as i8).unwrap().leading_zeros(), 2); + assert_eq!(NonZeroU16::new(u16::MAX >> 2).unwrap().leading_zeros(), 2); + assert_eq!(NonZeroI16::new((u16::MAX >> 2) as i16).unwrap().leading_zeros(), 2); + assert_eq!(NonZeroU32::new(u32::MAX >> 2).unwrap().leading_zeros(), 2); + assert_eq!(NonZeroI32::new((u32::MAX >> 2) as i32).unwrap().leading_zeros(), 2); + assert_eq!(NonZeroU64::new(u64::MAX >> 2).unwrap().leading_zeros(), 2); + assert_eq!(NonZeroI64::new((u64::MAX >> 2) as i64).unwrap().leading_zeros(), 2); + assert_eq!(NonZeroU128::new(u128::MAX >> 2).unwrap().leading_zeros(), 2); + assert_eq!(NonZeroI128::new((u128::MAX >> 2) as i128).unwrap().leading_zeros(), 2); + assert_eq!(NonZeroUsize::new(usize::MAX >> 2).unwrap().leading_zeros(), 2); + assert_eq!(NonZeroIsize::new((usize::MAX >> 2) as isize).unwrap().leading_zeros(), 2); + + assert_eq!(NonZeroU8::new(u8::MAX).unwrap().leading_zeros(), 0); + assert_eq!(NonZeroI8::new(-1i8).unwrap().leading_zeros(), 0); + assert_eq!(NonZeroU16::new(u16::MAX).unwrap().leading_zeros(), 0); + assert_eq!(NonZeroI16::new(-1i16).unwrap().leading_zeros(), 0); + assert_eq!(NonZeroU32::new(u32::MAX).unwrap().leading_zeros(), 0); + assert_eq!(NonZeroI32::new(-1i32).unwrap().leading_zeros(), 0); + assert_eq!(NonZeroU64::new(u64::MAX).unwrap().leading_zeros(), 0); + assert_eq!(NonZeroI64::new(-1i64).unwrap().leading_zeros(), 0); + assert_eq!(NonZeroU128::new(u128::MAX).unwrap().leading_zeros(), 0); + assert_eq!(NonZeroI128::new(-1i128).unwrap().leading_zeros(), 0); + assert_eq!(NonZeroUsize::new(usize::MAX).unwrap().leading_zeros(), 0); + assert_eq!(NonZeroIsize::new(-1isize).unwrap().leading_zeros(), 0); + + const LEADING_ZEROS: u32 = NonZeroU16::new(1).unwrap().leading_zeros(); + assert_eq!(LEADING_ZEROS, 15); +} + +#[test] +fn nonzero_trailing_zeros() { + assert_eq!(NonZeroU8::new(1).unwrap().trailing_zeros(), 0); + assert_eq!(NonZeroI8::new(1).unwrap().trailing_zeros(), 0); + assert_eq!(NonZeroU16::new(1).unwrap().trailing_zeros(), 0); + assert_eq!(NonZeroI16::new(1).unwrap().trailing_zeros(), 0); + assert_eq!(NonZeroU32::new(1).unwrap().trailing_zeros(), 0); + assert_eq!(NonZeroI32::new(1).unwrap().trailing_zeros(), 0); + assert_eq!(NonZeroU64::new(1).unwrap().trailing_zeros(), 0); + 
assert_eq!(NonZeroI64::new(1).unwrap().trailing_zeros(), 0); + assert_eq!(NonZeroU128::new(1).unwrap().trailing_zeros(), 0); + assert_eq!(NonZeroI128::new(1).unwrap().trailing_zeros(), 0); + assert_eq!(NonZeroUsize::new(1).unwrap().trailing_zeros(), 0); + assert_eq!(NonZeroIsize::new(1).unwrap().trailing_zeros(), 0); + + assert_eq!(NonZeroU8::new(1 << 2).unwrap().trailing_zeros(), 2); + assert_eq!(NonZeroI8::new(1 << 2).unwrap().trailing_zeros(), 2); + assert_eq!(NonZeroU16::new(1 << 2).unwrap().trailing_zeros(), 2); + assert_eq!(NonZeroI16::new(1 << 2).unwrap().trailing_zeros(), 2); + assert_eq!(NonZeroU32::new(1 << 2).unwrap().trailing_zeros(), 2); + assert_eq!(NonZeroI32::new(1 << 2).unwrap().trailing_zeros(), 2); + assert_eq!(NonZeroU64::new(1 << 2).unwrap().trailing_zeros(), 2); + assert_eq!(NonZeroI64::new(1 << 2).unwrap().trailing_zeros(), 2); + assert_eq!(NonZeroU128::new(1 << 2).unwrap().trailing_zeros(), 2); + assert_eq!(NonZeroI128::new(1 << 2).unwrap().trailing_zeros(), 2); + assert_eq!(NonZeroUsize::new(1 << 2).unwrap().trailing_zeros(), 2); + assert_eq!(NonZeroIsize::new(1 << 2).unwrap().trailing_zeros(), 2); + + assert_eq!(NonZeroU8::new(1 << 7).unwrap().trailing_zeros(), 7); + assert_eq!(NonZeroI8::new(1 << 7).unwrap().trailing_zeros(), 7); + assert_eq!(NonZeroU16::new(1 << 15).unwrap().trailing_zeros(), 15); + assert_eq!(NonZeroI16::new(1 << 15).unwrap().trailing_zeros(), 15); + assert_eq!(NonZeroU32::new(1 << 31).unwrap().trailing_zeros(), 31); + assert_eq!(NonZeroI32::new(1 << 31).unwrap().trailing_zeros(), 31); + assert_eq!(NonZeroU64::new(1 << 63).unwrap().trailing_zeros(), 63); + assert_eq!(NonZeroI64::new(1 << 63).unwrap().trailing_zeros(), 63); + assert_eq!(NonZeroU128::new(1 << 127).unwrap().trailing_zeros(), 127); + assert_eq!(NonZeroI128::new(1 << 127).unwrap().trailing_zeros(), 127); + + assert_eq!( + NonZeroUsize::new(1 << (usize::BITS - 1)).unwrap().trailing_zeros(), + usize::BITS - 1 + ); + assert_eq!( + NonZeroIsize::new(1 << (usize::BITS - 1)).unwrap().trailing_zeros(), + usize::BITS - 1 + ); + + const TRAILING_ZEROS: u32 = NonZeroU16::new(1 << 2).unwrap().trailing_zeros(); + assert_eq!(TRAILING_ZEROS, 2); +} diff --git a/library/core/tests/num/i128.rs b/library/core/tests/num/i128.rs new file mode 100644 index 00000000000..72c0b225991 --- /dev/null +++ b/library/core/tests/num/i128.rs @@ -0,0 +1 @@ +int_module!(i128, i128); diff --git a/library/core/tests/num/int_macros.rs b/library/core/tests/num/int_macros.rs index fcb0d6031be..90c47656784 100644 --- a/library/core/tests/num/int_macros.rs +++ b/library/core/tests/num/int_macros.rs @@ -131,9 +131,9 @@ macro_rules! 
int_module { assert_eq!(B.rotate_left(0), B); assert_eq!(C.rotate_left(0), C); // Rotating by a multiple of word size should also have no effect - assert_eq!(A.rotate_left(64), A); - assert_eq!(B.rotate_left(64), B); - assert_eq!(C.rotate_left(64), C); + assert_eq!(A.rotate_left(128), A); + assert_eq!(B.rotate_left(128), B); + assert_eq!(C.rotate_left(128), C); } #[test] diff --git a/library/core/tests/num/mod.rs b/library/core/tests/num/mod.rs index 49e5cc0eaa5..012ab4ea5c5 100644 --- a/library/core/tests/num/mod.rs +++ b/library/core/tests/num/mod.rs @@ -11,6 +11,7 @@ use core::str::FromStr; #[macro_use] mod int_macros; +mod i128; mod i16; mod i32; mod i64; @@ -19,6 +20,7 @@ mod i8; #[macro_use] mod uint_macros; +mod u128; mod u16; mod u32; mod u64; diff --git a/library/core/tests/num/u128.rs b/library/core/tests/num/u128.rs new file mode 100644 index 00000000000..716d1836f2c --- /dev/null +++ b/library/core/tests/num/u128.rs @@ -0,0 +1 @@ +uint_module!(u128, u128); diff --git a/library/core/tests/num/uint_macros.rs b/library/core/tests/num/uint_macros.rs index 952ec188dc1..445f8fb350e 100644 --- a/library/core/tests/num/uint_macros.rs +++ b/library/core/tests/num/uint_macros.rs @@ -96,9 +96,9 @@ macro_rules! uint_module { assert_eq!(B.rotate_left(0), B); assert_eq!(C.rotate_left(0), C); // Rotating by a multiple of word size should also have no effect - assert_eq!(A.rotate_left(64), A); - assert_eq!(B.rotate_left(64), B); - assert_eq!(C.rotate_left(64), C); + assert_eq!(A.rotate_left(128), A); + assert_eq!(B.rotate_left(128), B); + assert_eq!(C.rotate_left(128), C); } #[test] diff --git a/library/core/tests/ops.rs b/library/core/tests/ops.rs index 8f0cd3be406..e9d595e65e2 100644 --- a/library/core/tests/ops.rs +++ b/library/core/tests/ops.rs @@ -1,4 +1,4 @@ -use core::ops::{Bound, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo}; +use core::ops::{Bound, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive}; // Test the Range structs and syntax. @@ -59,6 +59,12 @@ fn test_range_inclusive() { assert_eq!(r.next(), None); } +#[test] +fn test_range_to_inclusive() { + // Not much to test. + let _ = RangeToInclusive { end: 42 }; +} + #[test] fn test_range_is_empty() { assert!(!(0.0..10.0).is_empty()); @@ -151,3 +157,43 @@ fn test_range_syntax_in_return_statement() { } // Not much to test. 
} + +#[test] +fn range_structural_match() { + // test that all range types can be structurally matched upon + + const RANGE: Range = 0..1000; + match RANGE { + RANGE => {} + _ => unreachable!(), + } + + const RANGE_FROM: RangeFrom = 0..; + match RANGE_FROM { + RANGE_FROM => {} + _ => unreachable!(), + } + + const RANGE_FULL: RangeFull = ..; + match RANGE_FULL { + RANGE_FULL => {} + } + + const RANGE_INCLUSIVE: RangeInclusive = 0..=999; + match RANGE_INCLUSIVE { + RANGE_INCLUSIVE => {} + _ => unreachable!(), + } + + const RANGE_TO: RangeTo = ..1000; + match RANGE_TO { + RANGE_TO => {} + _ => unreachable!(), + } + + const RANGE_TO_INCLUSIVE: RangeToInclusive = ..=999; + match RANGE_TO_INCLUSIVE { + RANGE_TO_INCLUSIVE => {} + _ => unreachable!(), + } +} diff --git a/library/core/tests/time.rs b/library/core/tests/time.rs index 7c43885040b..f14639e0d58 100644 --- a/library/core/tests/time.rs +++ b/library/core/tests/time.rs @@ -108,24 +108,24 @@ fn sub() { #[test] fn checked_sub() { - let zero = Duration::new(0, 0); - let one_nano = Duration::new(0, 1); - let one_sec = Duration::new(1, 0); - assert_eq!(one_nano.checked_sub(zero), Some(Duration::new(0, 1))); - assert_eq!(one_sec.checked_sub(one_nano), Some(Duration::new(0, 999_999_999))); - assert_eq!(zero.checked_sub(one_nano), None); - assert_eq!(zero.checked_sub(one_sec), None); + assert_eq!(Duration::NANOSECOND.checked_sub(Duration::ZERO), Some(Duration::NANOSECOND)); + assert_eq!( + Duration::SECOND.checked_sub(Duration::NANOSECOND), + Some(Duration::new(0, 999_999_999)) + ); + assert_eq!(Duration::ZERO.checked_sub(Duration::NANOSECOND), None); + assert_eq!(Duration::ZERO.checked_sub(Duration::SECOND), None); } #[test] fn saturating_sub() { - let zero = Duration::new(0, 0); - let one_nano = Duration::new(0, 1); - let one_sec = Duration::new(1, 0); - assert_eq!(one_nano.saturating_sub(zero), Duration::new(0, 1)); - assert_eq!(one_sec.saturating_sub(one_nano), Duration::new(0, 999_999_999)); - assert_eq!(zero.saturating_sub(one_nano), Duration::MIN); - assert_eq!(zero.saturating_sub(one_sec), Duration::MIN); + assert_eq!(Duration::NANOSECOND.saturating_sub(Duration::ZERO), Duration::NANOSECOND); + assert_eq!( + Duration::SECOND.saturating_sub(Duration::NANOSECOND), + Duration::new(0, 999_999_999) + ); + assert_eq!(Duration::ZERO.saturating_sub(Duration::NANOSECOND), Duration::ZERO); + assert_eq!(Duration::ZERO.saturating_sub(Duration::SECOND), Duration::ZERO); } #[test] @@ -337,87 +337,82 @@ fn duration_const() { const SUB_SEC_NANOS: u32 = DURATION.subsec_nanos(); assert_eq!(SUB_SEC_NANOS, 123_456_789); - const ZERO: Duration = Duration::zero(); - assert_eq!(ZERO, Duration::new(0, 0)); - - const IS_ZERO: bool = ZERO.is_zero(); + const IS_ZERO: bool = Duration::ZERO.is_zero(); assert!(IS_ZERO); - const ONE: Duration = Duration::new(1, 0); - - const SECONDS: u64 = ONE.as_secs(); + const SECONDS: u64 = Duration::SECOND.as_secs(); assert_eq!(SECONDS, 1); const FROM_SECONDS: Duration = Duration::from_secs(1); - assert_eq!(FROM_SECONDS, ONE); + assert_eq!(FROM_SECONDS, Duration::SECOND); - const SECONDS_F32: f32 = ONE.as_secs_f32(); + const SECONDS_F32: f32 = Duration::SECOND.as_secs_f32(); assert_eq!(SECONDS_F32, 1.0); const FROM_SECONDS_F32: Duration = Duration::from_secs_f32(1.0); - assert_eq!(FROM_SECONDS_F32, ONE); + assert_eq!(FROM_SECONDS_F32, Duration::SECOND); - const SECONDS_F64: f64 = ONE.as_secs_f64(); + const SECONDS_F64: f64 = Duration::SECOND.as_secs_f64(); assert_eq!(SECONDS_F64, 1.0); const FROM_SECONDS_F64: Duration = 
Duration::from_secs_f64(1.0); - assert_eq!(FROM_SECONDS_F64, ONE); + assert_eq!(FROM_SECONDS_F64, Duration::SECOND); - const MILLIS: u128 = ONE.as_millis(); + const MILLIS: u128 = Duration::SECOND.as_millis(); assert_eq!(MILLIS, 1_000); const FROM_MILLIS: Duration = Duration::from_millis(1_000); - assert_eq!(FROM_MILLIS, ONE); + assert_eq!(FROM_MILLIS, Duration::SECOND); - const MICROS: u128 = ONE.as_micros(); + const MICROS: u128 = Duration::SECOND.as_micros(); assert_eq!(MICROS, 1_000_000); const FROM_MICROS: Duration = Duration::from_micros(1_000_000); - assert_eq!(FROM_MICROS, ONE); + assert_eq!(FROM_MICROS, Duration::SECOND); - const NANOS: u128 = ONE.as_nanos(); + const NANOS: u128 = Duration::SECOND.as_nanos(); assert_eq!(NANOS, 1_000_000_000); const FROM_NANOS: Duration = Duration::from_nanos(1_000_000_000); - assert_eq!(FROM_NANOS, ONE); + assert_eq!(FROM_NANOS, Duration::SECOND); const MAX: Duration = Duration::new(u64::MAX, 999_999_999); - const CHECKED_ADD: Option = MAX.checked_add(ONE); + const CHECKED_ADD: Option = MAX.checked_add(Duration::SECOND); assert_eq!(CHECKED_ADD, None); - const CHECKED_SUB: Option = ZERO.checked_sub(ONE); + const CHECKED_SUB: Option = Duration::ZERO.checked_sub(Duration::SECOND); assert_eq!(CHECKED_SUB, None); - const CHECKED_MUL: Option = ONE.checked_mul(1); - assert_eq!(CHECKED_MUL, Some(ONE)); + const CHECKED_MUL: Option = Duration::SECOND.checked_mul(1); + assert_eq!(CHECKED_MUL, Some(Duration::SECOND)); - const MUL_F32: Duration = ONE.mul_f32(1.0); - assert_eq!(MUL_F32, ONE); + const MUL_F32: Duration = Duration::SECOND.mul_f32(1.0); + assert_eq!(MUL_F32, Duration::SECOND); - const MUL_F64: Duration = ONE.mul_f64(1.0); - assert_eq!(MUL_F64, ONE); + const MUL_F64: Duration = Duration::SECOND.mul_f64(1.0); + assert_eq!(MUL_F64, Duration::SECOND); - const CHECKED_DIV: Option = ONE.checked_div(1); - assert_eq!(CHECKED_DIV, Some(ONE)); + const CHECKED_DIV: Option = Duration::SECOND.checked_div(1); + assert_eq!(CHECKED_DIV, Some(Duration::SECOND)); - const DIV_F32: Duration = ONE.div_f32(1.0); - assert_eq!(DIV_F32, ONE); + const DIV_F32: Duration = Duration::SECOND.div_f32(1.0); + assert_eq!(DIV_F32, Duration::SECOND); - const DIV_F64: Duration = ONE.div_f64(1.0); - assert_eq!(DIV_F64, ONE); + const DIV_F64: Duration = Duration::SECOND.div_f64(1.0); + assert_eq!(DIV_F64, Duration::SECOND); - const DIV_DURATION_F32: f32 = ONE.div_duration_f32(ONE); + const DIV_DURATION_F32: f32 = Duration::SECOND.div_duration_f32(Duration::SECOND); assert_eq!(DIV_DURATION_F32, 1.0); - const DIV_DURATION_F64: f64 = ONE.div_duration_f64(ONE); + const DIV_DURATION_F64: f64 = Duration::SECOND.div_duration_f64(Duration::SECOND); assert_eq!(DIV_DURATION_F64, 1.0); - const SATURATING_ADD: Duration = MAX.saturating_add(ONE); + const SATURATING_ADD: Duration = MAX.saturating_add(Duration::SECOND); assert_eq!(SATURATING_ADD, MAX); - const SATURATING_SUB: Duration = ZERO.saturating_sub(ONE); - assert_eq!(SATURATING_SUB, ZERO); + const SATURATING_SUB: Duration = Duration::ZERO.saturating_sub(Duration::SECOND); + assert_eq!(SATURATING_SUB, Duration::ZERO); const SATURATING_MUL: Duration = MAX.saturating_mul(2); assert_eq!(SATURATING_MUL, MAX); diff --git a/library/proc_macro/src/bridge/client.rs b/library/proc_macro/src/bridge/client.rs index dfe5df965cf..c6bec5a6fbd 100644 --- a/library/proc_macro/src/bridge/client.rs +++ b/library/proc_macro/src/bridge/client.rs @@ -401,8 +401,7 @@ fn run_client DecodeMut<'a, 's, ()>, R: Encode<()>>( } impl Client crate::TokenStream> { - 
#[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn))] - #[cfg_attr(bootstrap, allow_internal_unstable(const_fn))] + #[rustc_allow_const_fn_unstable(const_fn)] pub const fn expand1(f: fn(crate::TokenStream) -> crate::TokenStream) -> Self { extern "C" fn run( bridge: Bridge<'_>, @@ -415,8 +414,7 @@ impl Client crate::TokenStream> { } impl Client crate::TokenStream> { - #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn))] - #[cfg_attr(bootstrap, allow_internal_unstable(const_fn))] + #[rustc_allow_const_fn_unstable(const_fn)] pub const fn expand2( f: fn(crate::TokenStream, crate::TokenStream) -> crate::TokenStream, ) -> Self { @@ -461,8 +459,7 @@ impl ProcMacro { } } - #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn))] - #[cfg_attr(bootstrap, allow_internal_unstable(const_fn))] + #[rustc_allow_const_fn_unstable(const_fn)] pub const fn custom_derive( trait_name: &'static str, attributes: &'static [&'static str], @@ -471,8 +468,7 @@ impl ProcMacro { ProcMacro::CustomDerive { trait_name, attributes, client: Client::expand1(expand) } } - #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn))] - #[cfg_attr(bootstrap, allow_internal_unstable(const_fn))] + #[rustc_allow_const_fn_unstable(const_fn)] pub const fn attr( name: &'static str, expand: fn(crate::TokenStream, crate::TokenStream) -> crate::TokenStream, @@ -480,8 +476,7 @@ impl ProcMacro { ProcMacro::Attr { name, client: Client::expand2(expand) } } - #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn))] - #[cfg_attr(bootstrap, allow_internal_unstable(const_fn))] + #[rustc_allow_const_fn_unstable(const_fn)] pub const fn bang( name: &'static str, expand: fn(crate::TokenStream) -> crate::TokenStream, diff --git a/library/proc_macro/src/bridge/scoped_cell.rs b/library/proc_macro/src/bridge/scoped_cell.rs index e7c32b10384..e1307856175 100644 --- a/library/proc_macro/src/bridge/scoped_cell.rs +++ b/library/proc_macro/src/bridge/scoped_cell.rs @@ -35,8 +35,7 @@ impl<'a, 'b, T: LambdaL> DerefMut for RefMutL<'a, 'b, T> { pub struct ScopedCell(Cell<>::Out>); impl ScopedCell { - #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn))] - #[cfg_attr(bootstrap, allow_internal_unstable(const_fn))] + #[rustc_allow_const_fn_unstable(const_fn)] pub const fn new(value: >::Out) -> Self { ScopedCell(Cell::new(value)) } diff --git a/library/proc_macro/src/lib.rs b/library/proc_macro/src/lib.rs index 03733d3b3ed..3ba706b09e8 100644 --- a/library/proc_macro/src/lib.rs +++ b/library/proc_macro/src/lib.rs @@ -18,7 +18,7 @@ test(no_crate_inject, attr(deny(warnings))), test(attr(allow(dead_code, deprecated, unused_variables, unused_mut))) )] -#![cfg_attr(not(bootstrap), feature(rustc_allow_const_fn_unstable))] +#![feature(rustc_allow_const_fn_unstable)] #![feature(nll)] #![feature(staged_api)] #![feature(const_fn)] diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml index 281ed4f336c..71d00b5c087 100644 --- a/library/std/Cargo.toml +++ b/library/std/Cargo.toml @@ -23,20 +23,20 @@ unwind = { path = "../unwind" } hashbrown = { version = "0.9.0", default-features = false, features = ['rustc-dep-of-std'] } # Dependencies of the `backtrace` crate -addr2line = { version = "0.13.0", optional = true, default-features = false } +addr2line = { version = "0.14.0", optional = true, default-features = false } rustc-demangle = { version = "0.1.18", features = ['rustc-dep-of-std'] } miniz_oxide = { version = "0.4.0", optional = true, default-features = false } [dependencies.object] 
-version = "0.20" +version = "0.22" optional = true default-features = false -features = ['read_core', 'elf', 'macho', 'pe'] +features = ['read_core', 'elf', 'macho', 'pe', 'unaligned', 'archive'] [dev-dependencies] rand = "0.7" [target.'cfg(any(all(target_arch = "wasm32", not(target_os = "emscripten")), all(target_vendor = "fortanix", target_env = "sgx")))'.dependencies] -dlmalloc = { version = "0.1", features = ['rustc-dep-of-std'] } +dlmalloc = { version = "0.2.1", features = ['rustc-dep-of-std'] } [target.x86_64-fortanix-unknown-sgx.dependencies] fortanix-sgx-abi = { version = "0.3.2", features = ['rustc-dep-of-std'] } @@ -60,6 +60,8 @@ panic-unwind = ["panic_unwind"] profiler = ["profiler_builtins"] compiler-builtins-c = ["alloc/compiler-builtins-c"] compiler-builtins-mem = ["alloc/compiler-builtins-mem"] +compiler-builtins-asm = ["alloc/compiler-builtins-asm"] +compiler-builtins-mangled-names = ["alloc/compiler-builtins-mangled-names"] llvm-libunwind = ["unwind/llvm-libunwind"] system-llvm-libunwind = ["unwind/system-llvm-libunwind"] diff --git a/library/std/build.rs b/library/std/build.rs index f2ed7552afb..f730569f74b 100644 --- a/library/std/build.rs +++ b/library/std/build.rs @@ -3,66 +3,23 @@ use std::env; fn main() { println!("cargo:rerun-if-changed=build.rs"); let target = env::var("TARGET").expect("TARGET was not set"); - if target.contains("linux") { - if target.contains("android") { - println!("cargo:rustc-link-lib=dl"); - println!("cargo:rustc-link-lib=log"); - println!("cargo:rustc-link-lib=gcc"); - } - } else if target.contains("freebsd") { - println!("cargo:rustc-link-lib=execinfo"); - println!("cargo:rustc-link-lib=pthread"); + if target.contains("freebsd") { if env::var("RUST_STD_FREEBSD_12_ABI").is_ok() { println!("cargo:rustc-cfg=freebsd12"); } - } else if target.contains("netbsd") { - println!("cargo:rustc-link-lib=pthread"); - println!("cargo:rustc-link-lib=rt"); - } else if target.contains("dragonfly") || target.contains("openbsd") { - println!("cargo:rustc-link-lib=pthread"); - } else if target.contains("solaris") { - println!("cargo:rustc-link-lib=socket"); - println!("cargo:rustc-link-lib=posix4"); - println!("cargo:rustc-link-lib=pthread"); - println!("cargo:rustc-link-lib=resolv"); - } else if target.contains("illumos") { - println!("cargo:rustc-link-lib=socket"); - println!("cargo:rustc-link-lib=posix4"); - println!("cargo:rustc-link-lib=pthread"); - println!("cargo:rustc-link-lib=resolv"); - println!("cargo:rustc-link-lib=nsl"); - // Use libumem for the (malloc-compatible) allocator - println!("cargo:rustc-link-lib=umem"); - } else if target.contains("apple-darwin") { - println!("cargo:rustc-link-lib=System"); - - // res_init and friends require -lresolv on macOS/iOS. 
- // See #41582 and http://blog.achernya.com/2013/03/os-x-has-silly-libsystem.html - println!("cargo:rustc-link-lib=resolv"); - } else if target.contains("apple-ios") { - println!("cargo:rustc-link-lib=System"); - println!("cargo:rustc-link-lib=objc"); - println!("cargo:rustc-link-lib=framework=Security"); - println!("cargo:rustc-link-lib=framework=Foundation"); - println!("cargo:rustc-link-lib=resolv"); - } else if target.contains("uwp") { - println!("cargo:rustc-link-lib=ws2_32"); - // For BCryptGenRandom - println!("cargo:rustc-link-lib=bcrypt"); - } else if target.contains("windows") { - println!("cargo:rustc-link-lib=advapi32"); - println!("cargo:rustc-link-lib=ws2_32"); - println!("cargo:rustc-link-lib=userenv"); - } else if target.contains("fuchsia") { - println!("cargo:rustc-link-lib=zircon"); - println!("cargo:rustc-link-lib=fdio"); - } else if target.contains("cloudabi") { - if cfg!(feature = "backtrace") { - println!("cargo:rustc-link-lib=unwind"); - } - println!("cargo:rustc-link-lib=c"); - println!("cargo:rustc-link-lib=compiler_rt"); - } else if (target.contains("sgx") && target.contains("fortanix")) + } else if target.contains("linux") + || target.contains("netbsd") + || target.contains("dragonfly") + || target.contains("openbsd") + || target.contains("solaris") + || target.contains("illumos") + || target.contains("apple-darwin") + || target.contains("apple-ios") + || target.contains("uwp") + || target.contains("windows") + || target.contains("fuchsia") + || target.contains("cloudabi") + || (target.contains("sgx") && target.contains("fortanix")) || target.contains("hermit") || target.contains("l4re") || target.contains("redox") diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs index dd760062380..375b015ccc8 100644 --- a/library/std/src/alloc.rs +++ b/library/std/src/alloc.rs @@ -316,7 +316,7 @@ pub fn take_alloc_error_hook() -> fn(Layout) { } fn default_alloc_error_hook(layout: Layout) { - dumb_print(format_args!("memory allocation of {} bytes failed", layout.size())); + dumb_print(format_args!("memory allocation of {} bytes failed\n", layout.size())); } #[cfg(not(test))] diff --git a/library/std/src/backtrace.rs b/library/std/src/backtrace.rs index a9d8a4e2a81..7e8e0a621e3 100644 --- a/library/std/src/backtrace.rs +++ b/library/std/src/backtrace.rs @@ -161,6 +161,7 @@ struct BacktraceSymbol { name: Option>, filename: Option, lineno: Option, + colno: Option, } enum BytesOrWide { @@ -197,6 +198,10 @@ impl fmt::Debug for Backtrace { impl fmt::Debug for BacktraceSymbol { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + // FIXME: improve formatting: https://github.com/rust-lang/rust/issues/65280 + // FIXME: Also, include column numbers into the debug format as Display already has them. 
+ // Until there are stable per-frame accessors, the format shouldn't be changed: + // https://github.com/rust-lang/rust/issues/65280#issuecomment-638966585 write!(fmt, "{{ ")?; if let Some(fn_name) = self.name.as_ref().map(|b| backtrace_rs::SymbolName::new(b)) { @@ -209,7 +214,7 @@ impl fmt::Debug for BacktraceSymbol { write!(fmt, ", file: \"{:?}\"", fname)?; } - if let Some(line) = self.lineno.as_ref() { + if let Some(line) = self.lineno { write!(fmt, ", line: {:?}", line)?; } @@ -381,7 +386,7 @@ impl fmt::Display for Backtrace { f.print_raw(frame.frame.ip(), None, None, None)?; } else { for symbol in frame.symbols.iter() { - f.print_raw( + f.print_raw_with_column( frame.frame.ip(), symbol.name.as_ref().map(|b| backtrace_rs::SymbolName::new(b)), symbol.filename.as_ref().map(|b| match b { @@ -389,6 +394,7 @@ impl fmt::Display for Backtrace { BytesOrWide::Wide(w) => BytesOrWideString::Wide(w), }), symbol.lineno, + symbol.colno, )?; } } @@ -427,6 +433,7 @@ impl Capture { BytesOrWideString::Wide(b) => BytesOrWide::Wide(b.to_owned()), }), lineno: symbol.lineno(), + colno: symbol.colno(), }); }); } diff --git a/library/std/src/backtrace/tests.rs b/library/std/src/backtrace/tests.rs index 287359cd545..f5f74d1eb9a 100644 --- a/library/std/src/backtrace/tests.rs +++ b/library/std/src/backtrace/tests.rs @@ -13,6 +13,7 @@ fn test_debug() { name: Some(b"std::backtrace::Backtrace::create".to_vec()), filename: Some(BytesOrWide::Bytes(b"rust/backtrace.rs".to_vec())), lineno: Some(100), + colno: None, }], }, BacktraceFrame { @@ -21,6 +22,7 @@ fn test_debug() { name: Some(b"__rust_maybe_catch_panic".to_vec()), filename: None, lineno: None, + colno: None, }], }, BacktraceFrame { @@ -30,11 +32,13 @@ fn test_debug() { name: Some(b"std::rt::lang_start_internal".to_vec()), filename: Some(BytesOrWide::Bytes(b"rust/rt.rs".to_vec())), lineno: Some(300), + colno: Some(5), }, BacktraceSymbol { name: Some(b"std::rt::lang_start".to_vec()), filename: Some(BytesOrWide::Bytes(b"rust/rt.rs".to_vec())), lineno: Some(400), + colno: None, }, ], }, diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs index fa229251703..27d90e66137 100644 --- a/library/std/src/collections/hash/map.rs +++ b/library/std/src/collections/hash/map.rs @@ -34,8 +34,8 @@ use crate::sys; /// attacks such as HashDoS. /// /// The hashing algorithm can be replaced on a per-`HashMap` basis using the -/// [`default`], [`with_hasher`], and [`with_capacity_and_hasher`] methods. Many -/// alternative algorithms are available on crates.io, such as the [`fnv`] crate. +/// [`default`], [`with_hasher`], and [`with_capacity_and_hasher`] methods. +/// There are many alternative [hashing algorithms available on crates.io]. /// /// It is required that the keys implement the [`Eq`] and [`Hash`] traits, although /// this can frequently be achieved by using `#[derive(PartialEq, Eq, Hash)]`. @@ -57,6 +57,7 @@ use crate::sys; /// The original C++ version of SwissTable can be found [here], and this /// [CppCon talk] gives an overview of how the algorithm works. 
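The HashMap documentation above says the hash algorithm can be swapped via `default`, `with_hasher`, and `with_capacity_and_hasher`; here is a minimal sketch using only std types, where `BuildHasherDefault<DefaultHasher>` merely stands in for a hasher picked from crates.io:

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::BuildHasherDefault;

// `BuildHasherDefault<DefaultHasher>` stands in for any `BuildHasher`,
// e.g. one provided by a third-party hashing crate.
type DemoMap<K, V> = HashMap<K, V, BuildHasherDefault<DefaultHasher>>;

fn main() {
    // Via `Default`:
    let mut map: DemoMap<&str, u32> = DemoMap::default();
    map.insert("answer", 42);
    assert_eq!(map.get("answer"), Some(&42));

    // Via `with_capacity_and_hasher`:
    let mut sized: DemoMap<u8, &str> =
        HashMap::with_capacity_and_hasher(16, BuildHasherDefault::default());
    sized.insert(1, "one");
    assert_eq!(sized[&1], "one");
}
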
/// +/// [hashing algorithms available on crates.io]: https://crates.io/keywords/hasher /// [SwissTable]: https://abseil.io/blog/20180927-swisstables /// [here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h /// [CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4 @@ -154,7 +155,6 @@ use crate::sys; /// [`default`]: Default::default /// [`with_hasher`]: Self::with_hasher /// [`with_capacity_and_hasher`]: Self::with_capacity_and_hasher -/// [`fnv`]: https://crates.io/crates/fnv /// /// ``` /// use std::collections::HashMap; diff --git a/library/std/src/error.rs b/library/std/src/error.rs index 5771ca758af..0044e59d697 100644 --- a/library/std/src/error.rs +++ b/library/std/src/error.rs @@ -19,7 +19,7 @@ mod tests; use core::array; use core::convert::Infallible; -use crate::alloc::{AllocError, LayoutErr}; +use crate::alloc::{AllocError, LayoutError}; use crate::any::TypeId; use crate::backtrace::Backtrace; use crate::borrow::Cow; @@ -390,7 +390,7 @@ impl Error for ! {} impl Error for AllocError {} #[stable(feature = "alloc_layout", since = "1.28.0")] -impl Error for LayoutErr {} +impl Error for LayoutError {} #[stable(feature = "rust1", since = "1.0.0")] impl Error for str::ParseBoolError { diff --git a/library/std/src/f32.rs b/library/std/src/f32.rs index 2a54b117ff4..09a9b184e3a 100644 --- a/library/std/src/f32.rs +++ b/library/std/src/f32.rs @@ -893,14 +893,13 @@ impl f32 { /// # Examples /// /// ``` - /// #![feature(clamp)] /// assert!((-3.0f32).clamp(-2.0, 1.0) == -2.0); /// assert!((0.0f32).clamp(-2.0, 1.0) == 0.0); /// assert!((2.0f32).clamp(-2.0, 1.0) == 1.0); /// assert!((f32::NAN).clamp(-2.0, 1.0).is_nan()); /// ``` #[must_use = "method returns a new number and does not mutate the original value"] - #[unstable(feature = "clamp", issue = "44095")] + #[stable(feature = "clamp", since = "1.50.0")] #[inline] pub fn clamp(self, min: f32, max: f32) -> f32 { assert!(min <= max); diff --git a/library/std/src/f64.rs b/library/std/src/f64.rs index 363d1a00476..64bb7cd9fd1 100644 --- a/library/std/src/f64.rs +++ b/library/std/src/f64.rs @@ -895,14 +895,13 @@ impl f64 { /// # Examples /// /// ``` - /// #![feature(clamp)] /// assert!((-3.0f64).clamp(-2.0, 1.0) == -2.0); /// assert!((0.0f64).clamp(-2.0, 1.0) == 0.0); /// assert!((2.0f64).clamp(-2.0, 1.0) == 1.0); /// assert!((f64::NAN).clamp(-2.0, 1.0).is_nan()); /// ``` #[must_use = "method returns a new number and does not mutate the original value"] - #[unstable(feature = "clamp", issue = "44095")] + #[stable(feature = "clamp", since = "1.50.0")] #[inline] pub fn clamp(self, min: f64, max: f64) -> f64 { assert!(min <= max); diff --git a/library/std/src/ffi/c_str.rs b/library/std/src/ffi/c_str.rs index 8c6d6c80402..60b642a6dba 100644 --- a/library/std/src/ffi/c_str.rs +++ b/library/std/src/ffi/c_str.rs @@ -1266,7 +1266,7 @@ impl CStr { /// behavior when `ptr` is used inside the `unsafe` block: /// /// ```no_run - /// # #![allow(unused_must_use)] #![cfg_attr(not(bootstrap), allow(temporary_cstring_as_ptr))] + /// # #![allow(unused_must_use)] #![allow(temporary_cstring_as_ptr)] /// use std::ffi::CString; /// /// let ptr = CString::new("Hello").expect("CString::new failed").as_ptr(); diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs index c256f556b3c..4cff6cb6f10 100644 --- a/library/std/src/fs.rs +++ b/library/std/src/fs.rs @@ -1656,7 +1656,7 @@ pub fn rename, Q: AsRef>(from: P, to: Q) -> io::Result<()> /// the length of the `to` file as reported by `metadata`. 
/// /// If you’re wanting to copy the contents of one file to another and you’re -/// working with [`File`]s, see the [`io::copy`] function. +/// working with [`File`]s, see the [`io::copy()`] function. /// /// # Platform-specific behavior /// @@ -1698,12 +1698,14 @@ pub fn copy, Q: AsRef>(from: P, to: Q) -> io::Result { /// Creates a new hard link on the filesystem. /// -/// The `dst` path will be a link pointing to the `src` path. Note that systems -/// often require these two paths to both be located on the same filesystem. +/// The `link` path will be a link pointing to the `original` path. Note that +/// systems often require these two paths to both be located on the same +/// filesystem. /// -/// If `src` names a symbolic link, it is platform-specific whether the symbolic -/// link is followed. On platforms where it's possible to not follow it, it is -/// not followed, and the created hard link points to the symbolic link itself. +/// If `original` names a symbolic link, it is platform-specific whether the +/// symbolic link is followed. On platforms where it's possible to not follow +/// it, it is not followed, and the created hard link points to the symbolic +/// link itself. /// /// # Platform-specific behavior /// @@ -1718,7 +1720,7 @@ pub fn copy, Q: AsRef>(from: P, to: Q) -> io::Result { /// This function will return an error in the following situations, but is not /// limited to just these cases: /// -/// * The `src` path is not a file or doesn't exist. +/// * The `original` path is not a file or doesn't exist. /// /// # Examples /// @@ -1731,13 +1733,13 @@ pub fn copy, Q: AsRef>(from: P, to: Q) -> io::Result { /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] -pub fn hard_link, Q: AsRef>(src: P, dst: Q) -> io::Result<()> { - fs_imp::link(src.as_ref(), dst.as_ref()) +pub fn hard_link, Q: AsRef>(original: P, link: Q) -> io::Result<()> { + fs_imp::link(original.as_ref(), link.as_ref()) } /// Creates a new symbolic link on the filesystem. /// -/// The `dst` path will be a symbolic link pointing to the `src` path. +/// The `link` path will be a symbolic link pointing to the `original` path. /// On Windows, this will be a file symlink, not a directory symlink; /// for this reason, the platform-specific [`std::os::unix::fs::symlink`] /// and [`std::os::windows::fs::symlink_file`] or [`symlink_dir`] should be @@ -1763,8 +1765,8 @@ pub fn hard_link, Q: AsRef>(src: P, dst: Q) -> io::Result<( reason = "replaced with std::os::unix::fs::symlink and \ std::os::windows::fs::{symlink_file, symlink_dir}" )] -pub fn soft_link, Q: AsRef>(src: P, dst: Q) -> io::Result<()> { - fs_imp::symlink(src.as_ref(), dst.as_ref()) +pub fn soft_link, Q: AsRef>(original: P, link: Q) -> io::Result<()> { + fs_imp::symlink(original.as_ref(), link.as_ref()) } /// Reads a symbolic link, returning the file that the link points to. diff --git a/library/std/src/fs/tests.rs b/library/std/src/fs/tests.rs index 0642dca8e48..5c969741592 100644 --- a/library/std/src/fs/tests.rs +++ b/library/std/src/fs/tests.rs @@ -1341,6 +1341,9 @@ fn metadata_access_times() { #[test] fn symlink_hard_link() { let tmpdir = tmpdir(); + if !got_symlink_permission(&tmpdir) { + return; + }; // Create "file", a file. 
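A small usage sketch of `fs::hard_link` with the renamed `original`/`link` parameters (hypothetical paths under the system temp directory):

use std::fs;
use std::io;

fn main() -> io::Result<()> {
    // Hypothetical scratch location; `original` and `link` follow the
    // parameter names introduced above.
    let dir = std::env::temp_dir().join("hard_link_demo");
    let _ = fs::remove_dir_all(&dir);
    fs::create_dir_all(&dir)?;

    let original = dir.join("original.txt");
    let link = dir.join("link.txt");
    fs::write(&original, "hello")?;

    // `link` becomes another name for the same underlying file as `original`;
    // both paths normally have to live on the same filesystem.
    fs::hard_link(&original, &link)?;
    assert_eq!(fs::read_to_string(&link)?, "hello");

    fs::remove_dir_all(&dir)
}
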
check!(fs::File::create(tmpdir.join("file"))); diff --git a/library/std/src/io/copy.rs b/library/std/src/io/copy.rs new file mode 100644 index 00000000000..b88bca2f2b4 --- /dev/null +++ b/library/std/src/io/copy.rs @@ -0,0 +1,88 @@ +use crate::io::{self, ErrorKind, Read, Write}; +use crate::mem::MaybeUninit; + +/// Copies the entire contents of a reader into a writer. +/// +/// This function will continuously read data from `reader` and then +/// write it into `writer` in a streaming fashion until `reader` +/// returns EOF. +/// +/// On success, the total number of bytes that were copied from +/// `reader` to `writer` is returned. +/// +/// If you’re wanting to copy the contents of one file to another and you’re +/// working with filesystem paths, see the [`fs::copy`] function. +/// +/// [`fs::copy`]: crate::fs::copy +/// +/// # Errors +/// +/// This function will return an error immediately if any call to [`read`] or +/// [`write`] returns an error. All instances of [`ErrorKind::Interrupted`] are +/// handled by this function and the underlying operation is retried. +/// +/// [`read`]: Read::read +/// [`write`]: Write::write +/// +/// # Examples +/// +/// ``` +/// use std::io; +/// +/// fn main() -> io::Result<()> { +/// let mut reader: &[u8] = b"hello"; +/// let mut writer: Vec = vec![]; +/// +/// io::copy(&mut reader, &mut writer)?; +/// +/// assert_eq!(&b"hello"[..], &writer[..]); +/// Ok(()) +/// } +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +pub fn copy(reader: &mut R, writer: &mut W) -> io::Result +where + R: Read, + W: Write, +{ + cfg_if::cfg_if! { + if #[cfg(any(target_os = "linux", target_os = "android"))] { + crate::sys::kernel_copy::copy_spec(reader, writer) + } else { + generic_copy(reader, writer) + } + } +} + +/// The general read-write-loop implementation of +/// `io::copy` that is used when specializations are not available or not applicable. +pub(crate) fn generic_copy(reader: &mut R, writer: &mut W) -> io::Result +where + R: Read, + W: Write, +{ + let mut buf = MaybeUninit::<[u8; super::DEFAULT_BUF_SIZE]>::uninit(); + // FIXME: #42788 + // + // - This creates a (mut) reference to a slice of + // _uninitialized_ integers, which is **undefined behavior** + // + // - Only the standard library gets to soundly "ignore" this, + // based on its privileged knowledge of unstable rustc + // internals; + unsafe { + reader.initializer().initialize(buf.assume_init_mut()); + } + + let mut written = 0; + loop { + let len = match reader.read(unsafe { buf.assume_init_mut() }) { + Ok(0) => return Ok(written), + Ok(len) => len, + Err(ref e) if e.kind() == ErrorKind::Interrupted => continue, + Err(e) => return Err(e), + }; + writer.write_all(unsafe { &buf.assume_init_ref()[..len] })?; + written += len as u64; + } +} diff --git a/library/std/src/io/impls.rs b/library/std/src/io/impls.rs index 66426101d27..6b3c86cb0df 100644 --- a/library/std/src/io/impls.rs +++ b/library/std/src/io/impls.rs @@ -209,20 +209,6 @@ impl BufRead for Box { } } -// Used by panicking::default_hook -#[cfg(test)] -/// This impl is only used by printing logic, so any error returned is always -/// of kind `Other`, and should be ignored. 
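The user-facing call remains plain `io::copy`; only the platform dispatch in the new module above changes. A short sketch with hypothetical temp-file paths:

use std::fs::File;
use std::io;

fn main() -> io::Result<()> {
    let dir = std::env::temp_dir();
    let src_path = dir.join("io_copy_src.txt");
    let dst_path = dir.join("io_copy_dst.txt");
    std::fs::write(&src_path, b"hello from io::copy")?;

    let mut reader = File::open(&src_path)?;
    let mut writer = File::create(&dst_path)?;

    // On Linux/Android this is routed through the kernel_copy specializations;
    // elsewhere it runs the buffered read/write loop in `generic_copy`.
    let copied = io::copy(&mut reader, &mut writer)?;
    assert_eq!(copied, "hello from io::copy".len() as u64);
    Ok(())
}
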
-impl Write for dyn ::realstd::io::LocalOutput { - fn write(&mut self, buf: &[u8]) -> io::Result { - (*self).write(buf).map_err(|_| ErrorKind::Other.into()) - } - - fn flush(&mut self) -> io::Result<()> { - (*self).flush().map_err(|_| ErrorKind::Other.into()) - } -} - // ============================================================================= // In-memory buffer implementations diff --git a/library/std/src/io/mod.rs b/library/std/src/io/mod.rs index e6efe6ec57e..dfbf6c3f244 100644 --- a/library/std/src/io/mod.rs +++ b/library/std/src/io/mod.rs @@ -266,24 +266,25 @@ pub use self::buffered::IntoInnerError; #[stable(feature = "rust1", since = "1.0.0")] pub use self::buffered::{BufReader, BufWriter, LineWriter}; #[stable(feature = "rust1", since = "1.0.0")] +pub use self::copy::copy; +#[stable(feature = "rust1", since = "1.0.0")] pub use self::cursor::Cursor; #[stable(feature = "rust1", since = "1.0.0")] pub use self::error::{Error, ErrorKind, Result}; +#[unstable(feature = "internal_output_capture", issue = "none")] +#[doc(no_inline, hidden)] +pub use self::stdio::set_output_capture; #[stable(feature = "rust1", since = "1.0.0")] pub use self::stdio::{stderr, stdin, stdout, Stderr, Stdin, Stdout}; #[stable(feature = "rust1", since = "1.0.0")] pub use self::stdio::{StderrLock, StdinLock, StdoutLock}; #[unstable(feature = "print_internals", issue = "none")] pub use self::stdio::{_eprint, _print}; -#[unstable(feature = "libstd_io_internals", issue = "42788")] -#[doc(no_inline, hidden)] -pub use self::stdio::{set_panic, set_print, LocalOutput}; #[stable(feature = "rust1", since = "1.0.0")] -pub use self::util::{copy, empty, repeat, sink, Empty, Repeat, Sink}; - -pub(crate) use self::stdio::clone_io; +pub use self::util::{empty, repeat, sink, Empty, Repeat, Sink}; mod buffered; +pub(crate) mod copy; mod cursor; mod error; mod impls; @@ -1306,10 +1307,10 @@ pub trait Write { default_write_vectored(|b| self.write(b), bufs) } - /// Determines if this `Write`er has an efficient [`write_vectored`] + /// Determines if this `Write`r has an efficient [`write_vectored`] /// implementation. /// - /// If a `Write`er does not override the default [`write_vectored`] + /// If a `Write`r does not override the default [`write_vectored`] /// implementation, code using it may want to avoid the method all together /// and coalesce writes into a single buffer for higher performance. /// diff --git a/library/std/src/io/stdio.rs b/library/std/src/io/stdio.rs index 2eb5fb45286..6ea7704d422 100644 --- a/library/std/src/io/stdio.rs +++ b/library/std/src/io/stdio.rs @@ -5,44 +5,38 @@ mod tests; use crate::io::prelude::*; -use crate::cell::RefCell; +use crate::cell::{Cell, RefCell}; use crate::fmt; use crate::io::{self, BufReader, Initializer, IoSlice, IoSliceMut, LineWriter}; use crate::lazy::SyncOnceCell; use crate::sync::atomic::{AtomicBool, Ordering}; -use crate::sync::{Mutex, MutexGuard}; +use crate::sync::{Arc, Mutex, MutexGuard}; use crate::sys::stdio; use crate::sys_common; use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard}; -use crate::thread::LocalKey; -thread_local! { - /// Used by the test crate to capture the output of the print! and println! macros. - static LOCAL_STDOUT: RefCell>> = { - RefCell::new(None) - } -} +type LocalStream = Arc>>; thread_local! { - /// Used by the test crate to capture the output of the eprint! and eprintln! macros, and panics. 
- static LOCAL_STDERR: RefCell>> = { - RefCell::new(None) + /// Used by the test crate to capture the output of the print macros and panics. + static OUTPUT_CAPTURE: Cell> = { + Cell::new(None) } } -/// Flag to indicate LOCAL_STDOUT and/or LOCAL_STDERR is used. +/// Flag to indicate OUTPUT_CAPTURE is used. /// -/// If both are None and were never set on any thread, this flag is set to -/// false, and both LOCAL_STDOUT and LOCAL_STDOUT can be safely ignored on all -/// threads, saving some time and memory registering an unused thread local. +/// If it is None and was never set on any thread, this flag is set to false, +/// and OUTPUT_CAPTURE can be safely ignored on all threads, saving some time +/// and memory registering an unused thread local. /// -/// Note about memory ordering: This contains information about whether two -/// thread local variables might be in use. Although this is a global flag, the +/// Note about memory ordering: This contains information about whether a +/// thread local variable might be in use. Although this is a global flag, the /// memory ordering between threads does not matter: we only want this flag to -/// have a consistent order between set_print/set_panic and print_to *within +/// have a consistent order between set_output_capture and print_to *within /// the same thread*. Within the same thread, things always have a perfectly /// consistent order. So Ordering::Relaxed is fine. -static LOCAL_STREAMS: AtomicBool = AtomicBool::new(false); +static OUTPUT_CAPTURE_USED: AtomicBool = AtomicBool::new(false); /// A handle to a raw instance of the standard input stream of this process. /// @@ -409,6 +403,14 @@ impl Read for Stdin { } } +// only used by platform-dependent io::copy specializations, i.e. unused on some platforms +#[cfg(any(target_os = "linux", target_os = "android"))] +impl StdinLock<'_> { + pub(crate) fn as_mut_buf(&mut self) -> &mut BufReader { + &mut self.inner + } +} + #[stable(feature = "rust1", since = "1.0.0")] impl Read for StdinLock<'_> { fn read(&mut self, buf: &mut [u8]) -> io::Result { @@ -888,97 +890,24 @@ impl fmt::Debug for StderrLock<'_> { } } -/// A writer than can be cloned to new threads. +/// Sets the thread-local output capture buffer and returns the old one. #[unstable( - feature = "set_stdio", - reason = "this trait may disappear completely or be replaced \ - with a more general mechanism", + feature = "internal_output_capture", + reason = "this function is meant for use in the test crate \ + and may disappear in the future", issue = "none" )] #[doc(hidden)] -pub trait LocalOutput: Write + Send { - fn clone_box(&self) -> Box; -} - -/// Resets the thread-local stderr handle to the specified writer -/// -/// This will replace the current thread's stderr handle, returning the old -/// handle. All future calls to `panic!` and friends will emit their output to -/// this specified handle. -/// -/// Note that this does not need to be called for all new threads; the default -/// output handle is to the process's stderr stream. -#[unstable( - feature = "set_stdio", - reason = "this function may disappear completely or be replaced \ - with a more general mechanism", - issue = "none" -)] -#[doc(hidden)] -pub fn set_panic(sink: Option>) -> Option> { - use crate::mem; - if sink.is_none() && !LOCAL_STREAMS.load(Ordering::Relaxed) { - // LOCAL_STDERR is definitely None since LOCAL_STREAMS is false. 
+pub fn set_output_capture(sink: Option) -> Option { + if sink.is_none() && !OUTPUT_CAPTURE_USED.load(Ordering::Relaxed) { + // OUTPUT_CAPTURE is definitely None since OUTPUT_CAPTURE_USED is false. return None; } - let s = LOCAL_STDERR.with(move |slot| mem::replace(&mut *slot.borrow_mut(), sink)).and_then( - |mut s| { - let _ = s.flush(); - Some(s) - }, - ); - LOCAL_STREAMS.store(true, Ordering::Relaxed); - s + OUTPUT_CAPTURE_USED.store(true, Ordering::Relaxed); + OUTPUT_CAPTURE.with(move |slot| slot.replace(sink)) } -/// Resets the thread-local stdout handle to the specified writer -/// -/// This will replace the current thread's stdout handle, returning the old -/// handle. All future calls to `print!` and friends will emit their output to -/// this specified handle. -/// -/// Note that this does not need to be called for all new threads; the default -/// output handle is to the process's stdout stream. -#[unstable( - feature = "set_stdio", - reason = "this function may disappear completely or be replaced \ - with a more general mechanism", - issue = "none" -)] -#[doc(hidden)] -pub fn set_print(sink: Option>) -> Option> { - use crate::mem; - if sink.is_none() && !LOCAL_STREAMS.load(Ordering::Relaxed) { - // LOCAL_STDOUT is definitely None since LOCAL_STREAMS is false. - return None; - } - let s = LOCAL_STDOUT.with(move |slot| mem::replace(&mut *slot.borrow_mut(), sink)).and_then( - |mut s| { - let _ = s.flush(); - Some(s) - }, - ); - LOCAL_STREAMS.store(true, Ordering::Relaxed); - s -} - -pub(crate) fn clone_io() -> (Option>, Option>) { - // Don't waste time when LOCAL_{STDOUT,STDERR} are definitely None. - if !LOCAL_STREAMS.load(Ordering::Relaxed) { - return (None, None); - } - - LOCAL_STDOUT.with(|stdout| { - LOCAL_STDERR.with(|stderr| { - ( - stdout.borrow().as_ref().map(|o| o.clone_box()), - stderr.borrow().as_ref().map(|o| o.clone_box()), - ) - }) - }) -} - -/// Write `args` to output stream `local_s` if possible, `global_s` +/// Write `args` to the capture buffer if enabled and possible, or `global_s` /// otherwise. `label` identifies the stream in a panic message. /// /// This function is used to print error messages, so it takes extra @@ -988,36 +917,26 @@ pub(crate) fn clone_io() -> (Option>, Option( - args: fmt::Arguments<'_>, - local_s: &'static LocalKey>>>, - global_s: fn() -> T, - label: &str, -) where +fn print_to(args: fmt::Arguments<'_>, global_s: fn() -> T, label: &str) +where T: Write, { - let result = LOCAL_STREAMS - .load(Ordering::Relaxed) - .then(|| { - local_s - .try_with(|s| { - // Note that we completely remove a local sink to write to in case - // our printing recursively panics/prints, so the recursive - // panic/print goes to the global sink instead of our local sink. - let prev = s.borrow_mut().take(); - if let Some(mut w) = prev { - let result = w.write_fmt(args); - *s.borrow_mut() = Some(w); - return result; - } - global_s().write_fmt(args) - }) - .ok() - }) - .flatten() - .unwrap_or_else(|| global_s().write_fmt(args)); - - if let Err(e) = result { + if OUTPUT_CAPTURE_USED.load(Ordering::Relaxed) + && OUTPUT_CAPTURE.try_with(|s| { + // Note that we completely remove a local sink to write to in case + // our printing recursively panics/prints, so the recursive + // panic/print goes to the global sink instead of our local sink. + s.take().map(|w| { + let _ = w.lock().unwrap_or_else(|e| e.into_inner()).write_fmt(args); + s.set(Some(w)); + }) + }) == Ok(Some(())) + { + // Succesfully wrote to capture buffer. 
+ return; + } + + if let Err(e) = global_s().write_fmt(args) { panic!("failed printing to {}: {}", label, e); } } @@ -1030,7 +949,7 @@ fn print_to( #[doc(hidden)] #[cfg(not(test))] pub fn _print(args: fmt::Arguments<'_>) { - print_to(args, &LOCAL_STDOUT, stdout, "stdout"); + print_to(args, stdout, "stdout"); } #[unstable( @@ -1041,7 +960,7 @@ pub fn _print(args: fmt::Arguments<'_>) { #[doc(hidden)] #[cfg(not(test))] pub fn _eprint(args: fmt::Arguments<'_>) { - print_to(args, &LOCAL_STDERR, stderr, "stderr"); + print_to(args, stderr, "stderr"); } #[cfg(test)] diff --git a/library/std/src/io/tests.rs b/library/std/src/io/tests.rs index 913b28538b7..f176c2f088c 100644 --- a/library/std/src/io/tests.rs +++ b/library/std/src/io/tests.rs @@ -1,7 +1,7 @@ use super::{repeat, Cursor, SeekFrom}; use crate::cmp::{self, min}; -use crate::io::prelude::*; use crate::io::{self, IoSlice, IoSliceMut}; +use crate::io::{BufRead, Read, Seek, Write}; use crate::ops::Deref; #[test] diff --git a/library/std/src/io/util.rs b/library/std/src/io/util.rs index 2b1f371129e..db845457c96 100644 --- a/library/std/src/io/util.rs +++ b/library/std/src/io/util.rs @@ -4,78 +4,7 @@ mod tests; use crate::fmt; -use crate::io::{self, BufRead, ErrorKind, Initializer, IoSlice, IoSliceMut, Read, Write}; -use crate::mem::MaybeUninit; - -/// Copies the entire contents of a reader into a writer. -/// -/// This function will continuously read data from `reader` and then -/// write it into `writer` in a streaming fashion until `reader` -/// returns EOF. -/// -/// On success, the total number of bytes that were copied from -/// `reader` to `writer` is returned. -/// -/// If you’re wanting to copy the contents of one file to another and you’re -/// working with filesystem paths, see the [`fs::copy`] function. -/// -/// [`fs::copy`]: crate::fs::copy -/// -/// # Errors -/// -/// This function will return an error immediately if any call to [`read`] or -/// [`write`] returns an error. All instances of [`ErrorKind::Interrupted`] are -/// handled by this function and the underlying operation is retried. -/// -/// [`read`]: Read::read -/// [`write`]: Write::write -/// -/// # Examples -/// -/// ``` -/// use std::io; -/// -/// fn main() -> io::Result<()> { -/// let mut reader: &[u8] = b"hello"; -/// let mut writer: Vec = vec![]; -/// -/// io::copy(&mut reader, &mut writer)?; -/// -/// assert_eq!(&b"hello"[..], &writer[..]); -/// Ok(()) -/// } -/// ``` -#[stable(feature = "rust1", since = "1.0.0")] -pub fn copy(reader: &mut R, writer: &mut W) -> io::Result -where - R: Read, - W: Write, -{ - let mut buf = MaybeUninit::<[u8; super::DEFAULT_BUF_SIZE]>::uninit(); - // FIXME: #42788 - // - // - This creates a (mut) reference to a slice of - // _uninitialized_ integers, which is **undefined behavior** - // - // - Only the standard library gets to soundly "ignore" this, - // based on its privileged knowledge of unstable rustc - // internals; - unsafe { - reader.initializer().initialize(buf.assume_init_mut()); - } - - let mut written = 0; - loop { - let len = match reader.read(unsafe { buf.assume_init_mut() }) { - Ok(0) => return Ok(written), - Ok(len) => len, - Err(ref e) if e.kind() == ErrorKind::Interrupted => continue, - Err(e) => return Err(e), - }; - writer.write_all(unsafe { &buf.assume_init_ref()[..len] })?; - written += len as u64; - } -} +use crate::io::{self, BufRead, Initializer, IoSlice, IoSliceMut, Read, Write}; /// A reader which is always at EOF. 
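A minimal sketch of the new capture hook from the caller's side (nightly-only: `set_output_capture` is exported behind the `internal_output_capture` feature for the test crate, so this is purely illustrative of the `Arc<Mutex<Vec<u8>>>` sink introduced above):

#![feature(internal_output_capture)]

use std::io::set_output_capture;
use std::sync::{Arc, Mutex};

fn main() {
    let sink = Arc::new(Mutex::new(Vec::<u8>::new()));

    // Route this thread's print!/println! (and panic) output into `sink`.
    set_output_capture(Some(sink.clone()));
    println!("captured, not printed");

    // Restore the default behaviour and inspect what was captured.
    set_output_capture(None);
    let captured = String::from_utf8(sink.lock().unwrap().clone()).unwrap();
    assert_eq!(captured, "captured, not printed\n");
}
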
/// diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs index bc218b77c87..3463b8cdf1c 100644 --- a/library/std/src/lib.rs +++ b/library/std/src/lib.rs @@ -206,8 +206,8 @@ #![needs_panic_runtime] // std may use features in a platform-specific way #![allow(unused_features)] -#![cfg_attr(not(bootstrap), feature(rustc_allow_const_fn_unstable))] -#![cfg_attr(test, feature(print_internals, set_stdio, update_panic_count))] +#![feature(rustc_allow_const_fn_unstable)] +#![cfg_attr(test, feature(internal_output_capture, print_internals, update_panic_count))] #![cfg_attr( all(target_vendor = "fortanix", target_env = "sgx"), feature(slice_index_methods, coerce_unsized, sgx_platform) @@ -227,7 +227,6 @@ #![feature(asm)] #![feature(associated_type_bounds)] #![feature(atomic_mut_ptr)] -#![feature(bool_to_option)] #![feature(box_syntax)] #![feature(c_variadic)] #![feature(cfg_accessible)] @@ -235,7 +234,6 @@ #![feature(cfg_target_thread_local)] #![feature(char_error_internals)] #![feature(char_internals)] -#![feature(clamp)] #![feature(concat_idents)] #![feature(const_cstr_unchecked)] #![feature(const_fn_floating_point_arithmetic)] @@ -257,6 +255,7 @@ #![feature(doc_spotlight)] #![feature(dropck_eyepatch)] #![feature(duration_constants)] +#![feature(duration_zero)] #![feature(exact_size_is_empty)] #![feature(exhaustive_patterns)] #![feature(extend_one)] @@ -316,12 +315,12 @@ #![feature(toowned_clone_into)] #![feature(total_cmp)] #![feature(trace_macros)] +#![feature(try_blocks)] #![feature(try_reserve)] #![feature(unboxed_closures)] #![feature(unsafe_block_in_unsafe_fn)] #![feature(unsafe_cell_get_mut)] #![feature(unsafe_cell_raw_get)] -#![cfg_attr(bootstrap, feature(untagged_unions))] #![feature(unwind_attributes)] #![feature(vec_into_raw_parts)] #![feature(wake_trait)] @@ -563,5 +562,5 @@ include!("keyword_docs.rs"); // This is required to avoid an unstable error when `restricted-std` is not // enabled. The use of #![feature(restricted_std)] in rustc-std-workspace-std // is unconditional, so the unstable feature needs to be defined somewhere. -#[cfg_attr(not(feature = "restricted-std"), unstable(feature = "restricted_std", issue = "none"))] +#[unstable(feature = "restricted_std", issue = "none")] mod __restricted_std_workaround {} diff --git a/library/std/src/macros.rs b/library/std/src/macros.rs index 57649d6f8f2..de072e83dfc 100644 --- a/library/std/src/macros.rs +++ b/library/std/src/macros.rs @@ -8,6 +8,7 @@ #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] #[allow_internal_unstable(libstd_sys_internals)] +#[cfg_attr(not(any(bootstrap, test)), rustc_diagnostic_item = "std_panic_macro")] macro_rules! panic { () => ({ $crate::panic!("explicit panic") }); ($msg:expr $(,)?) 
=> ({ $crate::rt::begin_panic($msg) }); diff --git a/library/std/src/net/ip.rs b/library/std/src/net/ip.rs index bb3ece4c273..87bbd33bc01 100644 --- a/library/std/src/net/ip.rs +++ b/library/std/src/net/ip.rs @@ -263,8 +263,9 @@ impl IpAddr { /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv4(), true); /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv4(), false); /// ``` + #[rustc_const_stable(feature = "const_ip", since = "1.50.0")] #[stable(feature = "ipaddr_checker", since = "1.16.0")] - pub fn is_ipv4(&self) -> bool { + pub const fn is_ipv4(&self) -> bool { matches!(self, IpAddr::V4(_)) } @@ -281,8 +282,9 @@ impl IpAddr { /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv6(), false); /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv6(), true); /// ``` + #[rustc_const_stable(feature = "const_ip", since = "1.50.0")] #[stable(feature = "ipaddr_checker", since = "1.16.0")] - pub fn is_ipv6(&self) -> bool { + pub const fn is_ipv6(&self) -> bool { matches!(self, IpAddr::V6(_)) } } @@ -1043,8 +1045,7 @@ impl Ipv6Addr { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[rustc_const_stable(feature = "const_ipv6", since = "1.32.0")] - #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_transmute))] - #[cfg_attr(bootstrap, allow_internal_unstable(const_fn_transmute))] + #[rustc_allow_const_fn_unstable(const_fn_transmute)] pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> Ipv6Addr { let addr16 = [ a.to_be(), diff --git a/library/std/src/num/tests.rs b/library/std/src/num/tests.rs index 2f50b73f490..df0df3f23f7 100644 --- a/library/std/src/num/tests.rs +++ b/library/std/src/num/tests.rs @@ -75,8 +75,8 @@ fn test_checked_mul() { macro_rules! test_is_power_of_two { ($test_name:ident, $T:ident) => { + #[test] fn $test_name() { - #![test] assert_eq!((0 as $T).is_power_of_two(), false); assert_eq!((1 as $T).is_power_of_two(), true); assert_eq!((2 as $T).is_power_of_two(), true); @@ -96,8 +96,8 @@ test_is_power_of_two! { test_is_power_of_two_uint, usize } macro_rules! test_next_power_of_two { ($test_name:ident, $T:ident) => { + #[test] fn $test_name() { - #![test] assert_eq!((0 as $T).next_power_of_two(), 1); let mut next_power = 1; for i in 1 as $T..40 { @@ -118,8 +118,8 @@ test_next_power_of_two! { test_next_power_of_two_uint, usize } macro_rules! test_checked_next_power_of_two { ($test_name:ident, $T:ident) => { + #[test] fn $test_name() { - #![test] assert_eq!((0 as $T).checked_next_power_of_two(), Some(1)); let smax = $T::MAX >> 1; assert_eq!(smax.checked_next_power_of_two(), Some(smax + 1)); diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs index fbbc61f4e60..8ba3feccb6b 100644 --- a/library/std/src/panicking.rs +++ b/library/std/src/panicking.rs @@ -24,11 +24,11 @@ use crate::sys_common::{thread_info, util}; use crate::thread; #[cfg(not(test))] -use crate::io::set_panic; +use crate::io::set_output_capture; // make sure to use the stderr output configured // by libtest in the real copy of std #[cfg(test)] -use realstd::io::set_panic; +use realstd::io::set_output_capture; // Binary interface to the panic runtime that the standard library depends on. // @@ -218,11 +218,9 @@ fn default_hook(info: &PanicInfo<'_>) { } }; - if let Some(mut local) = set_panic(None) { - // NB. In `cfg(test)` this uses the forwarding impl - // for `dyn ::realstd::io::LocalOutput`. 
- write(&mut local); - set_panic(Some(local)); + if let Some(local) = set_output_capture(None) { + write(&mut *local.lock().unwrap_or_else(|e| e.into_inner())); + set_output_capture(Some(local)); } else if let Some(mut out) = panic_output() { write(&mut out); } diff --git a/library/std/src/process.rs b/library/std/src/process.rs index 2c7ed4614bc..2ffa7d2316a 100644 --- a/library/std/src/process.rs +++ b/library/std/src/process.rs @@ -1779,6 +1779,7 @@ pub fn exit(code: i32) -> ! { /// /// [panic hook]: crate::panic::set_hook #[stable(feature = "process_abort", since = "1.17.0")] +#[cold] pub fn abort() -> ! { crate::sys::abort_internal(); } diff --git a/library/std/src/sys/cloudabi/mod.rs b/library/std/src/sys/cloudabi/mod.rs index 13f1bc8826e..b4a9246ca4e 100644 --- a/library/std/src/sys/cloudabi/mod.rs +++ b/library/std/src/sys/cloudabi/mod.rs @@ -66,3 +66,8 @@ pub fn hashmap_random_keys() -> (u64, u64) { v.assume_init() } } + +#[cfg_attr(feature = "backtrace", link(name = "unwind"))] +#[link(name = "c")] +#[link(name = "compiler_rt")] +extern "C" {} diff --git a/library/std/src/sys/cloudabi/shims/fs.rs b/library/std/src/sys/cloudabi/shims/fs.rs index ecb5b51cccd..a11cde9aea3 100644 --- a/library/std/src/sys/cloudabi/shims/fs.rs +++ b/library/std/src/sys/cloudabi/shims/fs.rs @@ -283,7 +283,7 @@ pub fn readlink(_p: &Path) -> io::Result { unsupported() } -pub fn symlink(_src: &Path, _dst: &Path) -> io::Result<()> { +pub fn symlink(_original: &Path, _link: &Path) -> io::Result<()> { unsupported() } diff --git a/library/std/src/sys/hermit/fs.rs b/library/std/src/sys/hermit/fs.rs index 829d4c943f1..1807655e971 100644 --- a/library/std/src/sys/hermit/fs.rs +++ b/library/std/src/sys/hermit/fs.rs @@ -377,11 +377,11 @@ pub fn readlink(_p: &Path) -> io::Result { unsupported() } -pub fn symlink(_src: &Path, _dst: &Path) -> io::Result<()> { +pub fn symlink(_original: &Path, _link: &Path) -> io::Result<()> { unsupported() } -pub fn link(_src: &Path, _dst: &Path) -> io::Result<()> { +pub fn link(_original: &Path, _link: &Path) -> io::Result<()> { unsupported() } diff --git a/library/std/src/sys/sgx/abi/mem.rs b/library/std/src/sys/sgx/abi/mem.rs index ffa234fccfe..da899773dbb 100644 --- a/library/std/src/sys/sgx/abi/mem.rs +++ b/library/std/src/sys/sgx/abi/mem.rs @@ -12,6 +12,18 @@ pub(crate) unsafe fn rel_ptr_mut(offset: u64) -> *mut T { extern "C" { static ENCLAVE_SIZE: usize; + static HEAP_BASE: u64; + static HEAP_SIZE: usize; +} + +/// Returns the base memory address of the heap +pub(crate) fn heap_base() -> *const u8 { + unsafe { rel_ptr_mut(HEAP_BASE) } +} + +/// Returns the size of the heap +pub(crate) fn heap_size() -> usize { + unsafe { HEAP_SIZE } } // Do not remove inline: will result in relocation failure diff --git a/library/std/src/sys/sgx/alloc.rs b/library/std/src/sys/sgx/alloc.rs index 4559ea7cd25..4aea28cb83e 100644 --- a/library/std/src/sys/sgx/alloc.rs +++ b/library/std/src/sys/sgx/alloc.rs @@ -1,4 +1,7 @@ use crate::alloc::{GlobalAlloc, Layout, System}; +use crate::ptr; +use crate::sys::sgx::abi::mem as sgx_mem; +use core::sync::atomic::{AtomicBool, Ordering}; use super::waitqueue::SpinMutex; @@ -10,7 +13,48 @@ use super::waitqueue::SpinMutex; // dlmalloc.c from C to Rust. 
#[cfg_attr(test, linkage = "available_externally")] #[export_name = "_ZN16__rust_internals3std3sys3sgx5alloc8DLMALLOCE"] -static DLMALLOC: SpinMutex = SpinMutex::new(dlmalloc::DLMALLOC_INIT); +static DLMALLOC: SpinMutex> = + SpinMutex::new(dlmalloc::Dlmalloc::new_with_allocator(Sgx {})); + +struct Sgx; + +unsafe impl dlmalloc::Allocator for Sgx { + /// Allocs system resources + fn alloc(&self, _size: usize) -> (*mut u8, usize, u32) { + static INIT: AtomicBool = AtomicBool::new(false); + + // No ordering requirement since this function is protected by the global lock. + if !INIT.swap(true, Ordering::Relaxed) { + (sgx_mem::heap_base() as _, sgx_mem::heap_size(), 0) + } else { + (ptr::null_mut(), 0, 0) + } + } + + fn remap(&self, _ptr: *mut u8, _oldsize: usize, _newsize: usize, _can_move: bool) -> *mut u8 { + ptr::null_mut() + } + + fn free_part(&self, _ptr: *mut u8, _oldsize: usize, _newsize: usize) -> bool { + false + } + + fn free(&self, _ptr: *mut u8, _size: usize) -> bool { + return false; + } + + fn can_release_part(&self, _flags: u32) -> bool { + false + } + + fn allocates_zeros(&self) -> bool { + false + } + + fn page_size(&self) -> usize { + 0x1000 + } +} #[stable(feature = "alloc_system_type", since = "1.28.0")] unsafe impl GlobalAlloc for System { diff --git a/library/std/src/sys/unix/ext/fs.rs b/library/std/src/sys/unix/ext/fs.rs index 66bbc1c5854..ba75b9bac80 100644 --- a/library/std/src/sys/unix/ext/fs.rs +++ b/library/std/src/sys/unix/ext/fs.rs @@ -841,7 +841,7 @@ impl DirEntryExt for fs::DirEntry { /// Creates a new symbolic link on the filesystem. /// -/// The `dst` path will be a symbolic link pointing to the `src` path. +/// The `link` path will be a symbolic link pointing to the `original` path. /// /// # Examples /// @@ -854,8 +854,8 @@ impl DirEntryExt for fs::DirEntry { /// } /// ``` #[stable(feature = "symlink", since = "1.1.0")] -pub fn symlink, Q: AsRef>(src: P, dst: Q) -> io::Result<()> { - sys::fs::symlink(src.as_ref(), dst.as_ref()) +pub fn symlink, Q: AsRef>(original: P, link: Q) -> io::Result<()> { + sys::fs::symlink(original.as_ref(), link.as_ref()) } /// Unix-specific extensions to [`fs::DirBuilder`]. diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs index 96594095cc3..e2f0870ef0e 100644 --- a/library/std/src/sys/unix/fs.rs +++ b/library/std/src/sys/unix/fs.rs @@ -1071,28 +1071,28 @@ pub fn readlink(p: &Path) -> io::Result { } } -pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> { - let src = cstr(src)?; - let dst = cstr(dst)?; - cvt(unsafe { libc::symlink(src.as_ptr(), dst.as_ptr()) })?; +pub fn symlink(original: &Path, link: &Path) -> io::Result<()> { + let original = cstr(original)?; + let link = cstr(link)?; + cvt(unsafe { libc::symlink(original.as_ptr(), link.as_ptr()) })?; Ok(()) } -pub fn link(src: &Path, dst: &Path) -> io::Result<()> { - let src = cstr(src)?; - let dst = cstr(dst)?; +pub fn link(original: &Path, link: &Path) -> io::Result<()> { + let original = cstr(original)?; + let link = cstr(link)?; cfg_if::cfg_if! { if #[cfg(any(target_os = "vxworks", target_os = "redox", target_os = "android"))] { // VxWorks, Redox, and old versions of Android lack `linkat`, so use // `link` instead. POSIX leaves it implementation-defined whether // `link` follows symlinks, so rely on the `symlink_hard_link` test // in library/std/src/fs/tests.rs to check the behavior. 
- cvt(unsafe { libc::link(src.as_ptr(), dst.as_ptr()) })?; + cvt(unsafe { libc::link(original.as_ptr(), link.as_ptr()) })?; } else { // Use `linkat` with `AT_FDCWD` instead of `link` as `linkat` gives // us a flag to specify how symlinks should be handled. Pass 0 as // the flags argument, meaning don't follow symlinks. - cvt(unsafe { libc::linkat(libc::AT_FDCWD, src.as_ptr(), libc::AT_FDCWD, dst.as_ptr(), 0) })?; + cvt(unsafe { libc::linkat(libc::AT_FDCWD, original.as_ptr(), libc::AT_FDCWD, link.as_ptr(), 0) })?; } } Ok(()) @@ -1204,88 +1204,19 @@ pub fn copy(from: &Path, to: &Path) -> io::Result { #[cfg(any(target_os = "linux", target_os = "android"))] pub fn copy(from: &Path, to: &Path) -> io::Result { - use crate::cmp; - use crate::sync::atomic::{AtomicBool, Ordering}; - - // Kernel prior to 4.5 don't have copy_file_range - // We store the availability in a global to avoid unnecessary syscalls - static HAS_COPY_FILE_RANGE: AtomicBool = AtomicBool::new(true); - - unsafe fn copy_file_range( - fd_in: libc::c_int, - off_in: *mut libc::loff_t, - fd_out: libc::c_int, - off_out: *mut libc::loff_t, - len: libc::size_t, - flags: libc::c_uint, - ) -> libc::c_long { - libc::syscall(libc::SYS_copy_file_range, fd_in, off_in, fd_out, off_out, len, flags) - } - let (mut reader, reader_metadata) = open_from(from)?; let max_len = u64::MAX; let (mut writer, _) = open_to_and_set_permissions(to, reader_metadata)?; - let has_copy_file_range = HAS_COPY_FILE_RANGE.load(Ordering::Relaxed); - let mut written = 0u64; - while written < max_len { - let copy_result = if has_copy_file_range { - let bytes_to_copy = cmp::min(max_len - written, usize::MAX as u64) as usize; - let copy_result = unsafe { - // We actually don't have to adjust the offsets, - // because copy_file_range adjusts the file offset automatically - cvt(copy_file_range( - reader.as_raw_fd(), - ptr::null_mut(), - writer.as_raw_fd(), - ptr::null_mut(), - bytes_to_copy, - 0, - )) - }; - if let Err(ref copy_err) = copy_result { - match copy_err.raw_os_error() { - Some(libc::ENOSYS | libc::EPERM | libc::EOPNOTSUPP) => { - HAS_COPY_FILE_RANGE.store(false, Ordering::Relaxed); - } - _ => {} - } - } - copy_result - } else { - Err(io::Error::from_raw_os_error(libc::ENOSYS)) - }; - match copy_result { - Ok(0) if written == 0 => { - // fallback to work around several kernel bugs where copy_file_range will fail to - // copy any bytes and return 0 instead of an error if - // - reading virtual files from the proc filesystem which appear to have 0 size - // but are not empty. noted in coreutils to affect kernels at least up to 5.6.19. - // - copying from an overlay filesystem in docker. reported to occur on fedora 32. 
- return io::copy(&mut reader, &mut writer); - } - Ok(0) => return Ok(written), // reached EOF - Ok(ret) => written += ret as u64, - Err(err) => { - match err.raw_os_error() { - Some( - libc::ENOSYS | libc::EXDEV | libc::EINVAL | libc::EPERM | libc::EOPNOTSUPP, - ) => { - // Try fallback io::copy if either: - // - Kernel version is < 4.5 (ENOSYS) - // - Files are mounted on different fs (EXDEV) - // - copy_file_range is broken in various ways on RHEL/CentOS 7 (EOPNOTSUPP) - // - copy_file_range is disallowed, for example by seccomp (EPERM) - // - copy_file_range cannot be used with pipes or device nodes (EINVAL) - assert_eq!(written, 0); - return io::copy(&mut reader, &mut writer); - } - _ => return Err(err), - } - } - } + use super::kernel_copy::{copy_regular_files, CopyResult}; + + match copy_regular_files(reader.as_raw_fd(), writer.as_raw_fd(), max_len) { + CopyResult::Ended(result) => result, + CopyResult::Fallback(written) => match io::copy::generic_copy(&mut reader, &mut writer) { + Ok(bytes) => Ok(bytes + written), + Err(e) => Err(e), + }, } - Ok(written) } #[cfg(any(target_os = "macos", target_os = "ios"))] diff --git a/library/std/src/sys/unix/futex.rs b/library/std/src/sys/unix/futex.rs index e6f0c48c59b..42ddc1d514e 100644 --- a/library/std/src/sys/unix/futex.rs +++ b/library/std/src/sys/unix/futex.rs @@ -1,10 +1,17 @@ -#![cfg(any(target_os = "linux", target_os = "android"))] +#![cfg(any( + target_os = "linux", + target_os = "android", + all(target_os = "emscripten", target_feature = "atomics") +))] +#[cfg(any(target_os = "linux", target_os = "android"))] use crate::convert::TryInto; +#[cfg(any(target_os = "linux", target_os = "android"))] use crate::ptr::null; use crate::sync::atomic::AtomicI32; use crate::time::Duration; +#[cfg(any(target_os = "linux", target_os = "android"))] pub fn futex_wait(futex: &AtomicI32, expected: i32, timeout: Option) { let timespec = timeout.and_then(|d| { Some(libc::timespec { @@ -25,6 +32,28 @@ pub fn futex_wait(futex: &AtomicI32, expected: i32, timeout: Option) { } } +#[cfg(target_os = "emscripten")] +pub fn futex_wait(futex: &AtomicI32, expected: i32, timeout: Option) { + extern "C" { + fn emscripten_futex_wait( + addr: *const AtomicI32, + val: libc::c_uint, + max_wait_ms: libc::c_double, + ) -> libc::c_int; + } + + unsafe { + emscripten_futex_wait( + futex as *const AtomicI32, + // `val` is declared unsigned to match the Emscripten headers, but since it's used as + // an opaque value, we can ignore the meaning of signed vs. unsigned and cast here. + expected as libc::c_uint, + timeout.map_or(crate::f64::INFINITY, |d| d.as_secs_f64() * 1000.0), + ); + } +} + +#[cfg(any(target_os = "linux", target_os = "android"))] pub fn futex_wake(futex: &AtomicI32) { unsafe { libc::syscall( @@ -35,3 +64,14 @@ pub fn futex_wake(futex: &AtomicI32) { ); } } + +#[cfg(target_os = "emscripten")] +pub fn futex_wake(futex: &AtomicI32) { + extern "C" { + fn emscripten_futex_wake(addr: *const AtomicI32, count: libc::c_int) -> libc::c_int; + } + + unsafe { + emscripten_futex_wake(futex as *const AtomicI32, 1); + } +} diff --git a/library/std/src/sys/unix/kernel_copy.rs b/library/std/src/sys/unix/kernel_copy.rs new file mode 100644 index 00000000000..1dc16ef0993 --- /dev/null +++ b/library/std/src/sys/unix/kernel_copy.rs @@ -0,0 +1,603 @@ +//! This module contains specializations that can offload `io::copy()` operations on file descriptor +//! containing types (`File`, `TcpStream`, etc.) to more efficient syscalls than `read(2)` and `write(2)`. +//! +//! 
Specialization is only applied to wholly std-owned types so that user code can't observe +//! that the `Read` and `Write` traits are not used. +//! +//! Since a copy operation involves a reader and writer side where each can consist of different types +//! and also involve generic wrappers (e.g. `Take`, `BufReader`) it is not practical to specialize +//! a single method on all possible combinations. +//! +//! Instead readers and writers are handled separately by the `CopyRead` and `CopyWrite` specialization +//! traits and then specialized on by the `Copier::copy` method. +//! +//! `Copier` uses the specialization traits to unpack the underlying file descriptors and +//! additional prerequisites and constraints imposed by the wrapper types. +//! +//! Once it has obtained all necessary pieces and brought any wrapper types into a state where they +//! can be safely bypassed it will attempt to use the `copy_file_range(2)`, +//! `sendfile(2)` or `splice(2)` syscalls to move data directly between file descriptors. +//! Since those syscalls have requirements that cannot be fully checked in advance and +//! gathering additional information about file descriptors would require additional syscalls +//! anyway, it simply attempts to use them one after another (guided by inaccurate hints) to +//! figure out which one works and falls back to the generic read-write copy loop if none of them +//! does. +//! Once a working syscall is found for a pair of file descriptors it will be called in a loop +//! until the copy operation is completed. +//! +//! Advantages of using these syscalls: +//! +//! * fewer context switches since reads and writes are coalesced into a single syscall +//! and more bytes are transferred per syscall. This translates to higher throughput +//! and fewer CPU cycles, at least for sufficiently large transfers to amortize the initial probing. +//! * `copy_file_range` creates reflink copies on CoW filesystems, thus moving less data and +//! consuming less disk space +//! * `sendfile` and `splice` can perform zero-copy IO under some circumstances while +//! a naive copy loop would move every byte through the CPU. +//! +//! Drawbacks: +//! +//! * copy operations smaller than the default buffer size can under some circumstances, especially +//! on older kernels, incur more syscalls than the naive approach would. As mentioned above +//! the syscall selection is guided by hints to minimize this possibility but they are not perfect. +//! * optimizations only apply to std types. If a user adds a custom wrapper type, e.g. to report +//! progress, they can hit a performance cliff. +//!
* complexity + +use crate::cmp::min; +use crate::convert::TryInto; +use crate::fs::{File, Metadata}; +use crate::io::copy::generic_copy; +use crate::io::{ + BufRead, BufReader, BufWriter, Error, Read, Result, StderrLock, StdinLock, StdoutLock, Take, + Write, +}; +use crate::mem::ManuallyDrop; +use crate::net::TcpStream; +use crate::os::unix::fs::FileTypeExt; +use crate::os::unix::io::{AsRawFd, FromRawFd, RawFd}; +use crate::process::{ChildStderr, ChildStdin, ChildStdout}; +use crate::ptr; +use crate::sync::atomic::{AtomicBool, Ordering}; +use crate::sys::cvt; + +#[cfg(test)] +mod tests; + +pub(crate) fn copy_spec( + read: &mut R, + write: &mut W, +) -> Result { + let copier = Copier { read, write }; + SpecCopy::copy(copier) +} + +/// This type represents either the inferred `FileType` of a `RawFd` based on the source +/// type from which it was extracted or the actual metadata +/// +/// The methods on this type only provide hints, due to `AsRawFd` and `FromRawFd` the inferred +/// type may be wrong. +enum FdMeta { + /// We obtained the FD from a type that can contain any type of `FileType` and queried the metadata + /// because it is cheaper than probing all possible syscalls (reader side) + Metadata(Metadata), + Socket, + Pipe, + /// We don't have any metadata, e.g. because the original type was `File` which can represent + /// any `FileType` and we did not query the metadata either since it did not seem beneficial + /// (writer side) + NoneObtained, +} + +impl FdMeta { + fn maybe_fifo(&self) -> bool { + match self { + FdMeta::Metadata(meta) => meta.file_type().is_fifo(), + FdMeta::Socket => false, + FdMeta::Pipe => true, + FdMeta::NoneObtained => true, + } + } + + fn potential_sendfile_source(&self) -> bool { + match self { + // procfs erronously shows 0 length on non-empty readable files. + // and if a file is truly empty then a `read` syscall will determine that and skip the write syscall + // thus there would be benefit from attempting sendfile + FdMeta::Metadata(meta) + if meta.file_type().is_file() && meta.len() > 0 + || meta.file_type().is_block_device() => + { + true + } + _ => false, + } + } + + fn copy_file_range_candidate(&self) -> bool { + match self { + // copy_file_range will fail on empty procfs files. 
`read` can determine whether EOF has been reached + // without extra cost and skip the write, thus there is no benefit in attempting copy_file_range + FdMeta::Metadata(meta) if meta.is_file() && meta.len() > 0 => true, + FdMeta::NoneObtained => true, + _ => false, + } + } +} + +struct CopyParams(FdMeta, Option); + +struct Copier<'a, 'b, R: Read + ?Sized, W: Write + ?Sized> { + read: &'a mut R, + write: &'b mut W, +} + +trait SpecCopy { + fn copy(self) -> Result; +} + +impl SpecCopy for Copier<'_, '_, R, W> { + default fn copy(self) -> Result { + generic_copy(self.read, self.write) + } +} + +impl SpecCopy for Copier<'_, '_, R, W> { + fn copy(self) -> Result { + let (reader, writer) = (self.read, self.write); + let r_cfg = reader.properties(); + let w_cfg = writer.properties(); + + // before direct operations on file descriptors ensure that all source and sink buffers are empty + let mut flush = || -> crate::io::Result { + let bytes = reader.drain_to(writer, u64::MAX)?; + // BufWriter buffered bytes have already been accounted for in earlier write() calls + writer.flush()?; + Ok(bytes) + }; + + let mut written = 0u64; + + if let (CopyParams(input_meta, Some(readfd)), CopyParams(output_meta, Some(writefd))) = + (r_cfg, w_cfg) + { + written += flush()?; + let max_write = reader.min_limit(); + + if input_meta.copy_file_range_candidate() && output_meta.copy_file_range_candidate() { + let result = copy_regular_files(readfd, writefd, max_write); + + match result { + CopyResult::Ended(Ok(bytes_copied)) => return Ok(bytes_copied + written), + CopyResult::Ended(err) => return err, + CopyResult::Fallback(bytes) => written += bytes, + } + } + + // on modern kernels sendfile can copy from any mmapable type (some but not all regular files and block devices) + // to any writable file descriptor. On older kernels the writer side can only be a socket. + // So we just try and fallback if needed. + // If current file offsets + write sizes overflow it may also fail, we do not try to fix that and instead + // fall back to the generic copy loop. + if input_meta.potential_sendfile_source() { + let result = sendfile_splice(SpliceMode::Sendfile, readfd, writefd, max_write); + + match result { + CopyResult::Ended(Ok(bytes_copied)) => return Ok(bytes_copied + written), + CopyResult::Ended(err) => return err, + CopyResult::Fallback(bytes) => written += bytes, + } + } + + if input_meta.maybe_fifo() || output_meta.maybe_fifo() { + let result = sendfile_splice(SpliceMode::Splice, readfd, writefd, max_write); + + match result { + CopyResult::Ended(Ok(bytes_copied)) => return Ok(bytes_copied + written), + CopyResult::Ended(err) => return err, + CopyResult::Fallback(0) => { /* use the fallback below */ } + CopyResult::Fallback(_) => { + unreachable!("splice should not return > 0 bytes on the fallback path") + } + } + } + } + + // fallback if none of the more specialized syscalls wants to work with these file descriptors + match generic_copy(reader, writer) { + Ok(bytes) => Ok(bytes + written), + err => err, + } + } +} + +#[rustc_specialization_trait] +trait CopyRead: Read { + /// Implementations that contain buffers (i.e. `BufReader`) must transfer data from their internal + /// buffers into `writer` until either the buffers are emptied or `limit` bytes have been + /// transferred, whichever occurs sooner. + /// If nested buffers are present the outer buffers must be drained first. 
+ /// + /// This is necessary to directly bypass the wrapper types while preserving the data order + /// when operating directly on the underlying file descriptors. + fn drain_to(&mut self, _writer: &mut W, _limit: u64) -> Result { + Ok(0) + } + + /// The minimum of the limit of all `Take<_>` wrappers, `u64::MAX` otherwise. + /// This method does not account for data `BufReader` buffers and would underreport + /// the limit of a `Take>>` type. Thus its result is only valid + /// after draining the buffers via `drain_to`. + fn min_limit(&self) -> u64 { + u64::MAX + } + + /// Extracts the file descriptor and hints/metadata, delegating through wrappers if necessary. + fn properties(&self) -> CopyParams; +} + +#[rustc_specialization_trait] +trait CopyWrite: Write { + /// Extracts the file descriptor and hints/metadata, delegating through wrappers if necessary. + fn properties(&self) -> CopyParams; +} + +impl CopyRead for &mut T +where + T: CopyRead, +{ + fn drain_to(&mut self, writer: &mut W, limit: u64) -> Result { + (**self).drain_to(writer, limit) + } + + fn min_limit(&self) -> u64 { + (**self).min_limit() + } + + fn properties(&self) -> CopyParams { + (**self).properties() + } +} + +impl CopyWrite for &mut T +where + T: CopyWrite, +{ + fn properties(&self) -> CopyParams { + (**self).properties() + } +} + +impl CopyRead for File { + fn properties(&self) -> CopyParams { + CopyParams(fd_to_meta(self), Some(self.as_raw_fd())) + } +} + +impl CopyRead for &File { + fn properties(&self) -> CopyParams { + CopyParams(fd_to_meta(*self), Some(self.as_raw_fd())) + } +} + +impl CopyWrite for File { + fn properties(&self) -> CopyParams { + CopyParams(FdMeta::NoneObtained, Some(self.as_raw_fd())) + } +} + +impl CopyWrite for &File { + fn properties(&self) -> CopyParams { + CopyParams(FdMeta::NoneObtained, Some(self.as_raw_fd())) + } +} + +impl CopyRead for TcpStream { + fn properties(&self) -> CopyParams { + // avoid the stat syscall since we can be fairly sure it's a socket + CopyParams(FdMeta::Socket, Some(self.as_raw_fd())) + } +} + +impl CopyRead for &TcpStream { + fn properties(&self) -> CopyParams { + // avoid the stat syscall since we can be fairly sure it's a socket + CopyParams(FdMeta::Socket, Some(self.as_raw_fd())) + } +} + +impl CopyWrite for TcpStream { + fn properties(&self) -> CopyParams { + // avoid the stat syscall since we can be fairly sure it's a socket + CopyParams(FdMeta::Socket, Some(self.as_raw_fd())) + } +} + +impl CopyWrite for &TcpStream { + fn properties(&self) -> CopyParams { + // avoid the stat syscall since we can be fairly sure it's a socket + CopyParams(FdMeta::Socket, Some(self.as_raw_fd())) + } +} + +impl CopyWrite for ChildStdin { + fn properties(&self) -> CopyParams { + CopyParams(FdMeta::Pipe, Some(self.as_raw_fd())) + } +} + +impl CopyRead for ChildStdout { + fn properties(&self) -> CopyParams { + CopyParams(FdMeta::Pipe, Some(self.as_raw_fd())) + } +} + +impl CopyRead for ChildStderr { + fn properties(&self) -> CopyParams { + CopyParams(FdMeta::Pipe, Some(self.as_raw_fd())) + } +} + +impl CopyRead for StdinLock<'_> { + fn drain_to(&mut self, writer: &mut W, outer_limit: u64) -> Result { + let buf_reader = self.as_mut_buf(); + let buf = buf_reader.buffer(); + let buf = &buf[0..min(buf.len(), outer_limit.try_into().unwrap_or(usize::MAX))]; + let bytes_drained = buf.len(); + writer.write_all(buf)?; + buf_reader.consume(bytes_drained); + + Ok(bytes_drained as u64) + } + + fn properties(&self) -> CopyParams { + CopyParams(fd_to_meta(self), Some(self.as_raw_fd())) + } +} 
+ +impl CopyWrite for StdoutLock<'_> { + fn properties(&self) -> CopyParams { + CopyParams(FdMeta::NoneObtained, Some(self.as_raw_fd())) + } +} + +impl CopyWrite for StderrLock<'_> { + fn properties(&self) -> CopyParams { + CopyParams(FdMeta::NoneObtained, Some(self.as_raw_fd())) + } +} + +impl CopyRead for Take { + fn drain_to(&mut self, writer: &mut W, outer_limit: u64) -> Result { + let local_limit = self.limit(); + let combined_limit = min(outer_limit, local_limit); + let bytes_drained = self.get_mut().drain_to(writer, combined_limit)?; + // update limit since read() was bypassed + self.set_limit(local_limit - bytes_drained); + + Ok(bytes_drained) + } + + fn min_limit(&self) -> u64 { + min(Take::limit(self), self.get_ref().min_limit()) + } + + fn properties(&self) -> CopyParams { + self.get_ref().properties() + } +} + +impl CopyRead for BufReader { + fn drain_to(&mut self, writer: &mut W, outer_limit: u64) -> Result { + let buf = self.buffer(); + let buf = &buf[0..min(buf.len(), outer_limit.try_into().unwrap_or(usize::MAX))]; + let bytes = buf.len(); + writer.write_all(buf)?; + self.consume(bytes); + + let remaining = outer_limit - bytes as u64; + + // in case of nested bufreaders we also need to drain the ones closer to the source + let inner_bytes = self.get_mut().drain_to(writer, remaining)?; + + Ok(bytes as u64 + inner_bytes) + } + + fn min_limit(&self) -> u64 { + self.get_ref().min_limit() + } + + fn properties(&self) -> CopyParams { + self.get_ref().properties() + } +} + +impl CopyWrite for BufWriter { + fn properties(&self) -> CopyParams { + self.get_ref().properties() + } +} + +fn fd_to_meta(fd: &T) -> FdMeta { + let fd = fd.as_raw_fd(); + let file: ManuallyDrop = ManuallyDrop::new(unsafe { File::from_raw_fd(fd) }); + match file.metadata() { + Ok(meta) => FdMeta::Metadata(meta), + Err(_) => FdMeta::NoneObtained, + } +} + +pub(super) enum CopyResult { + Ended(Result), + Fallback(u64), +} + +/// linux-specific implementation that will attempt to use copy_file_range for copy offloading +/// as the name says, it only works on regular files +/// +/// Callers must handle fallback to a generic copy loop. +/// `Fallback` may indicate non-zero number of bytes already written +/// if one of the files' cursor +`max_len` would exceed u64::MAX (`EOVERFLOW`). +pub(super) fn copy_regular_files(reader: RawFd, writer: RawFd, max_len: u64) -> CopyResult { + use crate::cmp; + + // Kernel prior to 4.5 don't have copy_file_range + // We store the availability in a global to avoid unnecessary syscalls + static HAS_COPY_FILE_RANGE: AtomicBool = AtomicBool::new(true); + + syscall! 
{ + fn copy_file_range( + fd_in: libc::c_int, + off_in: *mut libc::loff_t, + fd_out: libc::c_int, + off_out: *mut libc::loff_t, + len: libc::size_t, + flags: libc::c_uint + ) -> libc::ssize_t + } + + let has_copy_file_range = HAS_COPY_FILE_RANGE.load(Ordering::Relaxed); + let mut written = 0u64; + while written < max_len { + let copy_result = if has_copy_file_range { + let bytes_to_copy = cmp::min(max_len - written, usize::MAX as u64); + // cap to 1GB chunks in case u64::MAX is passed as max_len and the file has a non-zero seek position + // this allows us to copy large chunks without hitting EOVERFLOW, + // unless someone sets a file offset close to u64::MAX - 1GB, in which case a fallback would be required + let bytes_to_copy = cmp::min(bytes_to_copy as usize, 0x4000_0000usize); + let copy_result = unsafe { + // We actually don't have to adjust the offsets, + // because copy_file_range adjusts the file offset automatically + cvt(copy_file_range( + reader, + ptr::null_mut(), + writer, + ptr::null_mut(), + bytes_to_copy, + 0, + )) + }; + if let Err(ref copy_err) = copy_result { + match copy_err.raw_os_error() { + Some(libc::ENOSYS | libc::EPERM | libc::EOPNOTSUPP) => { + HAS_COPY_FILE_RANGE.store(false, Ordering::Relaxed); + } + _ => {} + } + } + copy_result + } else { + Err(Error::from_raw_os_error(libc::ENOSYS)) + }; + match copy_result { + Ok(0) if written == 0 => { + // fallback to work around several kernel bugs where copy_file_range will fail to + // copy any bytes and return 0 instead of an error if + // - reading virtual files from the proc filesystem which appear to have 0 size + // but are not empty. noted in coreutils to affect kernels at least up to 5.6.19. + // - copying from an overlay filesystem in docker. reported to occur on fedora 32. + return CopyResult::Fallback(0); + } + Ok(0) => return CopyResult::Ended(Ok(written)), // reached EOF + Ok(ret) => written += ret as u64, + Err(err) => { + return match err.raw_os_error() { + // when file offset + max_length > u64::MAX + Some(libc::EOVERFLOW) => CopyResult::Fallback(written), + Some( + libc::ENOSYS | libc::EXDEV | libc::EINVAL | libc::EPERM | libc::EOPNOTSUPP, + ) => { + // Try fallback io::copy if either: + // - Kernel version is < 4.5 (ENOSYS) + // - Files are mounted on different fs (EXDEV) + // - copy_file_range is broken in various ways on RHEL/CentOS 7 (EOPNOTSUPP) + // - copy_file_range is disallowed, for example by seccomp (EPERM) + // - copy_file_range cannot be used with pipes or device nodes (EINVAL) + assert_eq!(written, 0); + CopyResult::Fallback(0) + } + _ => CopyResult::Ended(Err(err)), + }; + } + } + } + CopyResult::Ended(Ok(written)) +} + +#[derive(PartialEq)] +enum SpliceMode { + Sendfile, + Splice, +} + +/// performs splice or sendfile between file descriptors +/// Does _not_ fall back to a generic copy loop. +fn sendfile_splice(mode: SpliceMode, reader: RawFd, writer: RawFd, len: u64) -> CopyResult { + static HAS_SENDFILE: AtomicBool = AtomicBool::new(true); + static HAS_SPLICE: AtomicBool = AtomicBool::new(true); + + syscall! 
{ + fn splice( + srcfd: libc::c_int, + src_offset: *const i64, + dstfd: libc::c_int, + dst_offset: *const i64, + len: libc::size_t, + flags: libc::c_int + ) -> libc::ssize_t + } + + match mode { + SpliceMode::Sendfile if !HAS_SENDFILE.load(Ordering::Relaxed) => { + return CopyResult::Fallback(0); + } + SpliceMode::Splice if !HAS_SPLICE.load(Ordering::Relaxed) => { + return CopyResult::Fallback(0); + } + _ => (), + } + + let mut written = 0u64; + while written < len { + // according to its manpage that's the maximum size sendfile() will copy per invocation + let chunk_size = crate::cmp::min(len - written, 0x7ffff000_u64) as usize; + + let result = match mode { + SpliceMode::Sendfile => { + cvt(unsafe { libc::sendfile(writer, reader, ptr::null_mut(), chunk_size) }) + } + SpliceMode::Splice => cvt(unsafe { + splice(reader, ptr::null_mut(), writer, ptr::null_mut(), chunk_size, 0) + }), + }; + + match result { + Ok(0) => break, // EOF + Ok(ret) => written += ret as u64, + Err(err) => { + return match err.raw_os_error() { + Some(libc::ENOSYS | libc::EPERM) => { + // syscall not supported (ENOSYS) + // syscall is disallowed, e.g. by seccomp (EPERM) + match mode { + SpliceMode::Sendfile => HAS_SENDFILE.store(false, Ordering::Relaxed), + SpliceMode::Splice => HAS_SPLICE.store(false, Ordering::Relaxed), + } + assert_eq!(written, 0); + CopyResult::Fallback(0) + } + Some(libc::EINVAL) => { + // splice/sendfile do not support this particular file descriptor (EINVAL) + assert_eq!(written, 0); + CopyResult::Fallback(0) + } + Some(os_err) if mode == SpliceMode::Sendfile && os_err == libc::EOVERFLOW => { + CopyResult::Fallback(written) + } + _ => CopyResult::Ended(Err(err)), + }; + } + } + } + CopyResult::Ended(Ok(written)) +} diff --git a/library/std/src/sys/unix/kernel_copy/tests.rs b/library/std/src/sys/unix/kernel_copy/tests.rs new file mode 100644 index 00000000000..21b121c26ff --- /dev/null +++ b/library/std/src/sys/unix/kernel_copy/tests.rs @@ -0,0 +1,213 @@ +use crate::env::temp_dir; +use crate::fs::OpenOptions; +use crate::io; +use crate::io::Result; +use crate::io::SeekFrom; +use crate::io::{BufRead, Read, Seek, Write}; +use crate::os::unix::io::AsRawFd; + +#[test] +fn copy_specialization() -> Result<()> { + use crate::io::{BufReader, BufWriter}; + + let path = crate::env::temp_dir(); + let source_path = path.join("copy-spec.source"); + let sink_path = path.join("copy-spec.sink"); + + let result: Result<()> = try { + let mut source = crate::fs::OpenOptions::new() + .read(true) + .write(true) + .create(true) + .truncate(true) + .open(&source_path)?; + source.write_all(b"abcdefghiklmnopqr")?; + source.seek(SeekFrom::Start(8))?; + let mut source = BufReader::with_capacity(8, source.take(5)); + source.fill_buf()?; + assert_eq!(source.buffer(), b"iklmn"); + source.get_mut().set_limit(6); + source.get_mut().get_mut().seek(SeekFrom::Start(1))?; // "bcdefg" + let mut source = source.take(10); // "iklmnbcdef" + + let mut sink = crate::fs::OpenOptions::new() + .read(true) + .write(true) + .create(true) + .truncate(true) + .open(&sink_path)?; + sink.write_all(b"000000")?; + let mut sink = BufWriter::with_capacity(5, sink); + sink.write_all(b"wxyz")?; + assert_eq!(sink.buffer(), b"wxyz"); + + let copied = crate::io::copy(&mut source, &mut sink)?; + assert_eq!(copied, 10); + assert_eq!(sink.buffer().len(), 0); + + let mut sink = sink.into_inner()?; + sink.seek(SeekFrom::Start(0))?; + let mut copied = Vec::new(); + sink.read_to_end(&mut copied)?; + assert_eq!(&copied, b"000000wxyziklmnbcdef"); + }; + + let rm1 
= crate::fs::remove_file(source_path); + let rm2 = crate::fs::remove_file(sink_path); + + result.and(rm1).and(rm2) +} + +#[bench] +fn bench_file_to_file_copy(b: &mut test::Bencher) { + const BYTES: usize = 128 * 1024; + let src_path = temp_dir().join("file-copy-bench-src"); + let mut src = crate::fs::OpenOptions::new() + .create(true) + .truncate(true) + .read(true) + .write(true) + .open(src_path) + .unwrap(); + src.write(&vec![0u8; BYTES]).unwrap(); + + let sink_path = temp_dir().join("file-copy-bench-sink"); + let mut sink = crate::fs::OpenOptions::new() + .create(true) + .truncate(true) + .write(true) + .open(sink_path) + .unwrap(); + + b.bytes = BYTES as u64; + b.iter(|| { + src.seek(SeekFrom::Start(0)).unwrap(); + sink.seek(SeekFrom::Start(0)).unwrap(); + assert_eq!(BYTES as u64, io::copy(&mut src, &mut sink).unwrap()); + }); +} + +#[bench] +fn bench_file_to_socket_copy(b: &mut test::Bencher) { + const BYTES: usize = 128 * 1024; + let src_path = temp_dir().join("pipe-copy-bench-src"); + let mut src = OpenOptions::new() + .create(true) + .truncate(true) + .read(true) + .write(true) + .open(src_path) + .unwrap(); + src.write(&vec![0u8; BYTES]).unwrap(); + + let sink_drainer = crate::net::TcpListener::bind("localhost:0").unwrap(); + let mut sink = crate::net::TcpStream::connect(sink_drainer.local_addr().unwrap()).unwrap(); + let mut sink_drainer = sink_drainer.accept().unwrap().0; + + crate::thread::spawn(move || { + let mut sink_buf = vec![0u8; 1024 * 1024]; + loop { + sink_drainer.read(&mut sink_buf[..]).unwrap(); + } + }); + + b.bytes = BYTES as u64; + b.iter(|| { + src.seek(SeekFrom::Start(0)).unwrap(); + assert_eq!(BYTES as u64, io::copy(&mut src, &mut sink).unwrap()); + }); +} + +#[cfg(any(target_os = "linux", target_os = "android"))] +#[bench] +fn bench_socket_pipe_socket_copy(b: &mut test::Bencher) { + use super::CopyResult; + use crate::io::ErrorKind; + use crate::process::{ChildStdin, ChildStdout}; + use crate::sys_common::FromInner; + + let (read_end, write_end) = crate::sys::pipe::anon_pipe().unwrap(); + + let mut read_end = ChildStdout::from_inner(read_end); + let write_end = ChildStdin::from_inner(write_end); + + let acceptor = crate::net::TcpListener::bind("localhost:0").unwrap(); + let mut remote_end = crate::net::TcpStream::connect(acceptor.local_addr().unwrap()).unwrap(); + + let local_end = crate::sync::Arc::new(acceptor.accept().unwrap().0); + + // the data flow in this benchmark: + // + // socket(tx) local_source + // remote_end (write) +--------> (splice to) + // write_end + // + + // | + // | pipe + // v + // read_end + // remote_end (read) <---------+ (splice to) * + // socket(rx) local_end + // + // * benchmark loop using io::copy + + crate::thread::spawn(move || { + let mut sink_buf = vec![0u8; 1024 * 1024]; + remote_end.set_nonblocking(true).unwrap(); + loop { + match remote_end.write(&mut sink_buf[..]) { + Err(err) if err.kind() == ErrorKind::WouldBlock => {} + Ok(_) => {} + err => { + err.expect("write failed"); + } + }; + match remote_end.read(&mut sink_buf[..]) { + Err(err) if err.kind() == ErrorKind::WouldBlock => {} + Ok(_) => {} + err => { + err.expect("read failed"); + } + }; + } + }); + + // check that splice works, otherwise the benchmark would hang + let probe = super::sendfile_splice( + super::SpliceMode::Splice, + local_end.as_raw_fd(), + write_end.as_raw_fd(), + 1, + ); + + match probe { + CopyResult::Ended(Ok(1)) => { + // splice works + } + _ => { + eprintln!("splice failed, skipping benchmark"); + return; + } + } + + let local_source = 
local_end.clone(); + crate::thread::spawn(move || { + loop { + super::sendfile_splice( + super::SpliceMode::Splice, + local_source.as_raw_fd(), + write_end.as_raw_fd(), + u64::MAX, + ); + } + }); + + const BYTES: usize = 128 * 1024; + b.bytes = BYTES as u64; + b.iter(|| { + assert_eq!( + BYTES as u64, + io::copy(&mut (&mut read_end).take(BYTES as u64), &mut &*local_end).unwrap() + ); + }); +} diff --git a/library/std/src/sys/unix/mod.rs b/library/std/src/sys/unix/mod.rs index b28c6d85b7c..f8a5ee89969 100644 --- a/library/std/src/sys/unix/mod.rs +++ b/library/std/src/sys/unix/mod.rs @@ -51,6 +51,8 @@ pub mod fd; pub mod fs; pub mod futex; pub mod io; +#[cfg(any(target_os = "linux", target_os = "android"))] +pub mod kernel_copy; #[cfg(target_os = "l4re")] mod l4re; pub mod memchr; @@ -234,3 +236,55 @@ pub fn cvt_nz(error: libc::c_int) -> crate::io::Result<()> { pub fn abort_internal() -> ! { unsafe { libc::abort() } } + +cfg_if::cfg_if! { + if #[cfg(target_os = "android")] { + #[link(name = "dl")] + #[link(name = "log")] + #[link(name = "gcc")] + extern "C" {} + } else if #[cfg(target_os = "freebsd")] { + #[link(name = "execinfo")] + #[link(name = "pthread")] + extern "C" {} + } else if #[cfg(target_os = "netbsd")] { + #[link(name = "pthread")] + #[link(name = "rt")] + extern "C" {} + } else if #[cfg(any(target_os = "dragonfly", target_os = "openbsd"))] { + #[link(name = "pthread")] + extern "C" {} + } else if #[cfg(target_os = "solaris")] { + #[link(name = "socket")] + #[link(name = "posix4")] + #[link(name = "pthread")] + #[link(name = "resolv")] + extern "C" {} + } else if #[cfg(target_os = "illumos")] { + #[link(name = "socket")] + #[link(name = "posix4")] + #[link(name = "pthread")] + #[link(name = "resolv")] + #[link(name = "nsl")] + // Use libumem for the (malloc-compatible) allocator + #[link(name = "umem")] + extern "C" {} + } else if #[cfg(target_os = "macos")] { + #[link(name = "System")] + // res_init and friends require -lresolv on macOS/iOS. + // See #41582 and http://blog.achernya.com/2013/03/os-x-has-silly-libsystem.html + #[link(name = "resolv")] + extern "C" {} + } else if #[cfg(target_os = "ios")] { + #[link(name = "System")] + #[link(name = "objc")] + #[link(name = "Security", kind = "framework")] + #[link(name = "Foundation", kind = "framework")] + #[link(name = "resolv")] + extern "C" {} + } else if #[cfg(target_os = "fuchsia")] { + #[link(name = "zircon")] + #[link(name = "fdio")] + extern "C" {} + } +} diff --git a/library/std/src/sys/unix/rand.rs b/library/std/src/sys/unix/rand.rs index eed6fbf13b7..38ddb41700c 100644 --- a/library/std/src/sys/unix/rand.rs +++ b/library/std/src/sys/unix/rand.rs @@ -25,10 +25,19 @@ mod imp { use crate::io::Read; #[cfg(any(target_os = "linux", target_os = "android"))] - fn getrandom(buf: &mut [u8]) -> libc::c_long { - unsafe { - libc::syscall(libc::SYS_getrandom, buf.as_mut_ptr(), buf.len(), libc::GRND_NONBLOCK) + fn getrandom(buf: &mut [u8]) -> libc::ssize_t { + // A weak symbol allows interposition, e.g. for perf measurements that want to + // disable randomness for consistency. Otherwise, we'll try a raw syscall. + // (`getrandom` was added in glibc 2.25, musl 1.1.20, android API level 28) + syscall! 
{ + fn getrandom( + buffer: *mut libc::c_void, + length: libc::size_t, + flags: libc::c_uint + ) -> libc::ssize_t } + + unsafe { getrandom(buf.as_mut_ptr().cast(), buf.len(), libc::GRND_NONBLOCK) } } #[cfg(not(any(target_os = "linux", target_os = "android")))] diff --git a/library/std/src/sys/unix/thread.rs b/library/std/src/sys/unix/thread.rs index f1ab302d30e..cda17eb4bd2 100644 --- a/library/std/src/sys/unix/thread.rs +++ b/library/std/src/sys/unix/thread.rs @@ -22,24 +22,6 @@ pub struct Thread { unsafe impl Send for Thread {} unsafe impl Sync for Thread {} -// The pthread_attr_setstacksize symbol doesn't exist in the emscripten libc, -// so we have to not link to it to satisfy emcc's ERROR_ON_UNDEFINED_SYMBOLS. -#[cfg(not(target_os = "emscripten"))] -unsafe fn pthread_attr_setstacksize( - attr: *mut libc::pthread_attr_t, - stack_size: libc::size_t, -) -> libc::c_int { - libc::pthread_attr_setstacksize(attr, stack_size) -} - -#[cfg(target_os = "emscripten")] -unsafe fn pthread_attr_setstacksize( - _attr: *mut libc::pthread_attr_t, - _stack_size: libc::size_t, -) -> libc::c_int { - panic!() -} - impl Thread { // unsafe: see thread::Builder::spawn_unchecked for safety requirements pub unsafe fn new(stack: usize, p: Box) -> io::Result { @@ -50,7 +32,7 @@ impl Thread { let stack_size = cmp::max(stack, min_stack_size(&attr)); - match pthread_attr_setstacksize(&mut attr, stack_size) { + match libc::pthread_attr_setstacksize(&mut attr, stack_size) { 0 => {} n => { assert_eq!(n, libc::EINVAL); diff --git a/library/std/src/sys/unix/weak.rs b/library/std/src/sys/unix/weak.rs index f4b33a00f7c..e93a4972caa 100644 --- a/library/std/src/sys/unix/weak.rs +++ b/library/std/src/sys/unix/weak.rs @@ -24,7 +24,7 @@ use crate::ffi::CStr; use crate::marker; use crate::mem; -use crate::sync::atomic::{AtomicUsize, Ordering}; +use crate::sync::atomic::{self, AtomicUsize, Ordering}; macro_rules! weak { (fn $name:ident($($t:ty),*) -> $ret:ty) => ( @@ -47,15 +47,49 @@ impl Weak { pub fn get(&self) -> Option { assert_eq!(mem::size_of::(), mem::size_of::()); unsafe { - if self.addr.load(Ordering::SeqCst) == 1 { - self.addr.store(fetch(self.name), Ordering::SeqCst); - } - match self.addr.load(Ordering::SeqCst) { + // Relaxed is fine here because we fence before reading through the + // pointer (see the comment below). + match self.addr.load(Ordering::Relaxed) { + 1 => self.initialize(), 0 => None, - addr => Some(mem::transmute_copy::(&addr)), + addr => { + let func = mem::transmute_copy::(&addr); + // The caller is presumably going to read through this value + // (by calling the function we've dlsymed). This means we'd + // need to have loaded it with at least C11's consume + // ordering in order to be guaranteed that the data we read + // from the pointer isn't from before the pointer was + // stored. Rust has no equivalent to memory_order_consume, + // so we use an acquire fence (sorry, ARM). + // + // Now, in practice this likely isn't needed even on CPUs + // where relaxed and consume mean different things. The + // symbols we're loading are probably present (or not) at + // init, and even if they aren't the runtime dynamic loader + // is extremely likely have sufficient barriers internally + // (possibly implicitly, for example the ones provided by + // invoking `mprotect`). + // + // That said, none of that's *guaranteed*, and so we fence. + atomic::fence(Ordering::Acquire); + Some(func) + } } } } + + // Cold because it should only happen during first-time initalization. 
+ #[cold] + unsafe fn initialize(&self) -> Option { + let val = fetch(self.name); + // This synchronizes with the acquire fence in `get`. + self.addr.store(val, Ordering::Release); + + match val { + 0 => None, + addr => Some(mem::transmute_copy::(&addr)), + } + } } unsafe fn fetch(name: &str) -> usize { @@ -66,7 +100,7 @@ unsafe fn fetch(name: &str) -> usize { libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr()) as usize } -#[cfg(not(target_os = "linux"))] +#[cfg(not(any(target_os = "linux", target_os = "android")))] macro_rules! syscall { (fn $name:ident($($arg_name:ident: $t:ty),*) -> $ret:ty) => ( unsafe fn $name($($arg_name: $t),*) -> $ret { @@ -84,7 +118,7 @@ macro_rules! syscall { ) } -#[cfg(target_os = "linux")] +#[cfg(any(target_os = "linux", target_os = "android"))] macro_rules! syscall { (fn $name:ident($($arg_name:ident: $t:ty),*) -> $ret:ty) => ( unsafe fn $name($($arg_name:$t),*) -> $ret { @@ -92,10 +126,18 @@ macro_rules! syscall { // (not paths). use libc::*; - syscall( - concat_idents!(SYS_, $name), - $($arg_name as c_long),* - ) as $ret + weak! { fn $name($($t),*) -> $ret } + + // Use a weak symbol from libc when possible, allowing `LD_PRELOAD` + // interposition, but if it's not found just use a raw syscall. + if let Some(fun) = $name.get() { + fun($($arg_name),*) + } else { + syscall( + concat_idents!(SYS_, $name), + $($arg_name),* + ) as $ret + } } ) } diff --git a/library/std/src/sys/unsupported/fs.rs b/library/std/src/sys/unsupported/fs.rs index faa53b6a744..4271d9b3345 100644 --- a/library/std/src/sys/unsupported/fs.rs +++ b/library/std/src/sys/unsupported/fs.rs @@ -279,7 +279,7 @@ pub fn readlink(_p: &Path) -> io::Result { unsupported() } -pub fn symlink(_src: &Path, _dst: &Path) -> io::Result<()> { +pub fn symlink(_original: &Path, _link: &Path) -> io::Result<()> { unsupported() } diff --git a/library/std/src/sys/wasi/fs.rs b/library/std/src/sys/wasi/fs.rs index 93a92b49cfc..120b9f59f1e 100644 --- a/library/std/src/sys/wasi/fs.rs +++ b/library/std/src/sys/wasi/fs.rs @@ -549,19 +549,19 @@ fn read_link(fd: &WasiFd, file: &Path) -> io::Result { } } -pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> { - let (dst, dst_file) = open_parent(dst)?; - dst.symlink(osstr2str(src.as_ref())?, osstr2str(dst_file.as_ref())?) +pub fn symlink(original: &Path, link: &Path) -> io::Result<()> { + let (link, link_file) = open_parent(link)?; + link.symlink(osstr2str(original.as_ref())?, osstr2str(link_file.as_ref())?) 
} -pub fn link(src: &Path, dst: &Path) -> io::Result<()> { - let (src, src_file) = open_parent(src)?; - let (dst, dst_file) = open_parent(dst)?; - src.link( +pub fn link(original: &Path, link: &Path) -> io::Result<()> { + let (original, original_file) = open_parent(original)?; + let (link, link_file) = open_parent(link)?; + original.link( wasi::LOOKUPFLAGS_SYMLINK_FOLLOW, - osstr2str(src_file.as_ref())?, - &dst, - osstr2str(dst_file.as_ref())?, + osstr2str(original_file.as_ref())?, + &link, + osstr2str(link_file.as_ref())?, ) } diff --git a/library/std/src/sys/wasm/alloc.rs b/library/std/src/sys/wasm/alloc.rs index b61a7872265..ef0ca3dd478 100644 --- a/library/std/src/sys/wasm/alloc.rs +++ b/library/std/src/sys/wasm/alloc.rs @@ -18,7 +18,7 @@ use crate::alloc::{GlobalAlloc, Layout, System}; -static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::DLMALLOC_INIT; +static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::Dlmalloc::new(); #[stable(feature = "alloc_system_type", since = "1.28.0")] unsafe impl GlobalAlloc for System { diff --git a/library/std/src/sys/wasm/mutex_atomics.rs b/library/std/src/sys/wasm/mutex_atomics.rs index 479182ffa44..5ff0ec052b6 100644 --- a/library/std/src/sys/wasm/mutex_atomics.rs +++ b/library/std/src/sys/wasm/mutex_atomics.rs @@ -138,7 +138,7 @@ impl ReentrantMutex { self.owner.swap(0, SeqCst); // SAFETY: the caller must gurantee that `self.ptr()` is valid i32. unsafe { - wasm32::atomic_notify(self.ptr() as *mut i32, 1); + wasm32::memory_atomic_notify(self.ptr() as *mut i32, 1); } // wake up one waiter, if any } ref mut n => *n -= 1, diff --git a/library/std/src/sys/windows/compat.rs b/library/std/src/sys/windows/compat.rs index 3f25f05e1b9..dd1523b422c 100644 --- a/library/std/src/sys/windows/compat.rs +++ b/library/std/src/sys/windows/compat.rs @@ -90,6 +90,7 @@ macro_rules! compat_fn { } } + $(#[$meta])* pub use $symbol::call as $symbol; )*) } diff --git a/library/std/src/sys/windows/ext/fs.rs b/library/std/src/sys/windows/ext/fs.rs index e0615f2d334..b20eafb4d53 100644 --- a/library/std/src/sys/windows/ext/fs.rs +++ b/library/std/src/sys/windows/ext/fs.rs @@ -519,7 +519,7 @@ impl FileTypeExt for fs::FileType { /// Creates a new file symbolic link on the filesystem. /// -/// The `dst` path will be a file symbolic link pointing to the `src` +/// The `link` path will be a file symbolic link pointing to the `original` /// path. /// /// # Examples @@ -533,13 +533,13 @@ impl FileTypeExt for fs::FileType { /// } /// ``` #[stable(feature = "symlink", since = "1.1.0")] -pub fn symlink_file, Q: AsRef>(src: P, dst: Q) -> io::Result<()> { - sys::fs::symlink_inner(src.as_ref(), dst.as_ref(), false) +pub fn symlink_file, Q: AsRef>(original: P, link: Q) -> io::Result<()> { + sys::fs::symlink_inner(original.as_ref(), link.as_ref(), false) } /// Creates a new directory symlink on the filesystem. /// -/// The `dst` path will be a directory symbolic link pointing to the `src` +/// The `link` path will be a directory symbolic link pointing to the `original` /// path. 
/// /// # Examples @@ -553,6 +553,6 @@ pub fn symlink_file, Q: AsRef>(src: P, dst: Q) -> io::Resul /// } /// ``` #[stable(feature = "symlink", since = "1.1.0")] -pub fn symlink_dir, Q: AsRef>(src: P, dst: Q) -> io::Result<()> { - sys::fs::symlink_inner(src.as_ref(), dst.as_ref(), true) +pub fn symlink_dir, Q: AsRef>(original: P, link: Q) -> io::Result<()> { + sys::fs::symlink_inner(original.as_ref(), link.as_ref(), true) } diff --git a/library/std/src/sys/windows/fs.rs b/library/std/src/sys/windows/fs.rs index cdbfac267b9..307a47678c6 100644 --- a/library/std/src/sys/windows/fs.rs +++ b/library/std/src/sys/windows/fs.rs @@ -759,13 +759,13 @@ pub fn readlink(path: &Path) -> io::Result { file.readlink() } -pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> { - symlink_inner(src, dst, false) +pub fn symlink(original: &Path, link: &Path) -> io::Result<()> { + symlink_inner(original, link, false) } -pub fn symlink_inner(src: &Path, dst: &Path, dir: bool) -> io::Result<()> { - let src = to_u16s(src)?; - let dst = to_u16s(dst)?; +pub fn symlink_inner(original: &Path, link: &Path, dir: bool) -> io::Result<()> { + let original = to_u16s(original)?; + let link = to_u16s(link)?; let flags = if dir { c::SYMBOLIC_LINK_FLAG_DIRECTORY } else { 0 }; // Formerly, symlink creation required the SeCreateSymbolicLink privilege. For the Windows 10 // Creators Update, Microsoft loosened this to allow unprivileged symlink creation if the @@ -773,8 +773,8 @@ pub fn symlink_inner(src: &Path, dst: &Path, dir: bool) -> io::Result<()> { // added to dwFlags to opt into this behaviour. let result = cvt(unsafe { c::CreateSymbolicLinkW( - dst.as_ptr(), - src.as_ptr(), + link.as_ptr(), + original.as_ptr(), flags | c::SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE, ) as c::BOOL }); @@ -782,7 +782,9 @@ pub fn symlink_inner(src: &Path, dst: &Path, dir: bool) -> io::Result<()> { if err.raw_os_error() == Some(c::ERROR_INVALID_PARAMETER as i32) { // Older Windows objects to SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE, // so if we encounter ERROR_INVALID_PARAMETER, retry without that flag. 
- cvt(unsafe { c::CreateSymbolicLinkW(dst.as_ptr(), src.as_ptr(), flags) as c::BOOL })?; + cvt(unsafe { + c::CreateSymbolicLinkW(link.as_ptr(), original.as_ptr(), flags) as c::BOOL + })?; } else { return Err(err); } @@ -791,15 +793,15 @@ pub fn symlink_inner(src: &Path, dst: &Path, dir: bool) -> io::Result<()> { } #[cfg(not(target_vendor = "uwp"))] -pub fn link(src: &Path, dst: &Path) -> io::Result<()> { - let src = to_u16s(src)?; - let dst = to_u16s(dst)?; - cvt(unsafe { c::CreateHardLinkW(dst.as_ptr(), src.as_ptr(), ptr::null_mut()) })?; +pub fn link(original: &Path, link: &Path) -> io::Result<()> { + let original = to_u16s(original)?; + let link = to_u16s(link)?; + cvt(unsafe { c::CreateHardLinkW(link.as_ptr(), original.as_ptr(), ptr::null_mut()) })?; Ok(()) } #[cfg(target_vendor = "uwp")] -pub fn link(_src: &Path, _dst: &Path) -> io::Result<()> { +pub fn link(_original: &Path, _link: &Path) -> io::Result<()> { return Err(io::Error::new(io::ErrorKind::Other, "hard link are not supported on UWP")); } @@ -883,8 +885,11 @@ pub fn copy(from: &Path, to: &Path) -> io::Result { } #[allow(dead_code)] -pub fn symlink_junction, Q: AsRef>(src: P, dst: Q) -> io::Result<()> { - symlink_junction_inner(src.as_ref(), dst.as_ref()) +pub fn symlink_junction, Q: AsRef>( + original: P, + junction: Q, +) -> io::Result<()> { + symlink_junction_inner(original.as_ref(), junction.as_ref()) } // Creating a directory junction on windows involves dealing with reparse @@ -893,7 +898,7 @@ pub fn symlink_junction, Q: AsRef>(src: P, dst: Q) -> io::R // // http://www.flexhex.com/docs/articles/hard-links.phtml #[allow(dead_code)] -fn symlink_junction_inner(target: &Path, junction: &Path) -> io::Result<()> { +fn symlink_junction_inner(original: &Path, junction: &Path) -> io::Result<()> { let d = DirBuilder::new(); d.mkdir(&junction)?; @@ -911,7 +916,7 @@ fn symlink_junction_inner(target: &Path, junction: &Path) -> io::Result<()> { // FIXME: this conversion is very hacky let v = br"\??\"; let v = v.iter().map(|x| *x as u16); - for c in v.chain(target.as_os_str().encode_wide()) { + for c in v.chain(original.as_os_str().encode_wide()) { *buf.offset(i) = c; i += 1; } diff --git a/library/std/src/sys/windows/mod.rs b/library/std/src/sys/windows/mod.rs index 8c19cc78b09..c36c6196d79 100644 --- a/library/std/src/sys/windows/mod.rs +++ b/library/std/src/sys/windows/mod.rs @@ -270,3 +270,17 @@ pub fn abort_internal() -> ! { } crate::intrinsics::abort(); } + +cfg_if::cfg_if! { + if #[cfg(target_vendor = "uwp")] { + #[link(name = "ws2_32")] + // For BCryptGenRandom + #[link(name = "bcrypt")] + extern "C" {} + } else { + #[link(name = "advapi32")] + #[link(name = "ws2_32")] + #[link(name = "userenv")] + extern "C" {} + } +} diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs index fefaa77a2a1..5d65f960fcd 100644 --- a/library/std/src/thread/mod.rs +++ b/library/std/src/thread/mod.rs @@ -456,15 +456,15 @@ impl Builder { let my_packet: Arc>>> = Arc::new(UnsafeCell::new(None)); let their_packet = my_packet.clone(); - let (stdout, stderr) = crate::io::clone_io(); + let output_capture = crate::io::set_output_capture(None); + crate::io::set_output_capture(output_capture.clone()); let main = move || { if let Some(name) = their_thread.cname() { imp::Thread::set_name(name); } - crate::io::set_print(stdout); - crate::io::set_panic(stderr); + crate::io::set_output_capture(output_capture); // SAFETY: the stack guard passed is the one for the current thread. 
// This means the current thread's stack and the new thread's stack diff --git a/library/std/src/time.rs b/library/std/src/time.rs index 64d7898f030..e433f69a8b0 100644 --- a/library/std/src/time.rs +++ b/library/std/src/time.rs @@ -322,7 +322,7 @@ impl Instant { /// ``` #[stable(feature = "checked_duration_since", since = "1.39.0")] pub fn saturating_duration_since(&self, earlier: Instant) -> Duration { - self.checked_duration_since(earlier).unwrap_or(Duration::new(0, 0)) + self.checked_duration_since(earlier).unwrap_or_default() } /// Returns the amount of time elapsed since this instant was created. diff --git a/library/std/src/time/tests.rs b/library/std/src/time/tests.rs index 783bf49f315..20c813fdc70 100644 --- a/library/std/src/time/tests.rs +++ b/library/std/src/time/tests.rs @@ -5,7 +5,7 @@ macro_rules! assert_almost_eq { let (a, b) = ($a, $b); if a != b { let (a, b) = if a > b { (a, b) } else { (b, a) }; - assert!(a - Duration::new(0, 1000) <= b, "{:?} is not almost equal to {:?}", a, b); + assert!(a - Duration::from_micros(1) <= b, "{:?} is not almost equal to {:?}", a, b); } }}; } @@ -34,7 +34,7 @@ fn instant_math() { assert_almost_eq!(b - dur, a); assert_almost_eq!(a + dur, b); - let second = Duration::new(1, 0); + let second = Duration::SECOND; assert_almost_eq!(a - second + second, a); assert_almost_eq!(a.checked_sub(second).unwrap().checked_add(second).unwrap(), a); @@ -65,24 +65,24 @@ fn instant_math_is_associative() { #[should_panic] fn instant_duration_since_panic() { let a = Instant::now(); - (a - Duration::new(1, 0)).duration_since(a); + (a - Duration::SECOND).duration_since(a); } #[test] fn instant_checked_duration_since_nopanic() { let now = Instant::now(); - let earlier = now - Duration::new(1, 0); - let later = now + Duration::new(1, 0); + let earlier = now - Duration::SECOND; + let later = now + Duration::SECOND; assert_eq!(earlier.checked_duration_since(now), None); - assert_eq!(later.checked_duration_since(now), Some(Duration::new(1, 0))); - assert_eq!(now.checked_duration_since(now), Some(Duration::new(0, 0))); + assert_eq!(later.checked_duration_since(now), Some(Duration::SECOND)); + assert_eq!(now.checked_duration_since(now), Some(Duration::ZERO)); } #[test] fn instant_saturating_duration_since_nopanic() { let a = Instant::now(); - let ret = (a - Duration::new(1, 0)).saturating_duration_since(a); - assert_eq!(ret, Duration::new(0, 0)); + let ret = (a - Duration::SECOND).saturating_duration_since(a); + assert_eq!(ret, Duration::ZERO); } #[test] @@ -90,7 +90,7 @@ fn system_time_math() { let a = SystemTime::now(); let b = SystemTime::now(); match b.duration_since(a) { - Ok(dur) if dur == Duration::new(0, 0) => { + Ok(Duration::ZERO) => { assert_almost_eq!(a, b); } Ok(dur) => { @@ -106,16 +106,16 @@ fn system_time_math() { } } - let second = Duration::new(1, 0); + let second = Duration::SECOND; assert_almost_eq!(a.duration_since(a - second).unwrap(), second); assert_almost_eq!(a.duration_since(a + second).unwrap_err().duration(), second); assert_almost_eq!(a - second + second, a); assert_almost_eq!(a.checked_sub(second).unwrap().checked_add(second).unwrap(), a); - let one_second_from_epoch = UNIX_EPOCH + Duration::new(1, 0); + let one_second_from_epoch = UNIX_EPOCH + Duration::SECOND; let one_second_from_epoch2 = - UNIX_EPOCH + Duration::new(0, 500_000_000) + Duration::new(0, 500_000_000); + UNIX_EPOCH + Duration::from_millis(500) + Duration::from_millis(500); assert_eq!(one_second_from_epoch, one_second_from_epoch2); // checked_add_duration will not panic on 
overflow @@ -141,12 +141,12 @@ fn system_time_elapsed() { #[test] fn since_epoch() { let ts = SystemTime::now(); - let a = ts.duration_since(UNIX_EPOCH + Duration::new(1, 0)).unwrap(); + let a = ts.duration_since(UNIX_EPOCH + Duration::SECOND).unwrap(); let b = ts.duration_since(UNIX_EPOCH).unwrap(); assert!(b > a); - assert_eq!(b - a, Duration::new(1, 0)); + assert_eq!(b - a, Duration::SECOND); - let thirty_years = Duration::new(1, 0) * 60 * 60 * 24 * 365 * 30; + let thirty_years = Duration::SECOND * 60 * 60 * 24 * 365 * 30; // Right now for CI this test is run in an emulator, and apparently the // aarch64 emulator's sense of time is that we're still living in the diff --git a/library/std/tests/run-time-detect.rs b/library/std/tests/run-time-detect.rs index 8dd1a8ac0d2..61a04c46722 100644 --- a/library/std/tests/run-time-detect.rs +++ b/library/std/tests/run-time-detect.rs @@ -54,42 +54,62 @@ fn powerpc64_linux() { #[test] #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] fn x86_all() { + // the below is the set of features we can test at runtime, but don't actually + // use to gate anything and are thus not part of the X86_ALLOWED_FEATURES list + + println!("abm: {:?}", is_x86_feature_detected!("abm")); // this is a synonym for lzcnt but we test it anyways + println!("mmx: {:?}", is_x86_feature_detected!("mmx")); + println!("tsc: {:?}", is_x86_feature_detected!("tsc")); + + // the below is in alphabetical order and matches + // the order of X86_ALLOWED_FEATURES in rustc_codegen_ssa's target_features.rs + + println!("adx: {:?}", is_x86_feature_detected!("adx")); println!("aes: {:?}", is_x86_feature_detected!("aes")); - println!("pcmulqdq: {:?}", is_x86_feature_detected!("pclmulqdq")); + println!("avx: {:?}", is_x86_feature_detected!("avx")); + println!("avx2: {:?}", is_x86_feature_detected!("avx2")); + println!("avx512bf16: {:?}", is_x86_feature_detected!("avx512bf16")); + println!("avx512bitalg: {:?}", is_x86_feature_detected!("avx512bitalg")); + println!("avx512bw: {:?}", is_x86_feature_detected!("avx512bw")); + println!("avx512cd: {:?}", is_x86_feature_detected!("avx512cd")); + println!("avx512dq: {:?}", is_x86_feature_detected!("avx512dq")); + println!("avx512er: {:?}", is_x86_feature_detected!("avx512er")); + println!("avx512f: {:?}", is_x86_feature_detected!("avx512f")); + println!("avx512gfni: {:?}", is_x86_feature_detected!("avx512gfni")); + println!("avx512ifma: {:?}", is_x86_feature_detected!("avx512ifma")); + println!("avx512pf: {:?}", is_x86_feature_detected!("avx512pf")); + println!("avx512vaes: {:?}", is_x86_feature_detected!("avx512vaes")); + println!("avx512vbmi: {:?}", is_x86_feature_detected!("avx512vbmi")); + println!("avx512vbmi2: {:?}", is_x86_feature_detected!("avx512vbmi2")); + println!("avx512vl: {:?}", is_x86_feature_detected!("avx512vl")); + println!("avx512vnni: {:?}", is_x86_feature_detected!("avx512vnni")); + println!("avx512vp2intersect: {:?}", is_x86_feature_detected!("avx512vp2intersect")); + println!("avx512vpclmulqdq: {:?}", is_x86_feature_detected!("avx512vpclmulqdq")); + println!("avx512vpopcntdq: {:?}", is_x86_feature_detected!("avx512vpopcntdq")); + println!("bmi1: {:?}", is_x86_feature_detected!("bmi1")); + println!("bmi2: {:?}", is_x86_feature_detected!("bmi2")); + println!("cmpxchg16b: {:?}", is_x86_feature_detected!("cmpxchg16b")); + println!("f16c: {:?}", is_x86_feature_detected!("f16c")); + println!("fma: {:?}", is_x86_feature_detected!("fma")); + println!("fxsr: {:?}", is_x86_feature_detected!("fxsr")); + println!("lzcnt: {:?}", 
is_x86_feature_detected!("lzcnt")); + //println!("movbe: {:?}", is_x86_feature_detected!("movbe")); // movbe is unsupported as a target feature + println!("pclmulqdq: {:?}", is_x86_feature_detected!("pclmulqdq")); + println!("popcnt: {:?}", is_x86_feature_detected!("popcnt")); println!("rdrand: {:?}", is_x86_feature_detected!("rdrand")); println!("rdseed: {:?}", is_x86_feature_detected!("rdseed")); - println!("tsc: {:?}", is_x86_feature_detected!("tsc")); - println!("mmx: {:?}", is_x86_feature_detected!("mmx")); + println!("rtm: {:?}", is_x86_feature_detected!("rtm")); + println!("sha: {:?}", is_x86_feature_detected!("sha")); println!("sse: {:?}", is_x86_feature_detected!("sse")); println!("sse2: {:?}", is_x86_feature_detected!("sse2")); println!("sse3: {:?}", is_x86_feature_detected!("sse3")); - println!("ssse3: {:?}", is_x86_feature_detected!("ssse3")); println!("sse4.1: {:?}", is_x86_feature_detected!("sse4.1")); println!("sse4.2: {:?}", is_x86_feature_detected!("sse4.2")); println!("sse4a: {:?}", is_x86_feature_detected!("sse4a")); - println!("sha: {:?}", is_x86_feature_detected!("sha")); - println!("avx: {:?}", is_x86_feature_detected!("avx")); - println!("avx2: {:?}", is_x86_feature_detected!("avx2")); - println!("avx512f {:?}", is_x86_feature_detected!("avx512f")); - println!("avx512cd {:?}", is_x86_feature_detected!("avx512cd")); - println!("avx512er {:?}", is_x86_feature_detected!("avx512er")); - println!("avx512pf {:?}", is_x86_feature_detected!("avx512pf")); - println!("avx512bw {:?}", is_x86_feature_detected!("avx512bw")); - println!("avx512dq {:?}", is_x86_feature_detected!("avx512dq")); - println!("avx512vl {:?}", is_x86_feature_detected!("avx512vl")); - println!("avx512_ifma {:?}", is_x86_feature_detected!("avx512ifma")); - println!("avx512_vbmi {:?}", is_x86_feature_detected!("avx512vbmi")); - println!("avx512_vpopcntdq {:?}", is_x86_feature_detected!("avx512vpopcntdq")); - println!("fma: {:?}", is_x86_feature_detected!("fma")); - println!("bmi1: {:?}", is_x86_feature_detected!("bmi1")); - println!("bmi2: {:?}", is_x86_feature_detected!("bmi2")); - println!("abm: {:?}", is_x86_feature_detected!("abm")); - println!("lzcnt: {:?}", is_x86_feature_detected!("lzcnt")); + println!("ssse3: {:?}", is_x86_feature_detected!("ssse3")); println!("tbm: {:?}", is_x86_feature_detected!("tbm")); - println!("popcnt: {:?}", is_x86_feature_detected!("popcnt")); - println!("fxsr: {:?}", is_x86_feature_detected!("fxsr")); println!("xsave: {:?}", is_x86_feature_detected!("xsave")); + println!("xsavec: {:?}", is_x86_feature_detected!("xsavec")); println!("xsaveopt: {:?}", is_x86_feature_detected!("xsaveopt")); println!("xsaves: {:?}", is_x86_feature_detected!("xsaves")); - println!("xsavec: {:?}", is_x86_feature_detected!("xsavec")); } diff --git a/library/stdarch b/library/stdarch index 3c3664355ef..777efaf5644 160000 --- a/library/stdarch +++ b/library/stdarch @@ -1 +1 @@ -Subproject commit 3c3664355ef46e788b53080e521d6542fbddfd84 +Subproject commit 777efaf5644706b36706a7a5c51edb63835e05ca diff --git a/library/test/Cargo.toml b/library/test/Cargo.toml index 3d6910b107d..d5804cc3dd8 100644 --- a/library/test/Cargo.toml +++ b/library/test/Cargo.toml @@ -26,6 +26,8 @@ default = ["std_detect_file_io", "std_detect_dlsym_getauxval", "panic-unwind"] backtrace = ["std/backtrace"] compiler-builtins-c = ["std/compiler-builtins-c"] compiler-builtins-mem = ["std/compiler-builtins-mem"] +compiler-builtins-asm = ["std/compiler-builtins-asm"] +compiler-builtins-mangled-names = 
["std/compiler-builtins-mangled-names"] llvm-libunwind = ["std/llvm-libunwind"] system-llvm-libunwind = ["std/system-llvm-libunwind"] panic-unwind = ["std/panic_unwind"] diff --git a/library/test/src/bench.rs b/library/test/src/bench.rs index 10546de1764..d4b37284ea7 100644 --- a/library/test/src/bench.rs +++ b/library/test/src/bench.rs @@ -2,8 +2,7 @@ pub use std::hint::black_box; use super::{ - event::CompletedTest, helpers::sink::Sink, options::BenchMode, test_result::TestResult, - types::TestDesc, Sender, + event::CompletedTest, options::BenchMode, test_result::TestResult, types::TestDesc, Sender, }; use crate::stats; @@ -185,21 +184,14 @@ where let mut bs = Bencher { mode: BenchMode::Auto, summary: None, bytes: 0 }; let data = Arc::new(Mutex::new(Vec::new())); - let oldio = if !nocapture { - Some(( - io::set_print(Some(Sink::new_boxed(&data))), - io::set_panic(Some(Sink::new_boxed(&data))), - )) - } else { - None - }; + + if !nocapture { + io::set_output_capture(Some(data.clone())); + } let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f))); - if let Some((printio, panicio)) = oldio { - io::set_print(printio); - io::set_panic(panicio); - } + io::set_output_capture(None); let test_result = match result { //bs.bench(f) { diff --git a/library/test/src/helpers/mod.rs b/library/test/src/helpers/mod.rs index eb416b10150..b7f00c4c86c 100644 --- a/library/test/src/helpers/mod.rs +++ b/library/test/src/helpers/mod.rs @@ -5,4 +5,3 @@ pub mod concurrency; pub mod exit_code; pub mod isatty; pub mod metrics; -pub mod sink; diff --git a/library/test/src/helpers/sink.rs b/library/test/src/helpers/sink.rs deleted file mode 100644 index dfbf0a3b72f..00000000000 --- a/library/test/src/helpers/sink.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! Module providing a helper structure to capture output in subprocesses. 
- -use std::{ - io, - io::prelude::Write, - sync::{Arc, Mutex}, -}; - -#[derive(Clone)] -pub struct Sink(Arc>>); - -impl Sink { - pub fn new_boxed(data: &Arc>>) -> Box { - Box::new(Self(data.clone())) - } -} - -impl io::LocalOutput for Sink { - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } -} - -impl Write for Sink { - fn write(&mut self, data: &[u8]) -> io::Result { - Write::write(&mut *self.0.lock().unwrap(), data) - } - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs index 9c5bb8957b5..f4c07655bc4 100644 --- a/library/test/src/lib.rs +++ b/library/test/src/lib.rs @@ -23,9 +23,8 @@ #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc))] #![feature(rustc_private)] #![feature(nll)] -#![feature(bool_to_option)] #![feature(available_concurrency)] -#![feature(set_stdio)] +#![feature(internal_output_capture)] #![feature(panic_unwind)] #![feature(staged_api)] #![feature(termination_trait_lib)] @@ -89,7 +88,6 @@ mod tests; use event::{CompletedTest, TestEvent}; use helpers::concurrency::get_concurrency; use helpers::exit_code::get_exit_code; -use helpers::sink::Sink; use options::{Concurrent, RunStrategy}; use test_result::*; use time::TestExecTime; @@ -531,14 +529,9 @@ fn run_test_in_process( // Buffer for capturing standard I/O let data = Arc::new(Mutex::new(Vec::new())); - let oldio = if !nocapture { - Some(( - io::set_print(Some(Sink::new_boxed(&data))), - io::set_panic(Some(Sink::new_boxed(&data))), - )) - } else { - None - }; + if !nocapture { + io::set_output_capture(Some(data.clone())); + } let start = report_time.then(Instant::now); let result = catch_unwind(AssertUnwindSafe(testfn)); @@ -547,16 +540,13 @@ fn run_test_in_process( TestExecTime(duration) }); - if let Some((printio, panicio)) = oldio { - io::set_print(printio); - io::set_panic(panicio); - } + io::set_output_capture(None); let test_result = match result { Ok(()) => calc_result(&desc, Ok(()), &time_opts, &exec_time), Err(e) => calc_result(&desc, Err(e.as_ref()), &time_opts, &exec_time), }; - let stdout = data.lock().unwrap().to_vec(); + let stdout = data.lock().unwrap_or_else(|e| e.into_inner()).to_vec(); let message = CompletedTest::new(desc, test_result, exec_time, stdout); monitor_ch.send(message).unwrap(); } diff --git a/src/bootstrap/CHANGELOG.md b/src/bootstrap/CHANGELOG.md index 7bb4e504275..a103c9fb0b7 100644 --- a/src/bootstrap/CHANGELOG.md +++ b/src/bootstrap/CHANGELOG.md @@ -8,6 +8,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - `x.py check` needs opt-in to check tests (--all-targets) [#77473](https://github.com/rust-lang/rust/pull/77473) - The default bootstrap profiles are now located at `bootstrap/defaults/config.$PROFILE.toml` (previously they were located at `bootstrap/defaults/config.toml.$PROFILE`) [#77558](https://github.com/rust-lang/rust/pull/77558) +- If you have Rust already installed, `x.py` will now infer the host target + from the default rust toolchain. 
[#78513](https://github.com/rust-lang/rust/pull/78513) ## [Version 2] - 2020-09-25 diff --git a/src/bootstrap/bootstrap.py b/src/bootstrap/bootstrap.py index 87e15363818..a819e1b6e2f 100644 --- a/src/bootstrap/bootstrap.py +++ b/src/bootstrap/bootstrap.py @@ -187,8 +187,23 @@ def format_build_time(duration): return str(datetime.timedelta(seconds=int(duration))) -def default_build_triple(): +def default_build_triple(verbose): """Build triple as in LLVM""" + # If the user already has a host build triple with an existing `rustc` + # install, use their preference. This fixes most issues with Windows builds + # being detected as GNU instead of MSVC. + try: + version = subprocess.check_output(["rustc", "--version", "--verbose"]) + host = next(x for x in version.split('\n') if x.startswith("host: ")) + triple = host.split("host: ")[1] + if verbose: + print("detected default triple {}".format(triple)) + return triple + except Exception as e: + if verbose: + print("rustup not detected: {}".format(e)) + print("falling back to auto-detect") + default_encoding = sys.getdefaultencoding() required = sys.platform != 'win32' ostype = require(["uname", "-s"], exit=required) @@ -345,7 +360,6 @@ def output(filepath): class RustBuild(object): """Provide all the methods required to build Rust""" def __init__(self): - self.cargo_channel = '' self.date = '' self._download_url = '' self.rustc_channel = '' @@ -372,7 +386,6 @@ def download_stage0(self): will move all the content to the right place. """ rustc_channel = self.rustc_channel - cargo_channel = self.cargo_channel rustfmt_channel = self.rustfmt_channel if self.rustc().startswith(self.bin_root()) and \ @@ -385,12 +398,15 @@ def download_stage0(self): rustc_channel, self.build, tarball_suffix) pattern = "rust-std-{}".format(self.build) self._download_stage0_helper(filename, pattern, tarball_suffix) - filename = "rustc-{}-{}{}".format(rustc_channel, self.build, tarball_suffix) self._download_stage0_helper(filename, "rustc", tarball_suffix) + filename = "cargo-{}-{}{}".format(rustc_channel, self.build, + tarball_suffix) + self._download_stage0_helper(filename, "cargo", tarball_suffix) self.fix_bin_or_dylib("{}/bin/rustc".format(self.bin_root())) self.fix_bin_or_dylib("{}/bin/rustdoc".format(self.bin_root())) + self.fix_bin_or_dylib("{}/bin/cargo".format(self.bin_root())) lib_dir = "{}/lib".format(self.bin_root()) for lib in os.listdir(lib_dir): if lib.endswith(".so"): @@ -398,17 +414,6 @@ def download_stage0(self): with output(self.rustc_stamp()) as rust_stamp: rust_stamp.write(self.date) - if self.cargo().startswith(self.bin_root()) and \ - (not os.path.exists(self.cargo()) or - self.program_out_of_date(self.cargo_stamp())): - tarball_suffix = '.tar.xz' if support_xz() else '.tar.gz' - filename = "cargo-{}-{}{}".format(cargo_channel, self.build, - tarball_suffix) - self._download_stage0_helper(filename, "cargo", tarball_suffix) - self.fix_bin_or_dylib("{}/bin/cargo".format(self.bin_root())) - with output(self.cargo_stamp()) as cargo_stamp: - cargo_stamp.write(self.date) - if self.rustfmt() and self.rustfmt().startswith(self.bin_root()) and ( not os.path.exists(self.rustfmt()) or self.program_out_of_date(self.rustfmt_stamp(), self.rustfmt_channel) @@ -432,12 +437,15 @@ def download_stage0(self): # # This works even in a repository that has not yet initialized # submodules. 
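
The new `default_build_triple(verbose)` above shells out to whatever `rustc` is already on `PATH`, reads the `host:` line of its verbose version output, and falls back to the existing `uname`-based detection if that fails. A rough Rust equivalent of that probe, included only to make the mechanism concrete (bootstrap itself does this in Python):

```rust
use std::process::Command;

/// Ask the default `rustc` for its host triple, e.g. "x86_64-pc-windows-msvc".
fn detect_host_triple() -> Option<String> {
    let output = Command::new("rustc").args(&["--version", "--verbose"]).output().ok()?;
    let stdout = String::from_utf8(output.stdout).ok()?;
    stdout
        .lines()
        .find_map(|line| line.strip_prefix("host: "))
        .map(|host| host.trim().to_string())
}

fn main() {
    match detect_host_triple() {
        Some(triple) => println!("detected default triple {}", triple),
        None => println!("no rustc found; falling back to uname-style detection"),
    }
}
```
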
+ top_level = subprocess.check_output([ + "git", "rev-parse", "--show-toplevel", + ]).decode(sys.getdefaultencoding()).strip() llvm_sha = subprocess.check_output([ "git", "log", "--author=bors", "--format=%H", "-n1", "-m", "--first-parent", "--", - "src/llvm-project", - "src/bootstrap/download-ci-llvm-stamp", + "{}/src/llvm-project".format(top_level), + "{}/src/bootstrap/download-ci-llvm-stamp".format(top_level), ]).decode(sys.getdefaultencoding()).strip() llvm_assertions = self.get_toml('assertions', 'llvm') == 'true' if self.program_out_of_date(self.llvm_stamp(), llvm_sha + str(llvm_assertions)): @@ -586,16 +594,6 @@ def rustc_stamp(self): """ return os.path.join(self.bin_root(), '.rustc-stamp') - def cargo_stamp(self): - """Return the path for .cargo-stamp - - >>> rb = RustBuild() - >>> rb.build_dir = "build" - >>> rb.cargo_stamp() == os.path.join("build", "stage0", ".cargo-stamp") - True - """ - return os.path.join(self.bin_root(), '.cargo-stamp') - def rustfmt_stamp(self): """Return the path for .rustfmt-stamp @@ -831,7 +829,7 @@ def build_triple(self): config = self.get_toml('build') if config: return config - return default_build_triple() + return default_build_triple(self.verbose) def check_submodule(self, module, slow_submodules): if not slow_submodules: @@ -1041,7 +1039,6 @@ def bootstrap(help_triggered): data = stage0_data(build.rust_root) build.date = data['date'] build.rustc_channel = data['rustc'] - build.cargo_channel = data['cargo'] if "rustfmt" in data: build.rustfmt_channel = data['rustfmt'] diff --git a/src/bootstrap/builder.rs b/src/bootstrap/builder.rs index db671c5fe65..508d785834f 100644 --- a/src/bootstrap/builder.rs +++ b/src/bootstrap/builder.rs @@ -19,7 +19,7 @@ use crate::compile; use crate::config::TargetSelection; use crate::dist; use crate::doc; -use crate::flags::Subcommand; +use crate::flags::{Color, Subcommand}; use crate::install; use crate::native; use crate::run; @@ -264,7 +264,7 @@ impl<'a> ShouldRun<'a> { /// `all_krates` should probably be removed at some point. pub fn all_krates(mut self, name: &str) -> Self { let mut set = BTreeSet::new(); - for krate in self.builder.in_tree_crates(name) { + for krate in self.builder.in_tree_crates(name, None) { let path = krate.local_path(self.builder); set.insert(path); } @@ -277,7 +277,7 @@ impl<'a> ShouldRun<'a> { /// /// `make_run` will be called separately for each matching command-line path. pub fn krate(mut self, name: &str) -> Self { - for krate in self.builder.in_tree_crates(name) { + for krate in self.builder.in_tree_crates(name, None) { let path = krate.local_path(self.builder); self.paths.insert(PathSet::one(path)); } @@ -811,6 +811,16 @@ impl<'a> Builder<'a> { cargo.env("REAL_LIBRARY_PATH", e); } + match self.build.config.color { + Color::Always => { + cargo.arg("--color=always"); + } + Color::Never => { + cargo.arg("--color=never"); + } + Color::Auto => {} // nothing to do + } + if cmd != "install" { cargo.arg("--target").arg(target.rustc_target_arg()); } else { diff --git a/src/bootstrap/check.rs b/src/bootstrap/check.rs index 2e3cfc98c8c..f65b2b2c79f 100644 --- a/src/bootstrap/check.rs +++ b/src/bootstrap/check.rs @@ -108,7 +108,7 @@ impl Step for Std { // Explicitly pass -p for all dependencies krates -- this will force cargo // to also check the tests/benches/examples for these crates, rather // than just the leaf crate. 
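
The `builder.rs` hunk above forwards the new `--color` option (added to `flags.rs` later in this diff, invoked as e.g. `./x.py build --color=never`) to every cargo invocation. A standalone sketch of that forwarding, with `Color` mirroring the enum from the diff and the `Command` purely illustrative:

```rust
use std::process::Command;

#[derive(Clone, Copy)]
enum Color {
    Always,
    Never,
    Auto,
}

fn apply_color(cargo: &mut Command, color: Color) {
    match color {
        Color::Always => {
            cargo.arg("--color=always");
        }
        Color::Never => {
            cargo.arg("--color=never");
        }
        // Cargo already auto-detects a terminal, so `Auto` adds nothing.
        Color::Auto => {}
    }
}

fn main() {
    let mut cargo = Command::new("cargo");
    cargo.arg("check");
    apply_color(&mut cargo, Color::Never);
    println!("{:?}", cargo);
}
```
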
- for krate in builder.in_tree_crates("test") { + for krate in builder.in_tree_crates("test", Some(target)) { cargo.arg("-p").arg(krate.name); } @@ -172,7 +172,7 @@ impl Step for Rustc { // Explicitly pass -p for all compiler krates -- this will force cargo // to also check the tests/benches/examples for these crates, rather // than just the leaf crate. - for krate in builder.in_tree_crates("rustc-main") { + for krate in builder.in_tree_crates("rustc-main", Some(target)) { cargo.arg("-p").arg(krate.name); } @@ -232,6 +232,11 @@ impl Step for CodegenBackend { .arg(builder.src.join(format!("compiler/rustc_codegen_{}/Cargo.toml", backend))); rustc_cargo_env(builder, &mut cargo, target); + builder.info(&format!( + "Checking {} artifacts ({} -> {})", + backend, &compiler.host.triple, target.triple + )); + run_cargo( builder, cargo, diff --git a/src/bootstrap/compile.rs b/src/bootstrap/compile.rs index ed9b9108586..cdad1cb4d49 100644 --- a/src/bootstrap/compile.rs +++ b/src/bootstrap/compile.rs @@ -143,7 +143,7 @@ fn copy_third_party_objects( } } - if builder.config.sanitizers && compiler.stage != 0 { + if builder.config.sanitizers_enabled(target) && compiler.stage != 0 { // The sanitizers are only copied in stage1 or above, // to avoid creating dependency on LLVM. target_deps.extend( @@ -251,7 +251,7 @@ pub fn std_cargo(builder: &Builder<'_>, target: TargetSelection, stage: u32, car .arg("--features") .arg(features); } else { - let mut features = builder.std_features(); + let mut features = builder.std_features(target); features.push_str(compiler_builtins_c_feature); cargo diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index 40ea9afb6f6..9fcbe8e1e35 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -13,8 +13,8 @@ use std::path::{Path, PathBuf}; use std::str::FromStr; use crate::cache::{Interned, INTERNER}; -use crate::flags::Flags; pub use crate::flags::Subcommand; +use crate::flags::{Color, Flags}; use crate::util::exe; use build_helper::t; use merge::Merge; @@ -67,6 +67,7 @@ pub struct Config { pub json_output: bool, pub test_compare_mode: bool, pub llvm_libunwind: Option, + pub color: Color, pub on_fail: Option, pub stage: u32, @@ -99,6 +100,7 @@ pub struct Config { pub llvm_version_suffix: Option, pub llvm_use_linker: Option, pub llvm_allow_old_toolchain: Option, + pub llvm_polly: Option, pub llvm_from_ci: bool, pub use_lld: bool, @@ -150,6 +152,7 @@ pub struct Config { // misc pub low_priority: bool, pub channel: String, + pub description: Option, pub verbose_tests: bool, pub save_toolstates: Option, pub print_step_timings: bool, @@ -278,6 +281,8 @@ pub struct Target { pub ranlib: Option, pub linker: Option, pub ndk: Option, + pub sanitizers: Option, + pub profiler: Option, pub crt_static: Option, pub musl_root: Option, pub musl_libdir: Option, @@ -418,6 +423,7 @@ struct Llvm { use_libcxx: Option, use_linker: Option, allow_old_toolchain: Option, + polly: Option, download_ci_llvm: Option, } @@ -465,6 +471,7 @@ struct Rust { parallel_compiler: Option, default_linker: Option, channel: Option, + description: Option, musl_root: Option, rpath: Option, verbose_tests: Option, @@ -501,6 +508,8 @@ struct TomlTarget { llvm_config: Option, llvm_filecheck: Option, android_ndk: Option, + sanitizers: Option, + profiler: Option, crt_static: Option, musl_root: Option, musl_libdir: Option, @@ -571,6 +580,7 @@ impl Config { config.keep_stage = flags.keep_stage; config.keep_stage_std = flags.keep_stage_std; config.bindir = "bin".into(); // default + config.color = 
flags.color; if let Some(value) = flags.deny_warnings { config.deny_warnings = value; } @@ -762,6 +772,7 @@ impl Config { set(&mut config.llvm_use_libcxx, llvm.use_libcxx); config.llvm_use_linker = llvm.use_linker.clone(); config.llvm_allow_old_toolchain = llvm.allow_old_toolchain; + config.llvm_polly = llvm.polly; config.llvm_from_ci = match llvm.download_ci_llvm { Some(StringOrBool::String(s)) => { assert!(s == "if-available", "unknown option `{}` for download-ci-llvm", s); @@ -795,6 +806,7 @@ impl Config { check_ci_llvm!(llvm.use_libcxx); check_ci_llvm!(llvm.use_linker); check_ci_llvm!(llvm.allow_old_toolchain); + check_ci_llvm!(llvm.polly); // CI-built LLVM is shared config.llvm_link_shared = true; @@ -831,6 +843,7 @@ impl Config { .map(|v| v.parse().expect("failed to parse rust.llvm-libunwind")); set(&mut config.backtrace, rust.backtrace); set(&mut config.channel, rust.channel); + config.description = rust.description; set(&mut config.rust_dist_src, rust.dist_src); set(&mut config.verbose_tests, rust.verbose_tests); // in the case "false" is set explicitly, do not overwrite the command line args @@ -886,6 +899,8 @@ impl Config { target.musl_libdir = cfg.musl_libdir.map(PathBuf::from); target.wasi_root = cfg.wasi_root.map(PathBuf::from); target.qemu_rootfs = cfg.qemu_rootfs.map(PathBuf::from); + target.sanitizers = cfg.sanitizers; + target.profiler = cfg.profiler; config.target_config.insert(TargetSelection::from_user(&triple), target); } @@ -995,6 +1010,22 @@ impl Config { self.verbose > 1 } + pub fn sanitizers_enabled(&self, target: TargetSelection) -> bool { + self.target_config.get(&target).map(|t| t.sanitizers).flatten().unwrap_or(self.sanitizers) + } + + pub fn any_sanitizers_enabled(&self) -> bool { + self.target_config.values().any(|t| t.sanitizers == Some(true)) || self.sanitizers + } + + pub fn profiler_enabled(&self, target: TargetSelection) -> bool { + self.target_config.get(&target).map(|t| t.profiler).flatten().unwrap_or(self.profiler) + } + + pub fn any_profiler_enabled(&self) -> bool { + self.target_config.values().any(|t| t.profiler == Some(true)) || self.profiler + } + pub fn llvm_enabled(&self) -> bool { self.rust_codegen_backends.contains(&INTERNER.intern_str("llvm")) } diff --git a/src/bootstrap/configure.py b/src/bootstrap/configure.py index e156952d56f..42f00ce9621 100755 --- a/src/bootstrap/configure.py +++ b/src/bootstrap/configure.py @@ -146,6 +146,7 @@ def v(*args): v("experimental-targets", "llvm.experimental-targets", "experimental LLVM targets to build") v("release-channel", "rust.channel", "the name of the release channel to build") +v("release-description", "rust.description", "optional descriptive string for version output") # Used on systems where "cc" is unavailable v("default-linker", "rust.default-linker", "the default linker") @@ -266,7 +267,7 @@ def err(msg): def build(): if 'build' in known_args: return known_args['build'][-1][1] - return bootstrap.default_build_triple() + return bootstrap.default_build_triple(verbose=False) def set(key, value): diff --git a/src/bootstrap/dist.rs b/src/bootstrap/dist.rs index bdab12db435..9b77e38a847 100644 --- a/src/bootstrap/dist.rs +++ b/src/bootstrap/dist.rs @@ -1040,6 +1040,30 @@ impl Step for Src { builder.copy(&builder.src.join(file), &dst_src.join(file)); } + // libtest includes std and everything else, so vendoring it + // creates exactly what's needed for `cargo -Zbuild-std` or any + // other analysis of the stdlib's source. 
Cargo also needs help + // finding the lock, so we copy it to libtest temporarily. + // + // Note that this requires std to only have one version of each + // crate. e.g. two versions of getopts won't be patchable. + let dst_libtest = dst_src.join("library/test"); + let dst_vendor = dst_src.join("vendor"); + let root_lock = dst_src.join("Cargo.lock"); + let temp_lock = dst_libtest.join("Cargo.lock"); + + // `cargo vendor` will delete everything from the lockfile that + // isn't used by libtest, so we need to not use any links! + builder.really_copy(&root_lock, &temp_lock); + + let mut cmd = Command::new(&builder.initial_cargo); + cmd.arg("vendor").arg(dst_vendor).current_dir(&dst_libtest); + builder.info("Dist src"); + let _time = timeit(builder); + builder.run(&mut cmd); + + builder.remove(&temp_lock); + // Create source tarball in rust-installer format let mut cmd = rust_installer(builder); cmd.arg("generate") @@ -1056,8 +1080,6 @@ impl Step for Src { .arg("--component-name=rust-src") .arg("--legacy-manifest-dirs=rustlib,cargo"); - builder.info("Dist src"); - let _time = timeit(builder); builder.run(&mut cmd); builder.remove_dir(&image); @@ -2335,6 +2357,25 @@ fn maybe_install_llvm(builder: &Builder<'_>, target: TargetSelection, dst_libdir return; } + if let Some(config) = builder.config.target_config.get(&target) { + if config.llvm_config.is_some() && !builder.config.llvm_from_ci { + // If the LLVM was externally provided, then we don't currently copy + // artifacts into the sysroot. This is not necessarily the right + // choice (in particular, it will require the LLVM dylib to be in + // the linker's load path at runtime), but the common use case for + // external LLVMs is distribution provided LLVMs, and in that case + // they're usually in the standard search path (e.g., /usr/lib) and + // copying them here is going to cause problems as we may end up + // with the wrong files and isn't what distributions want. + // + // This behavior may be revisited in the future though. + // + // If the LLVM is coming from ourselves (just from CI) though, we + // still want to install it, as it otherwise won't be available. + return; + } + } + // On macOS, rustc (and LLVM tools) link to an unversioned libLLVM.dylib // instead of libLLVM-11-rust-....dylib, as on linux. It's not entirely // clear why this is the case, though. llvm-config will emit the versioned diff --git a/src/bootstrap/doc.rs b/src/bootstrap/doc.rs index aa670bd9a2e..af7f7eff894 100644 --- a/src/bootstrap/doc.rs +++ b/src/bootstrap/doc.rs @@ -535,8 +535,12 @@ impl Step for Rustc { // Find dependencies for top level crates. 
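
The `sanitizers_enabled`/`profiler_enabled` accessors added to `config.rs` earlier in this diff let a per-target section of `config.toml` override the global `sanitizers`/`profiler` switches, with the global value as the fallback. A minimal standalone sketch of that precedence rule; the types here are illustrative, not bootstrap's own:

```rust
use std::collections::HashMap;

struct Target {
    sanitizers: Option<bool>,
}

/// A per-target setting wins; otherwise fall back to the global flag.
fn sanitizers_enabled(targets: &HashMap<&str, Target>, triple: &str, global: bool) -> bool {
    targets.get(triple).and_then(|t| t.sanitizers).unwrap_or(global)
}

fn main() {
    let mut targets = HashMap::new();
    targets.insert("x86_64-unknown-linux-gnu", Target { sanitizers: Some(true) });

    // Explicit per-target override:
    assert!(sanitizers_enabled(&targets, "x86_64-unknown-linux-gnu", false));
    // No entry for this target, so the global setting applies:
    assert!(!sanitizers_enabled(&targets, "aarch64-unknown-linux-musl", false));
    println!("ok");
}
```
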
let mut compiler_crates = HashSet::new(); for root_crate in &["rustc_driver", "rustc_codegen_llvm", "rustc_codegen_ssa"] { - compiler_crates - .extend(builder.in_tree_crates(root_crate).into_iter().map(|krate| krate.name)); + compiler_crates.extend( + builder + .in_tree_crates(root_crate, Some(target)) + .into_iter() + .map(|krate| krate.name), + ); } for krate in &compiler_crates { diff --git a/src/bootstrap/flags.rs b/src/bootstrap/flags.rs index dbfcf4df9b4..5a8096674c6 100644 --- a/src/bootstrap/flags.rs +++ b/src/bootstrap/flags.rs @@ -15,6 +15,31 @@ use crate::config::{Config, TargetSelection}; use crate::setup::Profile; use crate::{Build, DocTests}; +pub enum Color { + Always, + Never, + Auto, +} + +impl Default for Color { + fn default() -> Self { + Self::Auto + } +} + +impl std::str::FromStr for Color { + type Err = (); + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "always" => Ok(Self::Always), + "never" => Ok(Self::Never), + "auto" => Ok(Self::Auto), + _ => Err(()), + } + } +} + /// Deserialized version of all flags for this compile. pub struct Flags { pub verbose: usize, // number of -v args; each extra -v after the first is passed to Cargo @@ -34,6 +59,7 @@ pub struct Flags { pub rustc_error_format: Option, pub json_output: bool, pub dry_run: bool, + pub color: Color, // This overrides the deny-warnings configuration option, // which passes -Dwarnings to the compiler invocations. @@ -184,6 +210,7 @@ To learn more about a subcommand, run `./x.py -h`", ); opts.optopt("", "error-format", "rustc error format", "FORMAT"); opts.optflag("", "json-output", "use message-format=json"); + opts.optopt("", "color", "whether to use color in cargo and rustc output", "STYLE"); opts.optopt( "", "llvm-skip-rebuild", @@ -644,6 +671,9 @@ Arguments: llvm_skip_rebuild: matches.opt_str("llvm-skip-rebuild").map(|s| s.to_lowercase()).map( |s| s.parse::().expect("`llvm-skip-rebuild` should be either true or false"), ), + color: matches + .opt_get_default("color", Color::Auto) + .expect("`color` should be `always`, `never`, or `auto`"), } } } diff --git a/src/bootstrap/lib.rs b/src/bootstrap/lib.rs index 0c41ca14403..37adc7cda44 100644 --- a/src/bootstrap/lib.rs +++ b/src/bootstrap/lib.rs @@ -173,17 +173,19 @@ pub use crate::config::Config; pub use crate::flags::Subcommand; const LLVM_TOOLS: &[&str] = &[ - "llvm-nm", // used to inspect binaries; it shows symbol names, their sizes and visibility - "llvm-objcopy", // used to transform ELFs into binary format which flashing tools consume - "llvm-objdump", // used to disassemble programs + "llvm-cov", // used to generate coverage report + "llvm-nm", // used to inspect binaries; it shows symbol names, their sizes and visibility + "llvm-objcopy", // used to transform ELFs into binary format which flashing tools consume + "llvm-objdump", // used to disassemble programs "llvm-profdata", // used to inspect and merge files generated by profiles - "llvm-readobj", // used to get information from ELFs/objects that the other tools don't provide - "llvm-size", // used to prints the size of the linker sections of a program - "llvm-strip", // used to discard symbols from binary files to reduce their size - "llvm-ar", // used for creating and modifying archive files - "llvm-dis", // used to disassemble LLVM bitcode - "llc", // used to compile LLVM bytecode - "opt", // used to optimize LLVM bytecode + "llvm-readobj", // used to get information from ELFs/objects that the other tools don't provide + "llvm-size", // used to prints the size of the 
linker sections of a program + "llvm-strip", // used to discard symbols from binary files to reduce their size + "llvm-ar", // used for creating and modifying archive files + "llvm-as", // used to convert LLVM assembly to LLVM bitcode + "llvm-dis", // used to disassemble LLVM bitcode + "llc", // used to compile LLVM bytecode + "opt", // used to optimize LLVM bytecode ]; pub const VERSION: usize = 2; @@ -545,7 +547,7 @@ impl Build { /// Gets the space-separated set of activated features for the standard /// library. - fn std_features(&self) -> String { + fn std_features(&self, target: TargetSelection) -> String { let mut features = "panic-unwind".to_string(); match self.config.llvm_libunwind.unwrap_or_default() { @@ -556,7 +558,7 @@ impl Build { if self.config.backtrace { features.push_str(" backtrace"); } - if self.config.profiler { + if self.config.profiler_enabled(target) { features.push_str(" profiler"); } features @@ -1084,7 +1086,13 @@ impl Build { /// Note that this is a descriptive string which includes the commit date, /// sha, version, etc. fn rust_version(&self) -> String { - self.rust_info.version(self, &self.version) + let mut version = self.rust_info.version(self, &self.version); + if let Some(ref s) = self.config.description { + version.push_str(" ("); + version.push_str(s); + version.push_str(")"); + } + version } /// Returns the full commit hash. @@ -1119,7 +1127,7 @@ impl Build { /// Returns a Vec of all the dependencies of the given root crate, /// including transitive dependencies and the root itself. Only includes /// "local" crates (those in the local source tree, not from a registry). - fn in_tree_crates(&self, root: &str) -> Vec<&Crate> { + fn in_tree_crates(&self, root: &str, target: Option) -> Vec<&Crate> { let mut ret = Vec::new(); let mut list = vec![INTERNER.intern_str(root)]; let mut visited = HashSet::new(); @@ -1143,7 +1151,10 @@ impl Build { // metadata::build. if visited.insert(dep) && dep != "build_helper" - && (dep != "profiler_builtins" || self.config.profiler) + && (dep != "profiler_builtins" + || target + .map(|t| self.config.profiler_enabled(t)) + .unwrap_or(self.config.any_profiler_enabled())) && (dep != "rustc_codegen_llvm" || self.config.llvm_enabled()) { list.push(*dep); @@ -1178,6 +1189,27 @@ impl Build { paths } + /// Copies a file from `src` to `dst` and doesn't use links, so + /// that the copy can be modified without affecting the original. + pub fn really_copy(&self, src: &Path, dst: &Path) { + if self.config.dry_run { + return; + } + self.verbose_than(1, &format!("Copy {:?} to {:?}", src, dst)); + if src == dst { + return; + } + let _ = fs::remove_file(&dst); + let metadata = t!(src.symlink_metadata()); + if let Err(e) = fs::copy(src, dst) { + panic!("failed to copy `{}` to `{}`: {}", src.display(), dst.display(), e) + } + t!(fs::set_permissions(dst, metadata.permissions())); + let atime = FileTime::from_last_access_time(&metadata); + let mtime = FileTime::from_last_modification_time(&metadata); + t!(filetime::set_file_times(dst, atime, mtime)); + } + /// Copies a file from `src` to `dst` pub fn copy(&self, src: &Path, dst: &Path) { if self.config.dry_run { diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs index 37d6fab070b..d716b23af60 100644 --- a/src/bootstrap/native.rs +++ b/src/bootstrap/native.rs @@ -257,6 +257,10 @@ impl Step for Llvm { enabled_llvm_projects.push("compiler-rt"); } + if let Some(true) = builder.config.llvm_polly { + enabled_llvm_projects.push("polly"); + } + // We want libxml to be disabled. 
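
The `rust_version()` change above appends the new optional `rust.description` string (settable via `./configure --release-description=...`, per the `configure.py` hunk earlier in this diff) to the reported version. A tiny sketch of the resulting string; the sample values are purely hypothetical:

```rust
fn rust_version(base: &str, description: Option<&str>) -> String {
    let mut version = base.to_string();
    if let Some(description) = description {
        version.push_str(" (");
        version.push_str(description);
        version.push(')');
    }
    version
}

fn main() {
    assert_eq!(rust_version("1.48.0-dev", None), "1.48.0-dev");
    assert_eq!(
        rust_version("1.48.0-dev", Some("Example Distro 1.2.3")),
        "1.48.0-dev (Example Distro 1.2.3)"
    );
    println!("ok");
}
```
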
// See https://github.com/rust-lang/rust/pull/50104 cfg.define("LLVM_ENABLE_LIBXML2", "OFF"); @@ -344,11 +348,11 @@ fn check_llvm_version(builder: &Builder<'_>, llvm_config: &Path) { let version = output(cmd.arg("--version")); let mut parts = version.split('.').take(2).filter_map(|s| s.parse::().ok()); if let (Some(major), Some(_minor)) = (parts.next(), parts.next()) { - if major >= 8 { + if major >= 9 { return; } } - panic!("\n\nbad LLVM version: {}, need >=8.0\n\n", version) + panic!("\n\nbad LLVM version: {}, need >=9.0\n\n", version) } fn configure_cmake( diff --git a/src/bootstrap/sanity.rs b/src/bootstrap/sanity.rs index 6826d177a4a..4cfcf6ca407 100644 --- a/src/bootstrap/sanity.rs +++ b/src/bootstrap/sanity.rs @@ -91,7 +91,7 @@ pub fn check(build: &mut Build) { .unwrap_or(true) }) .any(|build_llvm_ourselves| build_llvm_ourselves); - if building_llvm || build.config.sanitizers { + if building_llvm || build.config.any_sanitizers_enabled() { cmd_finder.must_have("cmake"); } diff --git a/src/bootstrap/test.rs b/src/bootstrap/test.rs index 2b87c4b91af..e087e2b8ff1 100644 --- a/src/bootstrap/test.rs +++ b/src/bootstrap/test.rs @@ -484,10 +484,13 @@ impl Step for CompiletestTest { let host = self.host; let compiler = builder.compiler(0, host); + // We need `ToolStd` for the locally-built sysroot because + // compiletest uses unstable features of the `test` crate. + builder.ensure(compile::Std { compiler, target: host }); let cargo = tool::prepare_tool_cargo( builder, compiler, - Mode::ToolBootstrap, + Mode::ToolStd, host, "test", "src/tools/compiletest", @@ -1271,11 +1274,11 @@ note: if you're sure you want to do this, please open an issue as to why. In the cmd.env("RUSTC_BOOTSTRAP", "1"); builder.add_rust_test_threads(&mut cmd); - if builder.config.sanitizers { + if builder.config.sanitizers_enabled(target) { cmd.env("RUSTC_SANITIZER_SUPPORT", "1"); } - if builder.config.profiler { + if builder.config.profiler_enabled(target) { cmd.env("RUSTC_PROFILER_SUPPORT", "1"); } @@ -1591,7 +1594,7 @@ impl Step for CrateLibrustc { let builder = run.builder; let compiler = builder.compiler(builder.top_stage, run.build_triple()); - for krate in builder.in_tree_crates("rustc-main") { + for krate in builder.in_tree_crates("rustc-main", Some(run.target)) { if krate.path.ends_with(&run.path) { let test_kind = builder.kind.into(); @@ -1698,7 +1701,7 @@ impl Step for Crate { }); }; - for krate in builder.in_tree_crates("test") { + for krate in builder.in_tree_crates("test", Some(run.target)) { if krate.path.ends_with(&run.path) { make(Mode::Std, krate); } diff --git a/src/ci/docker/host-x86_64/dist-i686-freebsd/Dockerfile b/src/ci/docker/host-x86_64/dist-i686-freebsd/Dockerfile deleted file mode 100644 index 7db6e58c4d6..00000000000 --- a/src/ci/docker/host-x86_64/dist-i686-freebsd/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -FROM ubuntu:18.04 - -RUN apt-get update && apt-get install -y --no-install-recommends \ - clang \ - make \ - ninja-build \ - file \ - curl \ - ca-certificates \ - python3 \ - git \ - cmake \ - sudo \ - bzip2 \ - xz-utils \ - wget \ - libssl-dev \ - pkg-config - -COPY scripts/freebsd-toolchain.sh /tmp/ -RUN /tmp/freebsd-toolchain.sh i686 - -COPY scripts/sccache.sh /scripts/ -RUN sh /scripts/sccache.sh - -ENV \ - AR_i686_unknown_freebsd=i686-unknown-freebsd11-ar \ - CC_i686_unknown_freebsd=i686-unknown-freebsd11-clang \ - CXX_i686_unknown_freebsd=i686-unknown-freebsd11-clang++ - -ENV HOSTS=i686-unknown-freebsd - -ENV RUST_CONFIGURE_ARGS --enable-extended --enable-profiler --disable-docs 
-ENV SCRIPT python3 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/host-x86_64/dist-various-2/Dockerfile b/src/ci/docker/host-x86_64/dist-various-2/Dockerfile index 47a66f74808..b8b81ab327b 100644 --- a/src/ci/docker/host-x86_64/dist-various-2/Dockerfile +++ b/src/ci/docker/host-x86_64/dist-various-2/Dockerfile @@ -48,6 +48,9 @@ ENV \ CFLAGS_x86_64_fortanix_unknown_sgx="-mlvi-hardening -mllvm -x86-experimental-lvi-inline-asm-hardening" \ CXX_x86_64_fortanix_unknown_sgx=x86_64-fortanix-unknown-sgx-clang++-11 \ CXXFLAGS_x86_64_fortanix_unknown_sgx="-mlvi-hardening -mllvm -x86-experimental-lvi-inline-asm-hardening" \ + AR_i686_unknown_freebsd=i686-unknown-freebsd11-ar \ + CC_i686_unknown_freebsd=i686-unknown-freebsd11-clang \ + CXX_i686_unknown_freebsd=i686-unknown-freebsd11-clang++ \ CC=gcc-7 \ CXX=g++-7 @@ -74,6 +77,9 @@ RUN /tmp/build-x86_64-fortanix-unknown-sgx-toolchain.sh COPY host-x86_64/dist-various-2/build-wasi-toolchain.sh /tmp/ RUN /tmp/build-wasi-toolchain.sh +COPY scripts/freebsd-toolchain.sh /tmp/ +RUN /tmp/freebsd-toolchain.sh i686 + COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh @@ -99,6 +105,7 @@ ENV TARGETS=$TARGETS,x86_64-fortanix-unknown-sgx ENV TARGETS=$TARGETS,nvptx64-nvidia-cuda ENV TARGETS=$TARGETS,armv7-unknown-linux-gnueabi ENV TARGETS=$TARGETS,armv7-unknown-linux-musleabi +ENV TARGETS=$TARGETS,i686-unknown-freebsd # As per https://bugs.launchpad.net/ubuntu/+source/gcc-defaults/+bug/1300211 # we need asm in the search path for gcc-7 (for gnux32) but not in the search path of the diff --git a/src/ci/docker/host-x86_64/dist-x86_64-linux/build-gcc.sh b/src/ci/docker/host-x86_64/dist-x86_64-linux/build-gcc.sh index 9d7461ebee3..fcf869b68be 100755 --- a/src/ci/docker/host-x86_64/dist-x86_64-linux/build-gcc.sh +++ b/src/ci/docker/host-x86_64/dist-x86_64-linux/build-gcc.sh @@ -29,7 +29,8 @@ mkdir ../gcc-build cd ../gcc-build hide_output ../gcc-$GCC/configure \ --prefix=/rustroot \ - --enable-languages=c,c++ + --enable-languages=c,c++ \ + --disable-gnu-unique-object hide_output make -j10 hide_output make install ln -s gcc /rustroot/bin/cc diff --git a/src/ci/docker/host-x86_64/x86_64-gnu-llvm-8/Dockerfile b/src/ci/docker/host-x86_64/x86_64-gnu-llvm-9/Dockerfile similarity index 63% rename from src/ci/docker/host-x86_64/x86_64-gnu-llvm-8/Dockerfile rename to src/ci/docker/host-x86_64/x86_64-gnu-llvm-9/Dockerfile index bd046f802c8..38eac6588b0 100644 --- a/src/ci/docker/host-x86_64/x86_64-gnu-llvm-8/Dockerfile +++ b/src/ci/docker/host-x86_64/x86_64-gnu-llvm-9/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 RUN apt-get update && apt-get install -y --no-install-recommends \ g++ \ - g++-arm-linux-gnueabi \ + gcc-multilib \ make \ ninja-build \ file \ @@ -13,7 +13,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ cmake \ sudo \ gdb \ - llvm-8-tools \ + llvm-9-tools \ + llvm-9-dev \ libedit-dev \ libssl-dev \ pkg-config \ @@ -27,7 +28,7 @@ RUN sh /scripts/sccache.sh # using llvm-link-shared due to libffi issues -- see #34486 ENV RUST_CONFIGURE_ARGS \ --build=x86_64-unknown-linux-gnu \ - --llvm-root=/usr/lib/llvm-8 \ + --llvm-root=/usr/lib/llvm-9 \ --enable-llvm-link-shared \ --set rust.thin-lto-import-instr-limit=10 @@ -38,22 +39,13 @@ ENV SCRIPT python2.7 ../x.py --stage 2 test --exclude src/tools/tidy && \ # the PR is approved and tested for merging. # It will also detect tests lacking `// EMIT_MIR_FOR_EACH_BIT_WIDTH`, # despite having different output on 32-bit vs 64-bit targets. 
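
The `check_llvm_version` change above raises the minimum supported external LLVM from 8 to 9, matching the CI images' move to LLVM 9 in the surrounding hunks: the version string reported by `llvm-config --version` must have a major component of at least 9. A standalone sketch of that gate, assuming nothing beyond simple string parsing:

```rust
/// Accept an `llvm-config --version` string such as "9.0.1" only if the
/// major version is at least 9.
fn llvm_new_enough(version: &str) -> bool {
    version
        .split('.')
        .next()
        .and_then(|major| major.trim().parse::<u32>().ok())
        .map_or(false, |major| major >= 9)
}

fn main() {
    assert!(llvm_new_enough("9.0.1"));
    assert!(llvm_new_enough("11.0.0"));
    assert!(!llvm_new_enough("8.0.0"));
    println!("ok");
}
```
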
- # - # HACK(eddyb) `armv5te` is used (not `i686`) to support older LLVM than LLVM 9: - # https://github.com/rust-lang/compiler-builtins/pull/311#issuecomment-612270089. - # This also requires `--pass=build` because we can't execute the tests - # on the `x86_64` host when they're built as `armv5te` binaries. - # (we're only interested in the MIR output, so this doesn't matter) - python2.7 ../x.py --stage 2 test src/test/mir-opt --pass=build \ - --host='' --target=armv5te-unknown-linux-gnueabi && \ + python2.7 ../x.py --stage 2 test src/test/mir-opt \ + --host='' --target=i686-unknown-linux-gnu && \ # Run the UI test suite again, but in `--pass=check` mode # # This is intended to make sure that both `--pass=check` continues to # work. # - # FIXME: We ideally want to test this in 32-bit mode, but currently - # (due to the LLVM problems mentioned above) that isn't readily - # possible. - python2.7 ../x.py --stage 2 test src/test/ui --pass=check && \ + python2.7 ../x.py --stage 2 test src/test/ui --pass=check --target=i686-unknown-linux-gnu && \ # Run tidy at the very end, after all the other tests. python2.7 ../x.py --stage 2 test src/tools/tidy diff --git a/src/ci/github-actions/ci.yml b/src/ci/github-actions/ci.yml index 889c98966eb..9eea6243dfa 100644 --- a/src/ci/github-actions/ci.yml +++ b/src/ci/github-actions/ci.yml @@ -280,7 +280,7 @@ jobs: - name: mingw-check <<: *job-linux-xl - - name: x86_64-gnu-llvm-8 + - name: x86_64-gnu-llvm-9 <<: *job-linux-xl - name: x86_64-gnu-tools @@ -328,9 +328,6 @@ jobs: - name: dist-i586-gnu-i586-i686-musl <<: *job-linux-xl - - name: dist-i686-freebsd - <<: *job-linux-xl - - name: dist-i686-linux <<: *job-linux-xl @@ -415,7 +412,7 @@ jobs: - name: x86_64-gnu-distcheck <<: *job-linux-xl - - name: x86_64-gnu-llvm-8 + - name: x86_64-gnu-llvm-9 env: RUST_BACKTRACE: 1 <<: *job-linux-xl diff --git a/src/ci/scripts/should-skip-this.sh b/src/ci/scripts/should-skip-this.sh index f945db0ada2..36bf4368990 100755 --- a/src/ci/scripts/should-skip-this.sh +++ b/src/ci/scripts/should-skip-this.sh @@ -14,6 +14,10 @@ elif git diff HEAD^ | grep --quiet "^index .* 160000"; then # Submodules pseudo-files inside git have the 160000 permissions, so when # those files are present in the diff a submodule was updated. echo "Executing the job since submodules are updated" +elif git diff --name-only HEAD^ | grep --quiet src/tools/clippy; then + # There is not an easy blanket search for subtrees. For now, manually list + # clippy. 
+ echo "Executing the job since clippy subtree was updated" else echo "Not executing this job since no submodules were updated" ciCommandSetEnv SKIP_JOB 1 diff --git a/src/doc/edition-guide b/src/doc/edition-guide index 7bc9b7a5e80..b91a9a881ee 160000 --- a/src/doc/edition-guide +++ b/src/doc/edition-guide @@ -1 +1 @@ -Subproject commit 7bc9b7a5e800f79df62947cb7d566fd2fbaf19fe +Subproject commit b91a9a881ee007c12e74e844460ec407cf07a50f diff --git a/src/doc/nomicon b/src/doc/nomicon index 69333eddb1d..23c49f1d5ce 160000 --- a/src/doc/nomicon +++ b/src/doc/nomicon @@ -1 +1 @@ -Subproject commit 69333eddb1de92fd17e272ce4677cc983d3bd71d +Subproject commit 23c49f1d5ce4720bc5b7e3a920f47eccc8da6b63 diff --git a/src/doc/reference b/src/doc/reference index 10c16caebe4..a7de763c213 160000 --- a/src/doc/reference +++ b/src/doc/reference @@ -1 +1 @@ -Subproject commit 10c16caebe475d0d11bec0531b95d7697856c13c +Subproject commit a7de763c213292f5b44bf10acb87ffa38724814d diff --git a/src/doc/rust-by-example b/src/doc/rust-by-example index 99eafee0cb1..1886fda6981 160000 --- a/src/doc/rust-by-example +++ b/src/doc/rust-by-example @@ -1 +1 @@ -Subproject commit 99eafee0cb14e6ec641bf02a69d7b30f6058349a +Subproject commit 1886fda6981b723e4de637074455558f8bc1e83c diff --git a/src/doc/rustc/src/platform-support.md b/src/doc/rustc/src/platform-support.md index 8005a5f3563..215e5d3d104 100644 --- a/src/doc/rustc/src/platform-support.md +++ b/src/doc/rustc/src/platform-support.md @@ -94,7 +94,7 @@ target | std | host | notes `i586-unknown-linux-gnu` | ✓ | | 32-bit Linux w/o SSE (kernel 4.4, glibc 2.23) `i586-unknown-linux-musl` | ✓ | | 32-bit Linux w/o SSE, MUSL `i686-linux-android` | ✓ | | 32-bit x86 Android -`i686-unknown-freebsd` | ✓ | ✓ | 32-bit FreeBSD +`i686-unknown-freebsd` | ✓ | | 32-bit FreeBSD `i686-unknown-linux-musl` | ✓ | | 32-bit Linux with MUSL `mips-unknown-linux-gnu` | ✓ | ✓ | MIPS Linux (kernel 4.4, glibc 2.23) `mips-unknown-linux-musl` | ✓ | | MIPS Linux with MUSL diff --git a/src/doc/unstable-book/src/language-features/cfg-panic.md b/src/doc/unstable-book/src/language-features/cfg-panic.md new file mode 100644 index 00000000000..f5b73128ad6 --- /dev/null +++ b/src/doc/unstable-book/src/language-features/cfg-panic.md @@ -0,0 +1,38 @@ +# `cfg_panic` + +The tracking issue for this feature is: [#77443] + +[#77443]: https://github.com/rust-lang/rust/issues/77443 + +------------------------ + +The `cfg_panic` feature makes it possible to execute different code +depending on the panic strategy. + +Possible values at the moment are `"unwind"` or `"abort"`, although +it is possible that new panic strategies may be added to Rust in the +future. + +## Examples + +```rust +#![feature(cfg_panic)] + +#[cfg(panic = "unwind")] +fn a() { + // ... +} + +#[cfg(not(panic = "unwind"))] +fn a() { + // ... +} + +fn b() { + if cfg!(panic = "abort") { + // ... + } else { + // ... 
+ } +} +``` diff --git a/src/doc/unstable-book/src/library-features/libstd-io-internals.md b/src/doc/unstable-book/src/library-features/internal-output-capture.md similarity index 79% rename from src/doc/unstable-book/src/library-features/libstd-io-internals.md rename to src/doc/unstable-book/src/library-features/internal-output-capture.md index 8bcc2769db7..7e1241fce98 100644 --- a/src/doc/unstable-book/src/library-features/libstd-io-internals.md +++ b/src/doc/unstable-book/src/library-features/internal-output-capture.md @@ -1,4 +1,4 @@ -# `libstd_io_internals` +# `internal_output_capture` This feature is internal to the Rust compiler and is not intended for general use. diff --git a/src/doc/unstable-book/src/library-features/set-stdio.md b/src/doc/unstable-book/src/library-features/set-stdio.md deleted file mode 100644 index 7dbdcdaa1a2..00000000000 --- a/src/doc/unstable-book/src/library-features/set-stdio.md +++ /dev/null @@ -1,5 +0,0 @@ -# `set_stdio` - -This feature is internal to the Rust compiler and is not intended for general use. - ------------------------- diff --git a/src/etc/gdb_providers.py b/src/etc/gdb_providers.py index eec3027085c..b5ade324bba 100644 --- a/src/etc/gdb_providers.py +++ b/src/etc/gdb_providers.py @@ -207,42 +207,43 @@ def children(self): yield "borrow", self.borrow -# Yields children (in a provider's sense of the word) for a tree headed by a BoxedNode. -# In particular, yields each key/value pair in the node and in any child nodes. -def children_of_node(boxed_node, height): - def cast_to_internal(node): - internal_type_name = node.type.target().name.replace("LeafNode", "InternalNode", 1) - internal_type = lookup_type(internal_type_name) - return node.cast(internal_type.pointer()) - - node_ptr = unwrap_unique_or_non_null(boxed_node["ptr"]) - leaf = node_ptr.dereference() - keys = leaf["keys"] - vals = leaf["vals"] - edges = cast_to_internal(node_ptr)["edges"] if height > 0 else None - length = int(leaf["len"]) - - for i in xrange(0, length + 1): - if height > 0: - boxed_child_node = edges[i]["value"]["value"] - for child in children_of_node(boxed_child_node, height - 1): - yield child - if i < length: - # Avoid "Cannot perform pointer math on incomplete type" on zero-sized arrays. - key = keys[i]["value"]["value"] if keys.type.sizeof > 0 else "()" - val = vals[i]["value"]["value"] if vals.type.sizeof > 0 else "()" - yield key, val - - -# Yields children for a BTreeMap. -def children_of_map(map): +# Yields children (in a provider's sense of the word) for a BTreeMap. +def children_of_btree_map(map): + # Yields each key/value pair in the node and in any child nodes. + def children_of_node(node_ptr, height): + def cast_to_internal(node): + internal_type_name = node.type.target().name.replace("LeafNode", "InternalNode", 1) + internal_type = lookup_type(internal_type_name) + return node.cast(internal_type.pointer()) + + leaf = node_ptr.dereference() + keys = leaf["keys"] + vals = leaf["vals"] + edges = cast_to_internal(node_ptr)["edges"] if height > 0 else None + length = leaf["len"] + + for i in xrange(0, length + 1): + if height > 0: + boxed_child_node = edges[i]["value"]["value"] + child_node = unwrap_unique_or_non_null(boxed_child_node["ptr"]) + for child in children_of_node(child_node, height - 1): + yield child + if i < length: + # Avoid "Cannot perform pointer math on incomplete type" on zero-sized arrays. 
+ key = keys[i]["value"]["value"] if keys.type.sizeof > 0 else "()" + val = vals[i]["value"]["value"] if vals.type.sizeof > 0 else "()" + yield key, val + if map["length"] > 0: root = map["root"] if root.type.name.startswith("core::option::Option<"): root = root.cast(gdb.lookup_type(root.type.name[21:-1])) - boxed_root_node = root["node"] + node_ptr = root["node"] + if node_ptr.type.name.startswith("alloc::collections::btree::node::BoxedNode<"): + node_ptr = node_ptr["ptr"] + node_ptr = unwrap_unique_or_non_null(node_ptr) height = root["height"] - for child in children_of_node(boxed_root_node, height): + for child in children_of_node(node_ptr, height): yield child @@ -255,7 +256,7 @@ def to_string(self): def children(self): inner_map = self.valobj["map"] - for i, (child, _) in enumerate(children_of_map(inner_map)): + for i, (child, _) in enumerate(children_of_btree_map(inner_map)): yield "[{}]".format(i), child @staticmethod @@ -271,7 +272,7 @@ def to_string(self): return "BTreeMap(size={})".format(self.valobj["length"]) def children(self): - for i, (key, val) in enumerate(children_of_map(self.valobj)): + for i, (key, val) in enumerate(children_of_btree_map(self.valobj)): yield "key{}".format(i), key yield "val{}".format(i), val diff --git a/src/librustdoc/clean/auto_trait.rs b/src/librustdoc/clean/auto_trait.rs index f39b53f3c82..ff996b2a925 100644 --- a/src/librustdoc/clean/auto_trait.rs +++ b/src/librustdoc/clean/auto_trait.rs @@ -20,13 +20,13 @@ struct RegionDeps<'tcx> { smaller: FxHashSet>, } -pub struct AutoTraitFinder<'a, 'tcx> { - pub cx: &'a core::DocContext<'tcx>, - pub f: auto_trait::AutoTraitFinder<'tcx>, +crate struct AutoTraitFinder<'a, 'tcx> { + crate cx: &'a core::DocContext<'tcx>, + crate f: auto_trait::AutoTraitFinder<'tcx>, } impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { - pub fn new(cx: &'a core::DocContext<'tcx>) -> Self { + crate fn new(cx: &'a core::DocContext<'tcx>) -> Self { let f = auto_trait::AutoTraitFinder::new(cx.tcx); AutoTraitFinder { cx, f } @@ -34,7 +34,7 @@ impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { // FIXME(eddyb) figure out a better way to pass information about // parametrization of `ty` than `param_env_def_id`. - pub fn get_auto_trait_impls(&self, ty: Ty<'tcx>, param_env_def_id: DefId) -> Vec { + crate fn get_auto_trait_impls(&self, ty: Ty<'tcx>, param_env_def_id: DefId) -> Vec { let param_env = self.cx.tcx.param_env(param_env_def_id); debug!("get_auto_trait_impls({:?})", ty); @@ -125,7 +125,7 @@ impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { def_id: self.cx.next_def_id(param_env_def_id.krate), stability: None, deprecation: None, - inner: ImplItem(Impl { + kind: ImplItem(Impl { unsafety: hir::Unsafety::Normal, generics: new_generics, provided_trait_methods: Default::default(), diff --git a/src/librustdoc/clean/blanket_impl.rs b/src/librustdoc/clean/blanket_impl.rs index de5a9a61555..5721927d0ec 100644 --- a/src/librustdoc/clean/blanket_impl.rs +++ b/src/librustdoc/clean/blanket_impl.rs @@ -9,18 +9,18 @@ use rustc_span::DUMMY_SP; use super::*; -pub struct BlanketImplFinder<'a, 'tcx> { - pub cx: &'a core::DocContext<'tcx>, +crate struct BlanketImplFinder<'a, 'tcx> { + crate cx: &'a core::DocContext<'tcx>, } impl<'a, 'tcx> BlanketImplFinder<'a, 'tcx> { - pub fn new(cx: &'a core::DocContext<'tcx>) -> Self { + crate fn new(cx: &'a core::DocContext<'tcx>) -> Self { BlanketImplFinder { cx } } // FIXME(eddyb) figure out a better way to pass information about // parametrization of `ty` than `param_env_def_id`. 
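
The rewritten `children_of_btree_map` above walks a `BTreeMap`'s root node and recurses through internal-node edges whenever the tree's height is greater than zero. A small program to inspect under `rust-gdb` with these providers loaded; the assumption that a few dozen entries are enough to force an internal root node is about `BTreeMap`'s node capacity and is not stated in the diff:

```rust
use std::collections::BTreeMap;

fn main() {
    let mut map = BTreeMap::new();
    // Insert enough entries that the map (very likely) grows an internal
    // root node, exercising the `height > 0` recursion in the provider.
    for i in 0..64u32 {
        map.insert(i, i * i);
    }
    // Under rust-gdb, break after the loop and `print map` to see the
    // key/value children yielded by `children_of_btree_map`.
    println!("{}", map.len());
}
```
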
- pub fn get_blanket_impls(&self, ty: Ty<'tcx>, param_env_def_id: DefId) -> Vec { + crate fn get_blanket_impls(&self, ty: Ty<'tcx>, param_env_def_id: DefId) -> Vec { let param_env = self.cx.tcx.param_env(param_env_def_id); debug!("get_blanket_impls({:?})", ty); @@ -62,14 +62,30 @@ impl<'a, 'tcx> BlanketImplFinder<'a, 'tcx> { "invoking predicate_may_hold: param_env={:?}, trait_ref={:?}, ty={:?}", param_env, trait_ref, ty ); - match infcx.evaluate_obligation(&traits::Obligation::new( - cause, - param_env, - trait_ref.without_const().to_predicate(infcx.tcx), - )) { - Ok(eval_result) => eval_result.may_apply(), - Err(traits::OverflowError) => true, // overflow doesn't mean yes *or* no + let predicates = self + .cx + .tcx + .predicates_of(impl_def_id) + .instantiate(self.cx.tcx, impl_substs) + .predicates + .into_iter() + .chain(Some(trait_ref.without_const().to_predicate(infcx.tcx))); + for predicate in predicates { + debug!("testing predicate {:?}", predicate); + let obligation = traits::Obligation::new( + traits::ObligationCause::dummy(), + param_env, + predicate, + ); + match infcx.evaluate_obligation(&obligation) { + Ok(eval_result) if eval_result.may_apply() => {} + Err(traits::OverflowError) => {} + _ => { + return false; + } + } } + true } else { false } @@ -98,7 +114,7 @@ impl<'a, 'tcx> BlanketImplFinder<'a, 'tcx> { def_id: self.cx.next_def_id(impl_def_id.krate), stability: None, deprecation: None, - inner: ImplItem(Impl { + kind: ImplItem(Impl { unsafety: hir::Unsafety::Normal, generics: ( self.cx.tcx.generics_of(impl_def_id), diff --git a/src/librustdoc/clean/cfg.rs b/src/librustdoc/clean/cfg.rs index b659f3eab43..2f169d1d3f3 100644 --- a/src/librustdoc/clean/cfg.rs +++ b/src/librustdoc/clean/cfg.rs @@ -20,7 +20,7 @@ use crate::html::escape::Escape; mod tests; #[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum Cfg { +crate enum Cfg { /// Accepts all configurations. True, /// Denies all configurations. @@ -36,9 +36,9 @@ pub enum Cfg { } #[derive(PartialEq, Debug)] -pub struct InvalidCfgError { - pub msg: &'static str, - pub span: Span, +crate struct InvalidCfgError { + crate msg: &'static str, + crate span: Span, } impl Cfg { @@ -59,7 +59,7 @@ impl Cfg { /// /// If the content is not properly formatted, it will return an error indicating what and where /// the error is. - pub fn parse(cfg: &MetaItem) -> Result { + crate fn parse(cfg: &MetaItem) -> Result { let name = match cfg.ident() { Some(ident) => ident.name, None => { @@ -102,7 +102,7 @@ impl Cfg { /// /// Equivalent to `attr::cfg_matches`. // FIXME: Actually make use of `features`. 
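
Alongside these functional changes, the rustdoc hunks in this region switch items from `pub` to the `crate` visibility shorthand (equivalent to `pub(crate)`), which at the time of this diff is gated behind the unstable `crate_visibility_modifier` feature. A minimal sketch of that modifier outside rustdoc; the names here are illustrative:

```rust
#![feature(crate_visibility_modifier)]

mod clean {
    crate struct Finder {
        crate depth: usize,
    }

    crate fn new_finder() -> Finder {
        Finder { depth: 0 }
    }
}

fn main() {
    let finder = clean::new_finder();
    println!("depth = {}", finder.depth);
}
```
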
- pub fn matches(&self, parse_sess: &ParseSess, features: Option<&Features>) -> bool { + crate fn matches(&self, parse_sess: &ParseSess, features: Option<&Features>) -> bool { match *self { Cfg::False => false, Cfg::True => true, diff --git a/src/librustdoc/clean/inline.rs b/src/librustdoc/clean/inline.rs index b3de70e5905..cc3e8707e52 100644 --- a/src/librustdoc/clean/inline.rs +++ b/src/librustdoc/clean/inline.rs @@ -54,7 +54,7 @@ crate fn try_inline( debug!("attrs={:?}", attrs); let attrs_clone = attrs; - let inner = match res { + let kind = match res { Res::Def(DefKind::Trait, did) => { record_extern_fqn(cx, did, clean::TypeKind::Trait); ret.extend(build_impls(cx, Some(parent_module), did, attrs)); @@ -124,16 +124,8 @@ crate fn try_inline( let attrs = merge_attrs(cx, Some(parent_module), target_attrs, attrs_clone); cx.renderinfo.borrow_mut().inlined.insert(did); - ret.push(clean::Item { - source: cx.tcx.def_span(did).clean(cx), - name: Some(name.clean(cx)), - attrs, - inner, - visibility: clean::Public, - stability: cx.tcx.lookup_stability(did).cloned(), - deprecation: cx.tcx.lookup_deprecation(did).clean(cx), - def_id: did, - }); + let what_rustc_thinks = clean::Item::from_def_id_and_parts(did, Some(name), kind, cx); + ret.push(clean::Item { attrs, ..what_rustc_thinks }); Some(ret) } @@ -193,7 +185,6 @@ crate fn build_external_trait(cx: &DocContext<'_>, did: DefId) -> clean::Trait { let trait_items = cx.tcx.associated_items(did).in_definition_order().map(|item| item.clean(cx)).collect(); - let auto_trait = cx.tcx.trait_def(did).has_auto_impl; let predicates = cx.tcx.predicates_of(did); let generics = (cx.tcx.generics_of(did), predicates).clean(cx); let generics = filter_non_trait_generics(did, generics); @@ -201,7 +192,6 @@ crate fn build_external_trait(cx: &DocContext<'_>, did: DefId) -> clean::Trait { let is_spotlight = load_attrs(cx, did).clean(cx).has_doc_flag(sym::spotlight); let is_auto = cx.tcx.trait_is_auto(did); clean::Trait { - auto: auto_trait, unsafety: cx.tcx.trait_def(did).unsafety, generics, items: trait_items, @@ -445,8 +435,10 @@ crate fn build_impl( debug!("build_impl: impl {:?} for {:?}", trait_.def_id(), for_.def_id()); - ret.push(clean::Item { - inner: clean::ImplItem(clean::Impl { + ret.push(clean::Item::from_def_id_and_parts( + did, + None, + clean::ImplItem(clean::Impl { unsafety: hir::Unsafety::Normal, generics, provided_trait_methods: provided, @@ -457,14 +449,8 @@ crate fn build_impl( synthetic: false, blanket_impl: None, }), - source: tcx.def_span(did).clean(cx), - name: None, - attrs, - visibility: clean::Inherited, - stability: tcx.lookup_stability(did).cloned(), - deprecation: tcx.lookup_deprecation(did).clean(cx), - def_id: did, - }); + cx, + )); } fn build_module(cx: &DocContext<'_>, did: DefId, visited: &mut FxHashSet) -> clean::Module { @@ -498,7 +484,7 @@ fn build_module(cx: &DocContext<'_>, did: DefId, visited: &mut FxHashSet) visibility: clean::Public, stability: None, deprecation: None, - inner: clean::ImportItem(clean::Import::new_simple( + kind: clean::ImportItem(clean::Import::new_simple( item.ident.to_string(), clean::ImportSource { path: clean::Path { @@ -555,7 +541,7 @@ fn build_static(cx: &DocContext<'_>, did: DefId, mutable: bool) -> clean::Static } } -fn build_macro(cx: &DocContext<'_>, did: DefId, name: Symbol) -> clean::ItemEnum { +fn build_macro(cx: &DocContext<'_>, did: DefId, name: Symbol) -> clean::ItemKind { let imported_from = cx.tcx.original_crate_name(did.krate); match cx.enter_resolver(|r| r.cstore().load_macro_untracked(did, 
cx.sess())) { LoadedMacro::MacroDef(def, _) => { diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs index bfc74705818..d58a88957df 100644 --- a/src/librustdoc/clean/mod.rs +++ b/src/librustdoc/clean/mod.rs @@ -3,11 +3,11 @@ mod auto_trait; mod blanket_impl; -pub mod cfg; -pub mod inline; +crate mod cfg; +crate mod inline; mod simplify; -pub mod types; -pub mod utils; +crate mod types; +crate mod utils; use rustc_ast as ast; use rustc_attr as attr; @@ -39,18 +39,18 @@ use crate::doctree; use utils::*; -pub use utils::{get_auto_trait_and_blanket_impls, krate, register_res}; +crate use utils::{get_auto_trait_and_blanket_impls, krate, register_res}; -pub use self::types::FnRetTy::*; -pub use self::types::ItemEnum::*; -pub use self::types::SelfTy::*; -pub use self::types::Type::*; -pub use self::types::Visibility::{Inherited, Public}; -pub use self::types::*; +crate use self::types::FnRetTy::*; +crate use self::types::ItemKind::*; +crate use self::types::SelfTy::*; +crate use self::types::Type::*; +crate use self::types::Visibility::{Inherited, Public}; +crate use self::types::*; const FN_OUTPUT_NAME: &str = "Output"; -pub trait Clean { +crate trait Clean { fn clean(&self, cx: &DocContext<'_>) -> T; } @@ -223,12 +223,6 @@ impl Clean for CrateNum { impl Clean for doctree::Module<'_> { fn clean(&self, cx: &DocContext<'_>) -> Item { - let name = if self.name.is_some() { - self.name.expect("No name provided").clean(cx) - } else { - String::new() - }; - // maintain a stack of mod ids, for doc comment path resolution // but we also need to resolve the module's own docs based on whether its docs were written // inside or outside the module, so check for that @@ -237,21 +231,14 @@ impl Clean for doctree::Module<'_> { let mut items: Vec = vec![]; items.extend(self.extern_crates.iter().flat_map(|x| x.clean(cx))); items.extend(self.imports.iter().flat_map(|x| x.clean(cx))); - items.extend(self.structs.iter().map(|x| x.clean(cx))); - items.extend(self.unions.iter().map(|x| x.clean(cx))); - items.extend(self.enums.iter().map(|x| x.clean(cx))); items.extend(self.fns.iter().map(|x| x.clean(cx))); items.extend(self.foreigns.iter().map(|x| x.clean(cx))); items.extend(self.mods.iter().map(|x| x.clean(cx))); - items.extend(self.typedefs.iter().map(|x| x.clean(cx))); - items.extend(self.opaque_tys.iter().map(|x| x.clean(cx))); - items.extend(self.statics.iter().map(|x| x.clean(cx))); - items.extend(self.constants.iter().map(|x| x.clean(cx))); + items.extend(self.items.iter().map(|x| x.clean(cx))); items.extend(self.traits.iter().map(|x| x.clean(cx))); items.extend(self.impls.iter().flat_map(|x| x.clean(cx))); items.extend(self.macros.iter().map(|x| x.clean(cx))); items.extend(self.proc_macros.iter().map(|x| x.clean(cx))); - items.extend(self.trait_aliases.iter().map(|x| x.clean(cx))); // determine if we should display the inner contents or // the outer `mod` item for the source code. 
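
Several of the `Clean` impls above and below now build a baseline item with `Item::from_def_id_and_parts` / `from_hir_id_and_parts` ("what rustc thinks") and then override only the fields rustdoc wants to present differently, using struct-update syntax. A toy sketch of that pattern with made-up types, just to show the shape of the refactor:

```rust
#[derive(Debug)]
struct Item {
    name: Option<String>,
    visibility: &'static str,
    deprecated: bool,
}

// Stand-in for `Item::from_def_id_and_parts`: start from the defaults the
// compiler would report for this item.
fn what_rustc_thinks(name: &str) -> Item {
    Item { name: Some(name.to_string()), visibility: "pub", deprecated: false }
}

fn main() {
    // Override a single field and keep everything else from the baseline,
    // mirroring `Item { visibility: self.vis.clean(cx), ..what_rustc_thinks }`.
    let field = Item { visibility: "inherited", ..what_rustc_thinks("my_field") };
    println!("{:?}", field);
}
```
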
@@ -268,15 +255,17 @@ impl Clean for doctree::Module<'_> { } }; + let what_rustc_thinks = Item::from_hir_id_and_parts( + self.id, + self.name, + ModuleItem(Module { is_crate: self.is_crate, items }), + cx, + ); Item { - name: Some(name), + name: Some(what_rustc_thinks.name.unwrap_or_default()), attrs, source: span.clean(cx), - visibility: self.vis.clean(cx), - stability: cx.stability(self.id), - deprecation: cx.deprecation(self.id).clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id).to_def_id(), - inner: ModuleItem(Module { is_crate: self.is_crate, items }), + ..what_rustc_thinks } } } @@ -883,14 +872,12 @@ impl<'a, 'tcx> Clean for (&'a ty::Generics, ty::GenericPredicates<'tcx } } -impl<'a> Clean - for (&'a hir::FnSig<'a>, &'a hir::Generics<'a>, hir::BodyId, Option) -{ - fn clean(&self, cx: &DocContext<'_>) -> Method { +impl<'a> Clean for (&'a hir::FnSig<'a>, &'a hir::Generics<'a>, hir::BodyId) { + fn clean(&self, cx: &DocContext<'_>) -> Function { let (generics, decl) = enter_impl_trait(cx, || (self.1.clean(cx), (&*self.0.decl, self.2).clean(cx))); let (all_types, ret_types) = get_all_types(&generics, &decl, cx); - Method { decl, generics, header: self.0.header, defaultness: self.3, all_types, ret_types } + Function { decl, generics, header: self.0.header, all_types, ret_types } } } @@ -899,31 +886,26 @@ impl Clean for doctree::Function<'_> { let (generics, decl) = enter_impl_trait(cx, || (self.generics.clean(cx), (self.decl, self.body).clean(cx))); - let did = cx.tcx.hir().local_def_id(self.id); - let constness = if is_const_fn(cx.tcx, did.to_def_id()) - && !is_unstable_const_fn(cx.tcx, did.to_def_id()).is_some() + let did = cx.tcx.hir().local_def_id(self.id).to_def_id(); + let constness = if is_const_fn(cx.tcx, did) && !is_unstable_const_fn(cx.tcx, did).is_some() { hir::Constness::Const } else { hir::Constness::NotConst }; let (all_types, ret_types) = get_all_types(&generics, &decl, cx); - Item { - name: Some(self.name.clean(cx)), - attrs: self.attrs.clean(cx), - source: self.span.clean(cx), - visibility: self.vis.clean(cx), - stability: cx.stability(self.id), - deprecation: cx.deprecation(self.id).clean(cx), - def_id: did.to_def_id(), - inner: FunctionItem(Function { + Item::from_def_id_and_parts( + did, + Some(self.name), + FunctionItem(Function { decl, generics, header: hir::FnHeader { constness, ..self.header }, all_types, ret_types, }), - } + cx, + ) } } @@ -935,8 +917,7 @@ impl<'a> Clean for (&'a [hir::Ty<'a>], &'a [Ident]) { .iter() .enumerate() .map(|(i, ty)| { - let mut name = - self.1.get(i).map(|ident| ident.to_string()).unwrap_or(String::new()); + let mut name = self.1.get(i).map(|ident| ident.to_string()).unwrap_or_default(); if name.is_empty() { name = "_".to_string(); } @@ -1016,16 +997,10 @@ impl Clean for doctree::Trait<'_> { fn clean(&self, cx: &DocContext<'_>) -> Item { let attrs = self.attrs.clean(cx); let is_spotlight = attrs.has_doc_flag(sym::spotlight); - Item { - name: Some(self.name.clean(cx)), - attrs, - source: self.span.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id).to_def_id(), - visibility: self.vis.clean(cx), - stability: cx.stability(self.id), - deprecation: cx.deprecation(self.id).clean(cx), - inner: TraitItem(Trait { - auto: self.is_auto.clean(cx), + Item::from_hir_id_and_parts( + self.id, + Some(self.name), + TraitItem(Trait { unsafety: self.unsafety, items: self.items.iter().map(|ti| ti.clean(cx)).collect(), generics: self.generics.clean(cx), @@ -1033,26 +1008,8 @@ impl Clean for doctree::Trait<'_> { is_spotlight, is_auto: 
self.is_auto.clean(cx), }), - } - } -} - -impl Clean for doctree::TraitAlias<'_> { - fn clean(&self, cx: &DocContext<'_>) -> Item { - let attrs = self.attrs.clean(cx); - Item { - name: Some(self.name.clean(cx)), - attrs, - source: self.span.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id).to_def_id(), - visibility: self.vis.clean(cx), - stability: cx.stability(self.id), - deprecation: cx.deprecation(self.id).clean(cx), - inner: TraitAliasItem(TraitAlias { - generics: self.generics.clean(cx), - bounds: self.bounds.clean(cx), - }), - } + cx, + ) } } @@ -1102,28 +1059,28 @@ impl Clean for hir::def::DefKind { impl Clean for hir::TraitItem<'_> { fn clean(&self, cx: &DocContext<'_>) -> Item { - let local_did = cx.tcx.hir().local_def_id(self.hir_id); + let local_did = cx.tcx.hir().local_def_id(self.hir_id).to_def_id(); let inner = match self.kind { hir::TraitItemKind::Const(ref ty, default) => { AssocConstItem(ty.clean(cx), default.map(|e| print_const_expr(cx, e))) } hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Provided(body)) => { - let mut m = (sig, &self.generics, body, None).clean(cx); + let mut m = (sig, &self.generics, body).clean(cx); if m.header.constness == hir::Constness::Const - && is_unstable_const_fn(cx.tcx, local_did.to_def_id()).is_some() + && is_unstable_const_fn(cx.tcx, local_did).is_some() { m.header.constness = hir::Constness::NotConst; } - MethodItem(m) + MethodItem(m, None) } hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Required(ref names)) => { let (generics, decl) = enter_impl_trait(cx, || { (self.generics.clean(cx), (&*sig.decl, &names[..]).clean(cx)) }); let (all_types, ret_types) = get_all_types(&generics, &decl, cx); - let mut t = TyMethod { header: sig.header, decl, generics, all_types, ret_types }; + let mut t = Function { header: sig.header, decl, generics, all_types, ret_types }; if t.header.constness == hir::Constness::Const - && is_unstable_const_fn(cx.tcx, local_did.to_def_id()).is_some() + && is_unstable_const_fn(cx.tcx, local_did).is_some() { t.header.constness = hir::Constness::NotConst; } @@ -1133,34 +1090,25 @@ impl Clean for hir::TraitItem<'_> { AssocTypeItem(bounds.clean(cx), default.clean(cx)) } }; - Item { - name: Some(self.ident.name.clean(cx)), - attrs: self.attrs.clean(cx), - source: self.span.clean(cx), - def_id: local_did.to_def_id(), - visibility: Visibility::Inherited, - stability: get_stability(cx, local_did.to_def_id()), - deprecation: get_deprecation(cx, local_did.to_def_id()), - inner, - } + Item::from_def_id_and_parts(local_did, Some(self.ident.name), inner, cx) } } impl Clean for hir::ImplItem<'_> { fn clean(&self, cx: &DocContext<'_>) -> Item { - let local_did = cx.tcx.hir().local_def_id(self.hir_id); + let local_did = cx.tcx.hir().local_def_id(self.hir_id).to_def_id(); let inner = match self.kind { hir::ImplItemKind::Const(ref ty, expr) => { AssocConstItem(ty.clean(cx), Some(print_const_expr(cx, expr))) } hir::ImplItemKind::Fn(ref sig, body) => { - let mut m = (sig, &self.generics, body, Some(self.defaultness)).clean(cx); + let mut m = (sig, &self.generics, body).clean(cx); if m.header.constness == hir::Constness::Const - && is_unstable_const_fn(cx.tcx, local_did.to_def_id()).is_some() + && is_unstable_const_fn(cx.tcx, local_did).is_some() { m.header.constness = hir::Constness::NotConst; } - MethodItem(m) + MethodItem(m, Some(self.defaultness)) } hir::ImplItemKind::TyAlias(ref ty) => { let type_ = ty.clean(cx); @@ -1168,22 +1116,13 @@ impl Clean for hir::ImplItem<'_> { TypedefItem(Typedef { type_, generics: Generics::default(), 
item_type }, true) } }; - Item { - name: Some(self.ident.name.clean(cx)), - source: self.span.clean(cx), - attrs: self.attrs.clean(cx), - def_id: local_did.to_def_id(), - visibility: self.vis.clean(cx), - stability: get_stability(cx, local_did.to_def_id()), - deprecation: get_deprecation(cx, local_did.to_def_id()), - inner, - } + Item::from_def_id_and_parts(local_did, Some(self.ident.name), inner, cx) } } impl Clean for ty::AssocItem { fn clean(&self, cx: &DocContext<'_>) -> Item { - let inner = match self.kind { + let kind = match self.kind { ty::AssocKind::Const => { let ty = cx.tcx.type_of(self.def_id); let default = if self.defaultness.has_value() { @@ -1236,21 +1175,23 @@ impl Clean for ty::AssocItem { ty::ImplContainer(_) => Some(self.defaultness), ty::TraitContainer(_) => None, }; - MethodItem(Method { - generics, - decl, - header: hir::FnHeader { - unsafety: sig.unsafety(), - abi: sig.abi(), - constness, - asyncness, + MethodItem( + Function { + generics, + decl, + header: hir::FnHeader { + unsafety: sig.unsafety(), + abi: sig.abi(), + constness, + asyncness, + }, + all_types, + ret_types, }, defaultness, - all_types, - ret_types, - }) + ) } else { - TyMethodItem(TyMethod { + TyMethodItem(Function { generics, decl, header: hir::FnHeader { @@ -1331,21 +1272,7 @@ impl Clean for ty::AssocItem { } }; - let visibility = match self.container { - ty::ImplContainer(_) => self.vis.clean(cx), - ty::TraitContainer(_) => Inherited, - }; - - Item { - name: Some(self.ident.name.clean(cx)), - visibility, - stability: get_stability(cx, self.def_id), - deprecation: get_deprecation(cx, self.def_id), - def_id: self.def_id, - attrs: inline::load_attrs(cx, self.def_id).clean(cx), - source: cx.tcx.def_span(self.def_id).clean(cx), - inner, - } + Item::from_def_id_and_parts(self.def_id, Some(self.ident.name), kind, cx) } } @@ -1775,33 +1702,27 @@ impl<'tcx> Clean for ty::Const<'tcx> { impl Clean for hir::StructField<'_> { fn clean(&self, cx: &DocContext<'_>) -> Item { - let local_did = cx.tcx.hir().local_def_id(self.hir_id); - - Item { - name: Some(self.ident.name).clean(cx), - attrs: self.attrs.clean(cx), - source: self.span.clean(cx), - visibility: self.vis.clean(cx), - stability: get_stability(cx, local_did.to_def_id()), - deprecation: get_deprecation(cx, local_did.to_def_id()), - def_id: local_did.to_def_id(), - inner: StructFieldItem(self.ty.clean(cx)), - } + let what_rustc_thinks = Item::from_hir_id_and_parts( + self.hir_id, + Some(self.ident.name), + StructFieldItem(self.ty.clean(cx)), + cx, + ); + // Don't show `pub` for fields on enum variants; they are always public + Item { visibility: self.vis.clean(cx), ..what_rustc_thinks } } } impl Clean for ty::FieldDef { fn clean(&self, cx: &DocContext<'_>) -> Item { - Item { - name: Some(self.ident.name).clean(cx), - attrs: cx.tcx.get_attrs(self.did).clean(cx), - source: cx.tcx.def_span(self.did).clean(cx), - visibility: self.vis.clean(cx), - stability: get_stability(cx, self.did), - deprecation: get_deprecation(cx, self.did), - def_id: self.did, - inner: StructFieldItem(cx.tcx.type_of(self.did).clean(cx)), - } + let what_rustc_thinks = Item::from_def_id_and_parts( + self.did, + Some(self.ident.name), + StructFieldItem(cx.tcx.type_of(self.did).clean(cx)), + cx, + ); + // Don't show `pub` for fields on enum variants; they are always public + Item { visibility: self.vis.clean(cx), ..what_rustc_thinks } } } @@ -1810,58 +1731,27 @@ impl Clean for hir::Visibility<'_> { match self.node { hir::VisibilityKind::Public => Visibility::Public, 
hir::VisibilityKind::Inherited => Visibility::Inherited, - hir::VisibilityKind::Crate(_) => Visibility::Crate, + hir::VisibilityKind::Crate(_) => { + let krate = DefId::local(CRATE_DEF_INDEX); + Visibility::Restricted(krate, cx.tcx.def_path(krate)) + } hir::VisibilityKind::Restricted { ref path, .. } => { let path = path.clean(cx); let did = register_res(cx, path.res); - Visibility::Restricted(did, path) + Visibility::Restricted(did, cx.tcx.def_path(did)) } } } } impl Clean for ty::Visibility { - fn clean(&self, _: &DocContext<'_>) -> Visibility { - if *self == ty::Visibility::Public { Public } else { Inherited } - } -} - -impl Clean for doctree::Struct<'_> { - fn clean(&self, cx: &DocContext<'_>) -> Item { - Item { - name: Some(self.name.clean(cx)), - attrs: self.attrs.clean(cx), - source: self.span.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id).to_def_id(), - visibility: self.vis.clean(cx), - stability: cx.stability(self.id), - deprecation: cx.deprecation(self.id).clean(cx), - inner: StructItem(Struct { - struct_type: self.struct_type, - generics: self.generics.clean(cx), - fields: self.fields.clean(cx), - fields_stripped: false, - }), - } - } -} - -impl Clean for doctree::Union<'_> { - fn clean(&self, cx: &DocContext<'_>) -> Item { - Item { - name: Some(self.name.clean(cx)), - attrs: self.attrs.clean(cx), - source: self.span.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id).to_def_id(), - visibility: self.vis.clean(cx), - stability: cx.stability(self.id), - deprecation: cx.deprecation(self.id).clean(cx), - inner: UnionItem(Union { - struct_type: self.struct_type, - generics: self.generics.clean(cx), - fields: self.fields.clean(cx), - fields_stripped: false, - }), + fn clean(&self, cx: &DocContext<'_>) -> Visibility { + match *self { + ty::Visibility::Public => Visibility::Public, + ty::Visibility::Invisible => Visibility::Inherited, + ty::Visibility::Restricted(module) => { + Visibility::Restricted(module, cx.tcx.def_path(module)) + } } } } @@ -1876,37 +1766,16 @@ impl Clean for rustc_hir::VariantData<'_> { } } -impl Clean for doctree::Enum<'_> { - fn clean(&self, cx: &DocContext<'_>) -> Item { - Item { - name: Some(self.name.clean(cx)), - attrs: self.attrs.clean(cx), - source: self.span.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id).to_def_id(), - visibility: self.vis.clean(cx), - stability: cx.stability(self.id), - deprecation: cx.deprecation(self.id).clean(cx), - inner: EnumItem(Enum { - variants: self.variants.iter().map(|v| v.clean(cx)).collect(), - generics: self.generics.clean(cx), - variants_stripped: false, - }), - } - } -} - impl Clean for doctree::Variant<'_> { fn clean(&self, cx: &DocContext<'_>) -> Item { - Item { - name: Some(self.name.clean(cx)), - attrs: self.attrs.clean(cx), - source: self.span.clean(cx), - visibility: Inherited, - stability: cx.stability(self.id), - deprecation: cx.deprecation(self.id).clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id).to_def_id(), - inner: VariantItem(Variant { kind: self.def.clean(cx) }), - } + let what_rustc_thinks = Item::from_hir_id_and_parts( + self.id, + Some(self.name), + VariantItem(Variant { kind: self.def.clean(cx) }), + cx, + ); + // don't show `pub` for variants, which are always public + Item { visibility: Inherited, ..what_rustc_thinks } } } @@ -1927,25 +1796,23 @@ impl Clean for ty::VariantDef { source: cx.tcx.def_span(field.did).clean(cx), name: Some(field.ident.name.clean(cx)), attrs: cx.tcx.get_attrs(field.did).clean(cx), - visibility: field.vis.clean(cx), + visibility: 
Visibility::Inherited, def_id: field.did, stability: get_stability(cx, field.did), deprecation: get_deprecation(cx, field.did), - inner: StructFieldItem(cx.tcx.type_of(field.did).clean(cx)), + kind: StructFieldItem(cx.tcx.type_of(field.did).clean(cx)), }) .collect(), }), }; - Item { - name: Some(self.ident.clean(cx)), - attrs: inline::load_attrs(cx, self.def_id).clean(cx), - source: cx.tcx.def_span(self.def_id).clean(cx), - visibility: Inherited, - def_id: self.def_id, - inner: VariantItem(Variant { kind }), - stability: get_stability(cx, self.def_id), - deprecation: get_deprecation(cx, self.def_id), - } + let what_rustc_thinks = Item::from_def_id_and_parts( + self.def_id, + Some(self.ident.name), + VariantItem(Variant { kind }), + cx, + ); + // don't show `pub` for fields, which are always public + Item { visibility: Inherited, ..what_rustc_thinks } } } @@ -1967,10 +1834,15 @@ impl Clean for rustc_span::Span { return Span::empty(); } + // Get the macro invocation instead of the definition, + // in case the span is result of a macro expansion. + // (See rust-lang/rust#39726) + let span = self.source_callsite(); + let sm = cx.sess().source_map(); - let filename = sm.span_to_filename(*self); - let lo = sm.lookup_char_pos(self.lo()); - let hi = sm.lookup_char_pos(self.hi()); + let filename = sm.span_to_filename(span); + let lo = sm.lookup_char_pos(span.lo()); + let hi = sm.lookup_char_pos(span.hi()); Span { filename, cnum: lo.file.cnum, @@ -1978,7 +1850,7 @@ impl Clean for rustc_span::Span { locol: lo.col.to_usize(), hiline: hi.line, hicol: hi.col.to_usize(), - original: *self, + original: span, } } } @@ -2041,41 +1913,6 @@ impl Clean for Symbol { } } -impl Clean for doctree::Typedef<'_> { - fn clean(&self, cx: &DocContext<'_>) -> Item { - let type_ = self.ty.clean(cx); - let item_type = type_.def_id().and_then(|did| inline::build_ty(cx, did)); - Item { - name: Some(self.name.clean(cx)), - attrs: self.attrs.clean(cx), - source: self.span.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id).to_def_id(), - visibility: self.vis.clean(cx), - stability: cx.stability(self.id), - deprecation: cx.deprecation(self.id).clean(cx), - inner: TypedefItem(Typedef { type_, generics: self.gen.clean(cx), item_type }, false), - } - } -} - -impl Clean for doctree::OpaqueTy<'_> { - fn clean(&self, cx: &DocContext<'_>) -> Item { - Item { - name: Some(self.name.clean(cx)), - attrs: self.attrs.clean(cx), - source: self.span.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id).to_def_id(), - visibility: self.vis.clean(cx), - stability: cx.stability(self.id), - deprecation: cx.deprecation(self.id).clean(cx), - inner: OpaqueTyItem(OpaqueTy { - bounds: self.opaque_ty.bounds.clean(cx), - generics: self.opaque_ty.generics.clean(cx), - }), - } - } -} - impl Clean for hir::BareFnTy<'_> { fn clean(&self, cx: &DocContext<'_>) -> BareFunctionDecl { let (generic_params, decl) = enter_impl_trait(cx, || { @@ -2085,45 +1922,75 @@ impl Clean for hir::BareFnTy<'_> { } } -impl Clean for doctree::Static<'_> { +impl Clean for (&hir::Item<'_>, Option) { fn clean(&self, cx: &DocContext<'_>) -> Item { - debug!("cleaning static {}: {:?}", self.name.clean(cx), self); - Item { - name: Some(self.name.clean(cx)), - attrs: self.attrs.clean(cx), - source: self.span.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id).to_def_id(), - visibility: self.vis.clean(cx), - stability: cx.stability(self.id), - deprecation: cx.deprecation(self.id).clean(cx), - inner: StaticItem(Static { - type_: self.type_.clean(cx), - mutability: self.mutability, 
- expr: print_const_expr(cx, self.expr), + use hir::ItemKind; + + let (item, renamed) = self; + let def_id = cx.tcx.hir().local_def_id(item.hir_id).to_def_id(); + let name = match renamed { + Some(ident) => ident.name, + None => cx.tcx.hir().name(item.hir_id), + }; + let kind = match item.kind { + ItemKind::Static(ty, mutability, body_id) => StaticItem(Static { + type_: ty.clean(cx), + mutability, + expr: print_const_expr(cx, body_id), }), - } + ItemKind::Const(ty, body_id) => ConstantItem(Constant { + type_: ty.clean(cx), + expr: print_const_expr(cx, body_id), + value: print_evaluated_const(cx, def_id), + is_literal: is_literal_expr(cx, body_id.hir_id), + }), + ItemKind::OpaqueTy(ref ty) => OpaqueTyItem(OpaqueTy { + bounds: ty.bounds.clean(cx), + generics: ty.generics.clean(cx), + }), + ItemKind::TyAlias(ty, ref generics) => { + let rustdoc_ty = ty.clean(cx); + let item_type = rustdoc_ty.def_id().and_then(|did| inline::build_ty(cx, did)); + TypedefItem( + Typedef { type_: rustdoc_ty, generics: generics.clean(cx), item_type }, + false, + ) + } + ItemKind::Enum(ref def, ref generics) => EnumItem(Enum { + variants: def.variants.iter().map(|v| v.clean(cx)).collect(), + generics: generics.clean(cx), + variants_stripped: false, + }), + ItemKind::TraitAlias(ref generics, bounds) => TraitAliasItem(TraitAlias { + generics: generics.clean(cx), + bounds: bounds.clean(cx), + }), + ItemKind::Union(ref variant_data, ref generics) => UnionItem(Union { + struct_type: doctree::struct_type_from_def(&variant_data), + generics: generics.clean(cx), + fields: variant_data.fields().clean(cx), + fields_stripped: false, + }), + ItemKind::Struct(ref variant_data, ref generics) => StructItem(Struct { + struct_type: doctree::struct_type_from_def(&variant_data), + generics: generics.clean(cx), + fields: variant_data.fields().clean(cx), + fields_stripped: false, + }), + _ => unreachable!("not yet converted"), + }; + + Item::from_def_id_and_parts(def_id, Some(name), kind, cx) } } -impl Clean for doctree::Constant<'_> { +impl Clean for hir::Variant<'_> { fn clean(&self, cx: &DocContext<'_>) -> Item { - let def_id = cx.tcx.hir().local_def_id(self.id); - - Item { - name: Some(self.name.clean(cx)), - attrs: self.attrs.clean(cx), - source: self.span.clean(cx), - def_id: def_id.to_def_id(), - visibility: self.vis.clean(cx), - stability: cx.stability(self.id), - deprecation: cx.deprecation(self.id).clean(cx), - inner: ConstantItem(Constant { - type_: self.type_.clean(cx), - expr: print_const_expr(cx, self.expr), - value: print_evaluated_const(cx, def_id.to_def_id()), - is_literal: is_literal_expr(cx, self.expr.hir_id), - }), - } + let kind = VariantItem(Variant { kind: self.data.clean(cx) }); + let what_rustc_thinks = + Item::from_hir_id_and_parts(self.id, Some(self.ident.name), kind, cx); + // don't show `pub` for variants, which are always public + Item { visibility: Inherited, ..what_rustc_thinks } } } @@ -2171,7 +2038,7 @@ impl Clean> for doctree::Impl<'_> { visibility: self.vis.clean(cx), stability: cx.stability(self.id), deprecation: cx.deprecation(self.id).clean(cx), - inner: ImplItem(Impl { + kind: ImplItem(Impl { unsafety: self.unsafety, generics: self.generics.clean(cx), provided_trait_methods: provided.clone(), @@ -2227,7 +2094,7 @@ impl Clean> for doctree::ExternCrate<'_> { visibility: self.vis.clean(cx), stability: None, deprecation: None, - inner: ExternCrateItem(self.name.clean(cx), self.path.clone()), + kind: ExternCrateItem(self.name.clean(cx), self.path.clone()), }] } } @@ -2298,7 +2165,7 @@ impl Clean> for 
doctree::Import<'_> { visibility: self.vis.clean(cx), stability: None, deprecation: None, - inner: ImportItem(Import::new_simple( + kind: ImportItem(Import::new_simple( self.name.clean(cx), resolve_use_source(cx, path), false, @@ -2318,14 +2185,14 @@ impl Clean> for doctree::Import<'_> { visibility: self.vis.clean(cx), stability: None, deprecation: None, - inner: ImportItem(inner), + kind: ImportItem(inner), }] } } impl Clean for doctree::ForeignItem<'_> { fn clean(&self, cx: &DocContext<'_>) -> Item { - let inner = match self.kind { + let kind = match self.kind { hir::ForeignItemKind::Fn(ref decl, ref names, ref generics) => { let abi = cx.tcx.hir().get_foreign_abi(self.id); let (generics, decl) = @@ -2352,34 +2219,19 @@ impl Clean for doctree::ForeignItem<'_> { hir::ForeignItemKind::Type => ForeignTypeItem, }; - Item { - name: Some(self.name.clean(cx)), - attrs: self.attrs.clean(cx), - source: self.span.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id).to_def_id(), - visibility: self.vis.clean(cx), - stability: cx.stability(self.id), - deprecation: cx.deprecation(self.id).clean(cx), - inner, - } + Item::from_hir_id_and_parts(self.id, Some(self.name), kind, cx) } } -impl Clean for doctree::Macro<'_> { +impl Clean for doctree::Macro { fn clean(&self, cx: &DocContext<'_>) -> Item { - let name = self.name.clean(cx); - Item { - name: Some(name.clone()), - attrs: self.attrs.clean(cx), - source: self.span.clean(cx), - visibility: Public, - stability: cx.stability(self.hid), - deprecation: cx.deprecation(self.hid).clean(cx), - def_id: self.def_id, - inner: MacroItem(Macro { + Item::from_def_id_and_parts( + self.def_id, + Some(self.name), + MacroItem(Macro { source: format!( "macro_rules! {} {{\n{}}}", - name, + self.name, self.matchers .iter() .map(|span| { format!(" {} => {{ ... 
}};\n", span.to_src(cx)) }) @@ -2387,22 +2239,19 @@ impl Clean for doctree::Macro<'_> { ), imported_from: self.imported_from.clean(cx), }), - } + cx, + ) } } -impl Clean for doctree::ProcMacro<'_> { +impl Clean for doctree::ProcMacro { fn clean(&self, cx: &DocContext<'_>) -> Item { - Item { - name: Some(self.name.clean(cx)), - attrs: self.attrs.clean(cx), - source: self.span.clean(cx), - visibility: Public, - stability: cx.stability(self.id), - deprecation: cx.deprecation(self.id).clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id).to_def_id(), - inner: ProcMacroItem(ProcMacro { kind: self.kind, helpers: self.helpers.clean(cx) }), - } + Item::from_hir_id_and_parts( + self.id, + Some(self.name), + ProcMacroItem(ProcMacro { kind: self.kind, helpers: self.helpers.clean(cx) }), + cx, + ) } } diff --git a/src/librustdoc/clean/simplify.rs b/src/librustdoc/clean/simplify.rs index 990189f6ea0..121c9d2bc4c 100644 --- a/src/librustdoc/clean/simplify.rs +++ b/src/librustdoc/clean/simplify.rs @@ -21,7 +21,7 @@ use crate::clean::GenericArgs as PP; use crate::clean::WherePredicate as WP; use crate::core::DocContext; -pub fn where_clauses(cx: &DocContext<'_>, clauses: Vec) -> Vec { +crate fn where_clauses(cx: &DocContext<'_>, clauses: Vec) -> Vec { // First, partition the where clause into its separate components let mut params: BTreeMap<_, Vec<_>> = BTreeMap::new(); let mut lifetimes = Vec::new(); @@ -74,7 +74,7 @@ pub fn where_clauses(cx: &DocContext<'_>, clauses: Vec) -> Vec { clauses } -pub fn merge_bounds( +crate fn merge_bounds( cx: &clean::DocContext<'_>, bounds: &mut Vec, trait_did: DefId, diff --git a/src/librustdoc/clean/types.rs b/src/librustdoc/clean/types.rs index 32b3f69ecd4..43b986aae1c 100644 --- a/src/librustdoc/clean/types.rs +++ b/src/librustdoc/clean/types.rs @@ -41,63 +41,62 @@ use crate::formats::item_type::ItemType; use crate::html::render::cache::ExternalLocation; use self::FnRetTy::*; -use self::ItemEnum::*; +use self::ItemKind::*; use self::SelfTy::*; use self::Type::*; -thread_local!(pub static MAX_DEF_ID: RefCell> = Default::default()); +thread_local!(crate static MAX_DEF_ID: RefCell> = Default::default()); #[derive(Clone, Debug)] -pub struct Crate { - pub name: String, - pub version: Option, - pub src: FileName, - pub module: Option, - pub externs: Vec<(CrateNum, ExternalCrate)>, - pub primitives: Vec<(DefId, PrimitiveType, Attributes)>, +crate struct Crate { + crate name: String, + crate version: Option, + crate src: FileName, + crate module: Option, + crate externs: Vec<(CrateNum, ExternalCrate)>, + crate primitives: Vec<(DefId, PrimitiveType, Attributes)>, // These are later on moved into `CACHEKEY`, leaving the map empty. // Only here so that they can be filtered through the rustdoc passes. - pub external_traits: Rc>>, - pub masked_crates: FxHashSet, - pub collapsed: bool, + crate external_traits: Rc>>, + crate masked_crates: FxHashSet, + crate collapsed: bool, } #[derive(Clone, Debug)] -pub struct ExternalCrate { - pub name: String, - pub src: FileName, - pub attrs: Attributes, - pub primitives: Vec<(DefId, PrimitiveType, Attributes)>, - pub keywords: Vec<(DefId, String, Attributes)>, +crate struct ExternalCrate { + crate name: String, + crate src: FileName, + crate attrs: Attributes, + crate primitives: Vec<(DefId, PrimitiveType, Attributes)>, + crate keywords: Vec<(DefId, String, Attributes)>, } /// Anything with a source location and set of attributes and, optionally, a /// name. That is, anything that can be documented. 
This doesn't correspond /// directly to the AST's concept of an item; it's a strict superset. #[derive(Clone)] -pub struct Item { +crate struct Item { /// Stringified span - pub source: Span, + crate source: Span, /// Not everything has a name. E.g., impls - pub name: Option, - pub attrs: Attributes, - pub inner: ItemEnum, - pub visibility: Visibility, - pub def_id: DefId, - pub stability: Option, - pub deprecation: Option, + crate name: Option, + crate attrs: Attributes, + crate visibility: Visibility, + crate kind: ItemKind, + crate def_id: DefId, + crate stability: Option, + crate deprecation: Option, } impl fmt::Debug for Item { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let fake = self.is_fake(); - let def_id: &dyn fmt::Debug = if fake { &"**FAKE**" } else { &self.def_id }; + let def_id: &dyn fmt::Debug = if self.is_fake() { &"**FAKE**" } else { &self.def_id }; fmt.debug_struct("Item") .field("source", &self.source) .field("name", &self.name) .field("attrs", &self.attrs) - .field("inner", &self.inner) + .field("kind", &self.kind) .field("visibility", &self.visibility) .field("def_id", def_id) .field("stability", &self.stability) @@ -109,81 +108,123 @@ impl fmt::Debug for Item { impl Item { /// Finds the `doc` attribute as a NameValue and returns the corresponding /// value found. - pub fn doc_value(&self) -> Option<&str> { + crate fn doc_value(&self) -> Option<&str> { self.attrs.doc_value() } + /// Convenience wrapper around [`Self::from_def_id_and_parts`] which converts + /// `hir_id` to a [`DefId`] + pub fn from_hir_id_and_parts( + hir_id: hir::HirId, + name: Option, + kind: ItemKind, + cx: &DocContext<'_>, + ) -> Item { + Item::from_def_id_and_parts(cx.tcx.hir().local_def_id(hir_id).to_def_id(), name, kind, cx) + } + + pub fn from_def_id_and_parts( + def_id: DefId, + name: Option, + kind: ItemKind, + cx: &DocContext<'_>, + ) -> Item { + use super::Clean; + + debug!("name={:?}, def_id={:?}", name, def_id); + + // `span_if_local()` lies about functions and only gives the span of the function signature + let source = def_id.as_local().map_or_else( + || cx.tcx.def_span(def_id), + |local| { + let hir = cx.tcx.hir(); + hir.span_with_body(hir.local_def_id_to_hir_id(local)) + }, + ); + + Item { + def_id, + kind, + name: name.clean(cx), + source: source.clean(cx), + attrs: cx.tcx.get_attrs(def_id).clean(cx), + visibility: cx.tcx.visibility(def_id).clean(cx), + stability: cx.tcx.lookup_stability(def_id).cloned(), + deprecation: cx.tcx.lookup_deprecation(def_id).clean(cx), + } + } + /// Finds all `doc` attributes as NameValues and returns their corresponding values, joined /// with newlines. - pub fn collapsed_doc_value(&self) -> Option { + crate fn collapsed_doc_value(&self) -> Option { self.attrs.collapsed_doc_value() } - pub fn links(&self) -> Vec { + crate fn links(&self) -> Vec { self.attrs.links(&self.def_id.krate) } - pub fn is_crate(&self) -> bool { - match self.inner { + crate fn is_crate(&self) -> bool { + match self.kind { StrippedItem(box ModuleItem(Module { is_crate: true, .. })) | ModuleItem(Module { is_crate: true, .. 
}) => true, _ => false, } } - pub fn is_mod(&self) -> bool { + crate fn is_mod(&self) -> bool { self.type_() == ItemType::Module } - pub fn is_trait(&self) -> bool { + crate fn is_trait(&self) -> bool { self.type_() == ItemType::Trait } - pub fn is_struct(&self) -> bool { + crate fn is_struct(&self) -> bool { self.type_() == ItemType::Struct } - pub fn is_enum(&self) -> bool { + crate fn is_enum(&self) -> bool { self.type_() == ItemType::Enum } - pub fn is_variant(&self) -> bool { + crate fn is_variant(&self) -> bool { self.type_() == ItemType::Variant } - pub fn is_associated_type(&self) -> bool { + crate fn is_associated_type(&self) -> bool { self.type_() == ItemType::AssocType } - pub fn is_associated_const(&self) -> bool { + crate fn is_associated_const(&self) -> bool { self.type_() == ItemType::AssocConst } - pub fn is_method(&self) -> bool { + crate fn is_method(&self) -> bool { self.type_() == ItemType::Method } - pub fn is_ty_method(&self) -> bool { + crate fn is_ty_method(&self) -> bool { self.type_() == ItemType::TyMethod } - pub fn is_typedef(&self) -> bool { + crate fn is_typedef(&self) -> bool { self.type_() == ItemType::Typedef } - pub fn is_primitive(&self) -> bool { + crate fn is_primitive(&self) -> bool { self.type_() == ItemType::Primitive } - pub fn is_union(&self) -> bool { + crate fn is_union(&self) -> bool { self.type_() == ItemType::Union } - pub fn is_import(&self) -> bool { + crate fn is_import(&self) -> bool { self.type_() == ItemType::Import } - pub fn is_extern_crate(&self) -> bool { + crate fn is_extern_crate(&self) -> bool { self.type_() == ItemType::ExternCrate } - pub fn is_keyword(&self) -> bool { + crate fn is_keyword(&self) -> bool { self.type_() == ItemType::Keyword } - pub fn is_stripped(&self) -> bool { - match self.inner { + crate fn is_stripped(&self) -> bool { + match self.kind { StrippedItem(..) => true, ImportItem(ref i) => !i.should_be_displayed, _ => false, } } - pub fn has_stripped_fields(&self) -> Option { - match self.inner { + crate fn has_stripped_fields(&self) -> Option { + match self.kind { StructItem(ref _struct) => Some(_struct.fields_stripped), UnionItem(ref union) => Some(union.fields_stripped), VariantItem(Variant { kind: VariantKind::Struct(ref vstruct) }) => { @@ -193,7 +234,7 @@ impl Item { } } - pub fn stability_class(&self) -> Option { + crate fn stability_class(&self) -> Option { self.stability.as_ref().and_then(|ref s| { let mut classes = Vec::with_capacity(2); @@ -210,37 +251,33 @@ impl Item { }) } - pub fn stable_since(&self) -> Option { + crate fn stable_since(&self) -> Option { match self.stability?.level { StabilityLevel::Stable { since, .. } => Some(since.as_str()), StabilityLevel::Unstable { .. } => None, } } - pub fn is_non_exhaustive(&self) -> bool { + crate fn is_non_exhaustive(&self) -> bool { self.attrs.other_attrs.iter().any(|a| a.has_name(sym::non_exhaustive)) } /// Returns a documentation-level item type from the item. 
- pub fn type_(&self) -> ItemType { + crate fn type_(&self) -> ItemType { ItemType::from(self) } - pub fn is_default(&self) -> bool { - match self.inner { - ItemEnum::MethodItem(ref meth) => { - if let Some(defaultness) = meth.defaultness { - defaultness.has_value() && !defaultness.is_final() - } else { - false - } + crate fn is_default(&self) -> bool { + match self.kind { + ItemKind::MethodItem(_, Some(defaultness)) => { + defaultness.has_value() && !defaultness.is_final() } _ => false, } } /// See comments on next_def_id - pub fn is_fake(&self) -> bool { + crate fn is_fake(&self) -> bool { MAX_DEF_ID.with(|m| { m.borrow().get(&self.def_id.krate).map(|id| self.def_id >= *id).unwrap_or(false) }) @@ -248,7 +285,7 @@ impl Item { } #[derive(Clone, Debug)] -pub enum ItemEnum { +crate enum ItemKind { ExternCrateItem(String, Option), ImportItem(Import), StructItem(Struct), @@ -265,9 +302,9 @@ pub enum ItemEnum { ImplItem(Impl), /// A method signature only. Used for required methods in traits (ie, /// non-default-methods). - TyMethodItem(TyMethod), + TyMethodItem(Function), /// A method with a body. - MethodItem(Method), + MethodItem(Function, Option), StructFieldItem(Type), VariantItem(Variant), /// `fn`s from an extern block @@ -282,35 +319,35 @@ pub enum ItemEnum { AssocConstItem(Type, Option), AssocTypeItem(Vec, Option), /// An item that has been stripped by a rustdoc pass - StrippedItem(Box), + StrippedItem(Box), KeywordItem(String), } -impl ItemEnum { - pub fn is_type_alias(&self) -> bool { +impl ItemKind { + crate fn is_type_alias(&self) -> bool { match *self { - ItemEnum::TypedefItem(_, _) | ItemEnum::AssocTypeItem(_, _) => true, + ItemKind::TypedefItem(_, _) | ItemKind::AssocTypeItem(_, _) => true, _ => false, } } - pub fn as_assoc_kind(&self) -> Option { + crate fn as_assoc_kind(&self) -> Option { match *self { - ItemEnum::AssocConstItem(..) => Some(AssocKind::Const), - ItemEnum::AssocTypeItem(..) => Some(AssocKind::Type), - ItemEnum::TyMethodItem(..) | ItemEnum::MethodItem(..) => Some(AssocKind::Fn), + ItemKind::AssocConstItem(..) => Some(AssocKind::Const), + ItemKind::AssocTypeItem(..) => Some(AssocKind::Type), + ItemKind::TyMethodItem(..) | ItemKind::MethodItem(..) => Some(AssocKind::Fn), _ => None, } } } #[derive(Clone, Debug)] -pub struct Module { - pub items: Vec, - pub is_crate: bool, +crate struct Module { + crate items: Vec, + crate is_crate: bool, } -pub struct ListAttributesIter<'a> { +crate struct ListAttributesIter<'a> { attrs: slice::Iter<'a, ast::Attribute>, current_list: vec::IntoIter, name: Symbol, @@ -344,7 +381,7 @@ impl<'a> Iterator for ListAttributesIter<'a> { } } -pub trait AttributesExt { +crate trait AttributesExt { /// Finds an attribute as List and returns the list of attributes nested inside. fn lists(&self, name: Symbol) -> ListAttributesIter<'_>; } @@ -355,7 +392,7 @@ impl AttributesExt for [ast::Attribute] { } } -pub trait NestedAttributesExt { +crate trait NestedAttributesExt { /// Returns `true` if the attribute list contains a specific `Word` fn has_word(self, word: Symbol) -> bool; } @@ -375,20 +412,20 @@ impl> NestedAttributesExt for I { /// information can be given when a doctest fails. Sugared doc comments and "raw" doc comments are /// kept separate because of issue #42760. #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub struct DocFragment { - pub line: usize, - pub span: rustc_span::Span, +crate struct DocFragment { + crate line: usize, + crate span: rustc_span::Span, /// The module this doc-comment came from. 
/// /// This allows distinguishing between the original documentation and a pub re-export. /// If it is `None`, the item was not re-exported. - pub parent_module: Option, - pub doc: String, - pub kind: DocFragmentKind, + crate parent_module: Option, + crate doc: String, + crate kind: DocFragmentKind, } #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub enum DocFragmentKind { +crate enum DocFragmentKind { /// A doc fragment created from a `///` or `//!` doc comment. SugaredDoc, /// A doc fragment created from a "raw" `#[doc=""]` attribute. @@ -414,21 +451,21 @@ impl<'a> FromIterator<&'a DocFragment> for String { } #[derive(Clone, Debug, Default)] -pub struct Attributes { - pub doc_strings: Vec, - pub other_attrs: Vec, - pub cfg: Option>, - pub span: Option, +crate struct Attributes { + crate doc_strings: Vec, + crate other_attrs: Vec, + crate cfg: Option>, + crate span: Option, /// map from Rust paths to resolved defs and potential URL fragments - pub links: Vec, - pub inner_docs: bool, + crate links: Vec, + crate inner_docs: bool, } #[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] /// A link that has not yet been rendered. /// /// This link will be turned into a rendered link by [`Attributes::links`] -pub struct ItemLink { +crate struct ItemLink { /// The original link written in the markdown pub(crate) link: String, /// The link text displayed in the HTML. @@ -454,7 +491,7 @@ pub struct RenderedLink { impl Attributes { /// Extracts the content from an attribute `#[doc(cfg(content))]`. - pub fn extract_cfg(mi: &ast::MetaItem) -> Option<&ast::MetaItem> { + crate fn extract_cfg(mi: &ast::MetaItem) -> Option<&ast::MetaItem> { use rustc_ast::NestedMetaItem::MetaItem; if let ast::MetaItemKind::List(ref nmis) = mi.kind { @@ -479,7 +516,7 @@ impl Attributes { /// Reads a `MetaItem` from within an attribute, looks for whether it is a /// `#[doc(include="file")]`, and returns the filename and contents of the file as loaded from /// its expansion. - pub fn extract_include(mi: &ast::MetaItem) -> Option<(String, String)> { + crate fn extract_include(mi: &ast::MetaItem) -> Option<(String, String)> { mi.meta_item_list().and_then(|list| { for meta in list { if meta.has_name(sym::include) { @@ -515,7 +552,7 @@ impl Attributes { }) } - pub fn has_doc_flag(&self, flag: Symbol) -> bool { + crate fn has_doc_flag(&self, flag: Symbol) -> bool { for attr in &self.other_attrs { if !attr.has_name(sym::doc) { continue; @@ -531,7 +568,7 @@ impl Attributes { false } - pub fn from_ast( + crate fn from_ast( diagnostic: &::rustc_errors::Handler, attrs: &[ast::Attribute], additional_attrs: Option<(&[ast::Attribute], DefId)>, @@ -635,20 +672,20 @@ impl Attributes { /// Finds the `doc` attribute as a NameValue and returns the corresponding /// value found. - pub fn doc_value(&self) -> Option<&str> { + crate fn doc_value(&self) -> Option<&str> { self.doc_strings.first().map(|s| s.doc.as_str()) } /// Finds all `doc` attributes as NameValues and returns their corresponding values, joined /// with newlines. 
- pub fn collapsed_doc_value(&self) -> Option { + crate fn collapsed_doc_value(&self) -> Option { if !self.doc_strings.is_empty() { Some(self.doc_strings.iter().collect()) } else { None } } /// Gets links as a vector /// /// Cache must be populated before call - pub fn links(&self, krate: &CrateNum) -> Vec { + crate fn links(&self, krate: &CrateNum) -> Vec { use crate::html::format::href; use crate::html::render::CURRENT_DEPTH; @@ -681,7 +718,9 @@ impl Attributes { } Some(&(_, _, ExternalLocation::Remote(ref s))) => s.to_string(), Some(&(_, _, ExternalLocation::Unknown)) | None => String::from( - if UnstableFeatures::from_environment().is_nightly_build() { + // NOTE: intentionally doesn't pass crate name to avoid having + // different primitive links between crates + if UnstableFeatures::from_environment(None).is_nightly_build() { "https://doc.rust-lang.org/nightly" } else { "https://doc.rust-lang.org" @@ -710,7 +749,7 @@ impl Attributes { .collect() } - pub fn get_doc_aliases(&self) -> FxHashSet { + crate fn get_doc_aliases(&self) -> FxHashSet { self.other_attrs .lists(sym::doc) .filter(|a| a.has_name(sym::alias)) @@ -755,13 +794,13 @@ impl AttributesExt for Attributes { } #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub enum GenericBound { +crate enum GenericBound { TraitBound(PolyTrait, hir::TraitBoundModifier), Outlives(Lifetime), } impl GenericBound { - pub fn maybe_sized(cx: &DocContext<'_>) -> GenericBound { + crate fn maybe_sized(cx: &DocContext<'_>) -> GenericBound { let did = cx.tcx.require_lang_item(LangItem::Sized, None); let empty = cx.tcx.intern_substs(&[]); let path = external_path(cx, cx.tcx.item_name(did), Some(did), false, vec![], empty); @@ -775,7 +814,7 @@ impl GenericBound { ) } - pub fn is_sized_bound(&self, cx: &DocContext<'_>) -> bool { + crate fn is_sized_bound(&self, cx: &DocContext<'_>) -> bool { use rustc_hir::TraitBoundModifier as TBM; if let GenericBound::TraitBound(PolyTrait { ref trait_, .. }, TBM::None) = *self { if trait_.def_id() == cx.tcx.lang_items().sized_trait() { @@ -785,14 +824,14 @@ impl GenericBound { false } - pub fn get_poly_trait(&self) -> Option { + crate fn get_poly_trait(&self) -> Option { if let GenericBound::TraitBound(ref p, _) = *self { return Some(p.clone()); } None } - pub fn get_trait_type(&self) -> Option { + crate fn get_trait_type(&self) -> Option { if let GenericBound::TraitBound(PolyTrait { ref trait_, .. }, _) = *self { Some(trait_.clone()) } else { @@ -802,33 +841,33 @@ impl GenericBound { } #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub struct Lifetime(pub String); +crate struct Lifetime(pub String); impl Lifetime { - pub fn get_ref<'a>(&'a self) -> &'a str { + crate fn get_ref<'a>(&'a self) -> &'a str { let Lifetime(ref s) = *self; let s: &'a str = s; s } - pub fn statik() -> Lifetime { + crate fn statik() -> Lifetime { Lifetime("'static".to_string()) } - pub fn elided() -> Lifetime { + crate fn elided() -> Lifetime { Lifetime("'_".to_string()) } } #[derive(Clone, Debug)] -pub enum WherePredicate { +crate enum WherePredicate { BoundPredicate { ty: Type, bounds: Vec }, RegionPredicate { lifetime: Lifetime, bounds: Vec }, EqPredicate { lhs: Type, rhs: Type }, } impl WherePredicate { - pub fn get_bounds(&self) -> Option<&[GenericBound]> { + crate fn get_bounds(&self) -> Option<&[GenericBound]> { match *self { WherePredicate::BoundPredicate { ref bounds, .. } => Some(bounds), WherePredicate::RegionPredicate { ref bounds, .. 
} => Some(bounds), @@ -838,7 +877,7 @@ impl WherePredicate { } #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub enum GenericParamDefKind { +crate enum GenericParamDefKind { Lifetime, Type { did: DefId, @@ -853,7 +892,7 @@ pub enum GenericParamDefKind { } impl GenericParamDefKind { - pub fn is_type(&self) -> bool { + crate fn is_type(&self) -> bool { match *self { GenericParamDefKind::Type { .. } => true, _ => false, @@ -863,7 +902,7 @@ impl GenericParamDefKind { // FIXME(eddyb) this either returns the default of a type parameter, or the // type of a `const` parameter. It seems that the intention is to *visit* // any embedded types, but `get_type` seems to be the wrong name for that. - pub fn get_type(&self) -> Option { + crate fn get_type(&self) -> Option { match self { GenericParamDefKind::Type { default, .. } => default.clone(), GenericParamDefKind::Const { ty, .. } => Some(ty.clone()), @@ -873,28 +912,28 @@ impl GenericParamDefKind { } #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub struct GenericParamDef { - pub name: String, - pub kind: GenericParamDefKind, +crate struct GenericParamDef { + crate name: String, + crate kind: GenericParamDefKind, } impl GenericParamDef { - pub fn is_synthetic_type_param(&self) -> bool { + crate fn is_synthetic_type_param(&self) -> bool { match self.kind { GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => false, GenericParamDefKind::Type { ref synthetic, .. } => synthetic.is_some(), } } - pub fn is_type(&self) -> bool { + crate fn is_type(&self) -> bool { self.kind.is_type() } - pub fn get_type(&self) -> Option { + crate fn get_type(&self) -> Option { self.kind.get_type() } - pub fn get_bounds(&self) -> Option<&[GenericBound]> { + crate fn get_bounds(&self) -> Option<&[GenericBound]> { match self.kind { GenericParamDefKind::Type { ref bounds, .. } => Some(bounds), _ => None, @@ -904,49 +943,30 @@ impl GenericParamDef { // maybe use a Generic enum and use Vec? #[derive(Clone, Debug, Default)] -pub struct Generics { - pub params: Vec, - pub where_predicates: Vec, +crate struct Generics { + crate params: Vec, + crate where_predicates: Vec, } #[derive(Clone, Debug)] -pub struct Method { - pub generics: Generics, - pub decl: FnDecl, - pub header: hir::FnHeader, - pub defaultness: Option, - pub all_types: Vec<(Type, TypeKind)>, - pub ret_types: Vec<(Type, TypeKind)>, -} - -#[derive(Clone, Debug)] -pub struct TyMethod { - pub header: hir::FnHeader, - pub decl: FnDecl, - pub generics: Generics, - pub all_types: Vec<(Type, TypeKind)>, - pub ret_types: Vec<(Type, TypeKind)>, -} - -#[derive(Clone, Debug)] -pub struct Function { - pub decl: FnDecl, - pub generics: Generics, - pub header: hir::FnHeader, - pub all_types: Vec<(Type, TypeKind)>, - pub ret_types: Vec<(Type, TypeKind)>, +crate struct Function { + crate decl: FnDecl, + crate generics: Generics, + crate header: hir::FnHeader, + crate all_types: Vec<(Type, TypeKind)>, + crate ret_types: Vec<(Type, TypeKind)>, } #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub struct FnDecl { - pub inputs: Arguments, - pub output: FnRetTy, - pub c_variadic: bool, - pub attrs: Attributes, +crate struct FnDecl { + crate inputs: Arguments, + crate output: FnRetTy, + crate c_variadic: bool, + crate attrs: Attributes, } impl FnDecl { - pub fn self_type(&self) -> Option { + crate fn self_type(&self) -> Option { self.inputs.values.get(0).and_then(|v| v.to_self()) } @@ -959,7 +979,7 @@ impl FnDecl { /// /// This function will panic if the return type does not match the expected sugaring for async /// functions. 
- pub fn sugared_async_return_type(&self) -> FnRetTy { + crate fn sugared_async_return_type(&self) -> FnRetTy { match &self.output { FnRetTy::Return(Type::ImplTrait(bounds)) => match &bounds[0] { GenericBound::TraitBound(PolyTrait { trait_, .. }, ..) => { @@ -974,25 +994,25 @@ impl FnDecl { } #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub struct Arguments { - pub values: Vec, +crate struct Arguments { + crate values: Vec, } #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub struct Argument { - pub type_: Type, - pub name: String, +crate struct Argument { + crate type_: Type, + crate name: String, } #[derive(Clone, PartialEq, Debug)] -pub enum SelfTy { +crate enum SelfTy { SelfValue, SelfBorrowed(Option, Mutability), SelfExplicit(Type), } impl Argument { - pub fn to_self(&self) -> Option { + crate fn to_self(&self) -> Option { if self.name != "self" { return None; } @@ -1009,7 +1029,7 @@ impl Argument { } #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub enum FnRetTy { +crate enum FnRetTy { Return(Type), DefaultReturn, } @@ -1024,34 +1044,33 @@ impl GetDefId for FnRetTy { } #[derive(Clone, Debug)] -pub struct Trait { - pub auto: bool, - pub unsafety: hir::Unsafety, - pub items: Vec, - pub generics: Generics, - pub bounds: Vec, - pub is_spotlight: bool, - pub is_auto: bool, +crate struct Trait { + crate unsafety: hir::Unsafety, + crate items: Vec, + crate generics: Generics, + crate bounds: Vec, + crate is_spotlight: bool, + crate is_auto: bool, } #[derive(Clone, Debug)] -pub struct TraitAlias { - pub generics: Generics, - pub bounds: Vec, +crate struct TraitAlias { + crate generics: Generics, + crate bounds: Vec, } /// A trait reference, which may have higher ranked lifetimes. #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub struct PolyTrait { - pub trait_: Type, - pub generic_params: Vec, +crate struct PolyTrait { + crate trait_: Type, + crate generic_params: Vec, } /// A representation of a type suitable for hyperlinking purposes. Ideally, one can get the original /// type out of the AST/`TyCtxt` given one of these, if more information is needed. Most /// importantly, it does not preserve mutability or boxes. #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub enum Type { +crate enum Type { /// Structs/enums/traits (most that would be an `hir::TyKind::Path`). ResolvedPath { path: Path, @@ -1094,7 +1113,7 @@ pub enum Type { } #[derive(Clone, PartialEq, Eq, Hash, Copy, Debug)] -pub enum PrimitiveType { +crate enum PrimitiveType { Isize, I8, I16, @@ -1123,7 +1142,7 @@ pub enum PrimitiveType { } #[derive(Clone, PartialEq, Eq, Hash, Copy, Debug)] -pub enum TypeKind { +crate enum TypeKind { Enum, Function, Module, @@ -1140,7 +1159,7 @@ pub enum TypeKind { TraitAlias, } -pub trait GetDefId { +crate trait GetDefId { fn def_id(&self) -> Option; } @@ -1151,7 +1170,7 @@ impl GetDefId for Option { } impl Type { - pub fn primitive_type(&self) -> Option { + crate fn primitive_type(&self) -> Option { match *self { Primitive(p) | BorrowedRef { type_: box Primitive(p), .. } => Some(p), Slice(..) | BorrowedRef { type_: box Slice(..), .. } => Some(PrimitiveType::Slice), @@ -1171,21 +1190,21 @@ impl Type { } } - pub fn is_generic(&self) -> bool { + crate fn is_generic(&self) -> bool { match *self { ResolvedPath { is_generic, .. 
} => is_generic, _ => false, } } - pub fn is_self_type(&self) -> bool { + crate fn is_self_type(&self) -> bool { match *self { Generic(ref name) => name == "Self", _ => false, } } - pub fn generics(&self) -> Option> { + crate fn generics(&self) -> Option> { match *self { ResolvedPath { ref path, .. } => path.segments.last().and_then(|seg| { if let GenericArgs::AngleBracketed { ref args, .. } = seg.args { @@ -1205,7 +1224,7 @@ impl Type { } } - pub fn bindings(&self) -> Option<&[TypeBinding]> { + crate fn bindings(&self) -> Option<&[TypeBinding]> { match *self { ResolvedPath { ref path, .. } => path.segments.last().and_then(|seg| { if let GenericArgs::AngleBracketed { ref bindings, .. } = seg.args { @@ -1218,14 +1237,14 @@ impl Type { } } - pub fn is_full_generic(&self) -> bool { + crate fn is_full_generic(&self) -> bool { match *self { Type::Generic(_) => true, _ => false, } } - pub fn projection(&self) -> Option<(&Type, DefId, &str)> { + crate fn projection(&self) -> Option<(&Type, DefId, &str)> { let (self_, trait_, name) = match self { QPath { ref self_type, ref trait_, ref name } => (self_type, trait_, name), _ => return None, @@ -1266,7 +1285,7 @@ impl GetDefId for Type { } impl PrimitiveType { - pub fn from_hir(prim: hir::PrimTy) -> PrimitiveType { + crate fn from_hir(prim: hir::PrimTy) -> PrimitiveType { match prim { hir::PrimTy::Int(IntTy::Isize) => PrimitiveType::Isize, hir::PrimTy::Int(IntTy::I8) => PrimitiveType::I8, @@ -1288,7 +1307,7 @@ impl PrimitiveType { } } - pub fn from_symbol(s: Symbol) -> Option { + crate fn from_symbol(s: Symbol) -> Option { match s { sym::isize => Some(PrimitiveType::Isize), sym::i8 => Some(PrimitiveType::I8), @@ -1319,7 +1338,7 @@ impl PrimitiveType { } } - pub fn as_str(&self) -> &'static str { + crate fn as_str(&self) -> &'static str { use self::PrimitiveType::*; match *self { Isize => "isize", @@ -1350,11 +1369,11 @@ impl PrimitiveType { } } - pub fn impls(&self, tcx: TyCtxt<'_>) -> &'static SmallVec<[DefId; 4]> { + crate fn impls(&self, tcx: TyCtxt<'_>) -> &'static SmallVec<[DefId; 4]> { Self::all_impls(tcx).get(self).expect("missing impl for primitive type") } - pub fn all_impls(tcx: TyCtxt<'_>) -> &'static FxHashMap> { + crate fn all_impls(tcx: TyCtxt<'_>) -> &'static FxHashMap> { static CELL: OnceCell>> = OnceCell::new(); CELL.get_or_init(move || { @@ -1430,7 +1449,7 @@ impl PrimitiveType { }) } - pub fn to_url_str(&self) -> &'static str { + crate fn to_url_str(&self) -> &'static str { self.as_str() } } @@ -1483,72 +1502,77 @@ impl From for PrimitiveType { } } -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum Visibility { +#[derive(Clone, Debug)] +crate enum Visibility { Public, Inherited, - Crate, - Restricted(DefId, Path), + Restricted(DefId, rustc_hir::definitions::DefPath), +} + +impl Visibility { + crate fn is_public(&self) -> bool { + matches!(self, Visibility::Public) + } } #[derive(Clone, Debug)] -pub struct Struct { - pub struct_type: doctree::StructType, - pub generics: Generics, - pub fields: Vec, - pub fields_stripped: bool, +crate struct Struct { + crate struct_type: doctree::StructType, + crate generics: Generics, + crate fields: Vec, + crate fields_stripped: bool, } #[derive(Clone, Debug)] -pub struct Union { - pub struct_type: doctree::StructType, - pub generics: Generics, - pub fields: Vec, - pub fields_stripped: bool, +crate struct Union { + crate struct_type: doctree::StructType, + crate generics: Generics, + crate fields: Vec, + crate fields_stripped: bool, } /// This is a more limited form of the standard Struct, different 
in that /// it lacks the things most items have (name, id, parameterization). Found /// only as a variant in an enum. #[derive(Clone, Debug)] -pub struct VariantStruct { - pub struct_type: doctree::StructType, - pub fields: Vec, - pub fields_stripped: bool, +crate struct VariantStruct { + crate struct_type: doctree::StructType, + crate fields: Vec, + crate fields_stripped: bool, } #[derive(Clone, Debug)] -pub struct Enum { - pub variants: IndexVec, - pub generics: Generics, - pub variants_stripped: bool, +crate struct Enum { + crate variants: IndexVec, + crate generics: Generics, + crate variants_stripped: bool, } #[derive(Clone, Debug)] -pub struct Variant { - pub kind: VariantKind, +crate struct Variant { + crate kind: VariantKind, } #[derive(Clone, Debug)] -pub enum VariantKind { +crate enum VariantKind { CLike, Tuple(Vec), Struct(VariantStruct), } #[derive(Clone, Debug)] -pub struct Span { - pub filename: FileName, - pub cnum: CrateNum, - pub loline: usize, - pub locol: usize, - pub hiline: usize, - pub hicol: usize, - pub original: rustc_span::Span, +crate struct Span { + crate filename: FileName, + crate cnum: CrateNum, + crate loline: usize, + crate locol: usize, + crate hiline: usize, + crate hicol: usize, + crate original: rustc_span::Span, } impl Span { - pub fn empty() -> Span { + crate fn empty() -> Span { Span { filename: FileName::Anon(0), cnum: LOCAL_CRATE, @@ -1560,49 +1584,49 @@ impl Span { } } - pub fn span(&self) -> rustc_span::Span { + crate fn span(&self) -> rustc_span::Span { self.original } } #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub struct Path { - pub global: bool, - pub res: Res, - pub segments: Vec, +crate struct Path { + crate global: bool, + crate res: Res, + crate segments: Vec, } impl Path { - pub fn last_name(&self) -> &str { + crate fn last_name(&self) -> &str { self.segments.last().expect("segments were empty").name.as_str() } } #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub enum GenericArg { +crate enum GenericArg { Lifetime(Lifetime), Type(Type), Const(Constant), } #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub enum GenericArgs { +crate enum GenericArgs { AngleBracketed { args: Vec, bindings: Vec }, Parenthesized { inputs: Vec, output: Option }, } #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub struct PathSegment { - pub name: String, - pub args: GenericArgs, +crate struct PathSegment { + crate name: String, + crate args: GenericArgs, } #[derive(Clone, Debug)] -pub struct Typedef { - pub type_: Type, - pub generics: Generics, +crate struct Typedef { + crate type_: Type, + crate generics: Generics, // Type of target item. 
- pub item_type: Option, + crate item_type: Option, } impl GetDefId for Typedef { @@ -1612,75 +1636,75 @@ impl GetDefId for Typedef { } #[derive(Clone, Debug)] -pub struct OpaqueTy { - pub bounds: Vec, - pub generics: Generics, +crate struct OpaqueTy { + crate bounds: Vec, + crate generics: Generics, } #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub struct BareFunctionDecl { - pub unsafety: hir::Unsafety, - pub generic_params: Vec, - pub decl: FnDecl, - pub abi: Abi, +crate struct BareFunctionDecl { + crate unsafety: hir::Unsafety, + crate generic_params: Vec, + crate decl: FnDecl, + crate abi: Abi, } #[derive(Clone, Debug)] -pub struct Static { - pub type_: Type, - pub mutability: Mutability, +crate struct Static { + crate type_: Type, + crate mutability: Mutability, /// It's useful to have the value of a static documented, but I have no /// desire to represent expressions (that'd basically be all of the AST, /// which is huge!). So, have a string. - pub expr: String, + crate expr: String, } #[derive(Clone, PartialEq, Eq, Hash, Debug)] -pub struct Constant { - pub type_: Type, - pub expr: String, - pub value: Option, - pub is_literal: bool, +crate struct Constant { + crate type_: Type, + crate expr: String, + crate value: Option, + crate is_literal: bool, } #[derive(Clone, PartialEq, Debug)] -pub enum ImplPolarity { +crate enum ImplPolarity { Positive, Negative, } #[derive(Clone, Debug)] -pub struct Impl { - pub unsafety: hir::Unsafety, - pub generics: Generics, - pub provided_trait_methods: FxHashSet, - pub trait_: Option, - pub for_: Type, - pub items: Vec, - pub polarity: Option, - pub synthetic: bool, - pub blanket_impl: Option, +crate struct Impl { + crate unsafety: hir::Unsafety, + crate generics: Generics, + crate provided_trait_methods: FxHashSet, + crate trait_: Option, + crate for_: Type, + crate items: Vec, + crate polarity: Option, + crate synthetic: bool, + crate blanket_impl: Option, } #[derive(Clone, Debug)] -pub struct Import { - pub kind: ImportKind, - pub source: ImportSource, - pub should_be_displayed: bool, +crate struct Import { + crate kind: ImportKind, + crate source: ImportSource, + crate should_be_displayed: bool, } impl Import { - pub fn new_simple(name: String, source: ImportSource, should_be_displayed: bool) -> Self { + crate fn new_simple(name: String, source: ImportSource, should_be_displayed: bool) -> Self { Self { kind: ImportKind::Simple(name), source, should_be_displayed } } - pub fn new_glob(source: ImportSource, should_be_displayed: bool) -> Self { + crate fn new_glob(source: ImportSource, should_be_displayed: bool) -> Self { Self { kind: ImportKind::Glob, source, should_be_displayed } } } #[derive(Clone, Debug)] -pub enum ImportKind { +crate enum ImportKind { // use source as str; Simple(String), // use source::*; @@ -1688,46 +1712,46 @@ pub enum ImportKind { } #[derive(Clone, Debug)] -pub struct ImportSource { - pub path: Path, - pub did: Option, +crate struct ImportSource { + crate path: Path, + crate did: Option, } #[derive(Clone, Debug)] -pub struct Macro { - pub source: String, - pub imported_from: Option, +crate struct Macro { + crate source: String, + crate imported_from: Option, } #[derive(Clone, Debug)] -pub struct ProcMacro { - pub kind: MacroKind, - pub helpers: Vec, +crate struct ProcMacro { + crate kind: MacroKind, + crate helpers: Vec, } #[derive(Clone, Debug)] -pub struct Deprecation { - pub since: Option, - pub note: Option, - pub is_since_rustc_version: bool, +crate struct Deprecation { + crate since: Option, + crate note: Option, + 
crate is_since_rustc_version: bool, } /// An type binding on an associated type (e.g., `A = Bar` in `Foo` or /// `A: Send + Sync` in `Foo`). #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub struct TypeBinding { - pub name: String, - pub kind: TypeBindingKind, +crate struct TypeBinding { + crate name: String, + crate kind: TypeBindingKind, } #[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub enum TypeBindingKind { +crate enum TypeBindingKind { Equality { ty: Type }, Constraint { bounds: Vec }, } impl TypeBinding { - pub fn ty(&self) -> &Type { + crate fn ty(&self) -> &Type { match self.kind { TypeBindingKind::Equality { ref ty } => ty, _ => panic!("expected equality type binding for parenthesized generic args"), diff --git a/src/librustdoc/clean/utils.rs b/src/librustdoc/clean/utils.rs index f6258221e32..22917fbceb4 100644 --- a/src/librustdoc/clean/utils.rs +++ b/src/librustdoc/clean/utils.rs @@ -2,7 +2,7 @@ use crate::clean::auto_trait::AutoTraitFinder; use crate::clean::blanket_impl::BlanketImplFinder; use crate::clean::{ inline, Clean, Crate, Deprecation, ExternalCrate, FnDecl, FnRetTy, Generic, GenericArg, - GenericArgs, GenericBound, Generics, GetDefId, ImportSource, Item, ItemEnum, Lifetime, + GenericArgs, GenericBound, Generics, GetDefId, ImportSource, Item, ItemKind, Lifetime, MacroKind, Path, PathSegment, Primitive, PrimitiveType, ResolvedPath, Span, Type, TypeBinding, TypeKind, Visibility, WherePredicate, }; @@ -20,7 +20,7 @@ use rustc_middle::ty::{self, DefIdTree, Ty}; use rustc_span::symbol::{kw, sym, Symbol}; use std::mem; -pub fn krate(mut cx: &mut DocContext<'_>) -> Crate { +crate fn krate(mut cx: &mut DocContext<'_>) -> Crate { use crate::visit_lib::LibEmbargoVisitor; let krate = cx.tcx.hir().krate(); @@ -44,8 +44,8 @@ pub fn krate(mut cx: &mut DocContext<'_>) -> Crate { let mut module = module.clean(cx); let mut masked_crates = FxHashSet::default(); - match module.inner { - ItemEnum::ModuleItem(ref module) => { + match module.kind { + ItemKind::ModuleItem(ref module) => { for it in &module.items { // `compiler_builtins` should be masked too, but we can't apply // `#[doc(masked)]` to the injected `extern crate` because it's unstable. @@ -62,8 +62,8 @@ pub fn krate(mut cx: &mut DocContext<'_>) -> Crate { let ExternalCrate { name, src, primitives, keywords, .. 
} = LOCAL_CRATE.clean(cx); { - let m = match module.inner { - ItemEnum::ModuleItem(ref mut m) => m, + let m = match module.kind { + ItemKind::ModuleItem(ref mut m) => m, _ => unreachable!(), }; m.items.extend(primitives.iter().map(|&(def_id, prim, ref attrs)| Item { @@ -74,7 +74,7 @@ pub fn krate(mut cx: &mut DocContext<'_>) -> Crate { stability: get_stability(cx, def_id), deprecation: get_deprecation(cx, def_id), def_id, - inner: ItemEnum::PrimitiveItem(prim), + kind: ItemKind::PrimitiveItem(prim), })); m.items.extend(keywords.into_iter().map(|(def_id, kw, attrs)| Item { source: Span::empty(), @@ -84,7 +84,7 @@ pub fn krate(mut cx: &mut DocContext<'_>) -> Crate { stability: get_stability(cx, def_id), deprecation: get_deprecation(cx, def_id), def_id, - inner: ItemEnum::KeywordItem(kw), + kind: ItemKind::KeywordItem(kw), })); } @@ -102,11 +102,11 @@ pub fn krate(mut cx: &mut DocContext<'_>) -> Crate { } // extract the stability index for a node from tcx, if possible -pub fn get_stability(cx: &DocContext<'_>, def_id: DefId) -> Option { +crate fn get_stability(cx: &DocContext<'_>, def_id: DefId) -> Option { cx.tcx.lookup_stability(def_id).cloned() } -pub fn get_deprecation(cx: &DocContext<'_>, def_id: DefId) -> Option { +crate fn get_deprecation(cx: &DocContext<'_>, def_id: DefId) -> Option { cx.tcx.lookup_deprecation(def_id).clean(cx) } @@ -183,7 +183,7 @@ pub(super) fn external_path( /// i.e. `[T, U]` when you have the following bounds: `T: Display, U: Option` will return /// `[Display, Option]` (we just returns the list of the types, we don't care about the /// wrapped types in here). -pub fn get_real_types( +crate fn get_real_types( generics: &Generics, arg: &Type, cx: &DocContext<'_>, @@ -261,7 +261,7 @@ pub fn get_real_types( /// /// i.e. `fn foo>(x: u32, y: B)` will return /// `[u32, Display, Option]`. 
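The hunks above and below are part of rustdoc's wholesale switch from `pub` to the `crate` visibility shorthand. As a reminder of what that shorthand means, here is a minimal nightly-only sketch; it assumes the unstable `crate_visibility_modifier` feature gate (an assumption about rustdoc's crate attributes, not something shown in this diff):

```rust
// Nightly-only illustration: `crate` is shorthand for `pub(crate)`.
// Assumes the unstable `crate_visibility_modifier` feature; not part of the diff above.
#![feature(crate_visibility_modifier)]

mod detail {
    // Visible anywhere inside this crate, but never exported from it.
    crate fn helper() -> u32 {
        42
    }

    // Equivalent spelling that works on stable Rust.
    pub(crate) fn helper_stable() -> u32 {
        42
    }
}

fn main() {
    assert_eq!(detail::helper(), detail::helper_stable());
}
```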
-pub fn get_all_types( +crate fn get_all_types( generics: &Generics, decl: &FnDecl, cx: &DocContext<'_>, @@ -296,7 +296,7 @@ pub fn get_all_types( (all_types.into_iter().collect(), ret_types) } -pub fn strip_type(ty: Type) -> Type { +crate fn strip_type(ty: Type) -> Type { match ty { Type::ResolvedPath { path, param_names, did, is_generic } => { Type::ResolvedPath { path: strip_path(&path), param_names, did, is_generic } @@ -319,7 +319,7 @@ pub fn strip_type(ty: Type) -> Type { } } -pub fn strip_path(path: &Path) -> Path { +crate fn strip_path(path: &Path) -> Path { let segments = path .segments .iter() @@ -332,7 +332,7 @@ pub fn strip_path(path: &Path) -> Path { Path { global: path.global, res: path.res, segments } } -pub fn qpath_to_string(p: &hir::QPath<'_>) -> String { +crate fn qpath_to_string(p: &hir::QPath<'_>) -> String { let segments = match *p { hir::QPath::Resolved(_, ref path) => &path.segments, hir::QPath::TypeRelative(_, ref segment) => return segment.ident.to_string(), @@ -351,12 +351,12 @@ pub fn qpath_to_string(p: &hir::QPath<'_>) -> String { s } -pub fn build_deref_target_impls(cx: &DocContext<'_>, items: &[Item], ret: &mut Vec) { +crate fn build_deref_target_impls(cx: &DocContext<'_>, items: &[Item], ret: &mut Vec) { let tcx = cx.tcx; for item in items { - let target = match item.inner { - ItemEnum::TypedefItem(ref t, true) => &t.type_, + let target = match item.kind { + ItemKind::TypedefItem(ref t, true) => &t.type_, _ => continue, }; let primitive = match *target { @@ -378,7 +378,7 @@ pub fn build_deref_target_impls(cx: &DocContext<'_>, items: &[Item], ret: &mut V } } -pub trait ToSource { +crate trait ToSource { fn to_src(&self, cx: &DocContext<'_>) -> String; } @@ -394,7 +394,7 @@ impl ToSource for rustc_span::Span { } } -pub fn name_from_pat(p: &hir::Pat<'_>) -> String { +crate fn name_from_pat(p: &hir::Pat<'_>) -> String { use rustc_hir::*; debug!("trying to get a name from pattern: {:?}", p); @@ -440,7 +440,7 @@ pub fn name_from_pat(p: &hir::Pat<'_>) -> String { } } -pub fn print_const(cx: &DocContext<'_>, n: &'tcx ty::Const<'_>) -> String { +crate fn print_const(cx: &DocContext<'_>, n: &'tcx ty::Const<'_>) -> String { match n.val { ty::ConstKind::Unevaluated(def, _, promoted) => { let mut s = if let Some(def) = def.as_local() { @@ -470,7 +470,7 @@ pub fn print_const(cx: &DocContext<'_>, n: &'tcx ty::Const<'_>) -> String { } } -pub fn print_evaluated_const(cx: &DocContext<'_>, def_id: DefId) -> Option { +crate fn print_evaluated_const(cx: &DocContext<'_>, def_id: DefId) -> Option { cx.tcx.const_eval_poly(def_id).ok().and_then(|val| { let ty = cx.tcx.type_of(def_id); match (val, ty.kind()) { @@ -518,7 +518,7 @@ fn print_const_with_custom_print_scalar(cx: &DocContext<'_>, ct: &'tcx ty::Const } } -pub fn is_literal_expr(cx: &DocContext<'_>, hir_id: hir::HirId) -> bool { +crate fn is_literal_expr(cx: &DocContext<'_>, hir_id: hir::HirId) -> bool { if let hir::Node::Expr(expr) = cx.tcx.hir().get(hir_id) { if let hir::ExprKind::Lit(_) = &expr.kind { return true; @@ -534,7 +534,7 @@ pub fn is_literal_expr(cx: &DocContext<'_>, hir_id: hir::HirId) -> bool { false } -pub fn print_const_expr(cx: &DocContext<'_>, body: hir::BodyId) -> String { +crate fn print_const_expr(cx: &DocContext<'_>, body: hir::BodyId) -> String { let value = &cx.tcx.hir().body(body).value; let snippet = if !value.span.from_expansion() { @@ -547,7 +547,7 @@ pub fn print_const_expr(cx: &DocContext<'_>, body: hir::BodyId) -> String { } /// Given a type Path, resolve it to a Type using the TyCtxt -pub fn 
resolve_type(cx: &DocContext<'_>, path: Path, id: hir::HirId) -> Type { +crate fn resolve_type(cx: &DocContext<'_>, path: Path, id: hir::HirId) -> Type { debug!("resolve_type({:?},{:?})", path, id); let is_generic = match path.res { @@ -565,7 +565,7 @@ pub fn resolve_type(cx: &DocContext<'_>, path: Path, id: hir::HirId) -> Type { ResolvedPath { path, param_names: None, did, is_generic } } -pub fn get_auto_trait_and_blanket_impls( +crate fn get_auto_trait_and_blanket_impls( cx: &DocContext<'tcx>, ty: Ty<'tcx>, param_env_def_id: DefId, @@ -576,7 +576,7 @@ pub fn get_auto_trait_and_blanket_impls( .chain(BlanketImplFinder::new(cx).get_blanket_impls(ty, param_env_def_id)) } -pub fn register_res(cx: &DocContext<'_>, res: Res) -> DefId { +crate fn register_res(cx: &DocContext<'_>, res: Res) -> DefId { debug!("register_res({:?})", res); let (did, kind) = match res { @@ -616,14 +616,14 @@ pub fn register_res(cx: &DocContext<'_>, res: Res) -> DefId { did } -pub fn resolve_use_source(cx: &DocContext<'_>, path: Path) -> ImportSource { +crate fn resolve_use_source(cx: &DocContext<'_>, path: Path) -> ImportSource { ImportSource { did: if path.res.opt_def_id().is_none() { None } else { Some(register_res(cx, path.res)) }, path, } } -pub fn enter_impl_trait(cx: &DocContext<'_>, f: F) -> R +crate fn enter_impl_trait(cx: &DocContext<'_>, f: F) -> R where F: FnOnce() -> R, { diff --git a/src/librustdoc/config.rs b/src/librustdoc/config.rs index 02885f51936..e60970af0d3 100644 --- a/src/librustdoc/config.rs +++ b/src/librustdoc/config.rs @@ -30,13 +30,13 @@ use crate::passes::{self, Condition, DefaultPassOption}; use crate::theme; #[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub enum OutputFormat { +crate enum OutputFormat { Json, Html, } impl OutputFormat { - pub fn is_json(&self) -> bool { + crate fn is_json(&self) -> bool { match self { OutputFormat::Json => true, _ => false, @@ -58,93 +58,96 @@ impl TryFrom<&str> for OutputFormat { /// Configuration options for rustdoc. #[derive(Clone)] -pub struct Options { +crate struct Options { // Basic options / Options passed directly to rustc /// The crate root or Markdown file to load. - pub input: PathBuf, + crate input: PathBuf, /// The name of the crate being documented. - pub crate_name: Option, + crate crate_name: Option, /// Whether or not this is a proc-macro crate - pub proc_macro_crate: bool, + crate proc_macro_crate: bool, /// How to format errors and warnings. - pub error_format: ErrorOutputType, + crate error_format: ErrorOutputType, /// Library search paths to hand to the compiler. - pub libs: Vec, + crate libs: Vec, /// Library search paths strings to hand to the compiler. - pub lib_strs: Vec, + crate lib_strs: Vec, /// The list of external crates to link against. - pub externs: Externs, + crate externs: Externs, /// The list of external crates strings to link against. - pub extern_strs: Vec, + crate extern_strs: Vec, /// List of `cfg` flags to hand to the compiler. Always includes `rustdoc`. - pub cfgs: Vec, + crate cfgs: Vec, /// Codegen options to hand to the compiler. - pub codegen_options: CodegenOptions, + crate codegen_options: CodegenOptions, /// Codegen options strings to hand to the compiler. - pub codegen_options_strs: Vec, + crate codegen_options_strs: Vec, /// Debugging (`-Z`) options to pass to the compiler. - pub debugging_opts: DebuggingOptions, + crate debugging_opts: DebuggingOptions, /// Debugging (`-Z`) options strings to pass to the compiler. 
- pub debugging_opts_strs: Vec, + crate debugging_opts_strs: Vec, /// The target used to compile the crate against. - pub target: TargetTriple, + crate target: TargetTriple, /// Edition used when reading the crate. Defaults to "2015". Also used by default when /// compiling doctests from the crate. - pub edition: Edition, + crate edition: Edition, /// The path to the sysroot. Used during the compilation process. - pub maybe_sysroot: Option, + crate maybe_sysroot: Option, /// Lint information passed over the command-line. - pub lint_opts: Vec<(String, Level)>, + crate lint_opts: Vec<(String, Level)>, /// Whether to ask rustc to describe the lints it knows. Practically speaking, this will not be /// used, since we abort if we have no input file, but it's included for completeness. - pub describe_lints: bool, + crate describe_lints: bool, /// What level to cap lints at. - pub lint_cap: Option, + crate lint_cap: Option, // Options specific to running doctests /// Whether we should run doctests instead of generating docs. - pub should_test: bool, + crate should_test: bool, /// List of arguments to pass to the test harness, if running tests. - pub test_args: Vec, + crate test_args: Vec, /// Optional path to persist the doctest executables to, defaults to a /// temporary directory if not set. - pub persist_doctests: Option, + crate persist_doctests: Option, /// Runtool to run doctests with - pub runtool: Option, + crate runtool: Option, /// Arguments to pass to the runtool - pub runtool_args: Vec, + crate runtool_args: Vec, /// Whether to allow ignoring doctests on a per-target basis /// For example, using ignore-foo to ignore running the doctest on any target that /// contains "foo" as a substring - pub enable_per_target_ignores: bool, + crate enable_per_target_ignores: bool, /// The path to a rustc-like binary to build tests with. If not set, we /// default to loading from $sysroot/bin/rustc. - pub test_builder: Option, + crate test_builder: Option, // Options that affect the documentation process /// The selected default set of passes to use. /// /// Be aware: This option can come both from the CLI and from crate attributes! - pub default_passes: DefaultPassOption, + crate default_passes: DefaultPassOption, /// Any passes manually selected by the user. /// /// Be aware: This option can come both from the CLI and from crate attributes! - pub manual_passes: Vec, + crate manual_passes: Vec, /// Whether to display warnings during doc generation or while gathering doctests. By default, /// all non-rustdoc-specific lints are allowed when generating docs. - pub display_warnings: bool, + crate display_warnings: bool, /// Whether to run the `calculate-doc-coverage` pass, which counts the number of public items /// with and without documentation. - pub show_coverage: bool, + crate show_coverage: bool, // Options that alter generated documentation pages /// Crate version to note on the sidebar of generated docs. - pub crate_version: Option, + crate crate_version: Option, /// Collected options specific to outputting final pages. - pub render_options: RenderOptions, + crate render_options: RenderOptions, /// Output format rendering (used only for "show-coverage" option for the moment) - pub output_format: Option, + crate output_format: Option, + /// If this option is set to `true`, rustdoc will only run checks and not generate + /// documentation. 
+ crate run_check: bool, } impl fmt::Debug for Options { @@ -185,94 +188,96 @@ impl fmt::Debug for Options { .field("runtool", &self.runtool) .field("runtool_args", &self.runtool_args) .field("enable-per-target-ignores", &self.enable_per_target_ignores) + .field("run_check", &self.run_check) .finish() } } /// Configuration options for the HTML page-creation process. #[derive(Clone, Debug)] -pub struct RenderOptions { +crate struct RenderOptions { /// Output directory to generate docs into. Defaults to `doc`. - pub output: PathBuf, + crate output: PathBuf, /// External files to insert into generated pages. - pub external_html: ExternalHtml, + crate external_html: ExternalHtml, /// A pre-populated `IdMap` with the default headings and any headings added by Markdown files /// processed by `external_html`. - pub id_map: IdMap, + crate id_map: IdMap, /// If present, playground URL to use in the "Run" button added to code samples. /// /// Be aware: This option can come both from the CLI and from crate attributes! - pub playground_url: Option, + crate playground_url: Option, /// Whether to sort modules alphabetically on a module page instead of using declaration order. /// `true` by default. // // FIXME(misdreavus): the flag name is `--sort-modules-by-appearance` but the meaning is // inverted once read. - pub sort_modules_alphabetically: bool, + crate sort_modules_alphabetically: bool, /// List of themes to extend the docs with. Original argument name is included to assist in /// displaying errors if it fails a theme check. - pub themes: Vec, + crate themes: Vec, /// If present, CSS file that contains rules to add to the default CSS. - pub extension_css: Option, + crate extension_css: Option, /// A map of crate names to the URL to use instead of querying the crate's `html_root_url`. - pub extern_html_root_urls: BTreeMap, + crate extern_html_root_urls: BTreeMap, /// A map of the default settings (values are as for DOM storage API). Keys should lack the /// `rustdoc-` prefix. - pub default_settings: HashMap, + crate default_settings: HashMap, /// If present, suffix added to CSS/JavaScript files when referencing them in generated pages. - pub resource_suffix: String, + crate resource_suffix: String, /// Whether to run the static CSS/JavaScript through a minifier when outputting them. `true` by /// default. // // FIXME(misdreavus): the flag name is `--disable-minification` but the meaning is inverted // once read. - pub enable_minification: bool, + crate enable_minification: bool, /// Whether to create an index page in the root of the output directory. If this is true but /// `enable_index_page` is None, generate a static listing of crates instead. - pub enable_index_page: bool, + crate enable_index_page: bool, /// A file to use as the index page at the root of the output directory. Overrides /// `enable_index_page` to be true if set. - pub index_page: Option, + crate index_page: Option, /// An optional path to use as the location of static files. If not set, uses combinations of /// `../` to reach the documentation root. - pub static_root_path: Option, + crate static_root_path: Option, // Options specific to reading standalone Markdown files /// Whether to generate a table of contents on the output file when reading a standalone /// Markdown file. - pub markdown_no_toc: bool, + crate markdown_no_toc: bool, /// Additional CSS files to link in pages generated from standalone Markdown files. 
- pub markdown_css: Vec, + crate markdown_css: Vec, /// If present, playground URL to use in the "Run" button added to code samples generated from /// standalone Markdown files. If not present, `playground_url` is used. - pub markdown_playground_url: Option, + crate markdown_playground_url: Option, /// If false, the `select` element to have search filtering by crates on rendered docs /// won't be generated. - pub generate_search_filter: bool, + crate generate_search_filter: bool, /// Document items that have lower than `pub` visibility. - pub document_private: bool, + crate document_private: bool, /// Document items that have `doc(hidden)`. - pub document_hidden: bool, + crate document_hidden: bool, + crate unstable_features: rustc_feature::UnstableFeatures, } /// Temporary storage for data obtained during `RustdocVisitor::clean()`. /// Later on moved into `CACHE_KEY`. #[derive(Default, Clone)] -pub struct RenderInfo { - pub inlined: FxHashSet, - pub external_paths: crate::core::ExternalPaths, - pub exact_paths: FxHashMap>, - pub access_levels: AccessLevels, - pub deref_trait_did: Option, - pub deref_mut_trait_did: Option, - pub owned_box_did: Option, - pub output_format: Option, +crate struct RenderInfo { + crate inlined: FxHashSet, + crate external_paths: crate::core::ExternalPaths, + crate exact_paths: FxHashMap>, + crate access_levels: AccessLevels, + crate deref_trait_did: Option, + crate deref_mut_trait_did: Option, + crate owned_box_did: Option, + crate output_format: Option, } impl Options { /// Parses the given command-line for options. If an error message or other early-return has /// been printed, returns `Err` with the exit code. - pub fn from_matches(matches: &getopts::Matches) -> Result { + crate fn from_matches(matches: &getopts::Matches) -> Result { // Check for unstable options. 
nightly_options::check_nightly_options(&matches, &opts()); @@ -295,7 +300,7 @@ impl Options { println_condition(p.condition); } - if nightly_options::is_nightly_build() { + if nightly_options::match_is_nightly_build(matches) { println!("\nPasses run with `--show-coverage`:"); for p in passes::COVERAGE_PASSES { print!("{:>20}", p.pass.name); @@ -479,6 +484,7 @@ impl Options { &matches.opt_strs("html-after-content"), &matches.opt_strs("markdown-before-content"), &matches.opt_strs("markdown-after-content"), + nightly_options::match_is_nightly_build(&matches), &diag, &mut id_map, edition, @@ -535,7 +541,9 @@ impl Options { let output_format = match matches.opt_str("output-format") { Some(s) => match OutputFormat::try_from(s.as_str()) { Ok(o) => { - if o.is_json() && !(show_coverage || nightly_options::is_nightly_build()) { + if o.is_json() + && !(show_coverage || nightly_options::match_is_nightly_build(matches)) + { diag.struct_err("json output format isn't supported for doc generation") .emit(); return Err(1); @@ -581,12 +589,12 @@ impl Options { let enable_per_target_ignores = matches.opt_present("enable-per-target-ignores"); let document_private = matches.opt_present("document-private-items"); let document_hidden = matches.opt_present("document-hidden-items"); + let run_check = matches.opt_present("check"); let (lint_opts, describe_lints, lint_cap) = get_cmd_lint_options(matches, error_format); Ok(Options { input, - crate_name, proc_macro_crate, error_format, libs, @@ -616,6 +624,7 @@ impl Options { runtool_args, enable_per_target_ignores, test_builder, + run_check, render_options: RenderOptions { output, external_html, @@ -637,13 +646,17 @@ impl Options { generate_search_filter, document_private, document_hidden, + unstable_features: rustc_feature::UnstableFeatures::from_environment( + crate_name.as_deref(), + ), }, + crate_name, output_format, }) } /// Returns `true` if the file given as `self.input` is a Markdown file. 
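A recurring change in this file is that nightly detection stops being ambient: `nightly_options::is_nightly_build()` becomes `match_is_nightly_build(matches)`, and `UnstableFeatures::from_environment` now receives the crate name so a `RUSTC_BOOTSTRAP` override can apply to specific crates. The sketch below models that kind of per-crate check; the environment-variable conventions used here are an assumption for illustration, not rustc's exact implementation:

```rust
use std::env;

// Hypothetical helper, illustrative only: decide whether unstable features may
// be used, given the release channel and the crate being compiled.
fn unstable_features_allowed(nightly_build: bool, krate: Option<&str>) -> bool {
    // Assumed convention: RUSTC_BOOTSTRAP=1 overrides for every crate, while a
    // comma-separated list of crate names overrides only for those crates.
    let bootstrap_override = env::var("RUSTC_BOOTSTRAP").map_or(false, |var| {
        var == "1" || krate.map_or(false, |name| var.split(',').any(|k| k == name))
    });
    bootstrap_override || nightly_build
}

fn main() {
    // On a stable toolchain, only an explicit override unlocks unstable features.
    println!("{}", unstable_features_allowed(false, Some("my_crate")));
}
```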
- pub fn markdown_input(&self) -> bool { + crate fn markdown_input(&self) -> bool { self.input.extension().map_or(false, |e| e == "md" || e == "markdown") } } @@ -655,7 +668,8 @@ fn check_deprecated_options(matches: &getopts::Matches, diag: &rustc_errors::Han for flag in deprecated_flags.iter() { if matches.opt_present(flag) { if *flag == "output-format" - && (matches.opt_present("show-coverage") || nightly_options::is_nightly_build()) + && (matches.opt_present("show-coverage") + || nightly_options::match_is_nightly_build(matches)) { continue; } diff --git a/src/librustdoc/core.rs b/src/librustdoc/core.rs index 4cad6418d6a..413f5bdf521 100644 --- a/src/librustdoc/core.rs +++ b/src/librustdoc/core.rs @@ -36,52 +36,51 @@ use crate::config::{Options as RustdocOptions, RenderOptions}; use crate::config::{OutputFormat, RenderInfo}; use crate::passes::{self, Condition::*, ConditionalPass}; -pub use rustc_session::config::{CodegenOptions, DebuggingOptions, Input, Options}; -pub use rustc_session::search_paths::SearchPath; +crate use rustc_session::config::{DebuggingOptions, Input, Options}; -pub type ExternalPaths = FxHashMap, clean::TypeKind)>; +crate type ExternalPaths = FxHashMap, clean::TypeKind)>; -pub struct DocContext<'tcx> { - pub tcx: TyCtxt<'tcx>, - pub resolver: Rc>, +crate struct DocContext<'tcx> { + crate tcx: TyCtxt<'tcx>, + crate resolver: Rc>, /// Later on moved into `CACHE_KEY` - pub renderinfo: RefCell, + crate renderinfo: RefCell, /// Later on moved through `clean::Crate` into `CACHE_KEY` - pub external_traits: Rc>>, + crate external_traits: Rc>>, /// Used while populating `external_traits` to ensure we don't process the same trait twice at /// the same time. - pub active_extern_traits: RefCell>, + crate active_extern_traits: RefCell>, // The current set of type and lifetime substitutions, // for expanding type aliases at the HIR level: /// Table `DefId` of type parameter -> substituted type - pub ty_substs: RefCell>, + crate ty_substs: RefCell>, /// Table `DefId` of lifetime parameter -> substituted lifetime - pub lt_substs: RefCell>, + crate lt_substs: RefCell>, /// Table `DefId` of const parameter -> substituted const - pub ct_substs: RefCell>, + crate ct_substs: RefCell>, /// Table synthetic type parameter for `impl Trait` in argument position -> bounds - pub impl_trait_bounds: RefCell>>, - pub fake_def_ids: RefCell>, - pub all_fake_def_ids: RefCell>, + crate impl_trait_bounds: RefCell>>, + crate fake_def_ids: RefCell>, + crate all_fake_def_ids: RefCell>, /// Auto-trait or blanket impls processed so far, as `(self_ty, trait_def_id)`. // FIXME(eddyb) make this a `ty::TraitRef<'tcx>` set. - pub generated_synthetics: RefCell, DefId)>>, - pub auto_traits: Vec, + crate generated_synthetics: RefCell, DefId)>>, + crate auto_traits: Vec, /// The options given to rustdoc that could be relevant to a pass. - pub render_options: RenderOptions, + crate render_options: RenderOptions, /// The traits in scope for a given module. /// /// See `collect_intra_doc_links::traits_implemented_by` for more details. /// `map>` - pub module_trait_cache: RefCell>>, + crate module_trait_cache: RefCell>>, } impl<'tcx> DocContext<'tcx> { - pub fn sess(&self) -> &Session { + crate fn sess(&self) -> &Session { &self.tcx.sess } - pub fn enter_resolver(&self, f: F) -> R + crate fn enter_resolver(&self, f: F) -> R where F: FnOnce(&mut resolve::Resolver<'_>) -> R, { @@ -90,7 +89,7 @@ impl<'tcx> DocContext<'tcx> { /// Call the closure with the given parameters set as /// the substitutions for a type alias' RHS. 
- pub fn enter_alias( + crate fn enter_alias( &self, ty_substs: FxHashMap, lt_substs: FxHashMap, @@ -120,7 +119,7 @@ impl<'tcx> DocContext<'tcx> { // Instead, we construct 'fake' def ids, which start immediately after the last DefId. // In the Debug impl for clean::Item, we explicitly check for fake // def ids, as we'll end up with a panic if we use the DefId Debug impl for fake DefIds - pub fn next_def_id(&self, crate_num: CrateNum) -> DefId { + crate fn next_def_id(&self, crate_num: CrateNum) -> DefId { let start_def_id = { let num_def_ids = if crate_num == LOCAL_CRATE { self.tcx.hir().definitions().def_path_table().num_def_ids() @@ -150,7 +149,7 @@ impl<'tcx> DocContext<'tcx> { /// Like `hir().local_def_id_to_hir_id()`, but skips calling it on fake DefIds. /// (This avoids a slice-index-out-of-bounds panic.) - pub fn as_local_hir_id(&self, def_id: DefId) -> Option { + crate fn as_local_hir_id(&self, def_id: DefId) -> Option { if self.all_fake_def_ids.borrow().contains(&def_id) { None } else { @@ -158,7 +157,7 @@ impl<'tcx> DocContext<'tcx> { } } - pub fn stability(&self, id: HirId) -> Option { + crate fn stability(&self, id: HirId) -> Option { self.tcx .hir() .opt_local_def_id(id) @@ -166,7 +165,7 @@ impl<'tcx> DocContext<'tcx> { .cloned() } - pub fn deprecation(&self, id: HirId) -> Option { + crate fn deprecation(&self, id: HirId) -> Option { self.tcx .hir() .opt_local_def_id(id) @@ -178,7 +177,7 @@ impl<'tcx> DocContext<'tcx> { /// /// If the given `error_format` is `ErrorOutputType::Json` and no `SourceMap` is given, a new one /// will be created for the handler. -pub fn new_handler( +crate fn new_handler( error_format: ErrorOutputType, source_map: Option>, debugging_opts: &DebuggingOptions, @@ -280,7 +279,7 @@ where (lint_opts, lint_caps) } -pub fn run_core( +crate fn run_core( options: RustdocOptions, ) -> (clean::Crate, RenderInfo, RenderOptions, Lrc) { // Parse, resolve, and typecheck the given crate. @@ -371,7 +370,7 @@ pub fn run_core( cg: codegen_options, externs, target_triple: target, - unstable_features: UnstableFeatures::from_environment(), + unstable_features: UnstableFeatures::from_environment(crate_name.as_deref()), actually_rustdoc: true, debugging_opts, error_format, @@ -725,7 +724,7 @@ impl<'tcx> Visitor<'tcx> for EmitIgnoredResolutionErrors<'tcx> { /// `DefId` or parameter index (`ty::ParamTy.index`) of a synthetic type parameter /// for `impl Trait` in argument position. #[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum ImplTraitParam { +crate enum ImplTraitParam { DefId(DefId), ParamIndex(u32), } diff --git a/src/librustdoc/docfs.rs b/src/librustdoc/docfs.rs index 8b52ce710a4..9b740acfcdf 100644 --- a/src/librustdoc/docfs.rs +++ b/src/librustdoc/docfs.rs @@ -24,38 +24,38 @@ macro_rules! 
try_err { }; } -pub trait PathError { +crate trait PathError { fn new>(e: S, path: P) -> Self where S: ToString + Sized; } -pub struct DocFS { +crate struct DocFS { sync_only: bool, errors: Option>, } impl DocFS { - pub fn new(errors: Sender) -> DocFS { + crate fn new(errors: Sender) -> DocFS { DocFS { sync_only: false, errors: Some(errors) } } - pub fn set_sync_only(&mut self, sync_only: bool) { + crate fn set_sync_only(&mut self, sync_only: bool) { self.sync_only = sync_only; } - pub fn close(&mut self) { + crate fn close(&mut self) { self.errors = None; } - pub fn create_dir_all>(&self, path: P) -> io::Result<()> { + crate fn create_dir_all>(&self, path: P) -> io::Result<()> { // For now, dir creation isn't a huge time consideration, do it // synchronously, which avoids needing ordering between write() actions // and directory creation. fs::create_dir_all(path) } - pub fn write(&self, path: P, contents: C) -> Result<(), E> + crate fn write(&self, path: P, contents: C) -> Result<(), E> where P: AsRef, C: AsRef<[u8]>, diff --git a/src/librustdoc/doctest.rs b/src/librustdoc/doctest.rs index eb33890fb5f..9f35e57df41 100644 --- a/src/librustdoc/doctest.rs +++ b/src/librustdoc/doctest.rs @@ -1,14 +1,13 @@ use rustc_ast as ast; use rustc_data_structures::sync::Lrc; -use rustc_errors::ErrorReported; -use rustc_feature::UnstableFeatures; +use rustc_errors::{ColorConfig, ErrorReported}; use rustc_hir as hir; use rustc_hir::intravisit; use rustc_hir::{HirId, CRATE_HIR_ID}; use rustc_interface::interface; use rustc_middle::hir::map::Map; use rustc_middle::ty::TyCtxt; -use rustc_session::config::{self, CrateType}; +use rustc_session::config::{self, CrateType, ErrorOutputType}; use rustc_session::{lint, DiagnosticOutput, Session}; use rustc_span::edition::Edition; use rustc_span::source_map::SourceMap; @@ -32,17 +31,17 @@ use crate::html::markdown::{self, ErrorCodes, Ignore, LangString}; use crate::passes::span_of_attrs; #[derive(Clone, Default)] -pub struct TestOptions { +crate struct TestOptions { /// Whether to disable the default `extern crate my_crate;` when creating doctests. - pub no_crate_inject: bool, + crate no_crate_inject: bool, /// Whether to emit compilation warnings when compiling doctests. Setting this will suppress /// the default `#![allow(unused)]`. - pub display_warnings: bool, + crate display_warnings: bool, /// Additional crate-level attributes to add to doctests. 
- pub attrs: Vec, + crate attrs: Vec, } -pub fn run(options: Options) -> Result<(), ErrorReported> { +crate fn run(options: Options) -> Result<(), ErrorReported> { let input = config::Input::File(options.input.clone()); let invalid_codeblock_attributes_name = rustc_lint::builtin::INVALID_CODEBLOCK_ATTRIBUTES.name; @@ -70,7 +69,7 @@ pub fn run(options: Options) -> Result<(), ErrorReported> { lint_cap: Some(options.lint_cap.clone().unwrap_or_else(|| lint::Forbid)), cg: options.codegen_options.clone(), externs: options.externs.clone(), - unstable_features: UnstableFeatures::from_environment(), + unstable_features: options.render_options.unstable_features, actually_rustdoc: true, debugging_opts: config::DebuggingOptions { ..config::basic_debugging_options() }, edition: options.edition, @@ -249,7 +248,8 @@ fn run_test( outdir: DirState, path: PathBuf, ) -> Result<(), TestFailure> { - let (test, line_offset) = make_test(test, Some(cratename), as_test_harness, opts, edition); + let (test, line_offset, supports_color) = + make_test(test, Some(cratename), as_test_harness, opts, edition); let output_file = outdir.path().join("rust_out"); @@ -294,6 +294,20 @@ fn run_test( path.to_str().expect("target path must be valid unicode").to_string() } }); + if let ErrorOutputType::HumanReadable(kind) = options.error_format { + let (_, color_config) = kind.unzip(); + match color_config { + ColorConfig::Never => { + compiler.arg("--color").arg("never"); + } + ColorConfig::Always => { + compiler.arg("--color").arg("always"); + } + ColorConfig::Auto => { + compiler.arg("--color").arg(if supports_color { "always" } else { "never" }); + } + } + } compiler.arg("-"); compiler.stdin(Stdio::piped()); @@ -321,7 +335,10 @@ fn run_test( (true, false) => {} (false, true) => { if !error_codes.is_empty() { - error_codes.retain(|err| !out.contains(&format!("error[{}]: ", err))); + // We used to check if the output contained "error[{}]: " but since we added the + // colored output, we can't anymore because of the color escape characters before + // the ":". + error_codes.retain(|err| !out.contains(&format!("error[{}]", err))); if !error_codes.is_empty() { return Err(TestFailure::MissingErrorCodes(error_codes)); @@ -363,18 +380,19 @@ fn run_test( } /// Transforms a test into code that can be compiled into a Rust binary, and returns the number of -/// lines before the test code begins. -pub fn make_test( +/// lines before the test code begins as well as if the output stream supports colors or not. +crate fn make_test( s: &str, cratename: Option<&str>, dont_insert_main: bool, opts: &TestOptions, edition: Edition, -) -> (String, usize) { +) -> (String, usize, bool) { let (crate_attrs, everything_else, crates) = partition_source(s); let everything_else = everything_else.trim(); let mut line_offset = 0; let mut prog = String::new(); + let mut supports_color = false; if opts.attrs.is_empty() && !opts.display_warnings { // If there aren't any attributes supplied by #![doc(test(attr(...)))], then allow some @@ -400,7 +418,7 @@ pub fn make_test( // crate already is included. 
let result = rustc_driver::catch_fatal_errors(|| { rustc_span::with_session_globals(edition, || { - use rustc_errors::emitter::EmitterWriter; + use rustc_errors::emitter::{Emitter, EmitterWriter}; use rustc_errors::Handler; use rustc_parse::maybe_new_parser_from_source_str; use rustc_session::parse::ParseSess; @@ -412,8 +430,13 @@ pub fn make_test( // Any errors in parsing should also appear when the doctest is compiled for real, so just // send all the errors that librustc_ast emits directly into a `Sink` instead of stderr. let sm = Lrc::new(SourceMap::new(FilePathMapping::empty())); + supports_color = + EmitterWriter::stderr(ColorConfig::Auto, None, false, false, Some(80), false) + .supports_color(); + let emitter = EmitterWriter::new(box io::sink(), None, false, false, false, None, false); + // FIXME(misdreavus): pass `-Z treat-err-as-bug` to the doctest parser let handler = Handler::with_emitter(false, None, box emitter); let sess = ParseSess::with_span_handler(handler, sm); @@ -483,7 +506,7 @@ pub fn make_test( Err(ErrorReported) => { // If the parser panicked due to a fatal error, pass the test code through unchanged. // The error will be reported during compilation. - return (s.to_owned(), 0); + return (s.to_owned(), 0, false); } }; @@ -533,7 +556,7 @@ pub fn make_test( debug!("final doctest:\n{}", prog); - (prog, line_offset) + (prog, line_offset, supports_color) } // FIXME(aburka): use a real parser to deal with multiline attributes @@ -606,7 +629,7 @@ fn partition_source(s: &str) -> (String, String, String) { (before, after, crates) } -pub trait Tester { +crate trait Tester { fn add_test(&mut self, test: String, config: LangString, line: usize); fn get_line(&self) -> usize { 0 @@ -614,8 +637,8 @@ pub trait Tester { fn register_header(&mut self, _name: &str, _level: u32) {} } -pub struct Collector { - pub tests: Vec, +crate struct Collector { + crate tests: Vec, // The name of the test displayed to the user, separated by `::`. 
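The doctest changes above make `make_test` report whether stderr supports color (probed through a throwaway `EmitterWriter`), and `run_test` forwards a matching `--color` flag to the child compiler; the expected-error check is also relaxed to `error[{}]` because ANSI escapes can now sit before the colon. Below is a self-contained sketch of the flag mapping, using a local stand-in for `ColorConfig`:

```rust
// Illustrative stand-in for rustc_errors::ColorConfig.
#[derive(Clone, Copy)]
enum ColorConfig {
    Auto,
    Always,
    Never,
}

// Translate the requested color mode into a `--color` value for the child
// rustc process, which writes to a pipe and so cannot detect a terminal itself.
fn color_flag(requested: ColorConfig, stderr_supports_color: bool) -> &'static str {
    match requested {
        ColorConfig::Never => "never",
        ColorConfig::Always => "always",
        ColorConfig::Auto => {
            if stderr_supports_color {
                "always"
            } else {
                "never"
            }
        }
    }
}

fn main() {
    assert_eq!(color_flag(ColorConfig::Never, true), "never");
    assert_eq!(color_flag(ColorConfig::Always, false), "always");
    assert_eq!(color_flag(ColorConfig::Auto, true), "always");
    assert_eq!(color_flag(ColorConfig::Auto, false), "never");
}
```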
// @@ -651,7 +674,7 @@ pub struct Collector { } impl Collector { - pub fn new( + crate fn new( cratename: String, options: Options, use_headers: bool, @@ -683,7 +706,7 @@ impl Collector { format!("{} - {}(line {})", filename, item_path, line) } - pub fn set_position(&mut self, position: Span) { + crate fn set_position(&mut self, position: Span) { self.position = position; } diff --git a/src/librustdoc/doctest/tests.rs b/src/librustdoc/doctest/tests.rs index a96186a95e1..a024e9c72a4 100644 --- a/src/librustdoc/doctest/tests.rs +++ b/src/librustdoc/doctest/tests.rs @@ -11,8 +11,8 @@ fn main() { assert_eq!(2+2, 4); }" .to_string(); - let output = make_test(input, None, false, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 2)); + let (output, len, _) = make_test(input, None, false, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 2)); } #[test] @@ -26,8 +26,8 @@ fn main() { assert_eq!(2+2, 4); }" .to_string(); - let output = make_test(input, Some("asdf"), false, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 2)); + let (output, len, _) = make_test(input, Some("asdf"), false, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 2)); } #[test] @@ -44,8 +44,8 @@ use asdf::qwop; assert_eq!(2+2, 4); }" .to_string(); - let output = make_test(input, Some("asdf"), false, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 3)); + let (output, len, _) = make_test(input, Some("asdf"), false, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 3)); } #[test] @@ -61,8 +61,8 @@ use asdf::qwop; assert_eq!(2+2, 4); }" .to_string(); - let output = make_test(input, Some("asdf"), false, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 2)); + let (output, len, _) = make_test(input, Some("asdf"), false, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 2)); } #[test] @@ -79,8 +79,8 @@ use std::*; assert_eq!(2+2, 4); }" .to_string(); - let output = make_test(input, Some("std"), false, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 2)); + let (output, len, _) = make_test(input, Some("std"), false, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 2)); } #[test] @@ -98,8 +98,8 @@ use asdf::qwop; assert_eq!(2+2, 4); }" .to_string(); - let output = make_test(input, Some("asdf"), false, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 2)); + let (output, len, _) = make_test(input, Some("asdf"), false, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 2)); } #[test] @@ -115,8 +115,8 @@ use asdf::qwop; assert_eq!(2+2, 4); }" .to_string(); - let output = make_test(input, Some("asdf"), false, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 2)); + let (output, len, _) = make_test(input, Some("asdf"), false, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 2)); } #[test] @@ -134,8 +134,8 @@ use asdf::qwop; assert_eq!(2+2, 4); }" .to_string(); - let output = make_test(input, Some("asdf"), false, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 3)); + let (output, len, _) = make_test(input, Some("asdf"), false, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 3)); // Adding more will also bump the returned line offset. 
opts.attrs.push("feature(hella_dope)".to_string()); @@ -147,8 +147,8 @@ use asdf::qwop; assert_eq!(2+2, 4); }" .to_string(); - let output = make_test(input, Some("asdf"), false, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 4)); + let (output, len, _) = make_test(input, Some("asdf"), false, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 4)); } #[test] @@ -164,8 +164,8 @@ fn main() { assert_eq!(2+2, 4); }" .to_string(); - let output = make_test(input, None, false, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 2)); + let (output, len, _) = make_test(input, None, false, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 2)); } #[test] @@ -180,8 +180,8 @@ fn main() { assert_eq!(2+2, 4); }" .to_string(); - let output = make_test(input, None, false, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 1)); + let (output, len, _) = make_test(input, None, false, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 1)); } #[test] @@ -196,8 +196,8 @@ fn main() { assert_eq!(2+2, 4); }" .to_string(); - let output = make_test(input, None, false, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 2)); + let (output, len, _) = make_test(input, None, false, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 2)); } #[test] @@ -210,8 +210,8 @@ assert_eq!(2+2, 4);"; //Ceci n'est pas une `fn main` assert_eq!(2+2, 4);" .to_string(); - let output = make_test(input, None, true, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 1)); + let (output, len, _) = make_test(input, None, true, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 1)); } #[test] @@ -224,8 +224,8 @@ fn make_test_display_warnings() { assert_eq!(2+2, 4); }" .to_string(); - let output = make_test(input, None, false, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 1)); + let (output, len, _) = make_test(input, None, false, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 1)); } #[test] @@ -242,8 +242,8 @@ assert_eq!(2+2, 4); }" .to_string(); - let output = make_test(input, None, false, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 2)); + let (output, len, _) = make_test(input, None, false, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 2)); let input = "extern crate hella_qwop; assert_eq!(asdf::foo, 4);"; @@ -256,8 +256,8 @@ assert_eq!(asdf::foo, 4); }" .to_string(); - let output = make_test(input, Some("asdf"), false, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 3)); + let (output, len, _) = make_test(input, Some("asdf"), false, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 3)); } #[test] @@ -274,6 +274,6 @@ test_wrapper! { }" .to_string(); - let output = make_test(input, Some("my_crate"), false, &opts, DEFAULT_EDITION); - assert_eq!(output, (expected, 1)); + let (output, len, _) = make_test(input, Some("my_crate"), false, &opts, DEFAULT_EDITION); + assert_eq!((output, len), (expected, 1)); } diff --git a/src/librustdoc/doctree.rs b/src/librustdoc/doctree.rs index ee217d99d2c..d56328cc2aa 100644 --- a/src/librustdoc/doctree.rs +++ b/src/librustdoc/doctree.rs @@ -1,79 +1,60 @@ //! This module is used to store stuff from Rust's AST in a more convenient //! manner (and with prettier names) before cleaning. 
-pub use self::StructType::*; +crate use self::StructType::*; use rustc_ast as ast; use rustc_span::hygiene::MacroKind; -use rustc_span::{self, Span, Symbol}; +use rustc_span::{self, symbol::Ident, Span, Symbol}; use rustc_hir as hir; use rustc_hir::def_id::CrateNum; use rustc_hir::HirId; -pub struct Module<'hir> { - pub name: Option, - pub attrs: &'hir [ast::Attribute], - pub where_outer: Span, - pub where_inner: Span, - pub extern_crates: Vec>, - pub imports: Vec>, - pub structs: Vec>, - pub unions: Vec>, - pub enums: Vec>, - pub fns: Vec>, - pub mods: Vec>, - pub id: hir::HirId, - pub typedefs: Vec>, - pub opaque_tys: Vec>, - pub statics: Vec>, - pub constants: Vec>, - pub traits: Vec>, - pub vis: &'hir hir::Visibility<'hir>, - pub impls: Vec>, - pub foreigns: Vec>, - pub macros: Vec>, - pub proc_macros: Vec>, - pub trait_aliases: Vec>, - pub is_crate: bool, +crate struct Module<'hir> { + crate name: Option, + crate attrs: &'hir [ast::Attribute], + crate where_outer: Span, + crate where_inner: Span, + crate extern_crates: Vec>, + crate imports: Vec>, + crate fns: Vec>, + crate mods: Vec>, + crate id: hir::HirId, + // (item, renamed) + crate items: Vec<(&'hir hir::Item<'hir>, Option)>, + crate traits: Vec>, + crate impls: Vec>, + crate foreigns: Vec>, + crate macros: Vec, + crate proc_macros: Vec, + crate is_crate: bool, } impl Module<'hir> { - pub fn new( - name: Option, - attrs: &'hir [ast::Attribute], - vis: &'hir hir::Visibility<'hir>, - ) -> Module<'hir> { + crate fn new(name: Option, attrs: &'hir [ast::Attribute]) -> Module<'hir> { Module { name, id: hir::CRATE_HIR_ID, - vis, where_outer: rustc_span::DUMMY_SP, where_inner: rustc_span::DUMMY_SP, attrs, extern_crates: Vec::new(), imports: Vec::new(), - structs: Vec::new(), - unions: Vec::new(), - enums: Vec::new(), fns: Vec::new(), mods: Vec::new(), - typedefs: Vec::new(), - opaque_tys: Vec::new(), - statics: Vec::new(), - constants: Vec::new(), + items: Vec::new(), traits: Vec::new(), impls: Vec::new(), foreigns: Vec::new(), macros: Vec::new(), proc_macros: Vec::new(), - trait_aliases: Vec::new(), is_crate: false, } } } #[derive(Debug, Clone, Copy)] -pub enum StructType { +crate enum StructType { /// A braced struct Plain, /// A tuple struct @@ -82,190 +63,92 @@ pub enum StructType { Unit, } -pub struct Struct<'hir> { - pub vis: &'hir hir::Visibility<'hir>, - pub id: hir::HirId, - pub struct_type: StructType, - pub name: Symbol, - pub generics: &'hir hir::Generics<'hir>, - pub attrs: &'hir [ast::Attribute], - pub fields: &'hir [hir::StructField<'hir>], - pub span: Span, +crate struct Variant<'hir> { + crate name: Symbol, + crate id: hir::HirId, + crate def: &'hir hir::VariantData<'hir>, } -pub struct Union<'hir> { - pub vis: &'hir hir::Visibility<'hir>, - pub id: hir::HirId, - pub struct_type: StructType, - pub name: Symbol, - pub generics: &'hir hir::Generics<'hir>, - pub attrs: &'hir [ast::Attribute], - pub fields: &'hir [hir::StructField<'hir>], - pub span: Span, +crate struct Function<'hir> { + crate decl: &'hir hir::FnDecl<'hir>, + crate id: hir::HirId, + crate name: Symbol, + crate header: hir::FnHeader, + crate generics: &'hir hir::Generics<'hir>, + crate body: hir::BodyId, } -pub struct Enum<'hir> { - pub vis: &'hir hir::Visibility<'hir>, - pub variants: Vec>, - pub generics: &'hir hir::Generics<'hir>, - pub attrs: &'hir [ast::Attribute], - pub id: hir::HirId, - pub span: Span, - pub name: Symbol, -} - -pub struct Variant<'hir> { - pub name: Symbol, - pub id: hir::HirId, - pub attrs: &'hir [ast::Attribute], - pub def: &'hir 
hir::VariantData<'hir>, - pub span: Span, -} - -pub struct Function<'hir> { - pub decl: &'hir hir::FnDecl<'hir>, - pub attrs: &'hir [ast::Attribute], - pub id: hir::HirId, - pub name: Symbol, - pub vis: &'hir hir::Visibility<'hir>, - pub header: hir::FnHeader, - pub span: Span, - pub generics: &'hir hir::Generics<'hir>, - pub body: hir::BodyId, -} - -pub struct Typedef<'hir> { - pub ty: &'hir hir::Ty<'hir>, - pub gen: &'hir hir::Generics<'hir>, - pub name: Symbol, - pub id: hir::HirId, - pub attrs: &'hir [ast::Attribute], - pub span: Span, - pub vis: &'hir hir::Visibility<'hir>, -} - -pub struct OpaqueTy<'hir> { - pub opaque_ty: &'hir hir::OpaqueTy<'hir>, - pub name: Symbol, - pub id: hir::HirId, - pub attrs: &'hir [ast::Attribute], - pub span: Span, - pub vis: &'hir hir::Visibility<'hir>, +crate struct Trait<'hir> { + crate is_auto: hir::IsAuto, + crate unsafety: hir::Unsafety, + crate name: Symbol, + crate items: Vec<&'hir hir::TraitItem<'hir>>, + crate generics: &'hir hir::Generics<'hir>, + crate bounds: &'hir [hir::GenericBound<'hir>], + crate attrs: &'hir [ast::Attribute], + crate id: hir::HirId, } #[derive(Debug)] -pub struct Static<'hir> { - pub type_: &'hir hir::Ty<'hir>, - pub mutability: hir::Mutability, - pub expr: hir::BodyId, - pub name: Symbol, - pub attrs: &'hir [ast::Attribute], - pub vis: &'hir hir::Visibility<'hir>, - pub id: hir::HirId, - pub span: Span, -} - -pub struct Constant<'hir> { - pub type_: &'hir hir::Ty<'hir>, - pub expr: hir::BodyId, - pub name: Symbol, - pub attrs: &'hir [ast::Attribute], - pub vis: &'hir hir::Visibility<'hir>, - pub id: hir::HirId, - pub span: Span, -} - -pub struct Trait<'hir> { - pub is_auto: hir::IsAuto, - pub unsafety: hir::Unsafety, - pub name: Symbol, - pub items: Vec<&'hir hir::TraitItem<'hir>>, - pub generics: &'hir hir::Generics<'hir>, - pub bounds: &'hir [hir::GenericBound<'hir>], - pub attrs: &'hir [ast::Attribute], - pub id: hir::HirId, - pub span: Span, - pub vis: &'hir hir::Visibility<'hir>, -} - -pub struct TraitAlias<'hir> { - pub name: Symbol, - pub generics: &'hir hir::Generics<'hir>, - pub bounds: &'hir [hir::GenericBound<'hir>], - pub attrs: &'hir [ast::Attribute], - pub id: hir::HirId, - pub span: Span, - pub vis: &'hir hir::Visibility<'hir>, -} - -#[derive(Debug)] -pub struct Impl<'hir> { - pub unsafety: hir::Unsafety, - pub polarity: hir::ImplPolarity, - pub defaultness: hir::Defaultness, - pub constness: hir::Constness, - pub generics: &'hir hir::Generics<'hir>, - pub trait_: &'hir Option>, - pub for_: &'hir hir::Ty<'hir>, - pub items: Vec<&'hir hir::ImplItem<'hir>>, - pub attrs: &'hir [ast::Attribute], - pub span: Span, - pub vis: &'hir hir::Visibility<'hir>, - pub id: hir::HirId, -} - -pub struct ForeignItem<'hir> { - pub vis: &'hir hir::Visibility<'hir>, - pub id: hir::HirId, - pub name: Symbol, - pub kind: &'hir hir::ForeignItemKind<'hir>, - pub attrs: &'hir [ast::Attribute], - pub span: Span, +crate struct Impl<'hir> { + crate unsafety: hir::Unsafety, + crate polarity: hir::ImplPolarity, + crate defaultness: hir::Defaultness, + crate constness: hir::Constness, + crate generics: &'hir hir::Generics<'hir>, + crate trait_: &'hir Option>, + crate for_: &'hir hir::Ty<'hir>, + crate items: Vec<&'hir hir::ImplItem<'hir>>, + crate attrs: &'hir [ast::Attribute], + crate span: Span, + crate vis: &'hir hir::Visibility<'hir>, + crate id: hir::HirId, +} + +crate struct ForeignItem<'hir> { + crate id: hir::HirId, + crate name: Symbol, + crate kind: &'hir hir::ForeignItemKind<'hir>, } // For Macro we store the DefId instead of 
the NodeId, since we also create // these imported macro_rules (which only have a DUMMY_NODE_ID). -pub struct Macro<'hir> { - pub name: Symbol, - pub hid: hir::HirId, - pub def_id: hir::def_id::DefId, - pub attrs: &'hir [ast::Attribute], - pub span: Span, - pub matchers: Vec, - pub imported_from: Option, +crate struct Macro { + crate name: Symbol, + crate def_id: hir::def_id::DefId, + crate matchers: Vec, + crate imported_from: Option, } -pub struct ExternCrate<'hir> { - pub name: Symbol, - pub hir_id: HirId, - pub cnum: CrateNum, - pub path: Option, - pub vis: &'hir hir::Visibility<'hir>, - pub attrs: &'hir [ast::Attribute], - pub span: Span, +crate struct ExternCrate<'hir> { + crate name: Symbol, + crate hir_id: HirId, + crate cnum: CrateNum, + crate path: Option, + crate vis: &'hir hir::Visibility<'hir>, + crate attrs: &'hir [ast::Attribute], + crate span: Span, } #[derive(Debug)] -pub struct Import<'hir> { - pub name: Symbol, - pub id: hir::HirId, - pub vis: &'hir hir::Visibility<'hir>, - pub attrs: &'hir [ast::Attribute], - pub path: &'hir hir::Path<'hir>, - pub glob: bool, - pub span: Span, +crate struct Import<'hir> { + crate name: Symbol, + crate id: hir::HirId, + crate vis: &'hir hir::Visibility<'hir>, + crate attrs: &'hir [ast::Attribute], + crate path: &'hir hir::Path<'hir>, + crate glob: bool, + crate span: Span, } -pub struct ProcMacro<'hir> { - pub name: Symbol, - pub id: hir::HirId, - pub kind: MacroKind, - pub helpers: Vec, - pub attrs: &'hir [ast::Attribute], - pub span: Span, +crate struct ProcMacro { + crate name: Symbol, + crate id: hir::HirId, + crate kind: MacroKind, + crate helpers: Vec, } -pub fn struct_type_from_def(vdata: &hir::VariantData<'_>) -> StructType { +crate fn struct_type_from_def(vdata: &hir::VariantData<'_>) -> StructType { match *vdata { hir::VariantData::Struct(..) => Plain, hir::VariantData::Tuple(..) 
=> Tuple, diff --git a/src/librustdoc/error.rs b/src/librustdoc/error.rs index 77063ab4639..82d0002b98b 100644 --- a/src/librustdoc/error.rs +++ b/src/librustdoc/error.rs @@ -5,9 +5,9 @@ use std::path::{Path, PathBuf}; use crate::docfs::PathError; #[derive(Debug)] -pub struct Error { - pub file: PathBuf, - pub error: String, +crate struct Error { + crate file: PathBuf, + crate error: String, } impl error::Error for Error {} diff --git a/src/librustdoc/externalfiles.rs b/src/librustdoc/externalfiles.rs index c8121d39d0f..6c86baa36ac 100644 --- a/src/librustdoc/externalfiles.rs +++ b/src/librustdoc/externalfiles.rs @@ -1,36 +1,36 @@ use crate::html::markdown::{ErrorCodes, IdMap, Markdown, Playground}; use crate::rustc_span::edition::Edition; -use rustc_feature::UnstableFeatures; use std::fs; use std::path::Path; use std::str; #[derive(Clone, Debug)] -pub struct ExternalHtml { +crate struct ExternalHtml { /// Content that will be included inline in the section of a /// rendered Markdown file or generated documentation - pub in_header: String, + crate in_header: String, /// Content that will be included inline between and the content of /// a rendered Markdown file or generated documentation - pub before_content: String, + crate before_content: String, /// Content that will be included inline between the content and of /// a rendered Markdown file or generated documentation - pub after_content: String, + crate after_content: String, } impl ExternalHtml { - pub fn load( + crate fn load( in_header: &[String], before_content: &[String], after_content: &[String], md_before_content: &[String], md_after_content: &[String], + nightly_build: bool, diag: &rustc_errors::Handler, id_map: &mut IdMap, edition: Edition, playground: &Option, ) -> Option { - let codes = ErrorCodes::from(UnstableFeatures::from_environment().is_nightly_build()); + let codes = ErrorCodes::from(nightly_build); let ih = load_external_files(in_header, diag)?; let bc = load_external_files(before_content, diag)?; let m_bc = load_external_files(md_before_content, diag)?; @@ -50,12 +50,12 @@ impl ExternalHtml { } } -pub enum LoadStringError { +crate enum LoadStringError { ReadFail, BadUtf8, } -pub fn load_string>( +crate fn load_string>( file_path: P, diag: &rustc_errors::Handler, ) -> Result { diff --git a/src/librustdoc/fold.rs b/src/librustdoc/fold.rs index d4ada3278e6..a72860ef0a8 100644 --- a/src/librustdoc/fold.rs +++ b/src/librustdoc/fold.rs @@ -1,27 +1,27 @@ use crate::clean::*; -pub struct StripItem(pub Item); +crate struct StripItem(pub Item); impl StripItem { - pub fn strip(self) -> Option { + crate fn strip(self) -> Option { match self.0 { - Item { inner: StrippedItem(..), .. } => Some(self.0), + Item { kind: StrippedItem(..), .. } => Some(self.0), mut i => { - i.inner = StrippedItem(box i.inner); + i.kind = StrippedItem(box i.kind); Some(i) } } } } -pub trait DocFolder: Sized { +crate trait DocFolder: Sized { fn fold_item(&mut self, item: Item) -> Option { self.fold_item_recur(item) } /// don't override! - fn fold_inner_recur(&mut self, inner: ItemEnum) -> ItemEnum { - match inner { + fn fold_inner_recur(&mut self, kind: ItemKind) -> ItemKind { + match kind { StrippedItem(..) => unreachable!(), ModuleItem(i) => ModuleItem(self.fold_mod(i)), StructItem(mut i) => { @@ -72,14 +72,14 @@ pub trait DocFolder: Sized { /// don't override! 
fn fold_item_recur(&mut self, item: Item) -> Option { - let Item { attrs, name, source, visibility, def_id, inner, stability, deprecation } = item; + let Item { attrs, name, source, visibility, def_id, kind, stability, deprecation } = item; - let inner = match inner { + let kind = match kind { StrippedItem(box i) => StrippedItem(box self.fold_inner_recur(i)), - _ => self.fold_inner_recur(inner), + _ => self.fold_inner_recur(kind), }; - Some(Item { attrs, name, source, inner, visibility, stability, deprecation, def_id }) + Some(Item { attrs, name, source, kind, visibility, stability, deprecation, def_id }) } fn fold_mod(&mut self, m: Module) -> Module { diff --git a/src/librustdoc/formats/cache.rs b/src/librustdoc/formats/cache.rs index b99321e8484..917c1a95fdb 100644 --- a/src/librustdoc/formats/cache.rs +++ b/src/librustdoc/formats/cache.rs @@ -30,25 +30,25 @@ thread_local!(crate static CACHE_KEY: RefCell> = Default::default()); /// to `Send` so it may be stored in a `Arc` instance and shared among the various /// rendering threads. #[derive(Default)] -pub struct Cache { +crate struct Cache { /// Maps a type ID to all known implementations for that type. This is only /// recognized for intra-crate `ResolvedPath` types, and is used to print /// out extra documentation on the page of an enum/struct. /// /// The values of the map are a list of implementations and documentation /// found on that implementation. - pub impls: FxHashMap>, + crate impls: FxHashMap>, /// Maintains a mapping of local crate `DefId`s to the fully qualified name /// and "short type description" of that node. This is used when generating /// URLs when a type is being linked to. External paths are not located in /// this map because the `External` type itself has all the information /// necessary. - pub paths: FxHashMap, ItemType)>, + crate paths: FxHashMap, ItemType)>, /// Similar to `paths`, but only holds external paths. This is only used for /// generating explicit hyperlinks to other crates. - pub external_paths: FxHashMap, ItemType)>, + crate external_paths: FxHashMap, ItemType)>, /// Maps local `DefId`s of exported types to fully qualified paths. /// Unlike 'paths', this mapping ignores any renames that occur @@ -60,36 +60,36 @@ pub struct Cache { /// to the path used if the corresponding type is inlined. By /// doing this, we can detect duplicate impls on a trait page, and only display /// the impl for the inlined type. - pub exact_paths: FxHashMap>, + crate exact_paths: FxHashMap>, /// This map contains information about all known traits of this crate. /// Implementations of a crate should inherit the documentation of the /// parent trait if no extra documentation is specified, and default methods /// should show up in documentation about trait implementations. - pub traits: FxHashMap, + crate traits: FxHashMap, /// When rendering traits, it's often useful to be able to list all /// implementors of the trait, and this mapping is exactly, that: a mapping /// of trait ids to the list of known implementors of the trait - pub implementors: FxHashMap>, + crate implementors: FxHashMap>, /// Cache of where external crate documentation can be found. - pub extern_locations: FxHashMap, + crate extern_locations: FxHashMap, /// Cache of where documentation for primitives can be found. - pub primitive_locations: FxHashMap, + crate primitive_locations: FxHashMap, // Note that external items for which `doc(hidden)` applies to are shown as // non-reachable while local items aren't. 
This is because we're reusing // the access levels from the privacy check pass. - pub access_levels: AccessLevels, + crate access_levels: AccessLevels, /// The version of the crate being documented, if given from the `--crate-version` flag. - pub crate_version: Option, + crate crate_version: Option, /// Whether to document private items. /// This is stored in `Cache` so it doesn't need to be passed through all rustdoc functions. - pub document_private: bool, + crate document_private: bool, // Private fields only used when initially crawling a crate to build a cache stack: Vec, @@ -98,17 +98,17 @@ pub struct Cache { stripped_mod: bool, masked_crates: FxHashSet, - pub search_index: Vec, - pub deref_trait_did: Option, - pub deref_mut_trait_did: Option, - pub owned_box_did: Option, + crate search_index: Vec, + crate deref_trait_did: Option, + crate deref_mut_trait_did: Option, + crate owned_box_did: Option, // In rare case where a structure is defined in one module but implemented // in another, if the implementing module is parsed before defining module, // then the fully qualified name of the structure isn't presented in `paths` // yet when its implementation methods are being indexed. Caches such methods // and their parent id here and indexes them at the end of crate parsing. - pub orphan_impl_items: Vec<(DefId, clean::Item)>, + crate orphan_impl_items: Vec<(DefId, clean::Item)>, // Similarly to `orphan_impl_items`, sometimes trait impls are picked up // even though the trait itself is not exported. This can happen if a trait @@ -121,11 +121,11 @@ pub struct Cache { /// Aliases added through `#[doc(alias = "...")]`. Since a few items can have the same alias, /// we need the alias element to have an array of items. - pub aliases: BTreeMap>, + crate aliases: BTreeMap>, } impl Cache { - pub fn from_krate( + crate fn from_krate( render_info: RenderInfo, document_private: bool, extern_html_root_urls: &BTreeMap, @@ -218,7 +218,7 @@ impl DocFolder for Cache { // If this is a stripped module, // we don't want it or its children in the search index. - let orig_stripped_mod = match item.inner { + let orig_stripped_mod = match item.kind { clean::StrippedItem(box clean::ModuleItem(..)) => { mem::replace(&mut self.stripped_mod, true) } @@ -227,7 +227,7 @@ impl DocFolder for Cache { // If the impl is from a masked crate or references something from a // masked crate then remove it completely. - if let clean::ImplItem(ref i) = item.inner { + if let clean::ImplItem(ref i) = item.kind { if self.masked_crates.contains(&item.def_id.krate) || i.trait_.def_id().map_or(false, |d| self.masked_crates.contains(&d.krate)) || i.for_.def_id().map_or(false, |d| self.masked_crates.contains(&d.krate)) @@ -238,12 +238,12 @@ impl DocFolder for Cache { // Propagate a trait method's documentation to all implementors of the // trait. - if let clean::TraitItem(ref t) = item.inner { + if let clean::TraitItem(ref t) = item.kind { self.traits.entry(item.def_id).or_insert_with(|| t.clone()); } // Collect all the implementors of traits. - if let clean::ImplItem(ref i) = item.inner { + if let clean::ImplItem(ref i) = item.kind { if let Some(did) = i.trait_.def_id() { if i.blanket_impl.is_none() { self.implementors @@ -256,7 +256,7 @@ impl DocFolder for Cache { // Index this method for searching later on. if let Some(ref s) = item.name { - let (parent, is_inherent_impl_item) = match item.inner { + let (parent, is_inherent_impl_item) = match item.kind { clean::StrippedItem(..) => ((None, None), false), clean::AssocConstItem(..) 
@@ -256,7 +256,7 @@ impl DocFolder for Cache {
         // Index this method for searching later on.
         if let Some(ref s) = item.name {
-            let (parent, is_inherent_impl_item) = match item.inner {
+            let (parent, is_inherent_impl_item) = match item.kind {
                 clean::StrippedItem(..) => ((None, None), false),
                 clean::AssocConstItem(..)
                 | clean::TypedefItem(_, true) if self.parent_is_trait_impl =>
@@ -345,7 +345,7 @@ impl DocFolder for Cache {
             _ => false,
         };

-        match item.inner {
+        match item.kind {
             clean::StructItem(..)
             | clean::EnumItem(..)
             | clean::TypedefItem(..)
@@ -384,7 +384,7 @@ impl DocFolder for Cache {
         // Maintain the parent stack
         let orig_parent_is_trait_impl = self.parent_is_trait_impl;
-        let parent_pushed = match item.inner {
+        let parent_pushed = match item.kind {
             clean::TraitItem(..)
             | clean::EnumItem(..)
             | clean::ForeignTypeItem
@@ -422,12 +422,12 @@ impl DocFolder for Cache {
         // Once we've recursively found all the generics, hoard off all the
         // implementations elsewhere.
         let ret = self.fold_item_recur(item).and_then(|item| {
-            if let clean::Item { inner: clean::ImplItem(_), .. } = item {
+            if let clean::Item { kind: clean::ImplItem(_), .. } = item {
                 // Figure out the id of this impl. This may map to a
                 // primitive rather than always to a struct/enum.
                 // Note: matching twice to restrict the lifetime of the `i` borrow.
                 let mut dids = FxHashSet::default();
-                if let clean::Item { inner: clean::ImplItem(ref i), .. } = item {
+                if let clean::Item { kind: clean::ImplItem(ref i), .. } = item {
                     match i.for_ {
                         clean::ResolvedPath { did, .. }
                         | clean::BorrowedRef {
diff --git a/src/librustdoc/formats/item_type.rs b/src/librustdoc/formats/item_type.rs
index 696bdae94fc..af512e37460 100644
--- a/src/librustdoc/formats/item_type.rs
+++ b/src/librustdoc/formats/item_type.rs
@@ -20,7 +20,7 @@ use crate::clean;
 /// a heading, edit the listing in `html/render.rs`, function `sidebar_module`. This uses an
 /// ordering based on a helper function inside `item_module`, in the same file.
 #[derive(Copy, PartialEq, Eq, Clone, Debug, PartialOrd, Ord)]
-pub enum ItemType {
+crate enum ItemType {
     Module = 0,
     ExternCrate = 1,
     Import = 2,
@@ -60,12 +60,12 @@ impl Serialize for ItemType {

 impl<'a> From<&'a clean::Item> for ItemType {
     fn from(item: &'a clean::Item) -> ItemType {
-        let inner = match item.inner {
+        let kind = match item.kind {
             clean::StrippedItem(box ref item) => item,
-            ref inner => inner,
+            ref kind => kind,
         };

-        match *inner {
+        match *kind {
             clean::ModuleItem(..) => ItemType::Module,
             clean::ExternCrateItem(..) => ItemType::ExternCrate,
             clean::ImportItem(..) => ItemType::Import,
@@ -124,7 +124,7 @@ impl From<DefKind> for ItemType {
 }

 impl ItemType {
-    pub fn as_str(&self) -> &'static str {
+    crate fn as_str(&self) -> &'static str {
         match *self {
             ItemType::Module => "mod",
             ItemType::ExternCrate => "externcrate",
diff --git a/src/librustdoc/formats/mod.rs b/src/librustdoc/formats/mod.rs
index dcb0184c58c..55fd4948f45 100644
--- a/src/librustdoc/formats/mod.rs
+++ b/src/librustdoc/formats/mod.rs
@@ -1,8 +1,8 @@
-pub mod cache;
-pub mod item_type;
-pub mod renderer;
+crate mod cache;
+crate mod item_type;
+crate mod renderer;

-pub use renderer::{run_format, FormatRenderer};
+crate use renderer::{run_format, FormatRenderer};

 use rustc_span::def_id::DefId;

@@ -11,7 +11,7 @@ use crate::clean::types::GetDefId;

 /// Specifies whether rendering directly implemented trait items or ones from a certain Deref
 /// impl.
-pub enum AssocItemRender<'a> {
+crate enum AssocItemRender<'a> {
     All,
     DerefFor { trait_: &'a clean::Type, type_: &'a clean::Type, deref_mut_: bool },
 }

@@ -19,26 +19,26 @@ pub enum AssocItemRender<'a> {
 /// For different handling of associated items from the Deref target of a type rather than the type
 /// itself.
 #[derive(Copy, Clone, PartialEq)]
-pub enum RenderMode {
+crate enum RenderMode {
     Normal,
     ForDeref { mut_: bool },
 }

 /// Metadata about implementations for a type or trait.
 #[derive(Clone, Debug)]
-pub struct Impl {
-    pub impl_item: clean::Item,
+crate struct Impl {
+    crate impl_item: clean::Item,
 }

 impl Impl {
-    pub fn inner_impl(&self) -> &clean::Impl {
-        match self.impl_item.inner {
+    crate fn inner_impl(&self) -> &clean::Impl {
+        match self.impl_item.kind {
             clean::ImplItem(ref impl_) => impl_,
             _ => panic!("non-impl item found in impl"),
         }
     }

-    pub fn trait_did(&self) -> Option<DefId> {
+    crate fn trait_did(&self) -> Option<DefId> {
         self.inner_impl().trait_.def_id()
     }
 }
diff --git a/src/librustdoc/formats/renderer.rs b/src/librustdoc/formats/renderer.rs
index 90ace4d44c4..d0fdc69cc19 100644
--- a/src/librustdoc/formats/renderer.rs
+++ b/src/librustdoc/formats/renderer.rs
@@ -10,7 +10,7 @@ use crate::formats::cache::{Cache, CACHE_KEY};
 /// Allows for different backends to rustdoc to be used with the `run_format()` function. Each
 /// backend renderer has hooks for initialization, documenting an item, entering and exiting a
 /// module, and cleanup/finalizing output.
-pub trait FormatRenderer: Clone {
+crate trait FormatRenderer: Clone {
     /// Sets up any state required for the renderer. When this is called the cache has already been
     /// populated.
     fn init(
@@ -43,7 +43,7 @@ pub trait FormatRenderer: Clone {
 }

 /// Main method for rendering a crate.
-pub fn run_format(
+crate fn run_format(
     krate: clean::Crate,
     options: RenderOptions,
     render_info: RenderInfo,
@@ -86,7 +86,7 @@ pub fn run_format(
             }

             cx.mod_item_in(&item, &name, &cache)?;
-            let module = match item.inner {
+            let module = match item.kind {
                 clean::StrippedItem(box clean::ModuleItem(m)) | clean::ModuleItem(m) => m,
                 _ => unreachable!(),
             };
diff --git a/src/librustdoc/html/escape.rs b/src/librustdoc/html/escape.rs
index 03660c32654..60c19551983 100644
--- a/src/librustdoc/html/escape.rs
+++ b/src/librustdoc/html/escape.rs
@@ -7,7 +7,7 @@ use std::fmt;

 /// Wrapper struct which will emit the HTML-escaped version of the contained
 /// string when passed to a format string.
-pub struct Escape<'a>(pub &'a str);
+crate struct Escape<'a>(pub &'a str);

 impl<'a> fmt::Display for Escape<'a> {
     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
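`Escape` only changes visibility here, but it is the pattern the formatting code below leans on: a borrowed newtype whose `Display` impl escapes as it writes, so callers can embed `Escape(text)` directly in a `write!` call without allocating an intermediate `String`. A simplified stand-alone version of that idea (not rustdoc's exact escaping table):

```rust
use std::fmt;

// Simplified sketch of the `Escape` wrapper pattern: escaping happens lazily
// inside `Display::fmt`, character by character, as the formatter is driven.
struct Escape<'a>(&'a str);

impl<'a> fmt::Display for Escape<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for c in self.0.chars() {
            match c {
                '<' => f.write_str("&lt;")?,
                '>' => f.write_str("&gt;")?,
                '&' => f.write_str("&amp;")?,
                '\'' => f.write_str("&#39;")?,
                '"' => f.write_str("&quot;")?,
                c => fmt::Write::write_char(f, c)?,
            }
        }
        Ok(())
    }
}

fn main() {
    assert_eq!(
        Escape(r#"<a href="x">&"#).to_string(),
        "&lt;a href=&quot;x&quot;&gt;&amp;"
    );
}
```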
diff --git a/src/librustdoc/html/format.rs b/src/librustdoc/html/format.rs
index d18282d6e67..536c2e08fde 100644
--- a/src/librustdoc/html/format.rs
+++ b/src/librustdoc/html/format.rs
@@ -11,7 +11,7 @@ use std::fmt;

 use rustc_data_structures::fx::FxHashSet;
 use rustc_hir as hir;
-use rustc_span::def_id::DefId;
+use rustc_span::def_id::{DefId, CRATE_DEF_INDEX};
 use rustc_target::spec::abi::Abi;

 use crate::clean::{self, PrimitiveType};
@@ -21,7 +21,7 @@ use crate::html::escape::Escape;
 use crate::html::render::cache::ExternalLocation;
 use crate::html::render::CURRENT_DEPTH;

-pub trait Print {
+crate trait Print {
     fn print(self, buffer: &mut Buffer);
 }

@@ -47,7 +47,7 @@ impl Print for &'_ str {
 }

 #[derive(Debug, Clone)]
-pub struct Buffer {
+crate struct Buffer {
     for_html: bool,
     buffer: String,
 }
@@ -115,28 +115,28 @@ impl Buffer {
 }

 /// Wrapper struct for properly emitting a function or method declaration.
-pub struct Function<'a> {
+crate struct Function<'a> {
     /// The declaration to emit.
-    pub decl: &'a clean::FnDecl,
+    crate decl: &'a clean::FnDecl,
     /// The length of the function header and name. In other words, the number of characters in the
     /// function declaration up to but not including the parentheses.
     ///
     /// Used to determine line-wrapping.
-    pub header_len: usize,
+    crate header_len: usize,
     /// The number of spaces to indent each successive line with, if line-wrapping is necessary.
-    pub indent: usize,
+    crate indent: usize,
     /// Whether the function is async or not.
-    pub asyncness: hir::IsAsync,
+    crate asyncness: hir::IsAsync,
 }

 /// Wrapper struct for emitting a where-clause from Generics.
-pub struct WhereClause<'a> {
+crate struct WhereClause<'a> {
     /// The Generics from which to emit a where-clause.
-    pub gens: &'a clean::Generics,
+    crate gens: &'a clean::Generics,
     /// The number of spaces to indent each line with.
-    pub indent: usize,
+    crate indent: usize,
     /// Whether the where-clause needs to add a comma and newline after the last bound.
-    pub end_newline: bool,
+    crate end_newline: bool,
 }

 fn comma_sep(items: impl Iterator) -> impl fmt::Display {
@@ -480,7 +480,7 @@ impl clean::Path {
     }
 }

-pub fn href(did: DefId) -> Option<(String, ItemType, Vec<String>)> {
+crate fn href(did: DefId) -> Option<(String, ItemType, Vec<String>)> {
     let cache = cache();
     if !did.is_local() && !cache.access_levels.is_public(did) && !cache.document_private {
         return None;
     }
@@ -618,7 +618,7 @@ fn tybounds(param_names: &Option<Vec<clean::GenericBound>>) -> impl fmt::Display
     })
 }

-pub fn anchor(did: DefId, text: &str) -> impl fmt::Display + '_ {
+crate fn anchor(did: DefId, text: &str) -> impl fmt::Display + '_ {
     display_fn(move |f| {
         if let Some((url, short_ty, fqp)) = href(did) {
             write!(
@@ -910,7 +910,7 @@ impl clean::Impl {
 }

 // The difference from above is that trait is not hyperlinked.
-pub fn fmt_impl_for_trait_page(i: &clean::Impl, f: &mut Buffer, use_absolute: bool) {
+crate fn fmt_impl_for_trait_page(i: &clean::Impl, f: &mut Buffer, use_absolute: bool) {
     f.from_display(i.print_inner(false, use_absolute))
 }

@@ -1089,19 +1089,31 @@ impl Function<'_> {
 impl clean::Visibility {
     crate fn print_with_space(&self) -> impl fmt::Display + '_ {
+        use rustc_span::symbol::kw;
+
         display_fn(move |f| match *self {
             clean::Public => f.write_str("pub "),
             clean::Inherited => Ok(()),
-            clean::Visibility::Crate => write!(f, "pub(crate) "),
+            // If this is `pub(crate)`, `path` will be empty.
+            clean::Visibility::Restricted(did, _) if did.index == CRATE_DEF_INDEX => {
+                write!(f, "pub(crate) ")
+            }
             clean::Visibility::Restricted(did, ref path) => {
                 f.write_str("pub(")?;
-                if path.segments.len() != 1
-                    || (path.segments[0].name != "self" && path.segments[0].name != "super")
+                debug!("path={:?}", path);
+                let first_name =
+                    path.data[0].data.get_opt_name().expect("modules are always named");
+                if path.data.len() != 1 || (first_name != kw::SelfLower && first_name != kw::Super)
                 {
                     f.write_str("in ")?;
                 }
-                resolved_path(f, did, path, true, false)?;
-                f.write_str(") ")
+                // modified from `resolved_path()` to work with `DefPathData`
+                let last_name = path.data.last().unwrap().data.get_opt_name().unwrap();
+                for seg in &path.data[..path.data.len() - 1] {
+                    write!(f, "{}::", seg.data.get_opt_name().unwrap())?;
+                }
+                let path = anchor(did, &last_name.as_str()).to_string();
+                write!(f, "{}) ", path)
             }
         })
     }
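The new `print_with_space` arms above special-case `pub(crate)` by checking whether the restriction's `DefId` is the crate root (`CRATE_DEF_INDEX`), and otherwise walk the `DefPathData` segments by hand instead of going through `resolved_path()`. The sketch below mirrors that control flow with simplified stand-in types: a plain index plus a `Vec<String>` path instead of rustc's `DefId`/`DefPathData`, and no hyperlink anchors.

```rust
// Stand-in types for illustration; the real code formats into a `Formatter`
// and links the last segment with `anchor()`.
struct FakeDefId {
    index: usize,
}
const CRATE_DEF_INDEX: usize = 0;

enum Visibility {
    Public,
    Inherited,
    Restricted(FakeDefId, Vec<String>), // restriction target and its module path
}

fn print_with_space(vis: &Visibility) -> String {
    match vis {
        Visibility::Public => "pub ".to_string(),
        Visibility::Inherited => String::new(),
        // `pub(crate)` restricts to the crate root, whose path is empty.
        Visibility::Restricted(did, _) if did.index == CRATE_DEF_INDEX => {
            "pub(crate) ".to_string()
        }
        Visibility::Restricted(_, path) => {
            let mut out = String::from("pub(");
            let first = path.first().map(String::as_str);
            // `pub(self)` and `pub(super)` are printed without `in`.
            if path.len() != 1 || (first != Some("self") && first != Some("super")) {
                out.push_str("in ");
            }
            out.push_str(&path.join("::"));
            out.push_str(") ");
            out
        }
    }
}

fn main() {
    assert_eq!(print_with_space(&Visibility::Public), "pub ");
    assert_eq!(print_with_space(&Visibility::Inherited), "");
    let krate = Visibility::Restricted(FakeDefId { index: CRATE_DEF_INDEX }, vec![]);
    assert_eq!(print_with_space(&krate), "pub(crate) ");
    let module = Visibility::Restricted(FakeDefId { index: 7 }, vec!["a".into(), "b".into()]);
    assert_eq!(print_with_space(&module), "pub(in a::b) ");
    let sup = Visibility::Restricted(FakeDefId { index: 3 }, vec!["super".into()]);
    assert_eq!(print_with_space(&sup), "pub(super) ");
}
```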
diff --git a/src/librustdoc/html/highlight.rs b/src/librustdoc/html/highlight.rs
index 4769edc50ff..1cbfbf50dd7 100644
--- a/src/librustdoc/html/highlight.rs
+++ b/src/librustdoc/html/highlight.rs
@@ -15,7 +15,7 @@ use rustc_span::symbol::Ident;
 use rustc_span::with_default_session_globals;

 /// Highlights `src`, returning the HTML output.
-pub fn render_with_highlighting(
+crate fn render_with_highlighting(
     src: String,
     class: Option<&str>,
     playground_button: Option<&str>,
@@ -46,7 +46,9 @@ fn write_header(out: &mut String, class: Option<&str>) {
 }

 fn write_code(out: &mut String, src: &str) {
-    Classifier::new(src).highlight(&mut |highlight| {
+    // This replace allows to fix how the code source with DOS backline characters is displayed.
+    let src = src.replace("\r\n", "\n");
+    Classifier::new(&src).highlight(&mut |highlight| {
         match highlight {
             Highlight::Token { text, class } => string(out, Escape(text), class),
             Highlight::EnterSpan { class } => enter_span(out, class),
@@ -62,7 +64,6 @@ fn write_footer(out: &mut String, playground_button: Option<&str>) {
 /// How a span of text is classified. Mostly corresponds to token kinds.
 #[derive(Clone, Copy, Debug, Eq, PartialEq)]
 enum Class {
-    None,
     Comment,
     DocComment,
     Attribute,
@@ -87,7 +88,6 @@ impl Class {
     /// Returns the css class expected by rustdoc for each `Class`.
     fn as_html(self) -> &'static str {
         match self {
-            Class::None => "",
             Class::Comment => "comment",
             Class::DocComment => "doccomment",
             Class::Attribute => "attribute",
@@ -110,7 +110,7 @@ impl Class {
 }

 enum Highlight<'a> {
-    Token { text: &'a str, class: Class },
+    Token { text: &'a str, class: Option<Class> },
     EnterSpan { class: Class },
     ExitSpan,
 }
@@ -164,8 +164,9 @@ impl<'a> Classifier<'a> {
     /// a couple of following ones as well.
     fn advance(&mut self, token: TokenKind, text: &'a str, sink: &mut dyn FnMut(Highlight<'a>)) {
         let lookahead = self.peek();
+        let no_highlight = |sink: &mut dyn FnMut(_)| sink(Highlight::Token { text, class: None });
         let class = match token {
-            TokenKind::Whitespace => Class::None,
+            TokenKind::Whitespace => return no_highlight(sink),
             TokenKind::LineComment { doc_style } | TokenKind::BlockComment { doc_style, .. } => {
                 if doc_style.is_some() {
                     Class::DocComment
@@ -190,12 +191,12 @@ impl<'a> Classifier<'a> {
             TokenKind::And => match lookahead {
                 Some(TokenKind::And) => {
                     let _and = self.tokens.next();
-                    sink(Highlight::Token { text: "&&", class: Class::Op });
+                    sink(Highlight::Token { text: "&&", class: Some(Class::Op) });
                     return;
                 }
                 Some(TokenKind::Eq) => {
                     let _eq = self.tokens.next();
-                    sink(Highlight::Token { text: "&=", class: Class::Op });
+                    sink(Highlight::Token { text: "&=", class: Some(Class::Op) });
                     return;
                 }
                 Some(TokenKind::Whitespace) => Class::Op,
@@ -226,7 +227,7 @@ impl<'a> Classifier<'a> {
             | TokenKind::At
             | TokenKind::Tilde
             | TokenKind::Colon
-            | TokenKind::Unknown => Class::None,
+            | TokenKind::Unknown => return no_highlight(sink),

             TokenKind::Question => Class::QuestionMark,

@@ -235,7 +236,7 @@ impl<'a> Classifier<'a> {
                     self.in_macro_nonterminal = true;
                     Class::MacroNonTerminal
                 }
-                _ => Class::None,
+                _ => return no_highlight(sink),
             },

             // This might be the start of an attribute. We're going to want to
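The refactor above removes the in-band `Class::None` variant and threads `Option<Class>` through `Highlight::Token` and `string` instead, so "don't highlight this token" can no longer be confused with a real CSS class and `as_html` stays total. A minimal sketch of the resulting shape (the names and the class set are illustrative, not rustdoc's full list):

```rust
// Illustrative sketch: the absence of highlighting is expressed in the type
// (`Option<Class>`) rather than as a sentinel enum variant.
#[derive(Clone, Copy)]
enum Class {
    Comment,
    KeyWord,
}

impl Class {
    fn as_html(self) -> &'static str {
        match self {
            Class::Comment => "comment",
            Class::KeyWord => "kw",
        }
    }
}

fn string(out: &mut String, text: &str, klass: Option<Class>) {
    match klass {
        // No class: emit the text as-is.
        None => out.push_str(text),
        // A real class: wrap the text in a span carrying the CSS class.
        Some(klass) => {
            out.push_str(&format!("<span class=\"{}\">{}</span>", klass.as_html(), text))
        }
    }
}

fn main() {
    let mut out = String::new();
    string(&mut out, "fn", Some(Class::KeyWord));
    string(&mut out, " ", None);
    string(&mut out, "main", None);
    assert_eq!(out, "<span class=\"kw\">fn</span> main");
}
```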
@@ -251,8 +252,8 @@ impl<'a> Classifier<'a> {
                     self.in_attribute = true;
                     sink(Highlight::EnterSpan { class: Class::Attribute });
                 }
-                sink(Highlight::Token { text: "#", class: Class::None });
-                sink(Highlight::Token { text: "!", class: Class::None });
+                sink(Highlight::Token { text: "#", class: None });
+                sink(Highlight::Token { text: "!", class: None });
                 return;
             }
             // Case 2: #[outer_attribute]
@@ -262,16 +263,16 @@ impl<'a> Classifier<'a> {
                     }
                     _ => (),
                 }
-                Class::None
+                return no_highlight(sink);
             }
             TokenKind::CloseBracket => {
                 if self.in_attribute {
                     self.in_attribute = false;
-                    sink(Highlight::Token { text: "]", class: Class::None });
+                    sink(Highlight::Token { text: "]", class: None });
                     sink(Highlight::ExitSpan);
                     return;
                 }
-                Class::None
+                return no_highlight(sink);
             }
             TokenKind::Literal { kind, .. } => match kind {
                 // Text literals.
@@ -307,7 +308,7 @@ impl<'a> Classifier<'a> {
         };
         // Anything that didn't return above is the simple case where we the
         // class just spans a single token, so we can use the `string` method.
-        sink(Highlight::Token { text, class });
+        sink(Highlight::Token { text, class: Some(class) });
     }

     fn peek(&mut self) -> Option<TokenKind> {
@@ -337,10 +338,10 @@ fn exit_span(out: &mut String) {
 /// ```
 /// The latter can be thought of as a shorthand for the former, which is more
 /// flexible.
-fn string<T: Display>(out: &mut String, text: T, klass: Class) {
+fn string<T: Display>(out: &mut String, text: T, klass: Option<Class>) {
     match klass {
-        Class::None => write!(out, "{}", text).unwrap(),
-        klass => write!(out, "<span class=\"{}\">{}</span>", klass.as_html(), text).unwrap(),
+        None => write!(out, "{}", text).unwrap(),
+        Some(klass) => write!(out, "<span class=\"{}\">{}</span>", klass.as_html(), text).unwrap(),
     }
 }
diff --git a/src/librustdoc/html/highlight/fixtures/dos_line.html b/src/librustdoc/html/highlight/fixtures/dos_line.html
new file mode 100644
index 00000000000..4400f85681d
--- /dev/null
+++ b/src/librustdoc/html/highlight/fixtures/dos_line.html
@@ -0,0 +1,3 @@
+pub fn foo() {
+println!("foo");
+}
diff --git a/src/librustdoc/html/highlight/tests.rs b/src/librustdoc/html/highlight/tests.rs
index c79471b1fae..f57f52d6f08 100644
--- a/src/librustdoc/html/highlight/tests.rs
+++ b/src/librustdoc/html/highlight/tests.rs
@@ -1,17 +1,6 @@
 use super::write_code;
 use expect_test::expect_file;

-#[test]
-fn test_html_highlighting() {
-    let src = include_str!("fixtures/sample.rs");
-    let html = {
-        let mut out = String::new();
-        write_code(&mut out, src);
-        format!("{}
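The diff is cut off in the middle of the old inline test, but the new `dos_line.html` fixture pairs with the `\r\n` normalization added to `write_code` above: highlighting CRLF source should produce the same HTML as LF source. A sketch of what a fixture-based test of that behaviour could look like inside this test module; the test name and input literal are assumptions, and only `write_code` and `expect_file!` come from the imports shown above.

```rust
// Sketch only: an expect-test style check that CRLF input is normalized
// before highlighting, compared against the committed fixture file.
#[test]
fn test_dos_backline() {
    let src = "pub fn foo() {\r\nprintln!(\"foo\");\r\n}\r\n";
    let mut html = String::new();
    write_code(&mut html, src);
    expect_file!["fixtures/dos_line.html"].assert_eq(&html);
}
```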