diff --git a/.config/dotnet-tools.json b/.config/dotnet-tools.json
index 7b38e6d8507882..02e0543e98b8e4 100644
--- a/.config/dotnet-tools.json
+++ b/.config/dotnet-tools.json
@@ -15,7 +15,7 @@
]
},
"microsoft.dotnet.xharness.cli": {
- "version": "1.0.0-prerelease.21281.2",
+ "version": "1.0.0-prerelease.21314.1",
"commands": [
"xharness"
]
diff --git a/Directory.Build.props b/Directory.Build.props
index c0adfc01c6439b..e2c0de947f3364 100644
--- a/Directory.Build.props
+++ b/Directory.Build.props
@@ -83,6 +83,7 @@
$([MSBuild]::NormalizePath('$(WasmBuildTasksDir)', 'WasmBuildTasks.dll'))
$([MSBuild]::NormalizePath('$(MonoAOTCompilerDir)', 'MonoAOTCompiler.dll'))
$([MSBuild]::NormalizePath('$(RuntimeConfigParserDir)', 'RuntimeConfigParser.dll'))
+ $([MSBuild]::NormalizeDirectory('$(ArtifactsBinDir)', 'coreclr', '$(TargetOS).$(TargetArchitecture).$(Configuration)'))
@@ -202,6 +203,17 @@
true
+
+ $([MSBuild]::NormalizeDirectory('$(ArtifactsBinDir)', 'microsoft.netcore.app.ref'))
+ $([MSBuild]::NormalizeDirectory('$(MicrosoftNetCoreAppRefPackDir)', 'ref', '$(NetCoreAppCurrent)'))
+ $([MSBuild]::NormalizeDirectory('$(MicrosoftNetCoreAppRefPackDir)', 'data'))
+
+ $([MSBuild]::NormalizeDirectory('$(ArtifactsBinDir)', 'microsoft.netcore.app.runtime.$(PackageRID)', '$(Configuration)'))
+ $([MSBuild]::NormalizeDirectory('$(MicrosoftNetCoreAppRuntimePackDir)', 'runtimes', '$(PackageRID)'))
+ $([MSBuild]::NormalizeDirectory('$(MicrosoftNetCoreAppRuntimePackRidDir)', 'lib', '$(NetCoreAppCurrent)'))
+ $([MSBuild]::NormalizeDirectory('$(MicrosoftNetCoreAppRuntimePackRidDir)', 'native'))
+
+
true
@@ -226,6 +238,9 @@
$([MSBuild]::NormalizeDirectory('$(LibrariesProjectRoot)', 'System.Private.CoreLib', 'src'))
$([MSBuild]::NormalizePath('$(CoreClrProjectRoot)', 'System.Private.CoreLib', 'System.Private.CoreLib.csproj'))
$([MSBuild]::NormalizePath('$(MonoProjectRoot)', 'System.Private.CoreLib', 'System.Private.CoreLib.csproj'))
+
+
+ true
diff --git a/THIRD-PARTY-NOTICES.TXT b/THIRD-PARTY-NOTICES.TXT
index b0694275b3dbb3..14c806c5ca3e4d 100644
--- a/THIRD-PARTY-NOTICES.TXT
+++ b/THIRD-PARTY-NOTICES.TXT
@@ -680,7 +680,7 @@ worldwide. This software is distributed without any warranty.
See <http://creativecommons.org/publicdomain/zero/1.0/>.
-License for fastmod (https://github.com/lemire/fastmod)
+License for fastmod (https://github.com/lemire/fastmod) and ibm-fpgen (https://github.com/nigeltao/parse-number-fxx-test-data)
--------------------------------------
Copyright 2018 Daniel Lemire
@@ -952,3 +952,29 @@ by constants, including codegen instructions. The unsigned division incorporates
"round down" optimization per ridiculous_fish.
This is free and unencumbered software. Any copyright is dedicated to the Public Domain.
+
+
+License notice for mimalloc
+-----------------------------------
+
+MIT License
+
+Copyright (c) 2019 Microsoft Corporation, Daan Leijen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/docs/coding-guidelines/libraries-packaging.md b/docs/coding-guidelines/libraries-packaging.md
index caf447ca598a10..51c5424583e686 100644
--- a/docs/coding-guidelines/libraries-packaging.md
+++ b/docs/coding-guidelines/libraries-packaging.md
@@ -8,9 +8,9 @@ To add a library to the .NETCore shared framework, that library's `AssemblyName`
The library should have both a `ref` and `src` project. Its reference assembly will be included in the ref-pack for the Microsoft.NETCore.App shared framework, and its implementation assembly will be included in the runtime pack.
-Including a library in the shared framework only includes the best applicable TargetFramework build of that library: `$(NetCoreAppCurrent)` if it exists, but possibly `netstandard2.1` or another if that is best. If a library has builds for other frameworks those will only be shipped if the library also produces a [Nuget package](#nuget-package). If a library ships both in the shared framework and a nuget package, it may decide to exclude its latest `$(NetCoreAppCurrent)` build from the package. This can be done by setting `ExcludeCurrentNetCoreAppFromPackage` to true. Libraries should take care when doing this to ensure that whatever asset in the package that would apply to `$(NetCoreAppCurrent)` is functionally equivalent to that which it replaces from the shared framework, to avoid breaking applications which reference a newer package than the shared framework. If possible, it's preferable to avoid this by choosing to target frameworks which can both ship in the package and shared framework.
+Including a library in the shared framework only includes the best applicable TargetFramework build of that library: `$(NetCoreAppCurrent)` if it exists, but possibly `netstandard2.1` or another if that is best. If a library has builds for other frameworks, those will only be shipped if the library also produces a [NuGet package](#nuget-package).
-In some occasions we may want to include a library in the shared framework, but not expose it publicly. To do so, include the library in the `NetCoreAppLibraryNoReference` property in [NetCoreAppLibrary.props](../../src/libraries/NetCoreAppLibrary.props). The library should also be named in a way to discourage use at runtime, for example using the `System.Private` prefix. We should avoid hiding arbitrary public libraries as it complicates deployment and servicing, though some platform specific libraries are in this state due to historical reasons.
+On some occasions we may want to include a library in the shared framework, but not expose it publicly. The library should be named in a way that discourages use at runtime, for example using the `System.Private` prefix. We should avoid hiding arbitrary public libraries as it complicates deployment and servicing.
Libraries included in the shared framework should ensure all direct and transitive assembly references are also included in the shared framework. This will be validated as part of the build and errors raised if any dependencies are unsatisfied.
@@ -61,13 +61,6 @@ By default all TargetFrameworks listed in your project will be included in the p
```
-A common pattern is to build for the latest .NET version, for example to include a library in the shared framework or a transport package, but then excluded this from the NuGet package. This can be done to avoid growing the NuGet package in size. To do this set
-```xml
-
- true
-
-```
-
When excluding TargetFrameworks from a package special care should be taken to ensure that the builds included are equivalent to those excluded. Avoid ifdef'ing the implementation only in an excluded TargetFramework. Doing so will result in testing something different than what we ship, or shipping a nuget package that degrades the shared framework.
### Build props / targets and other content
diff --git a/docs/coding-guidelines/project-guidelines.md b/docs/coding-guidelines/project-guidelines.md
index c0a79248d01c5e..3e974772a671d9 100644
--- a/docs/coding-guidelines/project-guidelines.md
+++ b/docs/coding-guidelines/project-guidelines.md
@@ -120,9 +120,10 @@ Library projects should use the following directory layout.
```
src\\src - Contains the source code for the library.
-src\\ref - Contains any reference assembly projects for the library
+src\\ref - Contains any reference assembly projects for the library.
src\\pkg - Contains package projects for the library.
-src\\tests - Contains the test code for a library
+src\\tests - Contains the test code for a library.
+src\\gen - Contains source code for the assembly's source generator.
```
## ref
@@ -163,6 +164,9 @@ All test outputs should be under
`bin\$(MSBuildProjectName)\$(TargetFramework)`
+## gen
+In the gen directory any source generator related to the assembly should exist. This does not mean the source generator is used only for that assembly, just that it is conceptually a part of that assembly. For example, the assembly may provide attributes or low-level types the source generator uses.
+
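+As a hypothetical sketch (the type and generator names are invented for illustration), the `src` assembly defines an attribute and the generator in `gen` expands types marked with it:
+
+```csharp
+using Microsoft.CodeAnalysis;
+
+// src: the assembly ships the attribute that users apply.
+// [AttributeUsage(AttributeTargets.Class)]
+// public sealed class AutoToStringAttribute : Attribute { }
+
+// gen: the source generator that recognizes that attribute.
+[Generator]
+public class AutoToStringGenerator : ISourceGenerator
+{
+    public void Initialize(GeneratorInitializationContext context) { }
+
+    public void Execute(GeneratorExecutionContext context)
+    {
+        // Locate types marked with AutoToStringAttribute and emit source for them.
+    }
+}
+```
+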
## Facades
Facades are unique in that they don't have any code and instead are generated by finding a contract reference assembly with the matching identity and generating type forwards for all the types to where they live in the implementation assemblies (aka facade seeds). There are also partial facades which contain some type forwards as well as some code definitions. All the various build configurations should be contained in the one csproj file per library.
diff --git a/docs/design/coreclr/botr/intro-to-clr.md b/docs/design/coreclr/botr/intro-to-clr.md
index d7bb57f233a930..7626ff47f22bd0 100644
--- a/docs/design/coreclr/botr/intro-to-clr.md
+++ b/docs/design/coreclr/botr/intro-to-clr.md
@@ -110,7 +110,7 @@ In addition, there is another important ramification of managed code that may no
The result of this is that unmanaged interfaces are almost always _wrapped_ before being exposed to managed code developers. For example, when accessing files, you don't use the Win32 CreateFile functions provided by the operating system, but rather the managed System.IO.File class that wraps this functionality. It is in fact extremely rare that unmanaged functionality is exposed to users directly.
-While this wrapping may seem to be "bad" in some way (more code that does not seem do much), it is in fact good because it actually adds quite a bit of value. Remember it was always _possible_ to expose the unmanaged interfaces directly; we _chose_ to wrap the functionality. Why? Because the overarching goal of the runtime is to **make programming easy**, and typically the unmanaged functions are not easy enough. Most often, unmanaged interfaces are _not_ designed with ease of use in mind, but rather are tuned for completeness. Anyone looking at the arguments to CreateFile or CreateProcess would be hard pressed to characterize them as "easy." Luckily, the functionality gets a "facelift" when it enters the managed world, and while this makeover is often very "low tech" (requiring nothing more complex than renaming, simplification, and organizing the functionality), it is also profoundly useful. One of the very important documents created for the CLR is the [Framework Design Guidelines][fx-design-guidelines]. This 800+ page document details best practices in making new managed class libraries.
+While this wrapping may seem to be "bad" in some way (more code that does not seem to do much), it is in fact good because it actually adds quite a bit of value. Remember it was always _possible_ to expose the unmanaged interfaces directly; we _chose_ to wrap the functionality. Why? Because the overarching goal of the runtime is to **make programming easy**, and typically the unmanaged functions are not easy enough. Most often, unmanaged interfaces are _not_ designed with ease of use in mind, but rather are tuned for completeness. Anyone looking at the arguments to CreateFile or CreateProcess would be hard pressed to characterize them as "easy." Luckily, the functionality gets a "facelift" when it enters the managed world, and while this makeover is often very "low tech" (requiring nothing more complex than renaming, simplification, and organizing the functionality), it is also profoundly useful. One of the very important documents created for the CLR is the [Framework Design Guidelines][fx-design-guidelines]. This 800+ page document details best practices in making new managed class libraries.
Thus, we have now seen that managed code (which is intimately involved with the CLR) differs from unmanaged code in two important ways:
@@ -204,7 +204,7 @@ As an aside, while exceptions avoid one common error (not checking for failure),
Previous to version 2.0 of the CLR, the only parameterized types were arrays. All other containers (such as hash tables, lists, queues, etc.), all operated on a generic Object type. The inability to create List, or Dictionary certainly had a negative performance effect because value types needed to be boxed on entry to a collection, and explicit casting was needed on element fetch. Nevertheless, that is not the overriding reason for adding parameterized types to the CLR. The main reason is that **parameterized types make programming easier**.
-The reason for this is subtle. The easiest way to see the effect is to imagine what a class library would look like if all types were replaced with a generic Object type. This effect is not unlike what happens in dynamically typed languages like JavaScript. In such a world, there are simply far more ways for a programmer to make incorrect (but type-safe) programs. Is the parameter for that method supposed to be a list? a string? an integer? any of the above? It is no longer obvious from looking at the method's signature. Worse, when a method returns an Object, what other methods can accept it as a parameter? Typical frameworks have hundreds of methods; if they all take parameters of type Object, it becomes very difficult to determine which Object instances are valid for the operations the method will perform. In short, strong typing helps a programmer express their intent more clearly, and allows tools (e.g., the compiler) to enforce their intent. This results in big productivity boost.
+The reason for this is subtle. The easiest way to see the effect is to imagine what a class library would look like if all types were replaced with a generic Object type. This effect is not unlike what happens in dynamically typed languages like JavaScript. In such a world, there are simply far more ways for a programmer to make incorrect (but type-safe) programs. Is the parameter for that method supposed to be a list? a string? an integer? any of the above? It is no longer obvious from looking at the method's signature. Worse, when a method returns an Object, what other methods can accept it as a parameter? Typical frameworks have hundreds of methods; if they all take parameters of type Object, it becomes very difficult to determine which Object instances are valid for the operations the method will perform. In short, strong typing helps a programmer express their intent more clearly, and allows tools (e.g., the compiler) to enforce their intent. This results in a big productivity boost.
These benefits do not disappear just because the type gets put into a List or a Dictionary, so clearly parameterized types have value. The only real question is whether parameterized types are best thought of as a language specific feature which is "compiled out" by the time CIL is generated, or whether this feature should have first class support in the runtime. Either implementation is certainly possible. The CLR team chose first class support because without it, parameterized types would be implemented different ways by different languages. This would imply that interoperability would be cumbersome at best. In addition, expressing programmer intent for parameterized types is most valuable _at the interface_ of a class library. If the CLR did not officially support parameterized types, then class libraries could not use them, and an important usability feature would be lost.
diff --git a/docs/design/coreclr/botr/readytorun-format.md b/docs/design/coreclr/botr/readytorun-format.md
index f8774aa2cb9c09..63b032326403e4 100644
--- a/docs/design/coreclr/botr/readytorun-format.md
+++ b/docs/design/coreclr/botr/readytorun-format.md
@@ -6,6 +6,7 @@ Revisions:
* 3.1 - [Tomas Rylek](https://github.com/trylek) - 2019
* 4.1 - [Tomas Rylek](https://github.com/trylek) - 2020
* 5.3 - [Tomas Rylek](https://github.com/trylek) - 2021
+* 5.4 - [David Wrighton](https://github.com/davidwrighton) - 2021
# Introduction
@@ -251,6 +252,10 @@ fixup kind, the rest of the signature varies based on the fixup kind.
| READYTORUN_FIXUP_IndirectPInvokeTarget | 0x2E | Target (indirect) of an inlined PInvoke. Followed by method signature.
| READYTORUN_FIXUP_PInvokeTarget | 0x2F | Target of an inlined PInvoke. Followed by method signature.
| READYTORUN_FIXUP_Check_InstructionSetSupport | 0x30 | Specify the instruction sets that must be supported/unsupported to use the R2R code associated with the fixup.
| READYTORUN_FIXUP_Verify_FieldOffset | 0x31 | Generate a runtime check to ensure that the field offset matches between compile and runtime. Unlike CheckFieldOffset, this will generate a runtime exception on failure instead of silently dropping the method
| READYTORUN_FIXUP_Verify_TypeLayout | 0x32 | Generate a runtime check to ensure that the type layout matches between compile and runtime. Unlike CheckTypeLayout, this will generate a runtime exception on failure instead of silently dropping the method
| READYTORUN_FIXUP_Check_VirtualFunctionOverride | 0x33 | Generate a runtime check to ensure that virtual function resolution has equivalent behavior at runtime as at compile time. If not equivalent, code will not be used. See [Virtual override signatures](#virtual-override-signatures) for details of the signature used.
| READYTORUN_FIXUP_Verify_VirtualFunctionOverride | 0x34 | Generate a runtime check to ensure that virtual function resolution has equivalent behavior at runtime as at compile time. If not equivalent, generate runtime failure. See [Virtual override signatures](#virtual-override-signatures) for details of the signature used.
| READYTORUN_FIXUP_ModuleOverride | 0x80 | When or-ed to the fixup ID, the fixup byte in the signature is followed by an encoded uint with assemblyref index, either within the MSIL metadata of the master context module for the signature or within the manifest metadata R2R header table (used in cases inlining brings in references to assemblies not seen in the input MSIL).
#### Method Signatures
@@ -268,6 +273,7 @@ token, and additional data determined by the flags.
| READYTORUN_METHOD_SIG_MemberRefToken | 0x10 | If set, the token is memberref token. If not set, the token is methoddef token.
| READYTORUN_METHOD_SIG_Constrained | 0x20 | Constrained type for method resolution. Typespec appended as additional data.
| READYTORUN_METHOD_SIG_OwnerType | 0x40 | Method type. Typespec appended as additional data.
+| READYTORUN_METHOD_SIG_UpdateContext | 0x80 | If set, update the module which is used to parse tokens before performing any token processing. A uint index into the modules table immediately follows the flags
#### Field Signatures
@@ -281,6 +287,16 @@ additional data determined by the flags.
| READYTORUN_FIELD_SIG_MemberRefToken | 0x10 | If set, the token is memberref token. If not set, the token is fielddef token.
| READYTORUN_FIELD_SIG_OwnerType | 0x40 | Field type. Typespec appended as additional data.
+#### Virtual override signatures
+
+ECMA 335 does not have a natural encoding for describing an overridden method. These signatures are encoded as a ReadyToRunVirtualFunctionOverrideFlags byte, followed by a method signature representing the declaration method, a type signature representing the type which is being devirtualized, and (optionally) a method signature indicating the implementation method.
+
+| ReadyToRunVirtualFunctionOverrideFlags | Value | Description
+|:------------------------------------------------------|------:|:-----------
+| READYTORUN_VIRTUAL_OVERRIDE_None | 0x00 | No flags are set
+| READYTORUN_VIRTUAL_OVERRIDE_VirtualFunctionOverriden | 0x01 | If set, then the virtual function has an implementation, which is encoded in the optional method implementation signature.
+
+
### READYTORUN_IMPORT_SECTIONS::AuxiliaryData
For slots resolved lazily via `READYTORUN_HELPER_DelayLoad_MethodCall` helper, auxiliary data are
diff --git a/docs/design/coreclr/botr/type-loader.md b/docs/design/coreclr/botr/type-loader.md
index b873d8541a012f..7c8b34627665be 100644
--- a/docs/design/coreclr/botr/type-loader.md
+++ b/docs/design/coreclr/botr/type-loader.md
@@ -76,21 +76,25 @@ There is a relatively small number of entry-points to the loader. Although the s
There are usually many calls to the type loader during JITting. Consider:
- object CreateClass()
- {
- return new MyClass();
- }
+```csharp
+object CreateClass()
+{
+ return new MyClass();
+}
+```
In the IL, MyClass is referred to using a metadata token. In order to generate a call to the `JIT_New` helper which takes care of the actual instantiation, the JIT will ask the type loader to load the type and return a handle to it. This handle will be then directly embedded in the JITted code as an immediate value. The fact that types and members are usually resolved and loaded at JIT time and not at run-time also explains the sometimes confusing behavior easily hit with code like this:
- object CreateClass()
- {
- try {
- return new MyClass();
- } catch (TypeLoadException) {
- return null;
- }
- }
+```csharp
+object CreateClass()
+{
+ try {
+ return new MyClass();
+ } catch (TypeLoadException) {
+ return null;
+ }
+}
+```
If `MyClass` fails to load, for example because it's supposed to be defined in another assembly and it was accidentally removed in the newest build, then this code will still throw `TypeLoadException`. The reason that the catch block did not catch it is that it never ran! The exception occurred during JITting and would only be catchable in the method that called `CreateClass` and caused it to be JITted. In addition, it may not be always obvious at which point the JITting is triggered due to inlining, so users should not expect and rely on deterministic behavior.
@@ -153,14 +157,16 @@ both the same type.
When the type loader is asked to load a specified type, identified for example by a typedef/typeref/typespec **token** and a **Module** , it does not do all the work atomically at once. The loading is done in phases instead. The reason for this is that the type usually depends on other types and requiring it to be fully loaded before it can be referred to by other types would result in infinite recursion and deadlocks. Consider:
- class A<T> : C<B<T>>
- { }
+```csharp
+class A<T> : C<B<T>>
+{ }
- class B<T> : C<A<T>>
- { }
+class B<T> : C<A<T>>
+{ }
- class C<T>
- { }
+class C<T>
+{ }
+```
These are valid types and apparently `A` depends on `B` and `B` depends on `A`.
@@ -195,10 +201,12 @@ A placeholder to be substituted by another type; the `T` in the declaration of `
A type being substituted for a generic parameter; the `int` in `List<int>`. Note that a generic parameter can also be used as an argument. Consider:
- List<T> GetList<T>()
- {
- return new List<T>();
- }
+```csharp
+List<T> GetList<T>()
+{
+    return new List<T>();
+}
+```
The method has one generic parameter `T` which is used as a generic argument for the generic list class.
@@ -209,28 +217,38 @@ An optional requirement placed by generic parameters on its potential generic ar
1. Special constraints
- Reference type constraint - the generic argument must be a reference type (as opposed to a value type). The `class` keyword is used in C# to express this constraint.
- public class A<T> where T : class
+ ```csharp
+ public class A<T> where T : class
+ ```
- Value type constraint - the generic argument must be a value type different from `System.Nullable`. C# uses the `struct` keyword.
- public class A<T> where T : struct
+ ```csharp
+ public class A<T> where T : struct
+ ```
- Default constructor constraint - the generic argument must have a public parameterless constructor. This is expressed by `new()` in C#.
- public class A<T> where T : new()
+ ```csharp
+ public class A<T> where T : new()
+ ```
2. Base type constraints - the generic argument must be derived from
(or directly be of) the given non-interface type. It obviously makes
sense to use only zero or one reference type as a base types
constraint.
- public class A<T> where T : EventArgs
+ ```csharp
+ public class A<T> where T : EventArgs
+ ```
3. Implemented interface constraints - the generic argument must
implement (or directly be of) the given interface type. Zero or more
interfaces can be given.
- public class A<T> where T : ICloneable, IComparable
+ ```csharp
+ public class A<T> where T : ICloneable, IComparable
+ ```
The above constraints are combined with an implicit AND, i.e. a
generic parameter can be constrained to be derived from a given type,
@@ -239,11 +257,13 @@ generic parameters of the declaring type can be used to express the
constraints, introducing interdependencies among the parameters. For
example:
- public class A<S, T>
- where S : T
- where T : IList<S> {
- void f<V>(V v) where V : S {}
- }
+```csharp
+public class A<S, T>
+    where S : T
+    where T : IList<S> {
+    void f<V>(V v) where V : S {}
+}
+```
+```
**Instantiation**
@@ -259,7 +279,9 @@ declared. There exists exactly one typical instantiation for each
generic type and method. Usually when one talks about an open generic
type, they have the typical instantiation in mind. Example:
- public class A<S,T,U> {}
+```csharp
+public class A<S,T,U> {}
+```
The C# `typeof(A<,,>)` compiles to ``ldtoken A`3`` which makes the
runtime load ``A`3`` instantiated at `S`, `T`, `U`.
diff --git a/docs/design/features/cross-dac.md b/docs/design/features/cross-dac.md
new file mode 100644
index 00000000000000..30becca5be6267
--- /dev/null
+++ b/docs/design/features/cross-dac.md
@@ -0,0 +1,97 @@
+# Cross DAC Notes
+
+The `crossdac` is a cross-compiled DAC. It is compiled to execute on one platform, but debug a target of a different architecture.
+
+Our current crossdacs are all:
+
+- compiled to run on Windows
+- same bitness (target and host have the same number of bits)
+- target a *nix variant
+
+The crossdac allows us to use Windows debugging tools to debug dumps from *nix processes.
+
+## Design
+
+### Limitations
+
+- To avoid solving remoting and synchronization issues, the crossdac will not support live processes. Only dump debugging is supported.
+- Similar to the DAC, each cross DAC must match its runtime. The DACs are indexed on a symbol server to allow the debuggers to get these as needed.
+
+### Conditional Code Selection
+
+This is a simple cross-compilation of the DAC's `C++` code. This means the `HOST_*` and the `TARGET_*` defines are configured differently. In this context:
+
+- `HOST` refers to the architecture of the platform that is running the debugger.
+- `TARGET` refers to the platform that generated the core dump.
+
+In general, most code should be conditioned on `TARGET_*` variables, because we want the `DAC` to behave identically when cross-compiled.
+
+Code must be conditioned on `HOST` when it refers to services the host needs. These have typically been things like file i/o and memory allocation.
+
+The initial implementation allowed the compiler to find most of these. The strategy was to assume all code should be conditioned on `TARGET` and let the compiler gripe.
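+
+A minimal sketch of the conditioning pattern (illustrative only, not actual runtime code):
+
+```cpp
+// Conditioned on TARGET: interpret the dump's data structures the same way
+// no matter which platform the debugger itself runs on.
+#ifdef TARGET_UNIX
+// ... parse the target's Unix-layout structures ...
+#endif
+
+// Conditioned on HOST: services the DAC needs while it runs, such as
+// file i/o and memory allocation.
+#ifdef HOST_WINDOWS
+// ... use Win32 APIs for the host-side work ...
+#endif
+```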
+
+### Type Layout
+
+The DAC is essentially a memory parsing tool with supporting functionality. The layout of types in the DAC must match the layout of types in the runtime.
+
+The `C++` standard is not explicit about all layout rules of data structures. Due to its historical evolution from `C`, most structures are arranged in an intuitive, easily understood fashion. Newer and more exotic structures are less consistent.
+
+Experimentation has shown that layout varies in inheritance cases. The DAC does not support general multiple inheritance, so that simplifies things. It does support multiple inheritance with empty base classes.
+
+These cases have proven to be problematic:
+
+- Classes with empty base classes. (I believe the only issue is with multiple base classes.)
+  - By default `gcc` uses an empty base class optimization to eliminate the 1 byte of space these empty base classes normally consume (alone).
+  - By default `Windows` compilers do not do this optimization. This is to preserve backward binary compatibility.
+  - The Windows compilers allow this optimization to be enabled. Our code uses `EMPTY_BASES_DECL` to enable this optimization. It has to be applied to every structure that has multiple base classes or derives from such a structure. See `__declspec(empty_bases)` and the sketch after this list.
+- Packing of the first member of the derived class. In the case where the base class ended with padding:
+  - `gcc` compilers reuse the padding for the first member of the derived class. This effectively removes the padding of the base class in the derived class.
+  - Windows compilers do not remove this padding.
+  - Our code uses the `DAC_ALIGNAS(a)` macro before the first element of the derived class to force the `gcc` compiler to align that member and keep the base class's padding.
+    - The `a` parameter is preferentially the base class's typename.
+    - However, in some cases the compiler will not allow this due to some circular layout issues it causes. In these cases, `a` can refer to a well known type instead. I prefer `int64_t`, `int32_t`, `size_t` ...
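+
+A hedged sketch of how the two macros are applied (type names are invented for illustration; the macros are the ones named above):
+
+```cpp
+struct EmptyMixin1 {};
+struct EmptyMixin2 {};
+
+// EMPTY_BASES_DECL opts Windows compilers into the empty base class
+// optimization that gcc performs by default.
+struct EMPTY_BASES_DECL Widget : EmptyMixin1, EmptyMixin2
+{
+    void* m_ptr;
+};
+
+struct Base
+{
+    void* m_ptr;
+    int   m_count; // Base may end with tail padding here.
+};
+
+struct Derived : Base
+{
+    // DAC_ALIGNAS keeps gcc from packing this member into Base's tail
+    // padding, matching the Windows layout.
+    DAC_ALIGNAS(Base) int32_t m_first;
+};
+```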
+
+#### DacCompareNativeTypes Usage
+
+I wrote and used [DacCompareNativeTypes](https://github.com/dotnet/diagnostics/tree/main/src/tests/DacCompareNativeTypes) to locate and identify type layout issues.
+
+The tool is a bit crude, but it helped get the job done.
+
+`libcoreclr.so` has a lot of symbols, so comparing it proved very slow. To expedite things, I compared the `dac` and later the `dbi` libraries for structure layout. This had the advantage of eliminating irrelevant data structures.
+
+The compilers generate different debug data and different hidden data structures. The tool tries to overlook these. Be aware that not all differences are real. Some data structures are host only so these are expected to be different.
+
+I usually ran the tool in a debugger so that I could look at other available meta-data the tool keeps. i.e. source file and line number.
+
+### Missing/Different types
+
+There are some cases where types are defined by the Target. These types may be missing or different on the Host. In these cases we define the cross compilation types in `src/coreclr/inc/crosscomp.h`.
+
+See `T_CRITICAL_SECTION` for a key example. In this case both host and target supported critical sections, but we needed to correctly map the target data structures. So we needed a type defined which was the TARGET's `CRITICAL_SECTION`.
+
+So the Target's definition was made available for the cross compile. Additionally the macro was created to make sure references which required the Target's definition could be separated from ones which might need the host's definition.
+
+There is also some defensive programming to make sure these structures are accurate. See `T_CRITICAL_SECTION_VALIDATION_MESSAGE` for one example.
+
+### Out of Process Unwinding
+
+To fully support native stack processing, we needed a Target unwinder. For this `libunwind` was also cross-compiled.
+
+See [CMake cross libunwind](https://github.com/dotnet/runtime/blob/0049c629381c5a18e4dadd1038c2bd6b3ae6e3e6/src/coreclr/CMakeLists.txt#L113)
+
+### DBI
+
+I use the term `DAC` in this document to refer to both the `DAC` and the `DBI` debug interface. Both were actually cross compiled. Be aware.
+
+### Build entry point
+
+The main build system change is adding the ability to set the Target OS on a Windows build.
+
+- See [build-runtime.cmd changes](https://github.com/dotnet/runtime/blob/0049c629381c5a18e4dadd1038c2bd6b3ae6e3e6/src/coreclr/build-runtime.cmd#L133-L134)
+- See [Subsets.props](https://github.com/dotnet/runtime/blob/0049c629381c5a18e4dadd1038c2bd6b3ae6e3e6/eng/Subsets.props#L191-L197)
+
+There are also changes to the official build to set these flags, package the results, and upload them to the symbol server.
+
+### Client changes
+
+Various changes were required in the DAC clients to consume the new crossdac. These are really out of the scope of this document.
\ No newline at end of file
diff --git a/docs/design/mono/diagnostics-tracing.md b/docs/design/mono/diagnostics-tracing.md
new file mode 100644
index 00000000000000..ccf46f940d5f4d
--- /dev/null
+++ b/docs/design/mono/diagnostics-tracing.md
@@ -0,0 +1,255 @@
+# MonoVM Diagnostics Tracing Component
+
+## Summary
+
+MonoVM includes support for the EventPipe and DiagnosticServer components used to generate nettrace files, including both runtime and custom `EventSource` events. It is possible to either use dynamic components (Android) or link statically (iOS) depending on build configuration. EventPipe will mainly be used during the development/testing cycle, and should not be deployed or linked into builds passed to the app store for verification and publication.
+
+## Scenarios
+
+.NET supports a number of different EventPipe scenarios using tools mainly from the diagnostics repository. MonoVM includes support for several of these scenarios, running tools like `dotnet-counters` and `dotnet-trace` to collect and analyze runtime performance data. Other things like requesting a core/gc dump or attaching a profiler over EventPipe are currently not supported on MonoVM.
+
+Due to differences between runtimes, many of the NativeRuntimeEvents won't apply to MonoVM. Only a small selected set of NativeRuntimeEvents will initially be added to MonoVM. Currently supported NativeRuntimeEvents can be viewed in the MonoVM include file, https://github.com/dotnet/runtime/blob/main/src/mono/mono/eventpipe/gen-eventing-event-inc.lst. Since the primary focus is EventPipe and mobile platforms (iOS/Android), ETW and LTTng providers have currently not been integrated/enabled for NativeRuntimeEvents on MonoVM.
+
+MonoVM runs on a variety of platforms, and depending on platform capabilities MonoVM supports different build configurations of EventPipe and DiagnosticServer. For desktop platforms (Windows, Linux, macOS), MonoVM builds DiagnosticServer using
+`NamedPipes` (Windows) or `UnixDomainSockets` (Linux, macOS) support. This is in line with the CoreCLR build configuration of the DiagnosticServer, working in the same way.
+
+On mobile platforms (Android/iOS) or other remote sandboxed environments, the MonoVM DiagnosticServer component can be built with TCP/IP support to better handle remote targets. It handles both the connect scenario (runtime acts as a TCP/IP client connecting back to tooling), and the listen scenario (runtime acts as a TCP/IP listener waiting for tooling to connect). Depending on platform, allowed capabilities (some platforms won't allow listening on sockets) and tracing scenarios (startup tracing needs a suspended runtime), a combination of these scenarios can be used.
+
+Existing diagnostic tooling only supports `NamedPipes`/`UnixDomainSockets`, so in order to reuse these tools transparently when targeting MonoVM running on mobile platforms, a new component has been implemented in the diagnostics repo, `dotnet-dsrouter`, https://github.com/dotnet/diagnostics/tree/main/src/Tools/dotnet-dsrouter. dsrouter represents the application running on a remote target locally to the diagnostic tools, routing local IPC traffic over to TCP/IP handled by MonoVM running on the remote target. dsrouter implements 3 different modes, server-server (IPC server, TCP server), client-server (IPC client, TCP server) and server-client (IPC server, TCP client), and depending on configuration all three modes can be used. dsrouter also improves the reversed connect runtime scenario to support more than one diagnostic tooling client at a time, as well as converting the reversed connect runtime scenario (used when tracing startup) into a normal direct connect scenario using dsrouter in server-server mode.
+
+For more details around diagnostic scenarios, see:
+
+https://docs.microsoft.com/en-us/dotnet/core/diagnostics/dotnet-counters
+
+https://docs.microsoft.com/en-us/dotnet/core/diagnostics/dotnet-trace
+
+https://docs.microsoft.com/en-us/dotnet/core/diagnostics/event-counter-perf
+
+https://docs.microsoft.com/en-us/dotnet/core/diagnostics/debug-highcpu
+
+## Building an application including diagnostic tracing support
+
+Depending on platform, there are different recommended and supported ways to include diagnostic tracing support when building the application.
+
+### Android
+
+Android is built using dynamic component support, meaning that components are included as shared objects and the runtime will try to load them from the same location as `libmonosgen-2.0.so`. If the runtime fails to load a component, it will be disabled; if it successfully loads the component at runtime, it will be enabled and used. Enabling/disabling components is then a matter of including/excluding the needed shared object files in the APK (in the same folder as `libmonosgen-2.0.so`); the same runtime build can be used to support any combination of components.
+
+If `AndroidAppBuilderTask` is used, there is an msbuild property, `RuntimeComponents`, that can be used to include specific components in the generated application. By default it is empty, meaning all components will be disabled; using a `*` will enable all components, and by specifying individual components, only those will be enabled. Enabling tracing would look like this, `RuntimeComponents="diagnostics_tracing"`; more components can be enabled by separating them with `;`.
+
+The Android runtime pack has the following component libraries included. For default scenarios, the dynamic versions should be used together with `libmonosgen-2.0.so`, but the runtime pack also includes static versions of the components that can be used if the runtime is built statically using `libmonosgen-2.0.a`. In case of static linking, using the `libmono-component-*-stub-static.a` library will disable the component, while using `libmono-component-*-static.a` will enable it.
+
+```
+libmono-component-diagnostics_tracing.so
+libmono-component-diagnostics_tracing-static.a
+libmono-component-diagnostics_tracing-stub-static.a
+```
+
+### iOS
+
+iOS is built using static component support, meaning that components are included as static libraries that need to be linked together with `libmonosgen-2.0.a` to produce the final application. Static components come in two flavours, the component library and a stub library. Linking the component library will enable the component in the final application, while linking the stub library disables the component. Depending on linked component flavours it is possible to create a build that enables specific components while disabling others. All components need to be linked in (using the component or stub library) or there will be unresolved symbols in `libmonosgen-2.0.a`.
+
+If `AppleAppBuilderTask` is used, there is an msbuild property, `RuntimeComponents`, that can be used to include specific components in the built application. By default it is empty, meaning all components will be disabled; using a `*` will enable all components, and by specifying individual components, only those will be enabled. Enabling tracing would look like this, `RuntimeComponents="diagnostics_tracing"`; more components can be enabled by separating them with `;`.
+
+The iOS runtime pack has the following component libraries included. Using the `libmono-component-*-stub-static.a` library will disable the component, while using `libmono-component-*-static.a` will enable it.
+
+```
+libmono-component-diagnostics_tracing-static.a
+libmono-component-diagnostics_tracing-stub-static.a
+```
+
+## Run an application including diagnostic tracing support
+
+By default EventPipe/DiagnosticServer is controlled using the same set of environment variables used by CoreCLR. The single most important one is `DOTNET_DiagnosticPorts`, used to set up the runtime to connect to or accept requests from diagnostic tooling. If not defined, the diagnostic server won't start up and it will not be possible to interact with the runtime using diagnostic tooling. Depending on platform, capabilities and scenarios, the content of `DOTNET_DiagnosticPorts` will look different.
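+
+As a sketch, the value format used in the examples below is:
+
+```
+DOTNET_DiagnosticPorts=<address>:<port>,<suspend|nosuspend>[,listen]
+```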
+
+### Application running in simulator/emulator connecting over loopback interface
+
+Starting up the application with `DOTNET_DiagnosticPorts=127.0.0.1:9000,nosuspend` on iOS, or `DOTNET_DiagnosticPorts=10.0.2.2:9000,nosuspend` on Android, will connect to a dsrouter listening on loopback port 9000 (can be any available port) on the local machine. Once the runtime is connected, it is possible to connect diagnostic tools like dotnet-counters and dotnet-trace to dsrouter's local IPC interface. In order to include startup events in EventPipe sessions, change `nosuspend` to `suspend`; the runtime will then suspend at startup and wait for diagnostic tooling to connect before resuming.
+
+If supported, it is possible to push the TCP/IP listener over to the device and only run a TCP/IP client on the local machine connecting to the runtime listener. Using `DOTNET_DiagnosticPorts=127.0.0.1:9000,nosuspend,listen` will run a local listener binding to the loopback interface on the simulator/emulator. On Android, it is possible to set up adb port forwarding, while on iOS the runtime can bind the local machine's loopback interface directly from the simulator. dsrouter will be configured to use a TCP/IP client when running this scenario.
+
+For more information on Android emulator networking and port forwarding:
+
+https://developer.android.com/studio/run/emulator-networking
+https://developer.android.com/studio/command-line/adb#forwardports
+
+#### Example using dotnet-counters using sample app on iOS simulator
+
+Make sure the following is enabled in https://github.com/dotnet/runtime/blob/main/src/mono/sample/iOS/Makefile:
+
+```
+RUNTIME_COMPONENTS=diagnostics_tracing
+DIAGNOSTIC_PORTS=127.0.0.1:9000,nosuspend
+```
+
+dsrouter needs to run using a compatible configuration depending on scenario. Either launch a new instance for every run, or have a background instance running over several sessions using the same configuration.
+
+```
+dotnet-dsrouter server-server -tcps 127.0.0.1:9000 &
+```
+
+```
+cd src/mono/sample/iOS/
+make run-sim
+```
+
+```
+dotnet-counters monitor --process-id [dotnet-dsrouter pid]
+```
+
+Alternatively, run `make run-sim` as a child process of dsrouter:
+
+```
+cd src/mono/sample/iOS/
+dotnet-dsrouter server-server -tcps 127.0.0.1:9000 -- make run-sim
+```
+
+```
+dotnet-counters monitor --process-id [dotnet-dsrouter pid]
+```
+
+#### Example using dotnet-counters using sample app on Android emulator
+
+Make sure the following is enabled in https://github.com/dotnet/runtime/blob/main/src/mono/sample/Android/Makefile:
+
+```
+RUNTIME_COMPONENTS=diagnostics_tracing
+DIAGNOSTIC_PORTS=10.0.2.2:9000,nosuspend
+```
+
+dsrouter needs to run using a compatible configuration depending on scenario. Either launch a new instance for every run, or have a background instance running over several sessions using the same configuration.
+
+```
+dotnet-dsrouter server-server -tcps 10.0.2.2:9000 &
+```
+
+```
+cd src/mono/sample/Android/
+make run
+```
+
+```
+dotnet-counters monitor --process-id [dotnet-dsrouter pid]
+```
+
+Alternatively, run `make run` as a child process of dsrouter:
+
+```
+cd src/mono/sample/Android/
+dotnet-dsrouter server-server -tcps 10.0.2.2:9000 -- make run
+```
+
+```
+dotnet-counters monitor --process-id [dotnet-dsrouter pid]
+```
+
+#### Example using dotnet-trace startup tracing using sample app on iOS simulator
+
+Make sure the following is enabled in https://github.com/dotnet/runtime/blob/main/src/mono/sample/iOS/Makefile:
+
+```
+RUNTIME_COMPONENTS=diagnostics_tracing
+DIAGNOSTIC_PORTS=127.0.0.1:9000,suspend
+```
+
+dsrouter needs to run using a compatible configuration depending on scenario. Either launch a new instance for every run, or have a background instance running over several sessions using the same configuration.
+
+```
+dotnet-dsrouter client-server -ipcc [dotnet-trace path] -tcps 127.0.0.1:9000 &
+```
+
+```
+dotnet-trace collect --diagnostic-port myport
+```
+
+```
+cd src/mono/sample/iOS/
+make run-sim
+```
+
+Alternatively, run `make run-sim` as a child process of dsrouter:
+
+```
+dotnet-trace collect --diagnostic-port myport
+```
+
+```
+cd src/mono/sample/iOS/
+dotnet-dsrouter client-server -ipcc [dotnet-trace path] -tcps 127.0.0.1:9000 -- make run-sim
+```
+
+#### Example using dotnet-trace startup tracing using sample app on Android emulator
+
+Make sure the following is enabled in https://github.com/dotnet/runtime/blob/main/src/mono/sample/Android/Makefile:
+
+```
+RUNTIME_COMPONENTS=diagnostics_tracing
+DIAGNOSTIC_PORTS=10.0.2.2:9000,suspend
+```
+
+dsrouter needs to run using a compatible configuration depending on scenario. Either launch a new instance for every run, or have a background instance running over several sessions using the same configuration.
+
+```
+dotnet-dsrouter client-server -ipcc [dotnet-trace path] -tcps 127.0.0.1:9000 &
+```
+
+```
+dotnet-trace collect --diagnostic-port myport
+```
+
+```
+cd src/mono/sample/Android/
+make run
+```
+
+Alternatively, run `make run` as a child process of dsrouter:
+
+```
+dotnet-trace collect --diagnostic-port myport
+```
+
+```
+cd src/mono/sample/Android/
+dotnet-dsrouter client-server -ipcc [dotnet-trace path] -tcps 127.0.0.1:9000 -- make run
+```
+
+### Application running on device
+
+The same environment variable is used when running on device, and if the device is connected to the development machine over USB, it is still possible to use the loopback interface as described above, but it requires the use of adb port forwarding on Android and an implementation of usbmux on iOS, such as mlaunch using the `--tcp-tunnel` argument.
+
+If the loopback interface won't work, it is possible to use any interface reachable between the development machine and device in the `DOTNET_DiagnosticPorts` variable; just keep in mind that the connection is unauthenticated and unencrypted.
+
+** TODO **: Describe changes to above examples when running on device.
+
+### Application running single file based EventPipe session
+
+If the application shuts down the runtime on close, it is possible to run a single file based EventPipe session using environment variables as described in https://docs.microsoft.com/en-us/dotnet/core/diagnostics/eventpipe#trace-using-environment-variables. In .NET 6 an additional variable has been added, `COMPlus_EventPipeOutputStreaming`, making sure data is periodically flushed into the output file.
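+
+A hedged example of the environment variable setup (variable names per the linked document; the output path is an arbitrary choice):
+
+```
+export COMPlus_EnableEventPipe=1
+export COMPlus_EventPipeOutputPath=trace.nettrace
+export COMPlus_EventPipeOutputStreaming=1
+```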
+
+If the application doesn't shut down the runtime on close, this mode won't work, since it requires rundown events, which are only emitted when closing the session and flushing the memory manager. If the runtime doesn't shut down, the generated nettrace file will be corrupt.
+
+Running a single file based EventPipe session will produce a file in the device's working directory. Use platform-specific tooling to extract the file once the application has closed. Since a file based EventPipe session doesn't use the diagnostic server, there is no need to set `DOTNET_DiagnosticPorts` or run dotnet-dsrouter.
+
+** TODO **: Add example running single file based EventPipe session.
+
+### Examples running dotnet-* tooling against MonoVM
+
+https://github.com/dotnet/diagnostics/blob/main/documentation/dotnet-counters-instructions.md
+https://github.com/dotnet/diagnostics/blob/main/documentation/dotnet-trace-instructions.md
+
+** TODO **: Example of commands run to launch a dotnet-trace session collecting SampleProfiler.
+** TODO **: Example of commands run to launch a dotnet-trace session tracing startup events.
+** TODO **: Example of commands run to launch a dotnet-trace session tracing JIT statistics.
+
+## Analyze a nettrace file
+
+Collected events retrieved over EventPipe sessions are stored in a nettrace file that can be analyzed using tooling like PerfView, Speedscope or Chromium:
+
+https://docs.microsoft.com/en-us/dotnet/core/diagnostics/debug-highcpu?tabs=windows#trace-generation
+https://docs.microsoft.com/en-us/dotnet/core/diagnostics/dotnet-trace#dotnet-trace-convert
+https://github.com/dotnet/diagnostics/blob/main/documentation/tutorial/app_running_slow_highcpu.md
+
+It is also possible to analyze the trace file using the diagnostic client libraries from the diagnostics repo:
+
+https://github.com/dotnet/diagnostics/blob/main/documentation/diagnostics-client-library-instructions.md
+
+Using the diagnostic client library gives full flexibility to use the data in a nettrace file and extract any information it contains. Using the library it is also possible to implement custom tooling that connects and does live analysis of the event stream retrieved directly from a running application.
+
+** TODO **: Add example running with the startup events and retrieving time to method X (replacing MonoVM --stat=method).
+
+## Developing EventPipe/DiagnosticServer on MonoVM
+
+** TODO **: EventPipe/DiagnosticServer library design.
+** TODO **: How to add a new NativeRuntimeEvent.
+** TODO **: How to add a new component API.
\ No newline at end of file
diff --git a/docs/project/list-of-diagnostics.md b/docs/project/list-of-diagnostics.md
index 46092b5528018f..fc946ddb03b081 100644
--- a/docs/project/list-of-diagnostics.md
+++ b/docs/project/list-of-diagnostics.md
@@ -80,6 +80,7 @@ The PR that reveals the implementation of the ` becomes `dotnet xharness wasm test --xyz`
+ Example: `WasmXHarnessArgs="--set-web-server-http-env=DOTNET_TEST_WEBSOCKETHOST"` -> becomes `dotnet xharness wasm test --set-web-server-http-env=DOTNET_TEST_WEBSOCKETHOST`
-- `$(WasmXHarnessMonoArgs)` - arguments to mono
+- `$(WasmXHarnessMonoArgs)` - arguments and variables for mono
- Example: `WasmXHarnessMonoArgs="--runtime-arg=--trace=E"`
+ Example: `WasmXHarnessMonoArgs="--runtime-arg=--trace=E --setenv=MONO_LOG_LEVEL=debug"`
- `$(WasmTestAppArgs)` - arguments for the test app itself
diff --git a/docs/workflow/testing/libraries/testing.md b/docs/workflow/testing/libraries/testing.md
index fe4d76255a7913..a78a3d0409868d 100644
--- a/docs/workflow/testing/libraries/testing.md
+++ b/docs/workflow/testing/libraries/testing.md
@@ -1,54 +1,71 @@
# Testing Libraries
-We use the OSS testing framework [xunit](https://github.com/xunit/xunit).
+## Full Build and Test Run
-To build the tests and run them you can call the libraries build script. For libraries tests to work, you must have built the coreclr or mono runtime for them to run on.
+These example commands automate the test run and all pre-requisite build steps in a single command from a clean enlistment.
-**Examples**
-- The following shows how to build only the tests but not run them:
+- Run all tests - Builds clr in release, libs+tests in debug:
```
-build.cmd/sh -subset libs.tests
+build.cmd/sh -subset clr+libs+libs.tests -test -rc Release
```
-- The following builds and runs all tests using clr:
+- Run all tests - Builds Mono in release, libs+tests in debug:
```
-build.cmd/sh -subset clr+libs.tests -test
+build.cmd/sh -subset mono+libs+libs.tests -test -rc Release
```
-- The following builds and runs all tests using mono:
+- Run all tests - Builds Mono and libs for the x86 architecture in debug (a debug runtime will run tests very slowly):
```
-build.cmd/sh -subset mono+libs.tests -test
+build.cmd/sh -subset mono+libs+libs.tests -test -arch x86
```
-- The following builds and runs all tests in release configuration:
+## Partial Build and Test Runs
+
+Doing full build and test runs takes a long time and is very inefficient if you need to iterate on a change.
+For greater control and efficiency individual parts of the build + testing workflow can be run in isolation.
+See the [Building instructions](../../building/libraries/README.md) for more info on build options.
+
+### Test Run Pre-requisites
+Before any tests can run we need a complete build to run them on. This requires building (1) a runtime, and
+(2) all the libraries. Examples:
+
+- Build release clr + debug libraries
```
-build.cmd/sh -subset libs.tests -test -c Release
+build.cmd/sh -subset clr+libs -rc Release
```
-- The following builds clr in release, libs in debug and runs all tests:
+- Build release mono + debug libraries
```
-build.cmd/sh -subset clr+libs+libs.tests -test -rc Release
+build.cmd/sh -subset mono+libs -rc Release
```
-- The following builds mono and libs for x86 architecture and runs all tests:
+Building the `libs` subset or any of the individual library projects automatically copies product binaries into the testhost folder
+in the bin directory. This is where the tests will load the binaries from during the run. However System.Private.CoreLib is an
+exception - the build does not automatically copy it to the testhost folder. If you [rebuild System.Private.CoreLib](https://github.com/dotnet/runtime/blob/main/docs/workflow/building/libraries/README.md#iterating-on-systemprivatecorelib-changes) you must also build the `libs.pretest` subset to ensure S.P.C is copied before running tests.
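+
+For example, after rebuilding System.Private.CoreLib:
+```
+build.cmd/sh -subset libs.pretest
+```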
+
+### Running tests for all libraries
+
+- Build and run all tests in release configuration.
```
-build.cmd/sh -subset mono+libs+libs.tests -test -arch x86
+build.cmd/sh -subset libs.tests -test -c Release
```
-- The following example shows how to pass extra msbuild properties to ignore tests ignored in CI:
+- Build the tests without running them
```
-build.cmd/sh -subset libs.tests -test /p:WithoutCategories=IgnoreForCI
+build.cmd/sh -subset libs.tests
```
-Unless you specifiy `-testnobuild`, test assemblies are implicitly built when invoking the `Test` action.
-- The following shows how to only test the libraries without building them
+- Run the tests without building them
```
build.cmd/sh -subset libs.tests -test -testnobuild
```
-## Running tests on the command line
+- The following example shows how to pass extra msbuild properties to ignore tests ignored in CI.
+```
+build.cmd/sh -subset libs.tests -test /p:WithoutCategories=IgnoreForCI
+```
-To build tests you need to specify the `test` subset when invoking build.cmd/sh: `build.cmd/sh -subset libs.tests`.
+### Running tests for a single library
The easiest (and recommended) way to build and run the tests for a specific library, is to invoke the `Test` target on that library:
```cmd
@@ -68,21 +85,21 @@ dotnet build /t:Test /p:TargetArchitecture=x86
There may be multiple projects in some directories so you may need to specify the path to a specific test project to get it to build and run the tests.
-#### Running a single test on the command line
+### Running a single test on the command line
To quickly run or debug a single test from the command line, set the XunitMethodName property, e.g.:
```cmd
dotnet build /t:Test /p:XunitMethodName={FullyQualifiedNamespace}.{ClassName}.{MethodName}
```
-#### Running outer loop tests
+### Running outer loop tests
To run all tests, including "outer loop" tests (which are typically slower and in some test suites less reliable, but which are more comprehensive):
```cmd
dotnet build /t:Test /p:Outerloop=true
```
-#### Running tests on a different target framework
+### Running tests on a different target framework
Each test project can potentially have multiple target frameworks. There are some tests that might be OS-specific, or might be testing an API that is available only on some target frameworks, so the `TargetFrameworks` property specifies the valid target frameworks. By default we will build and run only the default build target framework which is `net5.0`. The rest of the `TargetFrameworks` will need to be built and ran by specifying the `BuildTargetFramework` option, e.g.:
```cmd
diff --git a/eng/CodeAnalysis.ruleset b/eng/CodeAnalysis.ruleset
index 4631414599589f..8faa50e9a6f00b 100644
--- a/eng/CodeAnalysis.ruleset
+++ b/eng/CodeAnalysis.ruleset
@@ -126,6 +126,7 @@
+
diff --git a/eng/Signing.props b/eng/Signing.props
index ca044455e91c10..b546fd4ec3b5d3 100644
--- a/eng/Signing.props
+++ b/eng/Signing.props
@@ -35,7 +35,6 @@
-
@@ -77,7 +76,7 @@
Exclude="mscordaccore.dll"
CertificateName="MicrosoftSHA2" />
-
+
+
+ ./build.sh
+
+ true
+ false
+
+
+ $([System.Runtime.InteropServices.RuntimeInformation]::RuntimeIdentifier)
+ $(__DistroRid)
+
+
+ <_targetRidPlatformIndex>$(TargetRid.LastIndexOfAny("-"))
+ $(TargetRid.Substring(0, $(_targetRidPlatformIndex)))
+ $(TargetRid.Substring($(_targetRidPlatformIndex)).TrimStart('-'))
+
+ minimal
+
+
+
+
+
+
+
+
+
+
+
+
+ $(InnerBuildArgs) --arch $(TargetRidPlatform)
+ $(InnerBuildArgs) --configuration $(Configuration)
+ $(InnerBuildArgs) --ci
+ $(InnerBuildArgs) --allconfigurations
+ $(InnerBuildArgs) --verbosity $(LogVerbosity)
+ $(InnerBuildArgs) --nodereuse false
+ $(InnerBuildArgs) --warnAsError false
+ $(InnerBuildArgs) --cmakeargs -DCLR_CMAKE_USE_SYSTEM_LIBUNWIND=TRUE
+ $(InnerBuildArgs) /p:MicrosoftNetFrameworkReferenceAssembliesVersion=1.0.0
+ $(InnerBuildArgs) /p:ContinuousIntegrationBuild=true
+ $(InnerBuildArgs) /p:PackageRid=$(TargetRid)
+ $(InnerBuildArgs) /p:NoPgoOptimize=true
+ $(InnerBuildArgs) /p:KeepNativeSymbols=true
+ $(InnerBuildArgs) /p:RuntimeOS=$(TargetRidWithoutPlatform)
+ $(InnerBuildArgs) /p:PortableBuild=$(SourceBuildPortable)
+ $(InnerBuildArgs) /p:BuildDebPackage=false
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/eng/SourceBuildPrebuiltBaseline.xml b/eng/SourceBuildPrebuiltBaseline.xml
new file mode 100644
index 00000000000000..c1b6dfbf053817
--- /dev/null
+++ b/eng/SourceBuildPrebuiltBaseline.xml
@@ -0,0 +1,5 @@
+
+
+
+
+
diff --git a/eng/Subsets.props b/eng/Subsets.props
index bf6b25bffc7317..105b34254c6c2b 100644
--- a/eng/Subsets.props
+++ b/eng/Subsets.props
@@ -25,8 +25,8 @@
clr+mono+libs+host+packs
mono+libs+packs
-
- clr+libs+host+packs
+
+ clr+libs+host+packs
@@ -49,18 +49,21 @@
$(DefaultMonoSubsets)mono.wasmruntime+
$(DefaultMonoSubsets)mono.aotcross+
$(DefaultMonoSubsets)mono.runtime+mono.corelib+mono.packages
-
+
libs.native+
- $(DefaultLibrariesSubsets)libs.ref+libs.src+libs.pretest+libs.packages
+ $(DefaultLibrariesSubsets)libs.ref+libs.src+libs.packages
+ $(DefaultLibrariesSubsets)+libs.pretest
- host.native+host.pkg+host.tools+host.tests
+ host.native+host.tools
+ $(DefaultHostSubsets)+host.pkg+host.tests
host.native
packs.product
$(DefaultPacksSubsets)+packs.tests
+ $(DefaultPacksSubsets)+packs.installers
@@ -204,12 +207,12 @@
+ $(CoreClrProjectRoot)tools\r2rtest\R2RTest.csproj" Category="clr" Condition="'$(DotNetBuildFromSource)' != 'true'"/>
+
+ Test="true" Category="clr" Condition="'$(__DistroRid)' != 'linux-musl-x64' and '$(DotNetBuildFromSource)' != 'true'"/>
@@ -221,7 +224,7 @@
-
+
diff --git a/eng/Tools.props b/eng/Tools.props
index 04f1c9d742510c..1a9804dbaf6925 100644
--- a/eng/Tools.props
+++ b/eng/Tools.props
@@ -7,7 +7,6 @@
-
diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml
index 4eeb9d3192cd20..5c1f457ae40f4d 100644
--- a/eng/Version.Details.xml
+++ b/eng/Version.Details.xml
@@ -1,218 +1,227 @@
-
+
https://github.com/dotnet/icu
- 05fbe174749fc77d77eaffc777908f438c011c81
+ d7db669b70f4dd67ec001c192f9809c218cab88b
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 4a2b475948d498b89fedef7cf890883f49bc1ea3
+
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 4a2b475948d498b89fedef7cf890883f49bc1ea3
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 4a2b475948d498b89fedef7cf890883f49bc1ea3
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 4a2b475948d498b89fedef7cf890883f49bc1ea3
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 4a2b475948d498b89fedef7cf890883f49bc1ea3
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 4a2b475948d498b89fedef7cf890883f49bc1ea3
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 4a2b475948d498b89fedef7cf890883f49bc1ea3
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 4a2b475948d498b89fedef7cf890883f49bc1ea3
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 4a2b475948d498b89fedef7cf890883f49bc1ea3
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 4a2b475948d498b89fedef7cf890883f49bc1ea3
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 4a2b475948d498b89fedef7cf890883f49bc1ea3
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 4a2b475948d498b89fedef7cf890883f49bc1ea3
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 4a2b475948d498b89fedef7cf890883f49bc1ea3
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 4a2b475948d498b89fedef7cf890883f49bc1ea3
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 4a2b475948d498b89fedef7cf890883f49bc1ea3
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 4a2b475948d498b89fedef7cf890883f49bc1ea3
https://github.com/microsoft/vstest
140434f7109d357d0158ade9e5164a4861513965
-
+
https://github.com/dotnet/runtime-assets
- 3f92f2642a176248e735f61d748ace8b9dfaa56a
+ 0612b036e67746930105231b605c4df9ac6ed47e
-
+
https://github.com/dotnet/runtime-assets
- 3f92f2642a176248e735f61d748ace8b9dfaa56a
+ 0612b036e67746930105231b605c4df9ac6ed47e
-
+
https://github.com/dotnet/runtime-assets
- 3f92f2642a176248e735f61d748ace8b9dfaa56a
+ 0612b036e67746930105231b605c4df9ac6ed47e
-
+
https://github.com/dotnet/runtime-assets
- 3f92f2642a176248e735f61d748ace8b9dfaa56a
+ 0612b036e67746930105231b605c4df9ac6ed47e
-
+
https://github.com/dotnet/runtime-assets
- 3f92f2642a176248e735f61d748ace8b9dfaa56a
+ 0612b036e67746930105231b605c4df9ac6ed47e
-
+
https://github.com/dotnet/runtime-assets
- 3f92f2642a176248e735f61d748ace8b9dfaa56a
+ 0612b036e67746930105231b605c4df9ac6ed47e
-
+
https://github.com/dotnet/runtime-assets
- 3f92f2642a176248e735f61d748ace8b9dfaa56a
+ 0612b036e67746930105231b605c4df9ac6ed47e
-
+
https://github.com/dotnet/runtime-assets
- 3f92f2642a176248e735f61d748ace8b9dfaa56a
+ 0612b036e67746930105231b605c4df9ac6ed47e
-
+
https://github.com/dotnet/runtime-assets
- 3f92f2642a176248e735f61d748ace8b9dfaa56a
+ 0612b036e67746930105231b605c4df9ac6ed47e
-
+
https://github.com/dotnet/llvm-project
- a3ff327da9634da948e84d45df9e6beb64eb5c4c
+ a76e596b96a1b9b4bc7a213f9a8335bcd9189b67
-
+
https://github.com/dotnet/llvm-project
- a3ff327da9634da948e84d45df9e6beb64eb5c4c
+ a76e596b96a1b9b4bc7a213f9a8335bcd9189b67
-
+
https://github.com/dotnet/llvm-project
- a3ff327da9634da948e84d45df9e6beb64eb5c4c
+ a76e596b96a1b9b4bc7a213f9a8335bcd9189b67
-
+
https://github.com/dotnet/llvm-project
- a3ff327da9634da948e84d45df9e6beb64eb5c4c
+ a76e596b96a1b9b4bc7a213f9a8335bcd9189b67
-
+
https://github.com/dotnet/llvm-project
- a3ff327da9634da948e84d45df9e6beb64eb5c4c
+ a76e596b96a1b9b4bc7a213f9a8335bcd9189b67
-
+
https://github.com/dotnet/llvm-project
- a3ff327da9634da948e84d45df9e6beb64eb5c4c
+ a76e596b96a1b9b4bc7a213f9a8335bcd9189b67
-
+
https://github.com/dotnet/llvm-project
- a3ff327da9634da948e84d45df9e6beb64eb5c4c
+ a76e596b96a1b9b4bc7a213f9a8335bcd9189b67
-
+
https://github.com/dotnet/llvm-project
- a3ff327da9634da948e84d45df9e6beb64eb5c4c
+ a76e596b96a1b9b4bc7a213f9a8335bcd9189b67
https://github.com/dotnet/runtime
38017c3935de95d0335bac04f4901ddfc2718656
-
+
https://github.com/dotnet/runtime
- fea7ff2cce79da2df9cf1c50ce215287750f055e
+ af5c238556e204583b129cc8f5c7338f84dc2c40
-
+
https://github.com/dotnet/runtime
- fea7ff2cce79da2df9cf1c50ce215287750f055e
+ af5c238556e204583b129cc8f5c7338f84dc2c40
-
+
https://github.com/dotnet/runtime
- fea7ff2cce79da2df9cf1c50ce215287750f055e
+ af5c238556e204583b129cc8f5c7338f84dc2c40
-
+
https://github.com/dotnet/runtime
- fea7ff2cce79da2df9cf1c50ce215287750f055e
+ af5c238556e204583b129cc8f5c7338f84dc2c40
-
+
https://github.com/dotnet/runtime
- fea7ff2cce79da2df9cf1c50ce215287750f055e
+ af5c238556e204583b129cc8f5c7338f84dc2c40
-
+
https://github.com/dotnet/runtime
- fea7ff2cce79da2df9cf1c50ce215287750f055e
+ af5c238556e204583b129cc8f5c7338f84dc2c40
-
+
https://github.com/dotnet/runtime
- fea7ff2cce79da2df9cf1c50ce215287750f055e
+ af5c238556e204583b129cc8f5c7338f84dc2c40
-
+
https://github.com/mono/linker
- 493a448586c1fad68efc2126836f5bb9b5f9ad20
+ caeaf2a3fb3f636805fdd4881df4f9a539fff8f6
-
+
https://github.com/dotnet/xharness
- c22941f71c25dd9371caa25410ec264e9d8efac4
+ d6f8a4ad30908fb210390380eae97264e4fbe8ce
-
+
https://github.com/dotnet/xharness
- c22941f71c25dd9371caa25410ec264e9d8efac4
+ d6f8a4ad30908fb210390380eae97264e4fbe8ce
-
+
https://github.com/dotnet/arcade
- c7d6bd607715f334cda90e01967bb0c02dee09be
+ 85a65ea1fca1d0867f699fed44d191358270bf6a
-
+
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
- f4ee4d18519bd793bb3992349e3f756aa3028425
+ 4e5bea15eb5a9c8cf9142195b1c9c78437a5b27f
-
+
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
- f4ee4d18519bd793bb3992349e3f756aa3028425
+ 4e5bea15eb5a9c8cf9142195b1c9c78437a5b27f
-
+
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
- f4ee4d18519bd793bb3992349e3f756aa3028425
+ 4e5bea15eb5a9c8cf9142195b1c9c78437a5b27f
-
+
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
- f4ee4d18519bd793bb3992349e3f756aa3028425
+ 4e5bea15eb5a9c8cf9142195b1c9c78437a5b27f
https://github.com/dotnet/emsdk
defa37b05c734e025292c5747664e970cd2ac444
-
+
https://github.com/dotnet/hotreload-utils
- 2a24834bde0dab496ed9eba1bf9b87e39544b9b5
+ 25b814e010cd4796cedfbcce72a274c26928f496
+
+
+ https://github.com/dotnet/runtime-assets
+ 8d7b898b96cbdb868cac343e938173105287ed9e
+
+
+ https://github.com/dotnet/roslyn-analyzers
+ fcddb771f42866f9521f23f093b1f30e129018bb
diff --git a/eng/Versions.props b/eng/Versions.props
index c9ca4cead3279c..a194baa33def9e 100644
--- a/eng/Versions.props
+++ b/eng/Versions.props
@@ -8,7 +8,7 @@
0
6.0.100
preview
- 6
+ 7
$(MajorVersion).$(MinorVersion).0.0
@@ -45,84 +45,87 @@
3.8.0
- 6.0.0-preview6.21274.2
+
3.10.0-2.final
3.10.0-2.final
+ 6.0.0-rc1.21320.2
- 6.0.0-beta.21281.1
- 6.0.0-beta.21281.1
- 6.0.0-beta.21281.1
- 6.0.0-beta.21281.1
- 6.0.0-beta.21281.1
- 6.0.0-beta.21281.1
- 2.5.1-beta.21281.1
- 6.0.0-beta.21281.1
- 6.0.0-beta.21281.1
- 6.0.0-beta.21281.1
- 6.0.0-beta.21281.1
- 6.0.0-beta.21281.1
- 6.0.0-beta.21281.1
+ 6.0.0-beta.21311.3
+ 6.0.0-beta.21311.3
+ 6.0.0-beta.21311.3
+ 6.0.0-beta.21311.3
+ 6.0.0-beta.21311.3
+ 6.0.0-beta.21311.3
+ 2.5.1-beta.21311.3
+ 6.0.0-beta.21311.3
+ 6.0.0-beta.21311.3
+ 6.0.0-beta.21311.3
+ 6.0.0-beta.21311.3
+ 6.0.0-beta.21311.3
+ 6.0.0-beta.21311.3
5.9.0-preview.2
6.0.0-alpha.1.20612.4
- 6.0.0-preview.6.21281.1
- 6.0.0-preview.6.21281.1
+ 6.0.0-preview.6.21314.1
+ 6.0.0-preview.6.21314.1
3.1.0
- 6.0.0-preview.6.21281.1
+ 6.0.0-preview.6.21314.1
+ 5.0.0
+ 4.3.0
+ 5.0.0
1.2.0-beta.304
- 4.3.0
4.5.1
4.3.0
- 4.3.0
5.0.0
- 4.8.1
+ 4.8.2
+ 4.5.0
4.3.0
- 4.3.0
- 4.3.0
- 4.3.0
4.3.0
+ 5.0.0
+ 5.0.0
4.3.0
4.5.4
- 4.3.4
4.3.1
4.5.0
- 4.3.0
5.0.0
- 4.3.0
+ 4.7.0
+ 4.7.0
+ 4.7.0
4.3.1
4.3.1
4.3.0
4.3.0
4.3.0
+ 5.0.0
4.3.1
- 4.7.0
- 4.7.0
- 6.0.0-preview.6.21281.1
- 6.0.0-preview.6.21281.1
- 4.3.0
+ 5.0.0
+ 5.0.0
+ 5.0.0
+ 4.8.1
+ 6.0.0-preview.6.21314.1
+ 6.0.0-preview.6.21314.1
4.5.4
4.5.0
- 1.1.1
- 4.3.0
- 6.0.0-preview.6.21281.1
+ 6.0.0-preview.6.21314.1
- 6.0.0-beta.21275.1
- 6.0.0-beta.21275.1
- 6.0.0-beta.21275.1
- 6.0.0-beta.21275.1
- 6.0.0-beta.21275.1
- 6.0.0-beta.21275.1
- 6.0.0-beta.21275.1
- 6.0.0-beta.21275.1
- 6.0.0-beta.21275.1
+ 6.0.0-beta.21314.1
+ 6.0.0-beta.21307.1
+ 6.0.0-beta.21307.1
+ 6.0.0-beta.21307.1
+ 6.0.0-beta.21307.1
+ 6.0.0-beta.21307.1
+ 6.0.0-beta.21307.1
+ 6.0.0-beta.21307.1
+ 6.0.0-beta.21307.1
+ 6.0.0-beta.21307.1
- 1.0.0-prerelease.21301.4
- 1.0.0-prerelease.21301.4
- 1.0.0-prerelease.21301.4
- 1.0.0-prerelease.21301.4
+ 1.0.0-prerelease.21313.4
+ 1.0.0-prerelease.21313.4
+ 1.0.0-prerelease.21313.4
+ 1.0.0-prerelease.21313.4
16.9.0-beta1.21055.5
2.0.0-beta1.20253.1
@@ -137,7 +140,7 @@
These are used as reference assemblies only, so they must not take a ProdCon/source-build
version. Insert "RefOnly" to avoid assignment via PVP.
-->
- 16.8.0
+ 16.9.0
$(RefOnlyMicrosoftBuildVersion)
$(RefOnlyMicrosoftBuildVersion)
$(RefOnlyMicrosoftBuildVersion)
@@ -146,9 +149,9 @@
1.0.1-prerelease-00006
16.9.0-preview-20201201-01
- 1.0.0-prerelease.21281.2
- 1.0.0-prerelease.21281.2
- 1.0.1-alpha.0.21281.1
+ 1.0.0-prerelease.21314.1
+ 1.0.0-prerelease.21314.1
+ 1.0.1-alpha.0.21311.1
2.4.1
2.4.2
1.3.0
@@ -159,23 +162,21 @@
5.0.0-preview-20201009.2
- 6.0.100-preview.6.21281.4
+ 6.0.100-preview.6.21310.3
$(MicrosoftNETILLinkTasksVersion)
- 6.0.0-preview.6.21281.1
+ 6.0.0-preview.6.21307.1
- 9.0.1-alpha.1.21281.1
- 9.0.1-alpha.1.21281.1
- 9.0.1-alpha.1.21281.1
- 9.0.1-alpha.1.21281.1
- 9.0.1-alpha.1.21281.1
- 9.0.1-alpha.1.21281.1
- 9.0.1-alpha.1.21281.1
- 9.0.1-alpha.1.21281.1
+ 11.1.0-alpha.1.21308.1
+ 11.1.0-alpha.1.21308.1
+ 11.1.0-alpha.1.21308.1
+ 11.1.0-alpha.1.21308.1
+ 11.1.0-alpha.1.21308.1
+ 11.1.0-alpha.1.21308.1
+ 11.1.0-alpha.1.21308.1
+ 11.1.0-alpha.1.21308.1
6.0.0-preview.6.21275.1
$(MicrosoftNETRuntimeEmscripten2021Nodewinx64Version)
-
-
diff --git a/eng/build.ps1 b/eng/build.ps1
index f93182c5bd89e5..5980695e3a2c34 100644
--- a/eng/build.ps1
+++ b/eng/build.ps1
@@ -153,7 +153,7 @@ if ($vs) {
if ($runtimeFlavor -eq "Mono") {
# Search for the solution in mono
- $vs = Split-Path $PSScriptRoot -Parent | Join-Path -ChildPath "src\mono\netcore" | Join-Path -ChildPath $vs | Join-Path -ChildPath "$vs.sln"
+ $vs = Split-Path $PSScriptRoot -Parent | Join-Path -ChildPath "src\mono" | Join-Path -ChildPath $vs | Join-Path -ChildPath "$vs.sln"
} else {
# Search for the solution in coreclr
$vs = Split-Path $PSScriptRoot -Parent | Join-Path -ChildPath "src\coreclr" | Join-Path -ChildPath $vs | Join-Path -ChildPath "$vs.sln"
diff --git a/eng/common/dotnet-install.sh b/eng/common/dotnet-install.sh
index d6efeb44340ba1..fdfeea66e7d43f 100755
--- a/eng/common/dotnet-install.sh
+++ b/eng/common/dotnet-install.sh
@@ -70,7 +70,7 @@ case $cpuname in
;;
esac
-dotnetRoot="$repo_root/.dotnet"
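+# repo_root ends with a trailing slash (set in eng/common/tools.sh), so no separator is needed here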
+dotnetRoot="${repo_root}.dotnet"
if [[ $architecture != "" ]] && [[ $architecture != $buildarch ]]; then
dotnetRoot="$dotnetRoot/$architecture"
fi
diff --git a/eng/common/internal-feed-operations.ps1 b/eng/common/internal-feed-operations.ps1
index 418c09930cf16b..92b77347d9904e 100644
--- a/eng/common/internal-feed-operations.ps1
+++ b/eng/common/internal-feed-operations.ps1
@@ -45,11 +45,11 @@ function SetupCredProvider {
# Then, we set the 'VSS_NUGET_EXTERNAL_FEED_ENDPOINTS' environment variable to restore from the stable
# feeds successfully
- $nugetConfigPath = "$RepoRoot\NuGet.config"
+ $nugetConfigPath = Join-Path $RepoRoot "NuGet.config"
if (-Not (Test-Path -Path $nugetConfigPath)) {
Write-PipelineTelemetryError -Category 'Build' -Message 'NuGet.config file not found in repo root!'
- ExitWithExitCode 1
+ ExitWithExitCode 1
}
$endpoints = New-Object System.Collections.ArrayList
@@ -85,7 +85,7 @@ function SetupCredProvider {
#Workaround for https://github.com/microsoft/msbuild/issues/4430
function InstallDotNetSdkAndRestoreArcade {
- $dotnetTempDir = "$RepoRoot\dotnet"
+ $dotnetTempDir = Join-Path $RepoRoot "dotnet"
$dotnetSdkVersion="2.1.507" # After experimentation we know this version works when restoring the SDK (compared to 3.0.*)
$dotnet = "$dotnetTempDir\dotnet.exe"
$restoreProjPath = "$PSScriptRoot\restore.proj"
diff --git a/eng/common/internal-feed-operations.sh b/eng/common/internal-feed-operations.sh
index e2233e781220f4..9378223ba0955b 100755
--- a/eng/common/internal-feed-operations.sh
+++ b/eng/common/internal-feed-operations.sh
@@ -39,7 +39,7 @@ function SetupCredProvider {
# Then, we set the 'VSS_NUGET_EXTERNAL_FEED_ENDPOINTS' environment variable to restore from the stable
# feeds successfully
- local nugetConfigPath="$repo_root/NuGet.config"
+ local nugetConfigPath="${repo_root}NuGet.config"
if [ ! "$nugetConfigPath" ]; then
Write-PipelineTelemetryError -category 'Build' "NuGet.config file not found in repo's root!"
diff --git a/eng/common/sdk-task.ps1 b/eng/common/sdk-task.ps1
index 65f1d75f3d3226..b1bca63ab1d82c 100644
--- a/eng/common/sdk-task.ps1
+++ b/eng/common/sdk-task.ps1
@@ -34,7 +34,7 @@ function Print-Usage() {
function Build([string]$target) {
$logSuffix = if ($target -eq 'Execute') { '' } else { ".$target" }
$log = Join-Path $LogDir "$task$logSuffix.binlog"
- $outputPath = Join-Path $ToolsetDir "$task\\"
+ $outputPath = Join-Path $ToolsetDir "$task\"
MSBuild $taskProject `
/bl:$log `
@@ -64,7 +64,7 @@ try {
$GlobalJson.tools | Add-Member -Name "vs" -Value (ConvertFrom-Json "{ `"version`": `"16.5`" }") -MemberType NoteProperty
}
if( -not ($GlobalJson.tools.PSObject.Properties.Name -match "xcopy-msbuild" )) {
- $GlobalJson.tools | Add-Member -Name "xcopy-msbuild" -Value "16.8.0-preview3" -MemberType NoteProperty
+ $GlobalJson.tools | Add-Member -Name "xcopy-msbuild" -Value "16.10.0-preview2" -MemberType NoteProperty
}
if ($GlobalJson.tools."xcopy-msbuild".Trim() -ine "none") {
$xcopyMSBuildToolsFolder = InitializeXCopyMSBuild $GlobalJson.tools."xcopy-msbuild" -install $true
diff --git a/eng/common/sdl/execute-all-sdl-tools.ps1 b/eng/common/sdl/execute-all-sdl-tools.ps1
index 81b729f74a4d4e..2881a56083cf5e 100644
--- a/eng/common/sdl/execute-all-sdl-tools.ps1
+++ b/eng/common/sdl/execute-all-sdl-tools.ps1
@@ -32,7 +32,7 @@ try {
$ErrorActionPreference = 'Stop'
Set-StrictMode -Version 2.0
$disableConfigureToolsetImport = $true
- $LASTEXITCODE = 0
+ $global:LASTEXITCODE = 0
# `tools.ps1` checks $ci to perform some actions. Since the SDL
# scripts don't necessarily execute in the same agent that run the
diff --git a/eng/common/sdl/init-sdl.ps1 b/eng/common/sdl/init-sdl.ps1
index 1fe9271193cc6b..3ac1d92b37006f 100644
--- a/eng/common/sdl/init-sdl.ps1
+++ b/eng/common/sdl/init-sdl.ps1
@@ -10,7 +10,7 @@ Param(
$ErrorActionPreference = 'Stop'
Set-StrictMode -Version 2.0
$disableConfigureToolsetImport = $true
-$LASTEXITCODE = 0
+$global:LASTEXITCODE = 0
# `tools.ps1` checks $ci to perform some actions. Since the SDL
# scripts don't necessarily execute in the same agent that run the
diff --git a/eng/common/sdl/run-sdl.ps1 b/eng/common/sdl/run-sdl.ps1
index fe95ab35aa5d12..3d9c87aba6acf2 100644
--- a/eng/common/sdl/run-sdl.ps1
+++ b/eng/common/sdl/run-sdl.ps1
@@ -13,7 +13,7 @@ Param(
$ErrorActionPreference = 'Stop'
Set-StrictMode -Version 2.0
$disableConfigureToolsetImport = $true
-$LASTEXITCODE = 0
+$global:LASTEXITCODE = 0
try {
# `tools.ps1` checks $ci to perform some actions. Since the SDL
diff --git a/eng/common/templates/job/onelocbuild.yml b/eng/common/templates/job/onelocbuild.yml
index 2acdd5256dd83c..e8bc77d2ebbe39 100644
--- a/eng/common/templates/job/onelocbuild.yml
+++ b/eng/common/templates/job/onelocbuild.yml
@@ -18,6 +18,9 @@ parameters:
LclSource: lclFilesInRepo
LclPackageId: ''
RepoType: gitHub
+ GitHubOrg: dotnet
+ MirrorRepo: ''
+ MirrorBranch: main
condition: ''
jobs:
@@ -66,6 +69,11 @@ jobs:
${{ if eq(parameters.RepoType, 'gitHub') }}:
repoType: ${{ parameters.RepoType }}
gitHubPatVariable: "${{ parameters.GithubPat }}"
+ ${{ if ne(parameters.MirrorRepo, '') }}:
+ isMirrorRepoSelected: true
+ gitHubOrganization: ${{ parameters.GitHubOrg }}
+ mirrorRepo: ${{ parameters.MirrorRepo }}
+ mirrorBranch: ${{ parameters.MirrorBranch }}
condition: ${{ parameters.condition }}
- task: PublishBuildArtifacts@1
diff --git a/eng/common/templates/job/source-index-stage1.yml b/eng/common/templates/job/source-index-stage1.yml
index a649d2b5990c01..6e8aa9f7f218b0 100644
--- a/eng/common/templates/job/source-index-stage1.yml
+++ b/eng/common/templates/job/source-index-stage1.yml
@@ -1,6 +1,6 @@
parameters:
runAsPublic: false
- sourceIndexPackageVersion: 1.0.1-20210421.1
+ sourceIndexPackageVersion: 1.0.1-20210614.1
sourceIndexPackageSource: https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-tools/nuget/v3/index.json
sourceIndexBuildCommand: powershell -NoLogo -NoProfile -ExecutionPolicy Bypass -Command "eng/common/build.ps1 -restore -build -binarylog -ci"
preSteps: []
diff --git a/eng/common/tools.ps1 b/eng/common/tools.ps1
index 2d8a74f7d9e89a..5619c7aaee1d7f 100644
--- a/eng/common/tools.ps1
+++ b/eng/common/tools.ps1
@@ -193,38 +193,42 @@ function InitializeDotNetCli([bool]$install, [bool]$createSdkLocationFile) {
return $global:_DotNetInstallDir = $dotnetRoot
}
+function Retry($downloadBlock, $maxRetries = 5) {
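+ # Runs $downloadBlock, retrying with exponential backoff until it succeeds or $maxRetries attempts have failed.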
+ $retries = 1
+
+ while($true) {
+ try {
+ & $downloadBlock
+ break
+ }
+ catch {
+ Write-PipelineTelemetryError -Category 'InitializeToolset' -Message $_
+ }
+
+ if (++$retries -le $maxRetries) {
+ $delayInSeconds = [math]::Pow(2, $retries) - 1 # Exponential backoff
+ Write-Host "Retrying. Waiting for $delayInSeconds seconds before next attempt ($retries of $maxRetries)."
+ Start-Sleep -Seconds $delayInSeconds
+ }
+ else {
+ Write-PipelineTelemetryError -Category 'InitializeToolset' -Message "Unable to download file in $maxRetries attempts."
+ break
+ }
+
+ }
+}
+
function GetDotNetInstallScript([string] $dotnetRoot) {
$installScript = Join-Path $dotnetRoot 'dotnet-install.ps1'
if (!(Test-Path $installScript)) {
Create-Directory $dotnetRoot
$ProgressPreference = 'SilentlyContinue' # Don't display the console progress UI - it's a huge perf hit
-
- $maxRetries = 5
- $retries = 1
-
$uri = "https://dot.net/$dotnetInstallScriptVersion/dotnet-install.ps1"
- while($true) {
- try {
- Write-Host "GET $uri"
- Invoke-WebRequest $uri -OutFile $installScript
- break
- }
- catch {
- Write-Host "Failed to download '$uri'"
- Write-Error $_.Exception.Message -ErrorAction Continue
- }
-
- if (++$retries -le $maxRetries) {
- $delayInSeconds = [math]::Pow(2, $retries) - 1 # Exponential backoff
- Write-Host "Retrying. Waiting for $delayInSeconds seconds before next attempt ($retries of $maxRetries)."
- Start-Sleep -Seconds $delayInSeconds
- }
- else {
- throw "Unable to download file in $maxRetries attempts."
- }
-
- }
+ Retry({
+ Write-Host "GET $uri"
+ Invoke-WebRequest $uri -OutFile $installScript
+ })
}
return $installScript
@@ -308,8 +312,8 @@ function InitializeVisualStudioMSBuild([bool]$install, [object]$vsRequirements =
# If the version of msbuild is going to be xcopied,
# use this version. Version matches a package here:
- # https://dev.azure.com/dnceng/public/_packaging?_a=package&feed=dotnet-eng&package=RoslynTools.MSBuild&protocolType=NuGet&version=16.8.0-preview3&view=overview
- $defaultXCopyMSBuildVersion = '16.8.0-preview3'
+ # https://dev.azure.com/dnceng/public/_packaging?_a=package&feed=dotnet-eng&package=RoslynTools.MSBuild&protocolType=NuGet&version=16.10.0-preview2&view=overview
+ $defaultXCopyMSBuildVersion = '16.10.0-preview2'
if (!$vsRequirements) { $vsRequirements = $GlobalJson.tools.vs }
$vsMinVersionStr = if ($vsRequirements.version) { $vsRequirements.version } else { $vsMinVersionReqdStr }
@@ -403,9 +407,13 @@ function InitializeXCopyMSBuild([string]$packageVersion, [bool]$install) {
}
Create-Directory $packageDir
+
Write-Host "Downloading $packageName $packageVersion"
$ProgressPreference = 'SilentlyContinue' # Don't display the console progress UI - it's a huge perf hit
- Invoke-WebRequest "https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-eng/nuget/v3/flat2/$packageName/$packageVersion/$packageName.$packageVersion.nupkg" -OutFile $packagePath
+ Retry({
+ Invoke-WebRequest "https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-eng/nuget/v3/flat2/$packageName/$packageVersion/$packageName.$packageVersion.nupkg" -OutFile $packagePath
+ })
+
Unzip $packagePath $packageDir
}
@@ -442,27 +450,9 @@ function LocateVisualStudio([object]$vsRequirements = $null){
if (!(Test-Path $vsWhereExe)) {
Create-Directory $vsWhereDir
Write-Host 'Downloading vswhere'
- $maxRetries = 5
- $retries = 1
-
- while($true) {
- try {
- Invoke-WebRequest "https://netcorenativeassets.blob.core.windows.net/resource-packages/external/windows/vswhere/$vswhereVersion/vswhere.exe" -OutFile $vswhereExe
- break
- }
- catch{
- Write-PipelineTelemetryError -Category 'InitializeToolset' -Message $_
- }
-
- if (++$retries -le $maxRetries) {
- $delayInSeconds = [math]::Pow(2, $retries) - 1 # Exponential backoff
- Write-Host "Retrying. Waiting for $delayInSeconds seconds before next attempt ($retries of $maxRetries)."
- Start-Sleep -Seconds $delayInSeconds
- }
- else {
- Write-PipelineTelemetryError -Category 'InitializeToolset' -Message "Unable to download file in $maxRetries attempts."
- }
- }
+ Retry({
+ Invoke-WebRequest "https://netcorenativeassets.blob.core.windows.net/resource-packages/external/windows/vswhere/$vswhereVersion/vswhere.exe" -OutFile $vswhereExe
+ })
}
if (!$vsRequirements) { $vsRequirements = $GlobalJson.tools.vs }
@@ -498,7 +488,7 @@ function InitializeBuildTool() {
if (Test-Path variable:global:_BuildTool) {
# If the requested msbuild parameters do not match, clear the cached variables.
if($global:_BuildTool.Contains('ExcludePrereleaseVS') -and $global:_BuildTool.ExcludePrereleaseVS -ne $excludePrereleaseVS) {
- Remove-Item variable:global:_BuildTool
+ Remove-Item variable:global:_BuildTool
Remove-Item variable:global:_MSBuildExe
} else {
return $global:_BuildTool
@@ -555,7 +545,7 @@ function GetDefaultMSBuildEngine() {
function GetNuGetPackageCachePath() {
if ($env:NUGET_PACKAGES -eq $null) {
- # Use local cache on CI to ensure deterministic build.
+ # Use local cache on CI to ensure deterministic build.
# Avoid using the http cache as workaround for https://github.com/NuGet/Home/issues/3116
# use global cache in dev builds to avoid cost of downloading packages.
# For directory normalization, see also: https://github.com/NuGet/Home/issues/7968
@@ -712,7 +702,10 @@ function MSBuild-Core() {
}
foreach ($arg in $args) {
- if ($arg -ne $null -and $arg.Trim() -ne "") {
+ if ($null -ne $arg -and $arg.Trim() -ne "") {
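+ # A trailing backslash would escape the quote added around the argument below, so double it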
+ if ($arg.EndsWith('\')) {
+ $arg = $arg + "\"
+ }
$cmdArgs += " `"$arg`""
}
}
@@ -784,7 +777,7 @@ function Get-Darc($version) {
. $PSScriptRoot\pipeline-logging-functions.ps1
-$RepoRoot = Resolve-Path (Join-Path $PSScriptRoot '..\..')
+$RepoRoot = Resolve-Path (Join-Path $PSScriptRoot '..\..\')
$EngRoot = Resolve-Path (Join-Path $PSScriptRoot '..')
$ArtifactsDir = Join-Path $RepoRoot 'artifacts'
$ToolsetDir = Join-Path $ArtifactsDir 'toolset'
diff --git a/eng/common/tools.sh b/eng/common/tools.sh
index 5fad1846e5a534..05ca99c6b2813c 100755
--- a/eng/common/tools.sh
+++ b/eng/common/tools.sh
@@ -485,13 +485,14 @@ _script_dir=`dirname "$_ResolvePath"`
eng_root=`cd -P "$_script_dir/.." && pwd`
repo_root=`cd -P "$_script_dir/../.." && pwd`
-artifacts_dir="$repo_root/artifacts"
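+# Ensure repo_root ends with a trailing slash so callers can append paths without an extra separator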
+repo_root="${repo_root}/"
+artifacts_dir="${repo_root}artifacts"
toolset_dir="$artifacts_dir/toolset"
-tools_dir="$repo_root/.tools"
+tools_dir="${repo_root}.tools"
log_dir="$artifacts_dir/log/$configuration"
temp_dir="$artifacts_dir/tmp/$configuration"
-global_json_file="$repo_root/global.json"
+global_json_file="${repo_root}global.json"
# determine if global.json contains a "runtimes" entry
global_json_has_runtimes=false
if command -v jq &> /dev/null; then
@@ -504,7 +505,7 @@ fi
# HOME may not be defined in some scenarios, but it is required by NuGet
if [[ -z $HOME ]]; then
- export HOME="$repo_root/artifacts/.home/"
+ export HOME="${repo_root}artifacts/.home/"
mkdir -p "$HOME"
fi
diff --git a/eng/native/configurecompiler.cmake b/eng/native/configurecompiler.cmake
index 9a6832c90cfcd9..99a8013d7cf406 100644
--- a/eng/native/configurecompiler.cmake
+++ b/eng/native/configurecompiler.cmake
@@ -43,7 +43,8 @@ set(CMAKE_EXE_LINKER_FLAGS_DEBUG "")
set(CMAKE_EXE_LINKER_FLAGS_DEBUG "")
set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "")
-add_compile_definitions("$<$,$>:DEBUG;_DEBUG;_DBG;URTBLDENV_FRIENDLY=Checked;BUILDENV_CHECKED=1>")
+add_compile_definitions("$<$:DEBUG;_DEBUG;_DBG;URTBLDENV_FRIENDLY=Debug;BUILDENV_DEBUG=1>")
+add_compile_definitions("$<$:DEBUG;_DEBUG;_DBG;URTBLDENV_FRIENDLY=Checked;BUILDENV_CHECKED=1>")
add_compile_definitions("$<$,$>:NDEBUG;URTBLDENV_FRIENDLY=Retail>")
if (MSVC)
diff --git a/eng/native/tryrun.cmake b/eng/native/tryrun.cmake
index 4aac101718111d..96199969da69c9 100644
--- a/eng/native/tryrun.cmake
+++ b/eng/native/tryrun.cmake
@@ -149,6 +149,6 @@ else()
message(FATAL_ERROR "Arch is ${TARGET_ARCH_NAME}. Only armel, arm, arm64, s390x and x86 are supported!")
endif()
-if(TARGET_ARCH_NAME STREQUAL "x86")
+if(TARGET_ARCH_NAME STREQUAL "x86" OR TARGET_ARCH_NAME STREQUAL "s390x")
set_cache_value(HAVE_FUNCTIONAL_PTHREAD_ROBUST_MUTEXES_EXITCODE 0)
endif()
diff --git a/eng/packaging.props b/eng/packaging.props
index 87cecef871a697..534d3ddcf321b4 100644
--- a/eng/packaging.props
+++ b/eng/packaging.props
@@ -16,10 +16,6 @@
-
- false
-
- true
false
diff --git a/eng/pipelines/common/entitlements.plist b/eng/pipelines/common/entitlements.plist
index f4ea418fb45a8d..168fce4fcd879b 100644
--- a/eng/pipelines/common/entitlements.plist
+++ b/eng/pipelines/common/entitlements.plist
@@ -4,8 +4,6 @@
com.apple.security.cs.allow-jit
- com.apple.security.cs.allow-unsigned-executable-memory
-
com.apple.security.cs.allow-dyld-environment-variables
com.apple.security.cs.disable-library-validation
diff --git a/eng/pipelines/common/templates/runtimes/android-runtime-and-send-to-helix.yml b/eng/pipelines/common/templates/runtimes/android-runtime-and-send-to-helix.yml
index c45d9f2f5d37b1..f42787af8ebd26 100644
--- a/eng/pipelines/common/templates/runtimes/android-runtime-and-send-to-helix.yml
+++ b/eng/pipelines/common/templates/runtimes/android-runtime-and-send-to-helix.yml
@@ -26,7 +26,7 @@ parameters:
steps:
- - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) /p:LibrariesConfiguration=${{ parameters.buildConfig }} -ci -excludemonofailures os ${{ parameters.osGroup }} ${{ parameters.archType }} $(buildConfigUpper)
+ - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) /p:LibrariesConfiguration=${{ parameters.buildConfig }} -ci -excludemonofailures os ${{ parameters.osGroup }} ${{ parameters.archType }} /p:RuntimeVariant=${{ parameters.runtimeVariant }} $(buildConfigUpper)
displayName: Build Tests
# Send tests to Helix
@@ -40,6 +40,7 @@ steps:
coreClrRepoRoot: $(Build.SourcesDirectory)/src/coreclr
runtimeFlavorDisplayName: ${{ parameters.runtimeFlavorDisplayName }}
shouldContinueOnError: ${{ parameters.shouldContinueOnError }}
+ runtimeVariant: ${{ parameters.runtimeVariant }}
${{ if eq(variables['System.TeamProject'], 'public') }}:
creator: $(Build.DefinitionName)
diff --git a/eng/pipelines/common/templates/runtimes/build-test-job.yml b/eng/pipelines/common/templates/runtimes/build-test-job.yml
index 50de94ee79c7d0..84cfd8e58ab183 100644
--- a/eng/pipelines/common/templates/runtimes/build-test-job.yml
+++ b/eng/pipelines/common/templates/runtimes/build-test-job.yml
@@ -7,8 +7,7 @@ parameters:
testGroup: ''
liveRuntimeBuildConfig: ''
- # When set to a non-empty value (Debug / Release), it determines libraries
- # build configuration to use for the tests. Setting this property implies
+ # Determines the libraries build configuration to use for the tests. Setting this property implies
# a dependency of this job on the appropriate libraries build and is used
# to construct the name of the Azure artifact representing libraries build
# to use for building the tests.
@@ -47,9 +46,13 @@ jobs:
testGroup: ${{ parameters.testGroup }}
stagedBuild: ${{ parameters.stagedBuild }}
liveLibrariesBuildConfig: ${{ parameters.liveLibrariesBuildConfig }}
- variables: ${{ parameters.variables }}
pool: ${{ parameters.pool }}
dependOnEvaluatePaths: ${{ parameters.dependOnEvaluatePaths }}
+ liveRuntimeBuildParams: ${{ format('clr.corelib+libs.ref+libs.native -rc {0} -c {1} -arch {2} -ci', coalesce(parameters.liveRuntimeBuildConfig, parameters.buildConfig), parameters.liveLibrariesBuildConfig, parameters.archType) }}
+ ${{ if and(ne(parameters.osGroup, 'windows'), ne(parameters.compilerName, 'gcc'), not(and(eq(parameters.osGroup, 'Linux'), eq(parameters.osSubgroup, '_musl'), eq(parameters.archType, 'x64'))), not(eq(parameters.osGroup, 'OSX'))) }}:
+ compilerArg: '-clang9'
+ ${{ if not(and(ne(parameters.osGroup, 'windows'), ne(parameters.compilerName, 'gcc'), not(and(eq(parameters.osGroup, 'Linux'), eq(parameters.osSubgroup, '_musl'), eq(parameters.archType, 'x64'))), not(eq(parameters.osGroup, 'OSX')))) }}:
+ compilerArg: ''
# Test jobs should continue on error for internal builds
${{ if eq(variables['System.TeamProject'], 'internal') }}:
@@ -70,22 +73,31 @@ jobs:
${{ if ne(parameters.dependsOn[0], '') }}:
dependsOn: ${{ parameters.dependsOn }}
- # TODO: Build of managed test components currently depends on the corresponding build job
- # because it needs System.Private.Corelib; we should be able to remove this dependency
- # by switching over to using reference assembly.
- ${{ if and(ne(parameters.stagedBuild, true), eq(parameters.dependsOn[0], '')) }}:
- dependsOn:
- - ${{ format('coreclr_{0}_product_build_{1}{2}_{3}_{4}', parameters.runtimeVariant, parameters.osGroup, parameters.osSubgroup, parameters.archType, coalesce(parameters.liveRuntimeBuildConfig, parameters.buildConfig)) }}
- - ${{ if ne(parameters.liveLibrariesBuildConfig, '') }}:
- - ${{ format('libraries_build_{0}{1}_{2}_{3}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.liveLibrariesBuildConfig) }}
-
-
${{ if in(parameters.testGroup, 'innerloop', 'clrinterpreter') }}:
timeoutInMinutes: 90
${{ if notIn(parameters.testGroup, 'innerloop', 'clrinterpreter') }}:
timeoutInMinutes: 160
+ variables:
+ - ${{ each variable in parameters.variables }}:
+ - ${{ variable }}
+ - name: liveRuntimeBuildParams
+ value: ${{ format('clr.corelib+libs.ref+libs.native -rc {0} -c {1} -arch {2} -ci', coalesce(parameters.liveRuntimeBuildConfig, parameters.buildConfig), parameters.liveLibrariesBuildConfig, parameters.archType) }}
+ - name: compilerArg
+ value: ''
+ - ${{ if and(ne(parameters.osGroup, 'windows'), ne(parameters.compilerName, 'gcc')) }}:
+ - name: compilerArg
+ value: '-clang9'
+ # Building for x64 MUSL happens on Alpine Linux, and we need to use the stable version available there
+ - ${{ if and(eq(parameters.osGroup, 'Linux'), eq(parameters.osSubgroup, '_musl'), eq(parameters.archType, 'x64')) }}:
+ - name: compilerArg
+ value: ''
+ # AppleClang has a different versioning scheme, so we let compiler introspection pick up the available clang from PATH
+ - ${{ if eq(parameters.osGroup, 'OSX') }}:
+ - name: compilerArg
+ value: ''
+
steps:
# Install test build dependencies
@@ -97,24 +109,9 @@ jobs:
- script: $(Build.SourcesDirectory)\eng\common\init-tools-native.cmd -InstallDirectory $(Build.SourcesDirectory)\native-tools -Force
displayName: Install native dependencies
-
- # Optionally download live-built libraries
- - ${{ if ne(parameters.liveLibrariesBuildConfig, '') }}:
- - template: /eng/pipelines/common/download-artifact-step.yml
- parameters:
- unpackFolder: $(librariesDownloadDir)
- cleanUnpackFolder: false
- artifactFileName: '$(librariesBuildArtifactName)$(archiveExtension)'
- artifactName: '$(librariesBuildArtifactName)'
- displayName: 'live-built libraries'
-
- # We need to explictly download CoreCLR, even if building Mono because the CoreCLR tests depend on it
- - template: /eng/pipelines/common/download-artifact-step.yml
- parameters:
- unpackFolder: $(coreClrProductRootFolderPath)
- artifactFileName: '$(coreClrProductArtifactName)$(archiveExtension)'
- artifactName: '$(coreClrProductArtifactName)'
- displayName: 'CoreCLR product build'
+ # Build core/libraries dependencies of test build
+ - script: $(Build.SourcesDirectory)/build$(scriptExt) $(liveRuntimeBuildParams) $(compilerArg)
+ displayName: Build coreclr/libs components needed by test build
- ${{ if in(parameters.osGroup, 'OSX', 'iOS', 'tvOS') }}:
- script: |
diff --git a/eng/pipelines/common/templates/runtimes/run-test-job.yml b/eng/pipelines/common/templates/runtimes/run-test-job.yml
index e8ca30b4e54e22..fb1a3b55f9afc9 100644
--- a/eng/pipelines/common/templates/runtimes/run-test-job.yml
+++ b/eng/pipelines/common/templates/runtimes/run-test-job.yml
@@ -64,6 +64,8 @@ jobs:
- '${{ parameters.runtimeFlavor }}_common_test_build_p1_AnyOS_AnyCPU_${{parameters.buildConfig }}'
- ${{ if ne(parameters.stagedBuild, true) }}:
- ${{ if or( eq(parameters.runtimeVariant, 'minijit'), eq(parameters.runtimeVariant, 'monointerpreter')) }}:
+ # This is needed for creating a CORE_ROOT in the current design.
+ - ${{ format('coreclr_{0}_product_build_{1}{2}_{3}_{4}', '', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
# minijit and mono interpreter runtimevariants do not require any special build of the runtime
- ${{ format('{0}_{1}_product_build_{2}{3}_{4}_{5}', parameters.runtimeFlavor, '', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
- ${{ if not(or(eq(parameters.runtimeVariant, 'minijit'), eq(parameters.runtimeVariant, 'monointerpreter'))) }}:
@@ -102,7 +104,8 @@ jobs:
- ${{ if eq(parameters.readyToRun, true) }}:
- name: crossgenArg
- value: 'crossgen'
+ # Switch R2R to use cg2 by default
+ value: 'crossgen2'
- name: LogNamePrefix
value: TestRunLogs_R2R
- ${{ if eq(parameters.crossgen2, true) }}:
@@ -175,6 +178,9 @@ jobs:
value: 180
- name: timeoutPerTestInMinutes
value: 30
+ - ${{ if in(parameters.testGroup, 'pgo') }}:
+ - name: timeoutPerTestCollectionInMinutes
+ value: 120
- ${{ if eq(parameters.compositeBuildMode, true) }}:
- name: crossgenArg
@@ -188,7 +194,7 @@ jobs:
# TODO: update these numbers as they were determined long ago
${{ if eq(parameters.testGroup, 'innerloop') }}:
timeoutInMinutes: 200
- ${{ if in(parameters.testGroup, 'outerloop', 'jit-experimental') }}:
+ ${{ if in(parameters.testGroup, 'outerloop', 'jit-experimental', 'pgo') }}:
timeoutInMinutes: 270
${{ if in(parameters.testGroup, 'gc-longrunning', 'gc-simulator') }}:
timeoutInMinutes: 480
@@ -320,6 +326,7 @@ jobs:
osSubgroup: ${{ parameters.osSubgroup}}
runtimeFlavorDisplayName: ${{ parameters.runtimeFlavorDisplayName }}
shouldContinueOnError: ${{ parameters.shouldContinueOnError }}
+ runtimeVariant: ${{ parameters.runtimeVariant }}
${{ if eq(variables['System.TeamProject'], 'public') }}:
creator: $(Build.DefinitionName)
diff --git a/eng/pipelines/coreclr/superpmi.yml b/eng/pipelines/coreclr/superpmi.yml
index 8219ad02b8387d..d6325d87b74eb9 100644
--- a/eng/pipelines/coreclr/superpmi.yml
+++ b/eng/pipelines/coreclr/superpmi.yml
@@ -77,27 +77,6 @@ jobs:
collectionType: pmi
collectionName: libraries
-- template: /eng/pipelines/common/platform-matrix.yml
- parameters:
- jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml
- buildConfig: checked
- platforms:
- # Linux tests are built on the OSX machines.
- # - OSX_x64
- - Linux_arm
- - Linux_arm64
- - Linux_x64
- - windows_x64
- - windows_x86
- - windows_arm64
- helixQueueGroup: ci
- helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml
- jobParameters:
- testGroup: outerloop
- liveLibrariesBuildConfig: Release
- collectionType: crossgen
- collectionName: libraries
-
- template: /eng/pipelines/common/platform-matrix.yml
parameters:
jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml
diff --git a/eng/pipelines/coreclr/templates/build-job.yml b/eng/pipelines/coreclr/templates/build-job.yml
index 57c4134736d440..699983fa0c38c6 100644
--- a/eng/pipelines/coreclr/templates/build-job.yml
+++ b/eng/pipelines/coreclr/templates/build-job.yml
@@ -243,7 +243,8 @@ jobs:
/p:DiagnosticsFilesRoot="$(buildProductRootFolderPath)"
/p:SignDiagnostics=true
/p:DotNetSignType=$(SignType)
- /bl:$(Build.SourcesDirectory)artifacts/log/$(buildConfig)/SignDiagnostics.binlog
+ -noBl
+ /bl:$(Build.SourcesDirectory)/artifacts/log/$(buildConfig)/SignDiagnostics.binlog
-projects $(Build.SourcesDirectory)\eng\empty.csproj
displayName: Sign Diagnostic Binaries
diff --git a/eng/pipelines/coreclr/templates/helix-queues-setup.yml b/eng/pipelines/coreclr/templates/helix-queues-setup.yml
index 9312c4ac7d93d1..45c309f3ba863f 100644
--- a/eng/pipelines/coreclr/templates/helix-queues-setup.yml
+++ b/eng/pipelines/coreclr/templates/helix-queues-setup.yml
@@ -49,12 +49,12 @@ jobs:
# Linux arm64
- ${{ if eq(parameters.platform, 'Linux_arm64') }}:
- ${{ if eq(variables['System.TeamProject'], 'public') }}:
- - (Ubuntu.1804.Arm64.Open)Ubuntu.1804.Armarch.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8-a45aeeb-20190620155855
+ - (Ubuntu.1804.Arm64.Open)Ubuntu.1804.Armarch.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8-20210531091519-97d8652
- ${{ if and(eq(variables['System.TeamProject'], 'public'), notIn(parameters.jobParameters.helixQueueGroup, 'pr', 'ci', 'libraries')) }}:
- (Debian.9.Arm64.Open)Ubuntu.1804.Armarch.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:debian-9-helix-arm64v8-bfcd90a-20200121150055
- ${{ if eq(variables['System.TeamProject'], 'internal') }}:
- (Debian.9.Arm64)Ubuntu.1804.ArmArch@mcr.microsoft.com/dotnet-buildtools/prereqs:debian-9-helix-arm64v8-bfcd90a-20200121150055
- - (Ubuntu.1804.Arm64)Ubuntu.1804.ArmArch@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8-a45aeeb-20190620155855
+ - (Ubuntu.1804.Arm64)Ubuntu.1804.ArmArch@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8-20210531091519-97d8652
# Linux musl x64
- ${{ if eq(parameters.platform, 'Linux_musl_x64') }}:
diff --git a/eng/pipelines/coreclr/templates/run-performance-job.yml b/eng/pipelines/coreclr/templates/run-performance-job.yml
index 87832c2e9ec100..de523bb2d99e61 100644
--- a/eng/pipelines/coreclr/templates/run-performance-job.yml
+++ b/eng/pipelines/coreclr/templates/run-performance-job.yml
@@ -57,7 +57,7 @@ jobs:
- HelixApiAccessToken: ''
- HelixPreCommandStemWindows: 'py -m pip install -U pip;py -3 -m venv %HELIX_WORKITEM_PAYLOAD%\.venv;call %HELIX_WORKITEM_PAYLOAD%\.venv\Scripts\activate.bat;set PYTHONPATH=;py -3 -m pip install -U pip;py -3 -m pip install azure.storage.blob==12.0.0;py -3 -m pip install azure.storage.queue==12.0.0;set "PERFLAB_UPLOAD_TOKEN=$(PerfCommandUploadToken)"'
- HelixPreCommandStemLinux: 'python3 -m pip install -U pip;sudo apt-get -y install python3-venv;python3 -m venv $HELIX_WORKITEM_PAYLOAD/.venv;source $HELIX_WORKITEM_PAYLOAD/.venv/Scripts/activate;export PYTHONPATH=;python3 -m pip install -U pip;pip3 install azure.storage.blob==12.0.0;pip3 install azure.storage.queue==12.0.0;sudo apt-get update;sudo apt -y install curl dirmngr apt-transport-https lsb-release ca-certificates;curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash -;sudo apt-get -y install nodejs;sudo apt-get -y install npm;npm install --prefix $HELIX_WORKITEM_PAYLOAD jsvu -g;$HELIX_WORKITEM_PAYLOAD/bin/jsvu --os=linux64 --engines=v8,javascriptcore;export PERFLAB_UPLOAD_TOKEN="$(PerfCommandUploadTokenLinux)"'
- - HelixPreCommandStemMsul: 'sudo apk add icu-libs krb5-libs libgcc libintl libssl1.1 libstdc++ zlib;sudo apk add cargo --repository http://sjc.edge.kernel.org/alpine/edge/community ;sudo apk add libgdiplus --repository http://dl-cdn.alpinelinux.org/alpine/edge/testing; python3 -m venv $HELIX_WORKITEM_PAYLOAD/.venv;source $HELIX_WORKITEM_PAYLOAD/.venv/bin/activate;export PYTHONPATH=;python3 -m pip install -U pip;pip3 install azure.storage.blob==12.7.1;pip3 install azure.storage.queue==12.1.5;export PERFLAB_UPLOAD_TOKEN="$(PerfCommandUploadTokenLinux)"'
+ - HelixPreCommandStemMsul: 'sudo apk add icu-libs krb5-libs libgcc libintl libssl1.1 libstdc++ zlib;sudo apk add cargo --repository http://dl-cdn.alpinelinux.org/alpine/v3.12/community ;sudo apk add libgdiplus --repository http://dl-cdn.alpinelinux.org/alpine/edge/testing; python3 -m venv $HELIX_WORKITEM_PAYLOAD/.venv;source $HELIX_WORKITEM_PAYLOAD/.venv/bin/activate;export PYTHONPATH=;python3 -m pip install -U pip;pip3 install azure.storage.blob==12.7.1;pip3 install azure.storage.queue==12.1.5;export PERFLAB_UPLOAD_TOKEN="$(PerfCommandUploadTokenLinux)"'
- ExtraMSBuildLogsWindows: 'set MSBUILDDEBUGCOMM=1;set "MSBUILDDEBUGPATH=%HELIX_WORKITEM_UPLOAD_ROOT%"'
- ExtraMSBuildLogsLinux: 'export MSBUILDDEBUGCOMM=1;export "MSBUILDDEBUGPATH=$HELIX_WORKITEM_UPLOAD_ROOT"'
- HelixPreCommand: ''
diff --git a/eng/pipelines/libraries/base-job.yml b/eng/pipelines/libraries/base-job.yml
index bea30446deed64..9c5cff7cc5e81f 100644
--- a/eng/pipelines/libraries/base-job.yml
+++ b/eng/pipelines/libraries/base-job.yml
@@ -80,7 +80,6 @@ jobs:
- ${{ if eq(parameters.framework, 'allConfigurations') }}:
- _finalFrameworkArg: -allConfigurations
- _testModeArg: /p:TestAssemblies=false /p:TestPackages=true
- - _extraHelixArguments: /p:TestPackages=true
- ${{ if eq(parameters.isOfficialAllConfigurations, true) }}:
- librariesBuildArtifactName: 'libraries_bin_official_allconfigurations'
diff --git a/eng/pipelines/libraries/build-job.yml b/eng/pipelines/libraries/build-job.yml
index ee0c0fc938f696..11fdde6316dae7 100644
--- a/eng/pipelines/libraries/build-job.yml
+++ b/eng/pipelines/libraries/build-job.yml
@@ -29,6 +29,7 @@ parameters:
variables: {}
pool: ''
runTests: false
+ useHelix: true
testScope: ''
testBuildPlatforms: []
@@ -66,6 +67,7 @@ jobs:
variables:
- librariesTestsArtifactName: ${{ format('libraries_test_assets_{0}_{1}_{2}', parameters.osGroup, parameters.archType, parameters.buildConfig) }}
- _subset: libs
+ - _buildAction: ''
- _additionalBuildArguments: ''
- ${{ parameters.variables }}
@@ -73,7 +75,10 @@ jobs:
# If platform is in testBuildPlatforms we build tests as well.
- ${{ if or(eq(parameters.runTests, true), containsValue(parameters.testBuildPlatforms, parameters.platform)) }}:
- _subset: libs+libs.tests
- - _additionalBuildArguments: /p:ArchiveTests=true
+ - ${{ if eq(parameters.useHelix, false) }}:
+ - _buildAction: -restore -build -test
+ - ${{ if eq(parameters.useHelix, true) }}:
+ - _additionalBuildArguments: /p:ArchiveTests=true
- ${{ parameters.variables }}
@@ -92,6 +97,7 @@ jobs:
- script: $(_buildScript)
-subset $(_subset)
+ $(_buildAction)
$(_buildArguments)
$(_additionalBuildArguments)
displayName: Restore and Build Product
@@ -136,7 +142,7 @@ jobs:
parameters:
name: Libraries_AllConfigurations
- - ${{ if eq(parameters.runTests, true) }}:
+ - ${{ if and(eq(parameters.runTests, true), eq(parameters.useHelix, true)) }}:
- template: /eng/pipelines/libraries/helix.yml
parameters:
osGroup: ${{ parameters.osGroup }}
diff --git a/eng/pipelines/libraries/helix-queues-setup.yml b/eng/pipelines/libraries/helix-queues-setup.yml
index 7cea78b1a5f3c7..b766f1af96aecb 100644
--- a/eng/pipelines/libraries/helix-queues-setup.yml
+++ b/eng/pipelines/libraries/helix-queues-setup.yml
@@ -128,7 +128,7 @@ jobs:
# windows x64
- ${{ if eq(parameters.platform, 'windows_x64') }}:
# netcoreapp
- - ${{ if notIn(parameters.jobParameters.framework, 'allConfigurations', 'net48') }}:
+ - ${{ if notIn(parameters.jobParameters.framework, 'net48') }}:
- ${{ if and(eq(parameters.jobParameters.testScope, 'outerloop'), eq(parameters.jobParameters.runtimeFlavor, 'mono')) }}:
- Windows.81.Amd64.Open
- Windows.10.Amd64.Server19H1.Open
@@ -152,14 +152,10 @@ jobs:
- ${{ if eq(parameters.jobParameters.framework, 'net48') }}:
- Windows.10.Amd64.Client19H1.Open
- # AllConfigurations
- - ${{ if eq(parameters.jobParameters.framework, 'allConfigurations') }}:
- - Windows.10.Amd64.Server19H1.Open
-
# windows x86
- ${{ if eq(parameters.platform, 'windows_x86') }}:
# netcoreapp
- - ${{ if notIn(parameters.jobParameters.framework, 'allConfigurations', 'net48') }}:
+ - ${{ if notIn(parameters.jobParameters.framework, 'net48') }}:
- ${{ if and(eq(parameters.jobParameters.testScope, 'outerloop'), eq(parameters.jobParameters.runtimeFlavor, 'mono')) }}:
- Windows.7.Amd64.Open
- Windows.10.Amd64.ServerRS5.Open
diff --git a/eng/pipelines/runtime-staging.yml b/eng/pipelines/runtime-staging.yml
index eaaa9e08fe2c50..2352995df910a5 100644
--- a/eng/pipelines/runtime-staging.yml
+++ b/eng/pipelines/runtime-staging.yml
@@ -257,47 +257,43 @@ jobs:
eq(variables['isFullMatrix'], true))
#
-# Build the whole product using Mono and run libraries tests
+# Build the whole product using Mono for Android and run runtime tests with the interpreter
#
- template: /eng/pipelines/common/platform-matrix.yml
parameters:
jobTemplate: /eng/pipelines/common/global-build-job.yml
- helixQueuesTemplate: /eng/pipelines/libraries/helix-queues-setup.yml
+ helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml
buildConfig: Release
runtimeFlavor: mono
platforms:
- - Browser_wasm
+ - Android_x64
variables:
- # map dependencies variables to local variables
- - name: librariesContainsChange
- value: $[ dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'] ]
- - name: monoContainsChange
- value: $[ dependencies.evaluate_paths.outputs['SetPathVars_mono.containsChange'] ]
+ - ${{ if and(eq(variables['System.TeamProject'], 'public'), eq(variables['Build.Reason'], 'PullRequest')) }}:
+ - name: _HelixSource
+ value: pr/dotnet/runtime/$(Build.SourceBranch)
+ - ${{ if and(eq(variables['System.TeamProject'], 'public'), ne(variables['Build.Reason'], 'PullRequest')) }}:
+ - name: _HelixSource
+ value: ci/dotnet/runtime/$(Build.SourceBranch)
+ - name: timeoutPerTestInMinutes
+ value: 60
+ - name: timeoutPerTestCollectionInMinutes
+ value: 180
jobParameters:
testGroup: innerloop
- nameSuffix: AllSubsets_Mono_AOT
- buildArgs: -s mono+libs+host+packs+libs.tests -c $(_BuildConfig) /p:ArchiveTests=true /p:EnableAggressiveTrimming=true /p:BuildAOTTestsOnHelix=true /p:RunAOTCompilation=true
- timeoutInMinutes: 180
+ nameSuffix: AllSubsets_Mono_RuntimeTests
+ buildArgs: -s mono+libs -c $(_BuildConfig)
+ timeoutInMinutes: 240
+ runtimeVariant: monointerpreter
condition: >-
or(
- eq(dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true),
+ eq(dependencies.evaluate_paths.outputs['SetPathVars_runtimetests.containsChange'], true),
eq(dependencies.evaluate_paths.outputs['SetPathVars_mono.containsChange'], true),
- eq(dependencies.evaluate_paths.outputs['SetPathVars_installer.containsChange'], true),
eq(variables['isFullMatrix'], true))
# extra steps, run tests
- extraStepsTemplate: /eng/pipelines/libraries/helix.yml
+ extraStepsTemplate: /eng/pipelines/common/templates/runtimes/android-runtime-and-send-to-helix.yml
extraStepsParameters:
creator: dotnet-bot
testRunNamePrefixSuffix: Mono_$(_BuildConfig)
- extraHelixArguments: /p:NeedsToBuildWasmAppsOnHelix=true
- scenarios:
- - normal
- condition: >-
- or(
- eq(variables['librariesContainsChange'], true),
- eq(variables['monoContainsChange'], true),
- eq(variables['isFullMatrix'], true))
-
#
# Build the whole product using Mono for Android and run runtime tests with Android devices
@@ -309,7 +305,7 @@ jobs:
buildConfig: Release
runtimeFlavor: mono
platforms:
- #- Android_arm64 # disabled due to https://github.com/dotnet/runtime/issues/47850
+ - Android_arm64
variables:
- ${{ if and(eq(variables['System.TeamProject'], 'public'), eq(variables['Build.Reason'], 'PullRequest')) }}:
- name: _HelixSource
@@ -331,11 +327,13 @@ jobs:
eq(dependencies.evaluate_paths.outputs['SetPathVars_runtimetests.containsChange'], true),
eq(dependencies.evaluate_paths.outputs['SetPathVars_mono.containsChange'], true),
eq(variables['isFullMatrix'], true))
- # extra steps, run tests
- extraStepsTemplate: /eng/pipelines/common/templates/runtimes/android-runtime-and-send-to-helix.yml
- extraStepsParameters:
- creator: dotnet-bot
- testRunNamePrefixSuffix: Mono_$(_BuildConfig)
+ # don't run tests on PRs until we can get significantly more devices
+ ${{ if eq(variables['isFullMatrix'], true) }}:
+ # extra steps, run tests
+ extraStepsTemplate: /eng/pipelines/common/templates/runtimes/android-runtime-and-send-to-helix.yml
+ extraStepsParameters:
+ creator: dotnet-bot
+ testRunNamePrefixSuffix: Mono_$(_BuildConfig)
# Run disabled installer tests on Linux x64
- template: /eng/pipelines/common/platform-matrix.yml
diff --git a/eng/pipelines/runtime.yml b/eng/pipelines/runtime.yml
index 818fb1360a8863..931a65a05e11ee 100644
--- a/eng/pipelines/runtime.yml
+++ b/eng/pipelines/runtime.yml
@@ -347,6 +347,48 @@ jobs:
eq(variables['monoContainsChange'], true),
eq(variables['isFullMatrix'], true))
+#
+# Build for Browser/wasm with RunAOTCompilation=true
+#
+- template: /eng/pipelines/common/platform-matrix.yml
+ parameters:
+ jobTemplate: /eng/pipelines/common/global-build-job.yml
+ helixQueuesTemplate: /eng/pipelines/libraries/helix-queues-setup.yml
+ buildConfig: Release
+ runtimeFlavor: mono
+ platforms:
+ - Browser_wasm
+ variables:
+ # map dependencies variables to local variables
+ - name: librariesContainsChange
+ value: $[ dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'] ]
+ - name: monoContainsChange
+ value: $[ dependencies.evaluate_paths.outputs['SetPathVars_mono.containsChange'] ]
+ jobParameters:
+ testGroup: innerloop
+ nameSuffix: AllSubsets_Mono_AOT
+ buildArgs: -s mono+libs+host+packs+libs.tests -c $(_BuildConfig) /p:ArchiveTests=true /p:EnableAggressiveTrimming=true /p:BuildAOTTestsOnHelix=true /p:RunAOTCompilation=true
+ timeoutInMinutes: 180
+ condition: >-
+ or(
+ eq(dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true),
+ eq(dependencies.evaluate_paths.outputs['SetPathVars_mono.containsChange'], true),
+ eq(dependencies.evaluate_paths.outputs['SetPathVars_installer.containsChange'], true),
+ eq(variables['isFullMatrix'], true))
+ # extra steps, run tests
+ extraStepsTemplate: /eng/pipelines/libraries/helix.yml
+ extraStepsParameters:
+ creator: dotnet-bot
+ testRunNamePrefixSuffix: Mono_$(_BuildConfig)
+ extraHelixArguments: /p:NeedsToBuildWasmAppsOnHelix=true
+ scenarios:
+ - normal
+ condition: >-
+ or(
+ eq(variables['librariesContainsChange'], true),
+ eq(variables['monoContainsChange'], true),
+ eq(variables['isFullMatrix'], true))
+
# Build and test libraries under single-file publishing
- template: /eng/pipelines/common/platform-matrix.yml
parameters:
@@ -758,11 +800,11 @@ jobs:
buildConfig: ${{ variables.debugOnPrReleaseOnRolling }}
platforms:
- windows_x64
- helixQueuesTemplate: /eng/pipelines/libraries/helix-queues-setup.yml
jobParameters:
isFullMatrix: ${{ variables.isFullMatrix }}
framework: allConfigurations
runTests: true
+ useHelix: false
condition: >-
or(
eq(dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true),
diff --git a/eng/references.targets b/eng/references.targets
index 42763624c5eded..6adcba93322895 100644
--- a/eng/references.targets
+++ b/eng/references.targets
@@ -7,13 +7,6 @@
<_FindDependencies>false
-
-
-
-
-
diff --git a/eng/resolveContract.targets b/eng/resolveContract.targets
index f41be687441991..d98ef8dffb7bd7 100644
--- a/eng/resolveContract.targets
+++ b/eng/resolveContract.targets
@@ -3,6 +3,8 @@
@(ReferencePath->'%(RelativeDir)'->Distinct())
+
+ $(ContractDependencyPaths);$(MicrosoftNetCoreAppRefPackRefDir)
diff --git a/eng/restore/harvestPackages.targets b/eng/restore/harvestPackages.targets
index a7fc8aa34655f5..07eb5a91a7a8ed 100644
--- a/eng/restore/harvestPackages.targets
+++ b/eng/restore/harvestPackages.targets
@@ -1,4 +1,14 @@
+
+
+ $(NuGetPackageRoot)microsoft.dotnet.build.tasks.packaging\$(MicrosoftDotNetBuildTasksPackagingVersion)\tools\
+ $(PackagingTaskAssembly)netcoreapp3.1\
+ $(PackagingTaskAssembly)net472\
+
+ $(PackagingTaskAssembly)net5.0\
+ $(PackagingTaskAssembly)Microsoft.DotNet.Build.Tasks.Packaging.dll
+
+
diff --git a/eng/targetingpacks.targets b/eng/targetingpacks.targets
index da9407c5090d0c..f80b26fb984456 100644
--- a/eng/targetingpacks.targets
+++ b/eng/targetingpacks.targets
@@ -1,11 +1,21 @@
+
+
- $(MicrosoftNetCoreAppFrameworkName)
+ $(MicrosoftNetCoreAppFrameworkName)
+ $([MSBuild]::VersionGreaterThanOrEquals($(TargetFrameworkVersion), '$(NetCoreAppCurrentVersion)'))">
true
false
false
@@ -15,15 +25,22 @@
+
-
-
-
-
- $(PkgMicrosoft_NETCore_App)\ref\netcoreapp$(TargetFrameworkVersion.TrimStart('v'))\
-
-
-
-
-
-
-
@@ -61,13 +62,12 @@
-
+
+ '$(TargetFrameworkIdentifier)' == '.NETCoreApp'">
<_NetCoreAppTargetFrameworkIdentifier Condition="$([MSBuild]::VersionLessThan($(TargetFrameworkVersion), '5.0'))">netcoreapp
@@ -112,9 +112,10 @@
NuGetPackageVersion="$(ProductVersion)"
PackageDirectory="$(MicrosoftNetCoreAppRefPackDir.TrimEnd('/\'))"
Condition="'%(ResolvedTargetingPack.RuntimeFrameworkName)' == '$(LocalFrameworkOverrideName)' and
- Exists('$(MicrosoftNetCoreAppRefPackDataDir)FrameworkList.xml')" />
+ Exists('$(MicrosoftNetCoreAppRefPackDir)data\FrameworkList.xml')" />
+ Condition="'$(MicrosoftNetCoreAppRuntimePackDir)' != '' and
+ '%(ResolvedRuntimePack.FrameworkName)' == '$(LocalFrameworkOverrideName)'" />
diff --git a/eng/testing/linker/SupportFiles/Directory.Build.props b/eng/testing/linker/SupportFiles/Directory.Build.props
index c1220c719b9af8..760ff6d86e8102 100644
--- a/eng/testing/linker/SupportFiles/Directory.Build.props
+++ b/eng/testing/linker/SupportFiles/Directory.Build.props
@@ -21,5 +21,8 @@
+
diff --git a/eng/testing/linker/project.csproj.template b/eng/testing/linker/project.csproj.template
index 95298969cf61ad..7f8ccaba794415 100644
--- a/eng/testing/linker/project.csproj.template
+++ b/eng/testing/linker/project.csproj.template
@@ -10,6 +10,7 @@
{RepositoryEngineeringDir}
{NetCoreAppCurrent}
{RuntimeIdentifier}
+ {UseMonoRuntime}
{TargetingPackDir}
{NetCoreAppMaximumVersion}
{MicrosoftNETCoreAppVersion}
diff --git a/eng/testing/linker/trimmingTests.targets b/eng/testing/linker/trimmingTests.targets
index 0473cfcd03636c..f43dbee3b4e1f2 100644
--- a/eng/testing/linker/trimmingTests.targets
+++ b/eng/testing/linker/trimmingTests.targets
@@ -74,6 +74,7 @@
.Replace('{NetCoreAppMaximumVersion}', '$(NetCoreAppMaximumVersion)')
.Replace('{TargetingPackDir}','$(MicrosoftNetCoreAppRefPackDir)')
.Replace('{RuntimeIdentifier}','%(TestConsoleApps.TestRuntimeIdentifier)')
+ .Replace('{UseMonoRuntime}','$(UseMonoRuntime)')
.Replace('{MicrosoftNETILLinkTasksVersion}', '$(MicrosoftNETILLinkTasksVersion)')
.Replace('{ExtraTrimmerArgs}', '%(TestConsoleApps.ExtraTrimmerArgs)')
.Replace('{AdditionalProjectReferences}', '$(_additionalProjectReferencesString)')
diff --git a/eng/testing/performance/crossgen_perf.proj b/eng/testing/performance/crossgen_perf.proj
index eb8bdd9c440cd9..c6599aecfd42f1 100644
--- a/eng/testing/performance/crossgen_perf.proj
+++ b/eng/testing/performance/crossgen_perf.proj
@@ -44,13 +44,6 @@
-
-
- $(WorkItemDirectory)
- $(Python) $(CrossgenDirectory)test.py crossgen --core-root $(CoreRoot) --test-name %(Identity)
-
-
-
$(WorkItemDirectory)
@@ -65,15 +58,6 @@
-
-
- $(WorkItemDirectory)
- $(Python) $(CrossgenDirectory)pre.py crossgen --core-root $(CoreRoot) --single %(Identity)
- $(Python) $(CrossgenDirectory)test.py sod --scenario-name "Crossgen %(Identity) Size" --dirs ./crossgen.out/
- $(Python) $(CrossgenDirectory)post.py
-
-
-
$(WorkItemDirectory)
@@ -84,10 +68,6 @@
-
-
- 4:00
-
4:00
@@ -100,9 +80,6 @@
$(Python) $(Crossgen2Directory)test.py crossgen2 --core-root $(CoreRoot) --composite $(Crossgen2Directory)framework-r2r.dll.rsp
1:00
-
- 4:00
-
4:00
diff --git a/eng/testing/performance/performance-setup.ps1 b/eng/testing/performance/performance-setup.ps1
index 92db27fd599a8f..3fc5a44a52ba50 100644
--- a/eng/testing/performance/performance-setup.ps1
+++ b/eng/testing/performance/performance-setup.ps1
@@ -88,7 +88,6 @@ elseif($DynamicPGO)
elseif($FullPGO)
{
$Configurations += " PGOType=fullpgo"
- $ExtraBenchmarkDotNetArguments = "--category-exclusion-filter NoAOT"
}
# FIX ME: This is a workaround until we get this from the actual pipeline
diff --git a/eng/testing/tests.mobile.targets b/eng/testing/tests.mobile.targets
index 38d568edb2c7ee..83d95eae00859f 100644
--- a/eng/testing/tests.mobile.targets
+++ b/eng/testing/tests.mobile.targets
@@ -94,7 +94,7 @@
- $([MSBuild]::NormalizePath($(MonoAotCrossDir), 'mono-aot-cross'))
- $(MonoAotCrossCompilerPath).exe
+ <_MonoAotCrossCompilerPath>$([MSBuild]::NormalizePath($(MonoAotCrossDir), 'mono-aot-cross'))
+ <_MonoAotCrossCompilerPath Condition="$([MSBuild]::IsOSPlatform('WINDOWS'))">$(_MonoAotCrossCompilerPath).exe
+
+
+
PrepareForRun
diff --git a/eng/testing/tests.wasm.targets b/eng/testing/tests.wasm.targets
index ce10194388d550..de4432bee9f5ea 100644
--- a/eng/testing/tests.wasm.targets
+++ b/eng/testing/tests.wasm.targets
@@ -35,7 +35,7 @@
- <_AOTBuildCommand>dotnet msbuild publish/AOTTestProjectForHelix.proj /bl:$XHARNESS_OUT/AOTBuild.binlog
+ <_AOTBuildCommand>dotnet msbuild publish/ProxyProjectForAOTOnHelix.proj /bl:$XHARNESS_OUT/AOTBuild.binlog
<_AOTBuildCommand Condition="'$(ContinuousIntegrationBuild)' != 'true'">$(_AOTBuildCommand) /p:RuntimeSrcDir=$(RepoRoot) /p:RuntimeConfig=$(Configuration)
@@ -69,8 +69,7 @@
-
-
+
@@ -96,16 +95,23 @@
<_WasmPropertiesToPass
Include="$(%(_WasmPropertyNames.Identity))"
Name="%(_WasmPropertyNames.Identity)"
- ConditionToUse="%(_WasmPropertyNames.ConditionToUse)" />
+ ConditionToUse__="%(_WasmPropertyNames.ConditionToUse__)" />
<_WasmVFSFilesToCopy Include="@(WasmFilesToIncludeInFileSystem)" />
<_WasmVFSFilesToCopy TargetPath="%(FileName)%(Extension)" Condition="'%(TargetPath)' == ''" />
+
+
+ Items="@(_WasmItemsToPass)"
+ OutputFile="$(BundleDir)publish\ProxyProjectForAOTOnHelix.props" />
@@ -124,17 +130,17 @@
-1
-
-
-
+
+ <_SatelliteAssemblies Include="$(PublishDir)*\*.resources.dll" />
+ <_SatelliteAssemblies CultureName="$([System.IO.Directory]::GetParent('%(Identity)').Name)" />
+ <_SatelliteAssemblies TargetPath="%(CultureName)\%(FileName)%(Extension)" />
-
+
+
+
+
-
<_CopyLocalPaths
Include="@(PublishItemsOutputGroupOutputs)"
diff --git a/eng/versioning.targets b/eng/versioning.targets
index 9981f73a0beae9..4dacec01a8686b 100644
--- a/eng/versioning.targets
+++ b/eng/versioning.targets
@@ -84,6 +84,11 @@
+
+
+
+
+
false
diff --git a/global.json b/global.json
index 1b29cb7539fb62..df8f7ead63774f 100644
--- a/global.json
+++ b/global.json
@@ -1,24 +1,24 @@
{
"sdk": {
- "version": "6.0.100-preview.3.21202.5",
+ "version": "6.0.100-preview.4.21255.9",
"allowPrerelease": true,
"rollForward": "major"
},
"tools": {
- "dotnet": "6.0.100-preview.3.21202.5"
+ "dotnet": "6.0.100-preview.4.21255.9"
},
"native-tools": {
"cmake": "3.16.4",
"python3": "3.7.1"
},
"msbuild-sdks": {
- "Microsoft.DotNet.Build.Tasks.TargetFramework.Sdk": "6.0.0-beta.21281.1",
+ "Microsoft.DotNet.Build.Tasks.TargetFramework.Sdk": "6.0.0-beta.21311.3",
"Microsoft.DotNet.PackageValidation": "1.0.0-preview.6.21274.7",
- "Microsoft.DotNet.Arcade.Sdk": "6.0.0-beta.21281.1",
- "Microsoft.DotNet.Helix.Sdk": "6.0.0-beta.21281.1",
- "Microsoft.DotNet.SharedFramework.Sdk": "6.0.0-beta.21281.1",
+ "Microsoft.DotNet.Arcade.Sdk": "6.0.0-beta.21311.3",
+ "Microsoft.DotNet.Helix.Sdk": "6.0.0-beta.21311.3",
+ "Microsoft.DotNet.SharedFramework.Sdk": "6.0.0-beta.21311.3",
"Microsoft.Build.NoTargets": "3.0.4",
"Microsoft.Build.Traversal": "3.0.23",
- "Microsoft.NET.Sdk.IL": "6.0.0-preview.6.21281.1"
+ "Microsoft.NET.Sdk.IL": "6.0.0-preview.6.21314.1"
}
}
diff --git a/src/coreclr/.nuget/coreclr-packages.proj b/src/coreclr/.nuget/coreclr-packages.proj
index cef8381866cf12..1347e06d5ff0a0 100644
--- a/src/coreclr/.nuget/coreclr-packages.proj
+++ b/src/coreclr/.nuget/coreclr-packages.proj
@@ -5,11 +5,14 @@
+
+
+
+
-
diff --git a/src/coreclr/.nuget/dotnet-ilverify/dotnet-ilverify.pkgproj b/src/coreclr/.nuget/dotnet-ilverify/dotnet-ilverify.pkgproj
index 3fcc2d5e372767..5b808c62f8b86e 100644
--- a/src/coreclr/.nuget/dotnet-ilverify/dotnet-ilverify.pkgproj
+++ b/src/coreclr/.nuget/dotnet-ilverify/dotnet-ilverify.pkgproj
@@ -13,19 +13,19 @@
- tools\netcoreapp3.1\any\DotnetToolSettings.xml
+ tools\$(NetCoreAppToolCurrent)\any\DotnetToolSettings.xml
- tools\netcoreapp3.1\any\ILVerify.dll
+ tools\$(NetCoreAppToolCurrent)\any\ILVerify.dll
- tools\netcoreapp3.1\any\System.CommandLine.dll
+ tools\$(NetCoreAppToolCurrent)\any\System.CommandLine.dll
- tools\netcoreapp3.1\any\ILVerify.deps.json
+ tools\$(NetCoreAppToolCurrent)\any\ILVerify.deps.json
- tools\netcoreapp3.1\any\ILVerify.runtimeconfig.json
+ tools\$(NetCoreAppToolCurrent)\any\ILVerify.runtimeconfig.json
diff --git a/src/coreclr/CMakeLists.txt b/src/coreclr/CMakeLists.txt
index 8f82d83fa4b735..78aa969473525d 100644
--- a/src/coreclr/CMakeLists.txt
+++ b/src/coreclr/CMakeLists.txt
@@ -34,6 +34,10 @@ endif(CORECLR_SET_RPATH)
OPTION(CLR_CMAKE_ENABLE_CODE_COVERAGE "Enable code coverage" OFF)
+if(CLR_CMAKE_HOST_ARCH_S390X)
+ add_definitions(-DBIGENDIAN)
+endif(CLR_CMAKE_HOST_ARCH_S390X)
+
#----------------------------------------------------
# Cross target Component build specific configuration
#----------------------------------------------------
diff --git a/src/coreclr/Directory.Build.props b/src/coreclr/Directory.Build.props
index 0117eb8dbe3d84..4b18a3a329a2f4 100644
--- a/src/coreclr/Directory.Build.props
+++ b/src/coreclr/Directory.Build.props
@@ -7,6 +7,8 @@
Debug
Release
Checked
+
+ false
diff --git a/src/coreclr/System.Private.CoreLib/src/ILLink/ILLink.Descriptors.Windows.xml b/src/coreclr/System.Private.CoreLib/src/ILLink/ILLink.Descriptors.Windows.xml
index 0648aed9763d13..52ab4af9bef745 100644
--- a/src/coreclr/System.Private.CoreLib/src/ILLink/ILLink.Descriptors.Windows.xml
+++ b/src/coreclr/System.Private.CoreLib/src/ILLink/ILLink.Descriptors.Windows.xml
@@ -4,5 +4,11 @@
+
+
+
+
+
+
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Attribute.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Attribute.CoreCLR.cs
index bd108893d6c22f..0779c2401f01d1 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Attribute.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Attribute.CoreCLR.cs
@@ -25,24 +25,26 @@ private static Attribute[] InternalGetCustomAttributes(PropertyInfo element, Typ
if (!inherit)
return attributes;
+ // if this is an indexer, we need to get the parameter types to help disambiguate
+ Type[] indexParamTypes = GetIndexParameterTypes(element);
+ PropertyInfo? baseProp = GetParentDefinition(element, indexParamTypes);
+ if (baseProp == null)
+ return attributes;
+
// create the hashtable that keeps track of inherited types
Dictionary<Type, AttributeUsageAttribute> types = new Dictionary<Type, AttributeUsageAttribute>(11);
// create an array list to collect all the requested attributes
List<Attribute> attributeList = new List<Attribute>();
- CopyToArrayList(attributeList, attributes, types);
-
- // if this is an index we need to get the parameter types to help disambiguate
- Type[] indexParamTypes = GetIndexParameterTypes(element);
-
-
- PropertyInfo? baseProp = GetParentDefinition(element, indexParamTypes);
- while (baseProp != null)
+ CopyToAttributeList(attributeList, attributes, types);
+ do
{
attributes = GetCustomAttributes(baseProp, type, false);
AddAttributesToList(attributeList, attributes, types);
baseProp = GetParentDefinition(baseProp, indexParamTypes);
}
+ while (baseProp != null);
+
Attribute[] array = CreateAttributeArrayHelper(type, attributeList.Count);
attributeList.CopyTo(array, 0);
return array;
@@ -123,27 +125,33 @@ private static Attribute[] InternalGetCustomAttributes(EventInfo element, Type t
// walk up the hierarchy chain
Attribute[] attributes = (Attribute[])element.GetCustomAttributes(type, inherit);
- if (inherit)
+ if (!inherit)
{
- // create the hashtable that keeps track of inherited types
- Dictionary types = new Dictionary(11);
- // create an array list to collect all the requested attibutes
- List attributeList = new List();
- CopyToArrayList(attributeList, attributes, types);
-
- EventInfo? baseEvent = GetParentDefinition(element);
- while (baseEvent != null)
- {
- attributes = GetCustomAttributes(baseEvent, type, false);
- AddAttributesToList(attributeList, attributes, types);
- baseEvent = GetParentDefinition(baseEvent);
- }
- Attribute[] array = CreateAttributeArrayHelper(type, attributeList.Count);
- attributeList.CopyTo(array, 0);
- return array;
+ return attributes;
}
- else
+
+ EventInfo? baseEvent = GetParentDefinition(element);
+ if (baseEvent == null)
+ {
return attributes;
+ }
+
+ // create the hashtable that keeps track of inherited types
+ // create an array list to collect all the requested attributes
+ Dictionary<Type, AttributeUsageAttribute> types = new Dictionary<Type, AttributeUsageAttribute>(11);
+ List<Attribute> attributeList = new List<Attribute>();
+ CopyToAttributeList(attributeList, attributes, types);
+ do
+ {
+ attributes = GetCustomAttributes(baseEvent, type, false);
+ AddAttributesToList(attributeList, attributes, types);
+ baseEvent = GetParentDefinition(baseEvent);
+ }
+ while (baseEvent != null);
+
+ Attribute[] array = CreateAttributeArrayHelper(type, attributeList.Count);
+ attributeList.CopyTo(array, 0);
+ return array;
}
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2075:UnrecognizedReflectionPattern",
@@ -357,7 +365,7 @@ private static bool InternalParamIsDefined(ParameterInfo param, Type type, bool
#endregion
#region Utility
- private static void CopyToArrayList(List<Attribute> attributeList, Attribute[] attributes, Dictionary<Type, AttributeUsageAttribute> types)
+ private static void CopyToAttributeList(List<Attribute> attributeList, Attribute[] attributes, Dictionary<Type, AttributeUsageAttribute> types)
{
for (int i = 0; i < attributes.Length; i++)
{
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/CustomAttribute.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/CustomAttribute.cs
index 1b825f0dc952f5..cf35de24627525 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/CustomAttribute.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/CustomAttribute.cs
@@ -237,6 +237,10 @@ private static CustomAttributeType InitCustomAttributeType(RuntimeType parameter
private static IList<CustomAttributeData> GetCustomAttributes(RuntimeModule module, int tkTarget)
{
CustomAttributeRecord[] records = GetCustomAttributeRecords(module, tkTarget);
+ if (records.Length == 0)
+ {
+ return Array.Empty<CustomAttributeData>();
+ }
CustomAttributeData[] customAttributes = new CustomAttributeData[records.Length];
for (int i = 0; i < records.Length; i++)
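For context on the early return added above: Array.Empty<T>() hands back a cached, process-wide zero-length array, so the common no-attributes path allocates nothing. A minimal standalone illustration (not part of this PR):

    using System;

    class ArrayEmptyDemo
    {
        static void Main()
        {
            int[] a = Array.Empty<int>();
            int[] b = Array.Empty<int>();
            Console.WriteLine(ReferenceEquals(a, b)); // True: the same cached instance
        }
    }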
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimePropertyInfo.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimePropertyInfo.cs
index b7e6a58f1fe4b4..f01ae3933d0d9c 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimePropertyInfo.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimePropertyInfo.cs
@@ -304,9 +304,11 @@ internal ParameterInfo[] GetIndexParametersNoCopy()
// Now copy over the parameter info's and change their
// owning member info to the current property info.
- ParameterInfo[] propParams = new ParameterInfo[numParams];
+ ParameterInfo[] propParams = numParams != 0 ?
+ new ParameterInfo[numParams] :
+ Array.Empty<ParameterInfo>();
- for (int i = 0; i < numParams; i++)
+ for (int i = 0; i < propParams.Length; i++)
propParams[i] = new RuntimeParameterInfo((RuntimeParameterInfo)methParams![i], this);
m_parameters = propParams;
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.CoreCLR.cs
index 688e4f83908c76..e064c36746d517 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.CoreCLR.cs
@@ -464,8 +464,15 @@ public static IntPtr CreateAggregatedObject(IntPtr pOuter, T o) where T : not
///
/// Checks if the object is a classic COM component.
///
- [MethodImpl(MethodImplOptions.InternalCall)]
- public static extern bool IsComObject(object o);
+ public static bool IsComObject(object o)
+ {
+ if (o is null)
+ {
+ throw new ArgumentNullException(nameof(o));
+ }
+
+ return o is __ComObject;
+ }
///
/// Release the COM component and if the reference hits 0 zombie this object.
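The observable behavior of the managed rewrite above, sketched as a standalone check (illustrative only; __ComObject is runtime-internal, but ordinary managed objects are never COM objects):

    using System;
    using System.Runtime.InteropServices;

    class IsComObjectDemo
    {
        static void Main()
        {
            Console.WriteLine(Marshal.IsComObject(new object())); // False
            // Marshal.IsComObject(null) now throws ArgumentNullException explicitly,
            // matching the argument check the old native implementation performed.
        }
    }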
diff --git a/src/coreclr/System.Private.CoreLib/src/System/RuntimeHandles.cs b/src/coreclr/System.Private.CoreLib/src/System/RuntimeHandles.cs
index 75aff556de7c34..dd17a2fa744184 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/RuntimeHandles.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/RuntimeHandles.cs
@@ -460,8 +460,17 @@ internal RuntimeMethodHandleInternal GetInterfaceMethodImplementation(RuntimeTyp
return GetInterfaceMethodImplementation(new QCallTypeHandle(ref nativeHandle), new QCallTypeHandle(ref nativeInterfaceHandle), interfaceMethodHandle);
}
- [MethodImpl(MethodImplOptions.InternalCall)]
- internal static extern bool IsComObject(RuntimeType type, bool isGenericCOM);
+ internal static bool IsComObject(RuntimeType type, bool isGenericCOM)
+ {
+#if FEATURE_COMINTEROP
+ if (isGenericCOM)
+ return type == typeof(__ComObject);
+
+ return RuntimeTypeHandle.CanCastTo(type, (RuntimeType)typeof(__ComObject));
+#else
+ return false;
+#endif
+ }
[MethodImpl(MethodImplOptions.InternalCall)]
internal static extern bool IsInterface(RuntimeType type);
diff --git a/src/coreclr/System.Private.CoreLib/src/System/RuntimeType.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/RuntimeType.CoreCLR.cs
index 1e2ece2cde5c8d..b07971e0f5600e 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/RuntimeType.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/RuntimeType.CoreCLR.cs
@@ -3068,6 +3068,131 @@ public override MemberInfo[] GetMember(string name, MemberTypes type, BindingFla
return compressMembers;
}
+
+ public override MemberInfo GetMemberWithSameMetadataDefinitionAs(MemberInfo member)
+ {
+ if (member is null) throw new ArgumentNullException(nameof(member));
+
+ RuntimeType? runtimeType = this;
+ while (runtimeType != null)
+ {
+ MemberInfo? result = member.MemberType switch
+ {
+ MemberTypes.Method => GetMethodWithSameMetadataDefinitionAs(runtimeType, member),
+ MemberTypes.Constructor => GetConstructorWithSameMetadataDefinitionAs(runtimeType, member),
+ MemberTypes.Property => GetPropertyWithSameMetadataDefinitionAs(runtimeType, member),
+ MemberTypes.Field => GetFieldWithSameMetadataDefinitionAs(runtimeType, member),
+ MemberTypes.Event => GetEventWithSameMetadataDefinitionAs(runtimeType, member),
+ MemberTypes.NestedType => GetNestedTypeWithSameMetadataDefinitionAs(runtimeType, member),
+ _ => null
+ };
+
+ if (result != null)
+ {
+ return result;
+ }
+
+ runtimeType = runtimeType.GetBaseType();
+ }
+
+ throw CreateGetMemberWithSameMetadataDefinitionAsNotFoundException(member);
+ }
+
+ private static MemberInfo? GetMethodWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo method)
+ {
+ RuntimeMethodInfo[] cache = runtimeType.Cache.GetMethodList(MemberListType.CaseSensitive, method.Name);
+
+ for (int i = 0; i < cache.Length; i++)
+ {
+ RuntimeMethodInfo candidate = cache[i];
+ if (candidate.HasSameMetadataDefinitionAs(method))
+ {
+ return candidate;
+ }
+ }
+
+ return null;
+ }
+
+ private static MemberInfo? GetConstructorWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo constructor)
+ {
+ RuntimeConstructorInfo[] cache = runtimeType.Cache.GetConstructorList(MemberListType.CaseSensitive, constructor.Name);
+
+ for (int i = 0; i < cache.Length; i++)
+ {
+ RuntimeConstructorInfo candidate = cache[i];
+ if (candidate.HasSameMetadataDefinitionAs(constructor))
+ {
+ return candidate;
+ }
+ }
+
+ return null;
+ }
+
+ private static MemberInfo? GetPropertyWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo property)
+ {
+ RuntimePropertyInfo[] cache = runtimeType.Cache.GetPropertyList(MemberListType.CaseSensitive, property.Name);
+
+ for (int i = 0; i < cache.Length; i++)
+ {
+ RuntimePropertyInfo candidate = cache[i];
+ if (candidate.HasSameMetadataDefinitionAs(property))
+ {
+ return candidate;
+ }
+ }
+
+ return null;
+ }
+
+ private static MemberInfo? GetFieldWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo field)
+ {
+ RuntimeFieldInfo[] cache = runtimeType.Cache.GetFieldList(MemberListType.CaseSensitive, field.Name);
+
+ for (int i = 0; i < cache.Length; i++)
+ {
+ RuntimeFieldInfo candidate = cache[i];
+ if (candidate.HasSameMetadataDefinitionAs(field))
+ {
+ return candidate;
+ }
+ }
+
+ return null;
+ }
+
+ private static MemberInfo? GetEventWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo eventInfo)
+ {
+ RuntimeEventInfo[] cache = runtimeType.Cache.GetEventList(MemberListType.CaseSensitive, eventInfo.Name);
+
+ for (int i = 0; i < cache.Length; i++)
+ {
+ RuntimeEventInfo candidate = cache[i];
+ if (candidate.HasSameMetadataDefinitionAs(eventInfo))
+ {
+ return candidate;
+ }
+ }
+
+ return null;
+ }
+
+ private static MemberInfo? GetNestedTypeWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo nestedType)
+ {
+ RuntimeType[] cache = runtimeType.Cache.GetNestedTypeList(MemberListType.CaseSensitive, nestedType.Name);
+
+ for (int i = 0; i < cache.Length; i++)
+ {
+ RuntimeType candidate = cache[i];
+ if (candidate.HasSameMetadataDefinitionAs(nestedType))
+ {
+ return candidate;
+ }
+ }
+
+ return null;
+ }
#endregion
#region Identity
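A minimal usage sketch for the GetMemberWithSameMetadataDefinitionAs API implemented above: it maps a member from a generic type definition onto the corresponding member of a constructed type (snippet is illustrative, not from this PR):

    using System;
    using System.Collections.Generic;
    using System.Reflection;

    class SameMetadataDemo
    {
        static void Main()
        {
            MethodInfo addDefinition = typeof(List<>).GetMethod("Add")!;
            var addOfInt = (MethodInfo)typeof(List<int>).GetMemberWithSameMetadataDefinitionAs(addDefinition);
            Console.WriteLine(addOfInt.DeclaringType); // System.Collections.Generic.List`1[System.Int32]
        }
    }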
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Threading/ThreadPool.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Threading/ThreadPool.CoreCLR.cs
index 95b283e707c69e..86dd19a5a75609 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Threading/ThreadPool.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Threading/ThreadPool.CoreCLR.cs
@@ -522,6 +522,15 @@ internal static void NotifyWorkItemProgress()
[MethodImpl(MethodImplOptions.InternalCall)]
private static extern void NotifyWorkItemProgressNative();
+ internal static bool NotifyThreadBlocked() =>
+ UsePortableThreadPool && PortableThreadPool.ThreadPoolInstance.NotifyThreadBlocked();
+
+ internal static void NotifyThreadUnblocked()
+ {
+ Debug.Assert(UsePortableThreadPool);
+ PortableThreadPool.ThreadPoolInstance.NotifyThreadUnblocked();
+ }
+
internal static object? GetOrCreateThreadLocalCompletionCountObject() =>
UsePortableThreadPool ? PortableThreadPool.ThreadPoolInstance.GetOrCreateThreadLocalCompletionCountObject() : null;
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Variant.cs b/src/coreclr/System.Private.CoreLib/src/System/Variant.cs
index 0ac6557579356e..8d2c55444072d4 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Variant.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Variant.cs
@@ -198,7 +198,7 @@ public Variant(float val)
{
_objref = null;
_flags = CV_R4;
- _data = (uint)BitConverter.SingleToInt32Bits(val);
+ _data = BitConverter.SingleToUInt32Bits(val);
}
public Variant(double val)
@@ -328,7 +328,7 @@ public Variant(object? obj)
CV_U4 => (uint)_data,
CV_I8 => _data,
CV_U8 => (ulong)_data,
- CV_R4 => BitConverter.Int32BitsToSingle((int)_data),
+ CV_R4 => BitConverter.UInt32BitsToSingle((uint)_data),
CV_R8 => BitConverter.Int64BitsToDouble(_data),
CV_DATETIME => new DateTime(_data),
CV_TIMESPAN => new TimeSpan(_data),
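The change above swaps the signed round-trip (SingleToInt32Bits plus casts) for the unsigned helpers added in .NET 6; the two are bit-for-bit equivalent, as a quick illustrative snippet shows:

    using System;

    class FloatBitsDemo
    {
        static void Main()
        {
            uint bits = BitConverter.SingleToUInt32Bits(1.0f);
            Console.WriteLine(bits.ToString("X8"));                   // 3F800000
            Console.WriteLine(BitConverter.UInt32BitsToSingle(bits)); // 1
        }
    }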
diff --git a/src/coreclr/ToolBox/superpmi/readme.md b/src/coreclr/ToolBox/superpmi/readme.md
index d9aded28c479a5..c3de3dfe94288c 100644
--- a/src/coreclr/ToolBox/superpmi/readme.md
+++ b/src/coreclr/ToolBox/superpmi/readme.md
@@ -91,7 +91,7 @@ speed up the disk operations.
These are the general steps that are followed when doing a SuperPMI collection:
1. Collect .MC files. Set up for collection, then cause the JIT to be invoked
-by either running a scenario, running tests, crossgen compiling assemblies,
+by either running a scenario, running tests, crossgen2 compiling assemblies,
or using PMI to force the JIT to compile functions in an assembly.
During collection, the data for each JIT compiled function is stored
in a uniquely named file with a ".MC" filename extension (for "method context").
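For a concrete starting point, collection is typically driven through the shim collector; a rough Windows sketch follows (variable names assume the superpmi-shim-collector setup this readme describes — see the full setup steps for the authoritative list):

    set SuperPMIShimLogPath=<empty temp directory to receive the .MC files>
    set SuperPMIShimPath=<full path to the real clrjit.dll>
    set COMPlus_JitName=superpmi-shim-collector.dll
    rem ...then run the scenario or tests so each jitted method gets recorded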
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shared/agnostic.h b/src/coreclr/ToolBox/superpmi/superpmi-shared/agnostic.h
index 4b47339b352f64..e51eb3ef6d049b 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shared/agnostic.h
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shared/agnostic.h
@@ -557,18 +557,22 @@ struct Agnostic_GetSystemVAmd64PassStructInRegisterDescriptor
struct Agnostic_ResolveVirtualMethodKey
{
- DWORDLONG virtualMethod;
- DWORDLONG objClass;
- DWORDLONG context;
+ DWORDLONG virtualMethod;
+ DWORDLONG objClass;
+ DWORDLONG context;
+ DWORD pResolvedTokenVirtualMethodNonNull;
+ Agnostic_CORINFO_RESOLVED_TOKEN pResolvedTokenVirtualMethod;
};
struct Agnostic_ResolveVirtualMethodResult
{
- bool returnValue;
- DWORDLONG devirtualizedMethod;
- bool requiresInstMethodTableArg;
- DWORDLONG exactContext;
- DWORD detail;
+ bool returnValue;
+ DWORDLONG devirtualizedMethod;
+ bool requiresInstMethodTableArg;
+ DWORDLONG exactContext;
+ DWORD detail;
+ Agnostic_CORINFO_RESOLVED_TOKEN resolvedTokenDevirtualizedMethod;
+ Agnostic_CORINFO_RESOLVED_TOKEN resolvedTokenDevirtualizedUnboxedMethod;
};
struct ResolveTokenValue
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shared/compileresult.cpp b/src/coreclr/ToolBox/superpmi/superpmi-shared/compileresult.cpp
index 94ac3a50a36923..729064aa1b0bc8 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shared/compileresult.cpp
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shared/compileresult.cpp
@@ -1096,7 +1096,10 @@ void CompileResult::repRecordCallSite(ULONG instrOffset, CORINFO_SIG_INFO* callS
// Most call site records have only `methodHandle`, so creating two separate maps gives us better performance
// and smaller memory consumption. Note: we are not reading values from these maps during a normal replay.
RecordCallSiteWithSignature = new LightWeightMap();
- RecordCallSiteWithoutSignature = new LightWeightMap();
+ if (recordCallSitesWithoutSig)
+ {
+ RecordCallSiteWithoutSignature = new LightWeightMap();
+ }
}
if (callSig != nullptr)
@@ -1107,7 +1110,7 @@ void CompileResult::repRecordCallSite(ULONG instrOffset, CORINFO_SIG_INFO* callS
value.methodHandle = CastHandle(methodHandle);
RecordCallSiteWithSignature->Add(instrOffset, value);
}
- else
+ else if (recordCallSitesWithoutSig)
{
RecordCallSiteWithoutSignature->Add(instrOffset, CastHandle(methodHandle));
}
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shared/compileresult.h b/src/coreclr/ToolBox/superpmi/superpmi-shared/compileresult.h
index 47dbc0a07ba0e7..4c912d10b5a38d 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shared/compileresult.h
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shared/compileresult.h
@@ -211,5 +211,7 @@ class CompileResult
MemoryTracker* memoryTracker;
Capture_AllocMemDetails allocMemDets;
allocGCInfoDetails allocGCInfoDets;
+
+ const bool recordCallSitesWithoutSig = false; // Set it to true if you want to use CallUtils::GetRecordedCallSiteInfo.
};
#endif
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shared/lightweightmap.h b/src/coreclr/ToolBox/superpmi/superpmi-shared/lightweightmap.h
index ed06e9fac4d59e..e696f469d17dfa 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shared/lightweightmap.h
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shared/lightweightmap.h
@@ -319,7 +319,7 @@ class LightWeightMap : public LightWeightMapBuffer
// If we have RTTI, we can make this assert report the correct type. No RTTI, though, when
// built with .NET Core, especially when built against the PAL.
- AssertCodeMsg((ptr - bytes) == size, EXCEPTIONCODE_LWM, "%s - Ended with unexpected sizes %p != %x",
+ AssertCodeMsg(ptr == (bytes + size), EXCEPTIONCODE_LWM, "%s - Ended with unexpected sizes %p != %x",
"Unknown type" /*typeid(_Item).name()*/, (void*)(ptr - bytes), size);
return size;
}
@@ -385,11 +385,9 @@ class LightWeightMap : public LightWeightMapBuffer
if (numItems > 0)
{
- for (unsigned int i = numItems; i > insert; i--)
- {
- pKeys[i] = pKeys[i - 1];
- pItems[i] = pItems[i - 1];
- }
+ int countToMove = (numItems - insert);
+ memmove(&pKeys[insert+1], &pKeys[insert], countToMove * sizeof(pKeys[insert]));
+ memmove(&pItems[insert+1], &pItems[insert], countToMove * sizeof(pItems[insert]));
}
pKeys[insert] = key;
@@ -658,7 +656,7 @@ class DenseLightWeightMap : public LightWeightMapBuffer
ptr += bufferLength * sizeof(unsigned char);
}
- AssertCodeMsg((ptr - bytes) == size, EXCEPTIONCODE_LWM, "Ended with unexpected sizes %Ix != %x", ptr - bytes,
+ AssertCodeMsg(ptr == (bytes + size), EXCEPTIONCODE_LWM, "Ended with unexpected sizes %Ix != %x", ptr - bytes,
size);
return size;
}
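The insertion change above replaces a per-element copy loop with memmove, which is safe for the overlapping source/destination ranges involved in shifting right. A self-contained sketch of the pattern (toy key/item types, not SuperPMI's):

    #include <cstring>

    // Shift keys[insert..numItems) and items[insert..numItems) right by one slot,
    // then place the new entry; memmove tolerates the overlap, memcpy would not.
    static void InsertAt(unsigned* keys, int* items, unsigned numItems,
                         unsigned insert, unsigned key, int item)
    {
        unsigned countToMove = numItems - insert;
        memmove(&keys[insert + 1], &keys[insert], countToMove * sizeof(keys[0]));
        memmove(&items[insert + 1], &items[insert], countToMove * sizeof(items[0]));
        keys[insert] = key;
        items[insert] = item;
    }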
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shared/methodcontext.cpp b/src/coreclr/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
index 52739c2f8547e2..7d7824e2208b30 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
@@ -3164,23 +3164,53 @@ void MethodContext::recResolveVirtualMethod(CORINFO_DEVIRTUALIZATION_INFO * info
}
Agnostic_ResolveVirtualMethodKey key;
+ ZeroMemory(&key, sizeof(key)); // Zero token including any struct padding
key.virtualMethod = CastHandle(info->virtualMethod);
key.objClass = CastHandle(info->objClass);
key.context = CastHandle(info->context);
+
+ key.pResolvedTokenVirtualMethodNonNull = info->pResolvedTokenVirtualMethod != NULL ? 1 : 0;
+ if (key.pResolvedTokenVirtualMethodNonNull)
+ key.pResolvedTokenVirtualMethod = SpmiRecordsHelper::StoreAgnostic_CORINFO_RESOLVED_TOKEN(info->pResolvedTokenVirtualMethod, ResolveToken);
+
Agnostic_ResolveVirtualMethodResult result;
- result.returnValue = returnValue;
- result.devirtualizedMethod = CastHandle(info->devirtualizedMethod);
+ result.returnValue = returnValue;
+ result.devirtualizedMethod = CastHandle(info->devirtualizedMethod);
result.requiresInstMethodTableArg = info->requiresInstMethodTableArg;
- result.exactContext = CastHandle(info->exactContext);
- result.detail = (DWORD) info->detail;
+ result.exactContext = CastHandle(info->exactContext);
+ result.detail = (DWORD) info->detail;
+
+ if (returnValue)
+ {
+ result.resolvedTokenDevirtualizedMethod = SpmiRecordsHelper::StoreAgnostic_CORINFO_RESOLVED_TOKEN(&info->resolvedTokenDevirtualizedMethod, ResolveToken);
+ result.resolvedTokenDevirtualizedUnboxedMethod = SpmiRecordsHelper::StoreAgnostic_CORINFO_RESOLVED_TOKEN(&info->resolvedTokenDevirtualizedUnboxedMethod, ResolveToken);
+ }
+ else
+ {
+ ZeroMemory(&result.resolvedTokenDevirtualizedMethod, sizeof(result.resolvedTokenDevirtualizedMethod));
+ ZeroMemory(&result.resolvedTokenDevirtualizedUnboxedMethod, sizeof(result.resolvedTokenDevirtualizedUnboxedMethod));
+ }
+
ResolveVirtualMethod->Add(key, result);
DEBUG_REC(dmpResolveVirtualMethod(key, result));
}
void MethodContext::dmpResolveVirtualMethod(const Agnostic_ResolveVirtualMethodKey& key, const Agnostic_ResolveVirtualMethodResult& result)
{
- printf("ResolveVirtualMethod virtMethod-%016llX, objClass-%016llX, context-%016llX :: returnValue-%d, devirtMethod-%016llX, requiresInstArg-%d, exactContext-%016llX, detail-%d",
- key.virtualMethod, key.objClass, key.context, result.returnValue, result.devirtualizedMethod, result.requiresInstMethodTableArg, result.exactContext, result.detail);
+ printf("ResolveVirtualMethod key virtMethod-%016llX, objClass-%016llX, context-%016llX pResolvedTokenVirtualMethodNonNull-%08X pResolvedTokenVirtualMethod{%s}",
+ key.virtualMethod,
+ key.objClass,
+ key.context,
+ key.pResolvedTokenVirtualMethodNonNull,
+ key.pResolvedTokenVirtualMethodNonNull ? SpmiDumpHelper::DumpAgnostic_CORINFO_RESOLVED_TOKEN(key.pResolvedTokenVirtualMethod).c_str() : "???");
+ printf(", value returnValue-%s, devirtMethod-%016llX, requiresInstArg-%s, exactContext-%016llX, detail-%d, tokDvMeth{%s}, tokDvUnboxMeth{%s}",
+ result.returnValue ? "true" : "false",
+ result.devirtualizedMethod,
+ result.requiresInstMethodTableArg ? "true" : "false",
+ result.exactContext,
+ result.detail,
+ result.returnValue ? SpmiDumpHelper::DumpAgnostic_CORINFO_RESOLVED_TOKEN(result.resolvedTokenDevirtualizedMethod).c_str() : "???",
+ result.returnValue ? SpmiDumpHelper::DumpAgnostic_CORINFO_RESOLVED_TOKEN(result.resolvedTokenDevirtualizedUnboxedMethod).c_str() : "???");
}
bool MethodContext::repResolveVirtualMethod(CORINFO_DEVIRTUALIZATION_INFO * info)
@@ -3191,7 +3221,11 @@ bool MethodContext::repResolveVirtualMethod(CORINFO_DEVIRTUALIZATION_INFO * info
key.objClass = CastHandle(info->objClass);
key.context = CastHandle(info->context);
- AssertMapAndKeyExist(ResolveVirtualMethod, key, ": %016llX-%016llX-%016llX", key.virtualMethod, key.objClass, key.context);
+ key.pResolvedTokenVirtualMethodNonNull = info->pResolvedTokenVirtualMethod != NULL ? 1 : 0;
+ if (key.pResolvedTokenVirtualMethodNonNull)
+ key.pResolvedTokenVirtualMethod = SpmiRecordsHelper::StoreAgnostic_CORINFO_RESOLVED_TOKEN(info->pResolvedTokenVirtualMethod, ResolveToken);
+
+ AssertMapAndKeyExist(ResolveVirtualMethod, key, ": %016llX-%016llX-%016llX-%08X", key.virtualMethod, key.objClass, key.context, key.pResolvedTokenVirtualMethodNonNull);
Agnostic_ResolveVirtualMethodResult result = ResolveVirtualMethod->Get(key);
DEBUG_REP(dmpResolveVirtualMethod(key, result));
@@ -3200,6 +3234,11 @@ bool MethodContext::repResolveVirtualMethod(CORINFO_DEVIRTUALIZATION_INFO * info
info->requiresInstMethodTableArg = result.requiresInstMethodTableArg;
info->exactContext = (CORINFO_CONTEXT_HANDLE) result.exactContext;
info->detail = (CORINFO_DEVIRTUALIZATION_DETAIL) result.detail;
+ if (result.returnValue)
+ {
+ info->resolvedTokenDevirtualizedMethod = SpmiRecordsHelper::Restore_CORINFO_RESOLVED_TOKEN(&result.resolvedTokenDevirtualizedMethod, ResolveToken);
+ info->resolvedTokenDevirtualizedUnboxedMethod = SpmiRecordsHelper::Restore_CORINFO_RESOLVED_TOKEN(&result.resolvedTokenDevirtualizedUnboxedMethod, ResolveToken);
+ }
return result.returnValue;
}
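The ZeroMemory call added to recResolveVirtualMethod matters because SuperPMI map keys are compared as raw bytes: any compiler-inserted padding must be deterministic, or logically identical keys will fail to match on replay. A self-contained sketch of the hazard (toy struct, not the real key):

    #include <cstring>

    struct Key
    {
        unsigned long long a; // 8 bytes
        unsigned int b;       // 4 bytes + 4 bytes of trailing padding on LP64
    };

    static Key MakeKey(unsigned long long a, unsigned int b)
    {
        Key k;
        memset(&k, 0, sizeof(k)); // zero the padding so byte-wise compares are stable
        k.a = a;
        k.b = b;
        return k;
    }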
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shared/spmirecordhelper.h b/src/coreclr/ToolBox/superpmi/superpmi-shared/spmirecordhelper.h
index 0caf8bbf597660..d5316b94db17bb 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shared/spmirecordhelper.h
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shared/spmirecordhelper.h
@@ -35,6 +35,10 @@ class SpmiRecordsHelper
static Agnostic_CORINFO_RESOLVED_TOKEN RestoreAgnostic_CORINFO_RESOLVED_TOKEN(
CORINFO_RESOLVED_TOKEN* pResolvedToken, LightWeightMap* buffers);
+ template
+ static CORINFO_RESOLVED_TOKEN Restore_CORINFO_RESOLVED_TOKEN(
+ Agnostic_CORINFO_RESOLVED_TOKEN* pResolvedTokenAgnostic, LightWeightMap* buffers);
+
// Restore the out values in the first argument from the second.
// Can't just return whole CORINFO_RESOLVED_TOKEN because [in] values in it are important too.
template
@@ -200,6 +204,22 @@ inline Agnostic_CORINFO_RESOLVED_TOKEN SpmiRecordsHelper::RestoreAgnostic_CORINF
return token;
}
+template
+inline CORINFO_RESOLVED_TOKEN SpmiRecordsHelper::Restore_CORINFO_RESOLVED_TOKEN(
+ Agnostic_CORINFO_RESOLVED_TOKEN* pResolvedTokenAgnostic, LightWeightMap* buffers)
+{
+ CORINFO_RESOLVED_TOKEN token;
+ ZeroMemory(&token, sizeof(token));
+
+ token.tokenContext = (CORINFO_CONTEXT_HANDLE)pResolvedTokenAgnostic->inValue.tokenContext;
+ token.tokenScope = (CORINFO_MODULE_HANDLE)pResolvedTokenAgnostic->inValue.tokenScope;
+ token.token = (mdToken)pResolvedTokenAgnostic->inValue.token;
+ token.tokenType = (CorInfoTokenKind)pResolvedTokenAgnostic->inValue.tokenType;
+
+ Restore_CORINFO_RESOLVED_TOKENout(&token, pResolvedTokenAgnostic->outValue, buffers);
+ return token;
+}
+
template
inline void SpmiRecordsHelper::Restore_CORINFO_RESOLVED_TOKENout(
CORINFO_RESOLVED_TOKEN* pResolvedToken,
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/icorjitinfo.cpp b/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/icorjitinfo.cpp
index cd601a26213339..de2c555c20a4c6 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/icorjitinfo.cpp
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/icorjitinfo.cpp
@@ -2115,7 +2115,7 @@ uint16_t interceptor_ICJI::getRelocTypeHint(void* target)
// For what machine does the VM expect the JIT to generate code? The VM
// returns one of the IMAGE_FILE_MACHINE_* values. Note that if the VM
-// is cross-compiling (such as the case for crossgen), it will return a
+// is cross-compiling (such as the case for crossgen2), it will return a
// different value than if it was compiling for the host architecture.
//
uint32_t interceptor_ICJI::getExpectedTargetArchitecture()
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/superpmi-shim-collector.cpp b/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/superpmi-shim-collector.cpp
index f1f61fcc672096..9cd22007107534 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/superpmi-shim-collector.cpp
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/superpmi-shim-collector.cpp
@@ -130,7 +130,7 @@ extern "C"
return TRUE;
}
-extern "C" DLLEXPORT void __stdcall jitStartup(ICorJitHost* host)
+extern "C" DLLEXPORT void jitStartup(ICorJitHost* host)
{
// crossgen2 doesn't invoke DllMain on Linux/Mac (under PAL), so optionally do initialization work here.
InitializeShim();
@@ -157,7 +157,7 @@ extern "C" DLLEXPORT void __stdcall jitStartup(ICorJitHost* host)
pnjitStartup(g_ourJitHost);
}
-extern "C" DLLEXPORT ICorJitCompiler* __stdcall getJit()
+extern "C" DLLEXPORT ICorJitCompiler* getJit()
{
DWORD dwRetVal = 0;
PgetJit pngetJit;
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shim-counter/superpmi-shim-counter.cpp b/src/coreclr/ToolBox/superpmi/superpmi-shim-counter/superpmi-shim-counter.cpp
index af0ceac75d269f..b2ad7f6754a53a 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shim-counter/superpmi-shim-counter.cpp
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shim-counter/superpmi-shim-counter.cpp
@@ -108,7 +108,7 @@ extern "C"
return TRUE;
}
-extern "C" DLLEXPORT void __stdcall jitStartup(ICorJitHost* host)
+extern "C" DLLEXPORT void jitStartup(ICorJitHost* host)
{
SetDefaultPaths();
SetLibName();
@@ -141,7 +141,7 @@ extern "C" DLLEXPORT void __stdcall jitStartup(ICorJitHost* host)
pnjitStartup(g_ourJitHost);
}
-extern "C" DLLEXPORT ICorJitCompiler* __stdcall getJit()
+extern "C" DLLEXPORT ICorJitCompiler* getJit()
{
DWORD dwRetVal = 0;
PgetJit pngetJit;
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shim-simple/superpmi-shim-simple.cpp b/src/coreclr/ToolBox/superpmi/superpmi-shim-simple/superpmi-shim-simple.cpp
index ac720a16983b0e..c6c24afb089334 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shim-simple/superpmi-shim-simple.cpp
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shim-simple/superpmi-shim-simple.cpp
@@ -93,7 +93,7 @@ extern "C"
return TRUE;
}
-extern "C" DLLEXPORT void __stdcall jitStartup(ICorJitHost* host)
+extern "C" DLLEXPORT void jitStartup(ICorJitHost* host)
{
SetDefaultPaths();
SetLibName();
@@ -117,7 +117,7 @@ extern "C" DLLEXPORT void __stdcall jitStartup(ICorJitHost* host)
pnjitStartup(g_ourJitHost);
}
-extern "C" DLLEXPORT ICorJitCompiler* __stdcall getJit()
+extern "C" DLLEXPORT ICorJitCompiler* getJit()
{
DWORD dwRetVal = 0;
PgetJit pngetJit;
diff --git a/src/coreclr/ToolBox/superpmi/superpmi/icorjitinfo.cpp b/src/coreclr/ToolBox/superpmi/superpmi/icorjitinfo.cpp
index f1eb73b8915c96..91bf8bf2f779ff 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi/icorjitinfo.cpp
+++ b/src/coreclr/ToolBox/superpmi/superpmi/icorjitinfo.cpp
@@ -1859,7 +1859,7 @@ uint16_t MyICJI::getRelocTypeHint(void* target)
// For what machine does the VM expect the JIT to generate code? The VM
// returns one of the IMAGE_FILE_MACHINE_* values. Note that if the VM
-// is cross-compiling (such as the case for crossgen), it will return a
+// is cross-compiling (such as the case for crossgen2), it will return a
// different value than if it was compiling for the host architecture.
//
uint32_t MyICJI::getExpectedTargetArchitecture()
diff --git a/src/coreclr/classlibnative/bcltype/objectnative.cpp b/src/coreclr/classlibnative/bcltype/objectnative.cpp
index 80023e27a9d817..5ecbdc3b0c3742 100644
--- a/src/coreclr/classlibnative/bcltype/objectnative.cpp
+++ b/src/coreclr/classlibnative/bcltype/objectnative.cpp
@@ -163,8 +163,8 @@ FCIMPL2(FC_BOOL_RET, ObjectNative::Equals, Object *pThisRef, Object *pCompareRef
FC_RETURN_BOOL(FALSE);
// Compare the contents (size - vtable - sync block index).
- DWORD dwBaseSize = pThisRef->GetMethodTable()->GetBaseSize();
- if(pThisRef->GetMethodTable() == g_pStringClass)
+ DWORD dwBaseSize = pThisMT->GetBaseSize();
+ if(pThisMT == g_pStringClass)
dwBaseSize -= sizeof(WCHAR);
BOOL ret = memcmp(
(void *) (pThisRef+1),
diff --git a/src/coreclr/clrdefinitions.cmake b/src/coreclr/clrdefinitions.cmake
index 425d3d84d6a13f..eeb421cac4c2f9 100644
--- a/src/coreclr/clrdefinitions.cmake
+++ b/src/coreclr/clrdefinitions.cmake
@@ -57,12 +57,14 @@ if(CLR_CMAKE_HOST_WIN32)
add_definitions(-D_CRT_SECURE_NO_WARNINGS)
endif(CLR_CMAKE_HOST_WIN32)
-add_compile_definitions($<$>>:EnC_SUPPORTED>)
-if(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_I386)
+if (NOT (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX))
+ add_compile_definitions($<$>>:EnC_SUPPORTED>)
+endif()
+if(CLR_CMAKE_TARGET_ARCH_AMD64 OR (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_WIN32))
if(CLR_CMAKE_TARGET_WIN32)
add_compile_definitions($<$>>:FEATURE_ENC_SUPPORTED>)
endif(CLR_CMAKE_TARGET_WIN32)
-endif(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_I386)
+endif(CLR_CMAKE_TARGET_ARCH_AMD64 OR (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_WIN32))
# Features - please keep them alphabetically sorted
if(CLR_CMAKE_TARGET_WIN32)
diff --git a/src/coreclr/crosscomponents.cmake b/src/coreclr/crosscomponents.cmake
index b779f461aa9c04..864bdf9e722184 100644
--- a/src/coreclr/crosscomponents.cmake
+++ b/src/coreclr/crosscomponents.cmake
@@ -13,14 +13,6 @@ if (CLR_CMAKE_HOST_OS STREQUAL CLR_CMAKE_TARGET_OS)
COMPONENT crosscomponents
)
- if(CLR_CMAKE_HOST_LINUX OR NOT FEATURE_CROSSBITNESS)
- install_clr (TARGETS
- crossgen
- DESTINATIONS . sharedFramework
- COMPONENT crosscomponents
- )
- endif()
-
if(CLR_CMAKE_TARGET_OSX AND ARCH_TARGET_NAME STREQUAL arm64)
install_clr (TARGETS
clrjit_unix_osx_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME}
diff --git a/src/coreclr/crossgen-corelib.proj b/src/coreclr/crossgen-corelib.proj
index 8ffbaae33220a3..6d675a2a59e247 100644
--- a/src/coreclr/crossgen-corelib.proj
+++ b/src/coreclr/crossgen-corelib.proj
@@ -96,8 +96,10 @@
$(CrossGenDllCmd) -o:$(CoreLibOutputPath)
$(CrossGenDllCmd) -r:$([MSBuild]::NormalizePath('$(BinDir)', 'IL', '*.dll'))
$(CrossGenDllCmd) --targetarch:$(TargetArchitecture)
- $(CrossGenDllCmd) -m:$(MergedMibcPath) --embed-pgo-data
+ @(OptimizationMibcFiles->'-m:$(MergedMibcPath)', ' ')
+ $(CrossGenDllCmd) $(MibcArgs) --embed-pgo-data
$(CrossGenDllCmd) -O
+ $(CrossGenDllCmd) --verify-type-and-field-layout
$(CrossGenDllCmd) $(CoreLibInputPath)
diff --git a/src/libraries/Microsoft.Extensions.HostFactoryResolver/tests/MockHostTypes/IHostBuilder.cs b/src/coreclr/debug/daccess/s390x/primitives.cpp
similarity index 58%
rename from src/libraries/Microsoft.Extensions.HostFactoryResolver/tests/MockHostTypes/IHostBuilder.cs
rename to src/coreclr/debug/daccess/s390x/primitives.cpp
index ef73665296d748..06d7ad1d084566 100644
--- a/src/libraries/Microsoft.Extensions.HostFactoryResolver/tests/MockHostTypes/IHostBuilder.cs
+++ b/src/coreclr/debug/daccess/s390x/primitives.cpp
@@ -1,10 +1,8 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-namespace MockHostTypes
-{
- public interface IHostBuilder
- {
- IHost Build();
- }
-}
+//
+
+#include "stdafx.h"
+
+#include "../../shared/s390x/primitives.cpp"
diff --git a/src/coreclr/debug/di/CMakeLists.txt b/src/coreclr/debug/di/CMakeLists.txt
index 55f34bd1ce3e27..b0272c4d2518d8 100644
--- a/src/coreclr/debug/di/CMakeLists.txt
+++ b/src/coreclr/debug/di/CMakeLists.txt
@@ -1,4 +1,5 @@
add_definitions(
+ -DDBI_COMPILE
-DFEATURE_METADATA_CUSTOM_DATA_SOURCE
-DFEATURE_METADATA_DEBUGGEE_DATA_SOURCE
-DFEATURE_NO_HOST
diff --git a/src/coreclr/debug/ee/controller.cpp b/src/coreclr/debug/ee/controller.cpp
index 7f7fb109b402cd..eed3c453e78e2b 100644
--- a/src/coreclr/debug/ee/controller.cpp
+++ b/src/coreclr/debug/ee/controller.cpp
@@ -83,7 +83,11 @@ SharedPatchBypassBuffer* DebuggerControllerPatch::GetOrCreateSharedPatchBypassBu
if (m_pSharedPatchBypassBuffer == NULL)
{
- m_pSharedPatchBypassBuffer = new (interopsafeEXEC) SharedPatchBypassBuffer();
+ void *pSharedPatchBypassBufferRX = g_pDebugger->GetInteropSafeExecutableHeap()->Alloc(sizeof(SharedPatchBypassBuffer));
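+ // The buffer is allocated from the read-execute (RX) heap; writes during
+ // construction go through a temporary RW mapping from the writer holder.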
+ ExecutableWriterHolder<SharedPatchBypassBuffer> sharedPatchBypassBufferWriterHolder((SharedPatchBypassBuffer*)pSharedPatchBypassBufferRX, sizeof(SharedPatchBypassBuffer));
+ new (sharedPatchBypassBufferWriterHolder.GetRW()) SharedPatchBypassBuffer();
+ m_pSharedPatchBypassBuffer = (SharedPatchBypassBuffer*)pSharedPatchBypassBufferRX;
+
_ASSERTE(m_pSharedPatchBypassBuffer);
TRACE_ALLOC(m_pSharedPatchBypassBuffer);
}
@@ -1364,9 +1368,7 @@ bool DebuggerController::ApplyPatch(DebuggerControllerPatch *patch)
LPVOID baseAddress = (LPVOID)(patch->address);
-#if defined(HOST_OSX) && defined(HOST_ARM64)
- auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
-#else // defined(HOST_OSX) && defined(HOST_ARM64)
+#if !defined(HOST_OSX) || !defined(HOST_ARM64)
DWORD oldProt;
if (!VirtualProtect(baseAddress,
@@ -1376,7 +1378,7 @@ bool DebuggerController::ApplyPatch(DebuggerControllerPatch *patch)
_ASSERTE(!"VirtualProtect of code page failed");
return false;
}
-#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+#endif // !defined(HOST_OSX) || !defined(HOST_ARM64)
patch->opcode = CORDbgGetInstruction(patch->address);
@@ -1391,7 +1393,7 @@ bool DebuggerController::ApplyPatch(DebuggerControllerPatch *patch)
_ASSERTE(!"VirtualProtect of code page failed");
return false;
}
-#endif // !defined(HOST_OSX) || !defined(HOST_ARM64)
+#endif // !defined(HOST_OSX) || !defined(HOST_ARM64)
}
// TODO: : determine if this is needed for AMD64
#if defined(TARGET_X86) //REVISIT_TODO what is this?!
@@ -1408,12 +1410,14 @@ bool DebuggerController::ApplyPatch(DebuggerControllerPatch *patch)
_ASSERTE(!"VirtualProtect of code page failed");
return false;
}
+
patch->opcode =
(unsigned int) *(unsigned short*)(patch->address+1);
_ASSERTE(patch->opcode != CEE_BREAK);
- *(unsigned short *) (patch->address+1) = CEE_BREAK;
+ ExecutableWriterHolder<BYTE> breakpointWriterHolder((BYTE*)patch->address, 2);
+ *(unsigned short *) (breakpointWriterHolder.GetRW()+1) = CEE_BREAK;
if (!VirtualProtect((void *) patch->address, 2, oldProt, &oldProt))
{
@@ -1460,9 +1464,7 @@ bool DebuggerController::UnapplyPatch(DebuggerControllerPatch *patch)
LPVOID baseAddress = (LPVOID)(patch->address);
-#if defined(HOST_OSX) && defined(HOST_ARM64)
- auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
-#else // defined(HOST_OSX) && defined(HOST_ARM64)
+#if !defined(HOST_OSX) || !defined(HOST_ARM64)
DWORD oldProt;
if (!VirtualProtect(baseAddress,
@@ -1477,7 +1479,7 @@ bool DebuggerController::UnapplyPatch(DebuggerControllerPatch *patch)
InitializePRD(&(patch->opcode));
return false;
}
-#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+#endif // !defined(HOST_OSX) || !defined(HOST_ARM64)
CORDbgSetInstruction((CORDB_ADDRESS_TYPE *)patch->address, patch->opcode);
@@ -1494,7 +1496,7 @@ bool DebuggerController::UnapplyPatch(DebuggerControllerPatch *patch)
_ASSERTE(!"VirtualProtect of code page failed");
return false;
}
-#endif // !defined(HOST_OSX) || !defined(HOST_ARM64)
+#endif // !defined(HOST_OSX) || !defined(HOST_ARM64)
}
else
{
@@ -1519,7 +1521,8 @@ bool DebuggerController::UnapplyPatch(DebuggerControllerPatch *patch)
#if defined(TARGET_X86)
_ASSERTE(*(unsigned short*)(patch->address+1) == CEE_BREAK);
- *(unsigned short *) (patch->address+1)
+ ExecutableWriterHolder<BYTE> breakpointWriterHolder((BYTE*)patch->address, 2);
+ *(unsigned short *) (breakpointWriterHolder.GetRW()+1)
= (unsigned short) patch->opcode;
#endif //this makes no sense on anything but X86
//VERY IMPORTANT to zero out opcode, else we might mistake
@@ -4409,8 +4412,9 @@ DebuggerPatchSkip::DebuggerPatchSkip(Thread *thread,
}
else
{
+ _ASSERTE(m_instrAttrib.m_cOperandSize <= SharedPatchBypassBuffer::cbBufferBypass);
// Copy the data into our buffer.
- memcpy(bufferBypass, patch->address + m_instrAttrib.m_cbInstr + dwOldDisp, SharedPatchBypassBuffer::cbBufferBypass);
+ memcpy(bufferBypass, patch->address + m_instrAttrib.m_cbInstr + dwOldDisp, m_instrAttrib.m_cOperandSize);
if (m_instrAttrib.m_fIsWrite)
{
diff --git a/src/coreclr/debug/ee/controller.h b/src/coreclr/debug/ee/controller.h
index 9bcfc8682f7b2c..9fe0a67e3ea28f 100644
--- a/src/coreclr/debug/ee/controller.h
+++ b/src/coreclr/debug/ee/controller.h
@@ -288,7 +288,7 @@ class SharedPatchBypassBuffer
// "PatchBypass" must be the first field of this class for alignment to be correct.
BYTE PatchBypass[MAX_INSTRUCTION_LENGTH];
#if defined(TARGET_AMD64)
- const static int cbBufferBypass = 0x10;
+ const static int cbBufferBypass = 0x20;
BYTE BypassBuffer[cbBufferBypass];
UINT_PTR RipTargetFixup;
diff --git a/src/coreclr/debug/ee/debugger.cpp b/src/coreclr/debug/ee/debugger.cpp
index d8791362ca7200..4706790dd3d7f8 100644
--- a/src/coreclr/debug/ee/debugger.cpp
+++ b/src/coreclr/debug/ee/debugger.cpp
@@ -67,7 +67,6 @@ bool g_EnableSIS = false;
// The following instances are used for invoking overloaded new/delete
InteropSafe interopsafe;
-InteropSafeExecutable interopsafeEXEC;
#ifndef DACCESS_COMPILE
@@ -1316,16 +1315,15 @@ DebuggerEval::DebuggerEval(CONTEXT * pContext, DebuggerIPCE_FuncEvalInfo * pEval
{
WRAPPER_NO_CONTRACT;
-#if defined(HOST_OSX) && defined(HOST_ARM64)
- auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
-#endif // defined(HOST_OSX) && defined(HOST_ARM64)
-
// Allocate the breakpoint instruction info in executable memory.
- m_bpInfoSegment = new (interopsafeEXEC, nothrow) DebuggerEvalBreakpointInfoSegment(this);
+ void *bpInfoSegmentRX = g_pDebugger->GetInteropSafeExecutableHeap()->Alloc(sizeof(DebuggerEvalBreakpointInfoSegment));
+ ExecutableWriterHolder<DebuggerEvalBreakpointInfoSegment> bpInfoSegmentWriterHolder((DebuggerEvalBreakpointInfoSegment*)bpInfoSegmentRX, sizeof(DebuggerEvalBreakpointInfoSegment));
+ new (bpInfoSegmentWriterHolder.GetRW()) DebuggerEvalBreakpointInfoSegment(this);
+ m_bpInfoSegment = (DebuggerEvalBreakpointInfoSegment*)bpInfoSegmentRX;
// This must be non-zero so that the saved opcode is non-zero, and on IA64 we want it to be 0x16
// so that we can have a breakpoint instruction in any slot in the bundle.
- m_bpInfoSegment->m_breakpointInstruction[0] = 0x16;
+ bpInfoSegmentWriterHolder.GetRW()->m_breakpointInstruction[0] = 0x16;
#if defined(TARGET_ARM)
USHORT *bp = (USHORT*)&m_bpInfoSegment->m_breakpointInstruction;
*bp = CORDbg_BREAK_INSTRUCTION;
diff --git a/src/coreclr/debug/ee/debugger.h b/src/coreclr/debug/ee/debugger.h
index d66198607e5c1a..9d80fb3eec77eb 100644
--- a/src/coreclr/debug/ee/debugger.h
+++ b/src/coreclr/debug/ee/debugger.h
@@ -1104,11 +1104,8 @@ struct DECLSPEC_ALIGN(4096) DebuggerHeapExecutableMemoryPage
inline void SetNextPage(DebuggerHeapExecutableMemoryPage* nextPage)
{
-#if defined(HOST_OSX) && defined(HOST_ARM64)
- auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
-#endif // defined(HOST_OSX) && defined(HOST_ARM64)
-
- chunks[0].bookkeeping.nextPage = nextPage;
+ ExecutableWriterHolder<DebuggerHeapExecutableMemoryPage> debuggerHeapPageWriterHolder(this, sizeof(DebuggerHeapExecutableMemoryPage));
+ debuggerHeapPageWriterHolder.GetRW()->chunks[0].bookkeeping.nextPage = nextPage;
}
inline uint64_t GetPageOccupancy() const
@@ -1118,14 +1115,11 @@ struct DECLSPEC_ALIGN(4096) DebuggerHeapExecutableMemoryPage
inline void SetPageOccupancy(uint64_t newOccupancy)
{
-#if defined(HOST_OSX) && defined(HOST_ARM64)
- auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
-#endif // defined(HOST_OSX) && defined(HOST_ARM64)
-
// Can't unset first bit of occupancy!
ASSERT((newOccupancy & 0x8000000000000000) != 0);
- chunks[0].bookkeeping.pageOccupancy = newOccupancy;
+ ExecutableWriterHolder<DebuggerHeapExecutableMemoryPage> debuggerHeapPageWriterHolder(this, sizeof(DebuggerHeapExecutableMemoryPage));
+ debuggerHeapPageWriterHolder.GetRW()->chunks[0].bookkeeping.pageOccupancy = newOccupancy;
}
inline void* GetPointerToChunk(int chunkNum) const
@@ -1135,16 +1129,14 @@ struct DECLSPEC_ALIGN(4096) DebuggerHeapExecutableMemoryPage
DebuggerHeapExecutableMemoryPage()
{
-#if defined(HOST_OSX) && defined(HOST_ARM64)
- auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
-#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+ ExecutableWriterHolder<DebuggerHeapExecutableMemoryPage> debuggerHeapPageWriterHolder(this, sizeof(DebuggerHeapExecutableMemoryPage));
SetPageOccupancy(0x8000000000000000); // only the first bit is set.
for (uint8_t i = 1; i < sizeof(chunks)/sizeof(chunks[0]); i++)
{
ASSERT(i != 0);
- chunks[i].data.startOfPage = this;
- chunks[i].data.chunkNumber = i;
+ debuggerHeapPageWriterHolder.GetRW()->chunks[i].data.startOfPage = this;
+ debuggerHeapPageWriterHolder.GetRW()->chunks[i].data.chunkNumber = i;
}
}
@@ -3486,9 +3478,6 @@ class DebuggerEval
class InteropSafe {};
extern InteropSafe interopsafe;
-class InteropSafeExecutable {};
-extern InteropSafeExecutable interopsafeEXEC;
-
#ifndef DACCESS_COMPILE
inline void * __cdecl operator new(size_t n, const InteropSafe&)
{
@@ -3631,62 +3620,6 @@ template void DeleteInteropSafe(T *p)
}
}
-inline void * __cdecl operator new(size_t n, const InteropSafeExecutable&)
-{
- CONTRACTL
- {
- THROWS; // throw on OOM
- GC_NOTRIGGER;
- }
- CONTRACTL_END;
-
- _ASSERTE(g_pDebugger != NULL);
- void *result = g_pDebugger->GetInteropSafeExecutableHeap()->Alloc((DWORD)n);
- if (result == NULL) {
- ThrowOutOfMemory();
- }
- return result;
-}
-
-inline void * __cdecl operator new(size_t n, const InteropSafeExecutable&, const NoThrow&) throw()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- }
- CONTRACTL_END;
-
- _ASSERTE(g_pDebugger != NULL);
- DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeExecutableHeap_NoThrow();
- if (pHeap == NULL)
- {
- return NULL;
- }
- void *result = pHeap->Alloc((DWORD)n);
- return result;
-}
-
-// Note: there is no C++ syntax for manually invoking this, but if a constructor throws an exception I understand that
-// this delete operator will be invoked automatically to destroy the object.
-inline void __cdecl operator delete(void *p, const InteropSafeExecutable&)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- }
- CONTRACTL_END;
-
- if (p != NULL)
- {
- _ASSERTE(g_pDebugger != NULL);
- DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeExecutableHeap_NoThrow();
- _ASSERTE(pHeap != NULL); // should have had heap around if we're deleting
- pHeap->Free(p);
- }
-}
-
//
// Interop safe delete to match the interop safe new's above. There is no C++ syntax for actually invoking those interop
// safe delete operators above, so we use this method to accomplish the same thing.
diff --git a/src/coreclr/debug/ee/funceval.cpp b/src/coreclr/debug/ee/funceval.cpp
index 41acc0cb92b9d3..de23343b7979b7 100644
--- a/src/coreclr/debug/ee/funceval.cpp
+++ b/src/coreclr/debug/ee/funceval.cpp
@@ -1635,7 +1635,7 @@ static void GCProtectAllPassedArgs(DebuggerEval *pDE,
#endif
}
#endif // TARGET_X86
-
+ FALLTHROUGH;
default:
//
// Ignorable - no need to protect
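The FALLTHROUGH added above annotates the intentional drop into the default label so implicit-fallthrough warnings stay quiet; in standard C++17 terms the equivalent spelling is roughly (illustrative sketch, toy functions):

    void Prepare();
    void Handle();

    void Dispatch(int kind)
    {
        switch (kind)
        {
            case 1:
                Prepare();
                [[fallthrough]]; // deliberate fall-through into the default handling
            default:
                Handle();
                break;
        }
    }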
diff --git a/src/coreclr/debug/ee/i386/x86walker.cpp b/src/coreclr/debug/ee/i386/x86walker.cpp
index ef7dabb3be4c26..cb262e770ecdc3 100644
--- a/src/coreclr/debug/ee/i386/x86walker.cpp
+++ b/src/coreclr/debug/ee/i386/x86walker.cpp
@@ -415,7 +415,7 @@ void NativeWalker::DecodeInstructionForPatchSkip(const BYTE *address, Instructio
case 2:
case 3:
pInstrAttrib->m_fIsCall = true;
- // fall through
+ FALLTHROUGH;
case 4:
case 5:
pInstrAttrib->m_fIsAbsBranch = true;
diff --git a/src/coreclr/debug/ee/s390x/dbghelpers.S b/src/coreclr/debug/ee/s390x/dbghelpers.S
new file mode 100644
index 00000000000000..a1ec66394511ea
--- /dev/null
+++ b/src/coreclr/debug/ee/s390x/dbghelpers.S
@@ -0,0 +1,8 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include "asmconstants.h"
+#include "unixasmmacros.inc"
+
+#error Unsupported platform
+
diff --git a/src/coreclr/debug/ee/s390x/primitives.cpp b/src/coreclr/debug/ee/s390x/primitives.cpp
new file mode 100644
index 00000000000000..3432959ed16815
--- /dev/null
+++ b/src/coreclr/debug/ee/s390x/primitives.cpp
@@ -0,0 +1,9 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+//
+
+#include "stdafx.h"
+#include "threads.h"
+#include "../../shared/s390x/primitives.cpp"
+
diff --git a/src/coreclr/debug/ee/wks/CMakeLists.txt b/src/coreclr/debug/ee/wks/CMakeLists.txt
index ca3aca44e1ffaf..6411b64d436ff7 100644
--- a/src/coreclr/debug/ee/wks/CMakeLists.txt
+++ b/src/coreclr/debug/ee/wks/CMakeLists.txt
@@ -32,11 +32,7 @@ if (CLR_CMAKE_TARGET_WIN32)
else ()
- if(CLR_CMAKE_HOST_ARCH_AMD64 OR CLR_CMAKE_HOST_ARCH_ARM OR CLR_CMAKE_HOST_ARCH_ARM64 OR CLR_CMAKE_HOST_ARCH_I386)
- add_library_clr(cordbee_wks_obj OBJECT ${CORDBEE_SOURCES_WKS} ../${ARCH_SOURCES_DIR}/dbghelpers.S)
- else()
- message(FATAL_ERROR "Unknown platform")
- endif()
+ add_library_clr(cordbee_wks_obj OBJECT ${CORDBEE_SOURCES_WKS} ../${ARCH_SOURCES_DIR}/dbghelpers.S)
endif (CLR_CMAKE_TARGET_WIN32)
diff --git a/src/coreclr/debug/inc/amd64/primitives.h b/src/coreclr/debug/inc/amd64/primitives.h
index a119c2b3639fe3..d8d14b24b5425a 100644
--- a/src/coreclr/debug/inc/amd64/primitives.h
+++ b/src/coreclr/debug/inc/amd64/primitives.h
@@ -12,6 +12,10 @@
#ifndef PRIMITIVES_H_
#define PRIMITIVES_H_
+#if !defined(DBI_COMPILE) && !defined(DACCESS_COMPILE)
+#include "executableallocator.h"
+#endif
+
#ifndef CORDB_ADDRESS_TYPE
typedef const BYTE CORDB_ADDRESS_TYPE;
typedef DPTR(CORDB_ADDRESS_TYPE) PTR_CORDB_ADDRESS_TYPE;
@@ -187,7 +191,14 @@ inline void CORDbgInsertBreakpoint(UNALIGNED CORDB_ADDRESS_TYPE *address)
{
LIMITED_METHOD_CONTRACT;
- *((unsigned char*)address) = 0xCC; // int 3 (single byte patch)
+#if !defined(DBI_COMPILE) && !defined(DACCESS_COMPILE)
+ ExecutableWriterHolder breakpointWriterHolder(address, CORDbg_BREAK_INSTRUCTION_SIZE);
+ UNALIGNED CORDB_ADDRESS_TYPE* addressRW = breakpointWriterHolder.GetRW();
+#else // !DBI_COMPILE && !DACCESS_COMPILE
+ UNALIGNED CORDB_ADDRESS_TYPE* addressRW = address;
+#endif // !DBI_COMPILE && !DACCESS_COMPILE
+
+ *((unsigned char*)addressRW) = 0xCC; // int 3 (single byte patch)
FlushInstructionCache(GetCurrentProcess(), address, 1);
}
@@ -198,7 +209,14 @@ inline void CORDbgSetInstruction(UNALIGNED CORDB_ADDRESS_TYPE* address,
// In a DAC build, this function assumes the input is a host address.
LIMITED_METHOD_DAC_CONTRACT;
- *((unsigned char*)address) =
+#if !defined(DBI_COMPILE) && !defined(DACCESS_COMPILE)
+ ExecutableWriterHolder instructionWriterHolder(address, sizeof(unsigned char));
+ UNALIGNED CORDB_ADDRESS_TYPE* addressRW = instructionWriterHolder.GetRW();
+#else // !DBI_COMPILE && !DACCESS_COMPILE
+ UNALIGNED CORDB_ADDRESS_TYPE* addressRW = address;
+#endif // !DBI_COMPILE && !DACCESS_COMPILE
+
+ *((unsigned char*)addressRW) =
(unsigned char) instruction; // setting one byte is important
FlushInstructionCache(GetCurrentProcess(), address, 1);
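
The holders added in this hunk route every code patch through a writable alias returned by GetRW(), while the cache flush still targets the original read-execute address. Below is a minimal, self-contained model of the calling pattern; WriterHolderSketch is a hypothetical stand-in for the ExecutableWriterHolder introduced later in this diff (src/coreclr/inc/executableallocator.h), which may additionally remap memory or toggle JIT write protection.

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in: the real holder may hand back a different RW mapping.
template <typename T>
class WriterHolderSketch
{
    T* m_rw;
public:
    WriterHolderSketch(T* rx, size_t size) : m_rw(rx)
    {
        (void)size; // a real implementation would map [rx, rx + size) writable here
    }
    ~WriterHolderSketch()
    {
        // a real implementation would unmap the RW alias here
    }
    T* GetRW() const { return m_rw; }
};

int main()
{
    unsigned char code[4] = { 0x90, 0x90, 0x90, 0x90 }; // stand-in for RX code
    {
        // Scope the holder tightly around the patch, as the debugger code does.
        WriterHolderSketch<unsigned char> holder(code, 1);
        *holder.GetRW() = 0xCC; // int 3, written only through the RW alias
    }
    // A cache flush (FlushInstructionCache) would still use the RX address.
    std::printf("%02X\n", (unsigned)code[0]); // CC
    return 0;
}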
diff --git a/src/coreclr/debug/inc/arm/primitives.h b/src/coreclr/debug/inc/arm/primitives.h
index 269281eb006bed..c4e2d28602e565 100644
--- a/src/coreclr/debug/inc/arm/primitives.h
+++ b/src/coreclr/debug/inc/arm/primitives.h
@@ -12,6 +12,10 @@
#ifndef PRIMITIVES_H_
#define PRIMITIVES_H_
+#if !defined(DBI_COMPILE) && !defined(DACCESS_COMPILE)
+#include "executableallocator.h"
+#endif
+
#ifndef THUMB_CODE
#define THUMB_CODE 1
#endif
@@ -159,7 +163,14 @@ inline void CORDbgSetInstruction(CORDB_ADDRESS_TYPE* address,
// In a DAC build, this function assumes the input is an host address.
LIMITED_METHOD_DAC_CONTRACT;
- CORDB_ADDRESS ptraddr = (CORDB_ADDRESS)address;
+#if !defined(DBI_COMPILE) && !defined(DACCESS_COMPILE)
+ ExecutableWriterHolder<CORDB_ADDRESS_TYPE> instructionWriterHolder(address, sizeof(PRD_TYPE));
+ CORDB_ADDRESS_TYPE* addressRW = instructionWriterHolder.GetRW();
+#else // !DBI_COMPILE && !DACCESS_COMPILE
+ CORDB_ADDRESS_TYPE* addressRW = address;
+#endif // !DBI_COMPILE && !DACCESS_COMPILE
+
+ CORDB_ADDRESS ptraddr = (CORDB_ADDRESS)addressRW;
_ASSERTE(ptraddr & THUMB_CODE);
ptraddr &= ~THUMB_CODE;
diff --git a/src/coreclr/debug/inc/arm64/primitives.h b/src/coreclr/debug/inc/arm64/primitives.h
index f359680370dfe5..4f4c3f7bcd8f21 100644
--- a/src/coreclr/debug/inc/arm64/primitives.h
+++ b/src/coreclr/debug/inc/arm64/primitives.h
@@ -12,6 +12,10 @@
#ifndef PRIMITIVES_H_
#define PRIMITIVES_H_
+#if !defined(DBI_COMPILE) && !defined(DACCESS_COMPILE)
+#include "executableallocator.h"
+#endif
+
typedef NEON128 FPRegister64;
typedef const BYTE CORDB_ADDRESS_TYPE;
typedef DPTR(CORDB_ADDRESS_TYPE) PTR_CORDB_ADDRESS_TYPE;
@@ -146,7 +150,13 @@ inline void CORDbgSetInstruction(CORDB_ADDRESS_TYPE* address,
// In a DAC build, this function assumes the input is an host address.
LIMITED_METHOD_DAC_CONTRACT;
+#if !defined(DBI_COMPILE) && !defined(DACCESS_COMPILE)
+ ExecutableWriterHolder<void> instructionWriterHolder((LPVOID)address, sizeof(PRD_TYPE));
+
+ ULONGLONG ptraddr = dac_cast<ULONGLONG>(instructionWriterHolder.GetRW());
+#else // !DBI_COMPILE && !DACCESS_COMPILE
ULONGLONG ptraddr = dac_cast<ULONGLONG>(address);
+#endif // !DBI_COMPILE && !DACCESS_COMPILE
*(PRD_TYPE *)ptraddr = instruction;
FlushInstructionCache(GetCurrentProcess(),
address,
diff --git a/src/coreclr/debug/inc/i386/primitives.h b/src/coreclr/debug/inc/i386/primitives.h
index 980dc2707bb0f6..313b42c5a1970d 100644
--- a/src/coreclr/debug/inc/i386/primitives.h
+++ b/src/coreclr/debug/inc/i386/primitives.h
@@ -12,6 +12,9 @@
#ifndef PRIMITIVES_H_
#define PRIMITIVES_H_
+#if !defined(DBI_COMPILE) && !defined(DACCESS_COMPILE)
+#include "executableallocator.h"
+#endif
typedef const BYTE CORDB_ADDRESS_TYPE;
typedef DPTR(CORDB_ADDRESS_TYPE) PTR_CORDB_ADDRESS_TYPE;
@@ -148,7 +151,14 @@ inline void CORDbgInsertBreakpoint(UNALIGNED CORDB_ADDRESS_TYPE *address)
{
LIMITED_METHOD_CONTRACT;
- *((unsigned char*)address) = 0xCC; // int 3 (single byte patch)
+#if !defined(DBI_COMPILE) && !defined(DACCESS_COMPILE)
+ ExecutableWriterHolder<CORDB_ADDRESS_TYPE> breakpointWriterHolder(address, CORDbg_BREAK_INSTRUCTION_SIZE);
+ UNALIGNED CORDB_ADDRESS_TYPE* addressRW = breakpointWriterHolder.GetRW();
+#else // !DBI_COMPILE && !DACCESS_COMPILE
+ UNALIGNED CORDB_ADDRESS_TYPE* addressRW = address;
+#endif // !DBI_COMPILE && !DACCESS_COMPILE
+
+ *((unsigned char*)addressRW) = 0xCC; // int 3 (single byte patch)
FlushInstructionCache(GetCurrentProcess(), address, 1);
}
diff --git a/src/coreclr/debug/shared/s390x/primitives.cpp b/src/coreclr/debug/shared/s390x/primitives.cpp
new file mode 100644
index 00000000000000..cb4be30c89b56e
--- /dev/null
+++ b/src/coreclr/debug/shared/s390x/primitives.cpp
@@ -0,0 +1,15 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+//*****************************************************************************
+// File: primitives.cpp
+//
+
+//
+// Platform-specific debugger primitives
+//
+//*****************************************************************************
+
+#include "primitives.h"
+
+#error Unsupported platform
+
diff --git a/src/coreclr/dlls/mscorrc/mscorrc.rc b/src/coreclr/dlls/mscorrc/mscorrc.rc
index 20390aec94eb11..0b4247c17e637a 100644
--- a/src/coreclr/dlls/mscorrc/mscorrc.rc
+++ b/src/coreclr/dlls/mscorrc/mscorrc.rc
@@ -745,7 +745,10 @@ BEGIN
IDS_PROF_V2PROFILER_DISABLED "Loading profiler failed. The profiler that was configured to load was designed for an older version of the CLR. You can use the COMPlus_ProfAPI_ProfilerCompatibilitySetting environment variable to allow older profilers to be loaded by the current version of the CLR. Please consult the documentation for information on how to use this environment variable, and the risks associated with it. Profiler CLSID: '%s'."
IDS_PROF_V2PROFILER_ENABLED "A profiler designed for an older version of the CLR was loaded because of the environment variable setting below. Older profilers will continue to work in many cases, but if you encounter problems, please consider upgrading the profiler or changing the setting of the environment variable. Please consult the documentation for information on how to use this environment variable, and the risks associated with it. Environment variable setting: %s=%s. Profiler CLSID: '%s'."
IDS_PROF_PROFILER_DISABLED "Profilers will not be loaded by the current version of the CLR because of the environment variable setting below. Please consult the documentation for information on how to use this environment variable, and the risks associated with it. Environment variable setting: %s=%s. Profiler CLSID: '%s'."
+ IDS_E_PROF_NOTIFICATION_DISABLED "A notification profiler was prevented from loading due to app settings."
+ IDS_E_PROF_NOTIFICATION_LIMIT_EXCEEDED "A notification profiler was prevented from loading because the limit on notification profilers was reached."
IDS_E_PROF_TIMEOUT_WAITING_FOR_CONCURRENT_GC "Profiler timed out on waiting for concurrent GC to finish after '%d' milliseconds. Please configure your profiler to increase its attaching time out value or consult the documentation for the COMPlus_ProfAPI_AttachProfilerMinTimeoutInMs environment variable and try again. Profiler CLSID: '%s'."
+ IDS_PROF_ALREADY_LOADED "A request was made to load a profiler when a profiler was already loaded."
END
diff --git a/src/coreclr/dlls/mscorrc/resource.h b/src/coreclr/dlls/mscorrc/resource.h
index b26dc8567426ca..8038665a0b8d40 100644
--- a/src/coreclr/dlls/mscorrc/resource.h
+++ b/src/coreclr/dlls/mscorrc/resource.h
@@ -549,6 +549,9 @@
#define IDS_PROF_SUPPLEMENTARY_INFO 0x2506
#define IDS_PROF_LOAD_COMPLETE 0x2507
#define IDS_E_PROF_BAD_PATH 0x2508
+#define IDS_E_PROF_NOTIFICATION_DISABLED 0x2509
+#define IDS_PROF_ALREADY_LOADED 0x250A
+#define IDS_E_PROF_NOTIFICATION_LIMIT_EXCEEDED 0x250B
#define IDS_E_PROF_NOT_ATTACHABLE 0x250E
#define IDS_E_PROF_UNHANDLED_EXCEPTION_ON_LOAD 0x250F
#define IDS_PROF_ATTACH_REQUEST_RECEIVED 0x2512
diff --git a/src/coreclr/gc/env/gcenv.h b/src/coreclr/gc/env/gcenv.h
index 6a6b1c3c87412f..63b5979aab0c8a 100644
--- a/src/coreclr/gc/env/gcenv.h
+++ b/src/coreclr/gc/env/gcenv.h
@@ -195,131 +195,7 @@ enum LogLevel
class ThreadStressLog
{
public:
- static const char* gcStartMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "{ =========== BEGINGC %d, (requested generation = %lu, collect_classes = %lu) ==========\n";
- }
-
- static const char* gcEndMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "========== ENDGC %d (gen = %lu, collect_classes = %lu) ===========}\n";
- }
-
- static const char* gcRootMsg()
- {
- STATIC_CONTRACT_LEAF;
- return " GC Root %p RELOCATED %p -> %p MT = %pT\n";
- }
-
- static const char* gcRootPromoteMsg()
- {
- STATIC_CONTRACT_LEAF;
- return " IGCHeap::Promote: Promote GC Root *%p = %p MT = %pT\n";
- }
-
- static const char* gcPlugMoveMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "GC_HEAP RELOCATING Objects in heap within range [%p %p) by -0x%x bytes\n";
- }
-
- static const char* gcServerThread0StartMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "%d gc thread waiting...";
- }
-
- static const char* gcServerThreadNStartMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "%d gc thread waiting... Done";
- }
-
- static const char* gcDetailedStartMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "*GC* %d(gen0:%d)(%d)(alloc: %Id)(%s)(%d)";
- }
-
- static const char* gcDetailedEndMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "*EGC* %Id(gen0:%Id)(%Id)(%d)(%s)(%s)(%s)(ml: %d->%d)";
- }
-
- static const char* gcStartMarkMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "---- Mark Phase on heap %d condemning %d ----";
- }
-
- static const char* gcStartPlanMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "---- Plan Phase on heap %d ---- Condemned generation %d, promotion: %d";
- }
-
- static const char* gcStartRelocateMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "---- Relocate phase on heap %d -----";
- }
-
- static const char* gcEndRelocateMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "---- End of Relocate phase on heap %d ----";
- }
-
- static const char* gcStartCompactMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "---- Compact Phase on heap %d: %Ix(%Ix)----";
- }
-
- static const char* gcEndCompactMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "---- End of Compact phase on heap %d ----";
- }
-
- static const char* gcMemCopyMsg()
- {
- STATIC_CONTRACT_LEAF;
- return " mc: [%Ix->%Ix, %Ix->%Ix[";
- }
-
- static const char* gcPlanPlugMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "(%Ix)[%Ix->%Ix, NA: [%Ix(%Id), %Ix[: %Ix(%d), x: %Ix (%s)";
- }
-
- static const char* gcPlanPinnedPlugMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "(%Ix)PP: [%Ix, %Ix[%Ix](m:%d)";
- }
-
- static const char* gcDesiredNewAllocationMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "h%d g%d surv: %Id current: %Id alloc: %Id (%d%%) f: %d%% new-size: %Id new-alloc: %Id";
- }
-
- static const char* gcMakeUnusedArrayMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "Making unused array [%Ix, %Ix[";
- }
-
- static const char* gcStartBgcThread()
- {
- STATIC_CONTRACT_LEAF;
- return "beginning of bgc on heap %d: gen2 FL: %d, FO: %d, frag: %d";
- }
+ #include "../../inc/gcmsg.inl"
};
struct StressLogMsg
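
The hunk above replaces twenty-odd duplicated message accessors with a textual include of gcmsg.inl, a file added later in this diff (src/coreclr/inc/gcmsg.inl). Since the VM-side ThreadStressLog splices in the same .inl, the two copies of the class can never drift apart. A minimal sketch of the idiom, with hypothetical file names (split it into the two files as marked before compiling):

// ---- gcmsg_demo.inl (hypothetical; deliberately has no include guard) ----
static const char* helloMsg()
{
    return "hello from heap %d\n";
}

// ---- main.cpp (hypothetical; the part above goes in its own file) ----
#include <cstdio>

class VmSideLog
{
public:
#include "gcmsg_demo.inl"
};

class GcSideLog
{
public:
#include "gcmsg_demo.inl"
};

int main()
{
    // Both classes are guaranteed to expose byte-identical format strings.
    std::printf(VmSideLog::helloMsg(), 0);
    std::printf(GcSideLog::helloMsg(), 1);
    return 0;
}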
diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp
index 61ea4f8f73b3e2..9cb439e650411f 100644
--- a/src/coreclr/gc/gc.cpp
+++ b/src/coreclr/gc/gc.cpp
@@ -641,6 +641,10 @@ process_sync_log_stats()
#ifndef DACCESS_COMPILE
uint32_t g_num_active_processors = 0;
+// Note that when a join is no longer used we still keep its value here, because
+// tooling already recognizes the values with the meaning they were originally assigned.
+// Tooling doesn't break if we stop using a value, but it does if we assign a new
+// meaning to one.
enum gc_join_stage
{
gc_join_init_cpu_mapping = 0,
@@ -681,6 +685,7 @@ enum gc_join_stage
gc_join_after_commit_soh_no_gc = 35,
gc_join_expand_loh_no_gc = 36,
gc_join_final_no_gc = 37,
+ // No longer in use but do not remove, see comments for this enum.
gc_join_disable_software_write_watch = 38,
gc_join_max = 39
};
@@ -10808,7 +10813,7 @@ void gc_heap::return_free_region (heap_segment* region)
heap_segment_mem (region), num_basic_regions, num_free_regions));
for (int i = 0; i < num_basic_regions; i++)
{
- uint8_t* basic_region_start = region_start + (i << min_segment_size_shr);
+ uint8_t* basic_region_start = region_start + ((size_t)i << min_segment_size_shr);
heap_segment* basic_region = get_region_info (basic_region_start);
heap_segment_allocated (basic_region) = 0;
#ifdef MULTIPLE_HEAPS
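
The cast added here matters once a heap spans enough basic regions: without it, the shift is evaluated in 32-bit int arithmetic and overflows before the result is widened for pointer arithmetic. A self-contained illustration of the difference (values are made up; the demo uses uint32_t so the wraparound is well-defined rather than undefined behavior):

#include <cstdint>
#include <cstdio>

int main()
{
    // Made-up values: index of a basic region, and log2 of the region size.
    uint32_t i = 16;
    int min_segment_size_shr = 28;

    // Shifted in 32 bits, then widened: the offset wraps to 0.
    size_t wrong = (size_t)(i << min_segment_size_shr);

    // Widened first, then shifted: the intended 0x400000000-byte offset
    // (on a 64-bit platform).
    size_t right = (size_t)i << min_segment_size_shr;

    std::printf("wrong = 0x%zx, right = 0x%zx\n", wrong, right);
    return 0;
}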
@@ -15527,9 +15532,9 @@ int bgc_allocate_spin(size_t min_gc_size, size_t bgc_begin_size, size_t bgc_size
return 0;
}
- if (((bgc_begin_size / end_size) >= 2) || (bgc_size_increased >= bgc_begin_size))
+ if ((bgc_begin_size >= (2 * end_size)) || (bgc_size_increased >= bgc_begin_size))
{
- if ((bgc_begin_size / end_size) >= 2)
+ if (bgc_begin_size >= (2 * end_size))
{
dprintf (3, ("alloc-ed too much before bgc started"));
}
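
The rewritten condition is algebraically equivalent to the division form for any positive end_size and, unlike that form, cannot fault when end_size is zero; it also saves a divide on a hot path. A quick self-check of the equivalence (names are local to the demo):

#include <cassert>
#include <cstddef>

// Safe even when end_size == 0; the division form would trap there.
static bool grew_too_much(size_t bgc_begin_size, size_t end_size)
{
    return bgc_begin_size >= 2 * end_size;
}

int main()
{
    // For b > 0 the forms agree: floor(a / b) >= 2  <=>  a >= 2 * b.
    for (size_t a = 0; a < 64; a++)
        for (size_t b = 1; b < 64; b++)
            assert(((a / b) >= 2) == grew_too_much(a, b));

    assert(grew_too_much(1, 0)); // the division form would divide by zero here
    return 0;
}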
@@ -20587,6 +20592,7 @@ void gc_heap::garbage_collect (int n)
update_collection_counts_for_no_gc();
#ifdef MULTIPLE_HEAPS
+ gc_start_event.Reset();
gc_t_join.restart();
#endif //MULTIPLE_HEAPS
}
@@ -22290,7 +22296,7 @@ inline
void gc_heap::mark_object (uint8_t* o THREAD_NUMBER_DCL)
{
#ifdef USE_REGIONS
- if (is_in_condemned_gc (o))
+ if ((o != nullptr) && is_in_condemned_gc (o))
{
mark_object_simple (&o THREAD_NUMBER_ARG);
}
@@ -23314,7 +23320,10 @@ void gc_heap::process_mark_overflow_internal (int condemned_gen_number,
#endif //MULTIPLE_HEAPS
BOOL full_p = (condemned_gen_number == max_generation);
- dprintf(3,("Processing Mark overflow [%Ix %Ix]", (size_t)min_add, (size_t)max_add));
+ dprintf(3,("Processing Mark overflow [%Ix %Ix]", (size_t)min_add, (size_t)max_add));
+
+ size_t obj_count = 0;
+
#ifdef MULTIPLE_HEAPS
for (int hi = 0; hi < n_heaps; hi++)
{
@@ -23323,11 +23332,10 @@ void gc_heap::process_mark_overflow_internal (int condemned_gen_number,
#else
{
gc_heap* hp = 0;
-
#endif //MULTIPLE_HEAPS
int gen_limit = full_p ? total_generation_count : condemned_gen_number + 1;
- for (int i = condemned_gen_number; i < gen_limit; i++)
+ for (int i = get_stop_generation_index (condemned_gen_number); i < gen_limit; i++)
{
generation* gen = hp->generation_of (i);
heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
@@ -23347,6 +23355,7 @@ void gc_heap::process_mark_overflow_internal (int condemned_gen_number,
if (marked (o))
{
mark_through_object (o, TRUE THREAD_NUMBER_ARG);
+ obj_count++;
}
o = o + Align (size (o), align_const);
@@ -23355,6 +23364,10 @@ void gc_heap::process_mark_overflow_internal (int condemned_gen_number,
seg = heap_segment_next_in_range (seg);
}
}
+#ifndef MULTIPLE_HEAPS
+ // we should have found at least one object
+ assert (obj_count > 0);
+#endif //MULTIPLE_HEAPS
}
}
@@ -28669,11 +28682,6 @@ heap_segment* gc_heap::relocate_advance_to_non_sip (heap_segment* region)
{
dprintf (4444, ("SIP %Ix(%Ix)->%Ix->%Ix(%Ix)",
x, (uint8_t*)pval, child, *pval, method_table (child)));
-
- if (method_table (child) == 0)
- {
- FATAL_GC_ERROR();
- }
}
});
}
@@ -29214,6 +29222,7 @@ void gc_heap::relocate_address (uint8_t** pold_address THREAD_NUMBER_DCL)
}
}
+ dprintf (4, (ThreadStressLog::gcRelocateReferenceMsg(), pold_address, old_address, new_address));
*pold_address = new_address;
return;
}
@@ -29243,7 +29252,9 @@ void gc_heap::relocate_address (uint8_t** pold_address THREAD_NUMBER_DCL)
#endif
)
{
- *pold_address = old_address + loh_node_relocation_distance (old_address);
+ new_address = old_address + loh_node_relocation_distance (old_address);
+ dprintf (4, (ThreadStressLog::gcRelocateReferenceMsg(), pold_address, old_address, new_address));
+ *pold_address = new_address;
}
}
}
@@ -31799,23 +31810,6 @@ void gc_heap::background_mark_phase ()
//concurrent_print_time_delta ("nonconcurrent revisit dirtied pages on LOH");
concurrent_print_time_delta ("NRre LOH");
-#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
-#ifdef MULTIPLE_HEAPS
- bgc_t_join.join(this, gc_join_disable_software_write_watch);
- if (bgc_t_join.joined())
-#endif // MULTIPLE_HEAPS
- {
- // The runtime is suspended, and we will be doing a final query of dirty pages, so pause tracking written pages to
- // avoid further perf penalty after the runtime is restarted
- SoftwareWriteWatch::DisableForGCHeap();
-
-#ifdef MULTIPLE_HEAPS
- dprintf(3, ("Restarting BGC threads after disabling software write watch"));
- bgc_t_join.restart();
-#endif // MULTIPLE_HEAPS
- }
-#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
-
dprintf (2, ("before NR 1st Hov count: %d", bgc_overflow_count));
bgc_overflow_count = 0;
@@ -31840,6 +31834,12 @@ void gc_heap::background_mark_phase ()
if (bgc_t_join.joined())
#endif //MULTIPLE_HEAPS
{
+#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
+ // The runtime is suspended, so take this opportunity to pause tracking written pages to
+ // avoid further perf penalty after the runtime is restarted
+ SoftwareWriteWatch::DisableForGCHeap();
+#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
+
GCToEEInterface::AfterGcScanRoots (max_generation, max_generation, &sc);
#ifdef MULTIPLE_HEAPS
@@ -38467,7 +38467,7 @@ void gc_heap::process_background_segment_end (heap_segment* seg,
heap_segment_mem (seg), (*delete_p ? "should" : "should not")));
}
- else
+ if (!*delete_p)
{
dprintf (3, ("[h%d] seg %Ix alloc %Ix->%Ix",
heap_number, (size_t)seg,
@@ -39732,9 +39732,6 @@ void gc_heap::mark_through_cards_for_uoh_objects (card_fn fn,
void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
{
-#ifdef USE_REGIONS
- assert (!"not impl!!");
-#else
#ifdef MULTIPLE_HEAPS
for (int i = 0; i < n_heaps; i++)
{
@@ -39753,6 +39750,16 @@ void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
{
generation* gen = hp->generation_of (curr_gen_number);
heap_segment* seg = generation_start_segment (gen);
+#ifdef USE_REGIONS
+ while (seg)
+ {
+ fn(context, curr_gen_number, heap_segment_mem (seg),
+ heap_segment_allocated (seg),
+ heap_segment_reserved (seg));
+
+ seg = heap_segment_next (seg);
+ }
+#else
while (seg && (seg != hp->ephemeral_heap_segment))
{
assert (curr_gen_number > 0);
@@ -39805,9 +39812,9 @@ void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
heap_segment_reserved (hp->ephemeral_heap_segment) );
}
}
+#endif //USE_REGIONS
}
}
-#endif //USE_REGIONS
}
#ifdef TRACE_GC
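
Under USE_REGIONS every generation is a chain of uniform segments, so the profiler walk above no longer needs the ephemeral-segment special case: it simply reports each segment's [mem, allocated, reserved) triple to the callback. A toy version of that loop (stand-in types, not GC code):

#include <cstdio>

struct Segment { char* mem; char* allocated; char* reserved; Segment* next; };

using gen_walk_fn = void(*)(void* ctx, int gen, char* mem, char* alloc, char* resv);

void WalkGeneration(void* ctx, int gen, Segment* seg, gen_walk_fn fn)
{
    for (; seg != nullptr; seg = seg->next)
        fn(ctx, gen, seg->mem, seg->allocated, seg->reserved);
}

int main()
{
    char block[64];
    Segment s2{block + 32, block + 48, block + 64, nullptr};
    Segment s1{block, block + 16, block + 32, &s2};
    WalkGeneration(nullptr, 2, &s1,
        [](void*, int gen, char* m, char* a, char* r)
        { std::printf("gen %d: %p %p %p\n", gen, (void*)m, (void*)a, (void*)r); });
    return 0;
}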
@@ -42250,6 +42257,15 @@ GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_
newAlloc = (Object*) hp->allocate_uoh_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, gen_num, acontext->alloc_bytes_uoh);
ASSERT(((size_t)newAlloc & 7) == 0);
+#ifdef MULTIPLE_HEAPS
+ if (flags & GC_ALLOC_FINALIZE)
+ {
+ // the heap may have changed due to heap balancing - it's important
+ // to register the object for finalization on the heap it was allocated on
+ hp = gc_heap::heap_of ((uint8_t*)newAlloc);
+ }
+#endif //MULTIPLE_HEAPS
+
#ifdef FEATURE_STRUCTALIGN
newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
#endif // FEATURE_STRUCTALIGN
@@ -42269,6 +42285,16 @@ GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_
newAlloc = (Object*) hp->allocate (size + ComputeMaxStructAlignPad(requiredAlignment), acontext, flags);
}
+#ifdef MULTIPLE_HEAPS
+ if (flags & GC_ALLOC_FINALIZE)
+ {
+ // the heap may have changed due to heap balancing - it's important
+ // to register the object for finalization on the heap it was allocated on
+ hp = acontext->get_alloc_heap()->pGenGCHeap;
+ assert ((newAlloc == nullptr) || (hp == gc_heap::heap_of ((uint8_t*)newAlloc)));
+ }
+#endif //MULTIPLE_HEAPS
+
#ifdef FEATURE_STRUCTALIGN
newAlloc = (Object*) hp->pad_for_alignment ((uint8_t*) newAlloc, requiredAlignment, size, acontext);
#endif // FEATURE_STRUCTALIGN
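
The re-derivation added in these two hunks matters because the allocator may satisfy a request from a different heap than the one initially chosen ("heap balancing"), and finalization queues are per-heap. A toy model of the invariant (not GC code; ToyHeap and heap_of are stand-ins mirroring gc_heap::heap_of, which resolves ownership from the object's address):

#include <cassert>
#include <cstddef>

struct Heap { char space[64]; size_t used = 0; size_t finalizable = 0; };

static Heap g_heaps[2];

// May "balance" the allocation onto the other heap when the preferred one is full.
char* Allocate(Heap*& hp, size_t size)
{
    if (hp->used + size > sizeof(hp->space))
        hp = (hp == &g_heaps[0]) ? &g_heaps[1] : &g_heaps[0];
    char* p = hp->space + hp->used;
    hp->used += size;
    return p;
}

Heap* heap_of(char* p) // ownership by address range, like gc_heap::heap_of
{
    for (Heap& h : g_heaps)
        if (p >= h.space && p < h.space + sizeof(h.space)) return &h;
    return nullptr;
}

int main()
{
    Heap* hp = &g_heaps[0];
    g_heaps[0].used = 60;          // force balancing on the next allocation
    char* obj = Allocate(hp, 16);
    heap_of(obj)->finalizable++;   // register where the object actually lives
    assert(heap_of(obj) == &g_heaps[1]);
    return 0;
}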
@@ -44397,6 +44423,9 @@ CFinalize::RelocateFinalizationData (int gen, gc_heap* hp)
unsigned int Seg = gen_segment (gen);
Object** startIndex = SegQueue (Seg);
+
+ dprintf (3, ("RelocateFinalizationData gen=%d, [%Ix,%Ix[", gen, startIndex, SegQueue (FreeList)));
+
for (Object** po = startIndex; po < SegQueue (FreeList);po++)
{
GCHeap::Relocate (po, &sc);
@@ -44406,6 +44435,8 @@ CFinalize::RelocateFinalizationData (int gen, gc_heap* hp)
void
CFinalize::UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p)
{
+ dprintf(3, ("UpdatePromotedGenerations gen=%d, gen_0_empty_p=%d", gen, gen_0_empty_p));
+
// update the generation fill pointers.
// if gen_0_empty is FALSE, test each object to find out if
// it was promoted or not
@@ -44430,6 +44461,8 @@ CFinalize::UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p)
int new_gen = g_theGCHeap->WhichGeneration (*po);
if (new_gen != i)
{
+ dprintf (3, ("Moving object %Ix->%Ix from gen %d to gen %d", po, *po, i, new_gen));
+
if (new_gen > i)
{
//promotion
@@ -44461,6 +44494,8 @@ CFinalize::GrowArray()
}
memcpy (newArray, m_Array, oldArraySize*sizeof(Object*));
+ dprintf (3, ("Grow finalizer array [%Ix,%Ix[ -> [%Ix,%Ix[", m_Array, m_EndArray, newArray, &m_Array[newArraySize]));
+
//adjust the fill pointers
for (int i = 0; i < FreeList; i++)
{
@@ -44518,6 +44553,19 @@ void gc_heap::walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOO
end = heap_segment_allocated (seg);
continue;
}
+#ifdef USE_REGIONS
+ else if (gen_number > 0)
+ {
+ // advance to next lower generation
+ gen_number--;
+ gen = gc_heap::generation_of (gen_number);
+ seg = generation_start_segment (gen);
+
+ x = heap_segment_mem (seg);
+ end = heap_segment_allocated (seg);
+ continue;
+ }
+#endif // USE_REGIONS
else
{
if (walk_large_object_heap_p)
diff --git a/src/coreclr/gc/gcinterface.ee.h b/src/coreclr/gc/gcinterface.ee.h
index 4b048271c0e553..ea5f2d2914c543 100644
--- a/src/coreclr/gc/gcinterface.ee.h
+++ b/src/coreclr/gc/gcinterface.ee.h
@@ -4,7 +4,7 @@
#ifndef _GCINTERFACE_EE_H_
#define _GCINTERFACE_EE_H_
-enum EtwGCRootFlags
+enum EtwGCRootFlags: int32_t
{
kEtwGCRootFlagsPinning = 0x1,
kEtwGCRootFlagsWeakRef = 0x2,
@@ -12,7 +12,7 @@ enum EtwGCRootFlags
kEtwGCRootFlagsRefCounted = 0x8,
};
-enum EtwGCRootKind
+enum EtwGCRootKind: int32_t
{
kEtwGCRootKindStack = 0,
kEtwGCRootKindFinalizer = 1,
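
Giving these enums a fixed underlying type is what allows profilepriv.h, later in this diff, to forward-declare them without pulling in this header: C++11 permits an opaque enum declaration only when the underlying type is specified, because the size must be known. A minimal illustration:

#include <cstdint>

enum Color : int32_t;          // opaque declaration: legal only with a fixed type
void paint(Color c);           // headers can now use Color without its definition

enum Color : int32_t { Red, Green, Blue };  // full definition, matching type

void paint(Color c) { (void)c; }

int main()
{
    paint(Red);
    return 0;
}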
diff --git a/src/coreclr/gc/gcpriv.h b/src/coreclr/gc/gcpriv.h
index b89a0043589258..09bd97d1891886 100644
--- a/src/coreclr/gc/gcpriv.h
+++ b/src/coreclr/gc/gcpriv.h
@@ -63,7 +63,7 @@ inline void FATAL_GC_ERROR()
// + creates some pins on our own
// + creates some ro segs
// We can add more mechanisms here.
-#define STRESS_REGIONS
+//#define STRESS_REGIONS
#endif //USE_REGIONS
// FEATURE_STRUCTALIGN was added by Midori. In CLR we are not interested
diff --git a/src/coreclr/gc/unix/gcenv.unix.cpp b/src/coreclr/gc/unix/gcenv.unix.cpp
index 9408a9b121406e..d2798c860adee5 100644
--- a/src/coreclr/gc/unix/gcenv.unix.cpp
+++ b/src/coreclr/gc/unix/gcenv.unix.cpp
@@ -884,7 +884,7 @@ static size_t GetLogicalProcessorCacheSizeFromOS()
cacheSize = std::max(cacheSize, ( size_t) sysconf(_SC_LEVEL4_CACHE_SIZE));
#endif
-#if defined(TARGET_LINUX) && !defined(HOST_ARM)
+#if defined(TARGET_LINUX) && !defined(HOST_ARM) && !defined(HOST_X86)
if (cacheSize == 0)
{
//
diff --git a/src/coreclr/gcinfo/CMakeLists.txt b/src/coreclr/gcinfo/CMakeLists.txt
index 70b0f7396d89b3..bfddaee4b700af 100644
--- a/src/coreclr/gcinfo/CMakeLists.txt
+++ b/src/coreclr/gcinfo/CMakeLists.txt
@@ -82,3 +82,7 @@ create_gcinfo_lib(TARGET gcinfo_unix_armel OS unix ARCH armel)
create_gcinfo_lib(TARGET gcinfo_unix_arm OS unix ARCH arm)
create_gcinfo_lib(TARGET gcinfo_win_arm OS win ARCH arm)
create_gcinfo_lib(TARGET gcinfo_win_x86 OS win ARCH x86)
+
+if (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX)
+ create_gcinfo_lib(TARGET gcinfo_unix_x86 OS unix ARCH x86)
+endif (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX)
diff --git a/src/coreclr/inc/CrstTypes.def b/src/coreclr/inc/CrstTypes.def
index 185a590d938e54..c48872a0b94243 100644
--- a/src/coreclr/inc/CrstTypes.def
+++ b/src/coreclr/inc/CrstTypes.def
@@ -433,7 +433,7 @@ Crst SingleUseLock
End
Crst UnwindInfoTableLock
- AcquiredAfter StubUnwindInfoHeapSegments SingleUseLock
+ AcquiredAfter StubUnwindInfoHeapSegments SingleUseLock
AcquiredBefore StressLog
End
diff --git a/src/coreclr/inc/clrconfigvalues.h b/src/coreclr/inc/clrconfigvalues.h
index 5dc7e5d34b20bd..3f21e41dfa3693 100644
--- a/src/coreclr/inc/clrconfigvalues.h
+++ b/src/coreclr/inc/clrconfigvalues.h
@@ -504,6 +504,10 @@ RETAIL_CONFIG_STRING_INFO_EX(EXTERNAL_CORECLR_PROFILER_PATH_32, W("CORECLR_PROFI
RETAIL_CONFIG_STRING_INFO_EX(EXTERNAL_CORECLR_PROFILER_PATH_64, W("CORECLR_PROFILER_PATH_64"), "CoreCLR only: Specifies the path to the DLL of profiler to load into currently running 64 process", CLRConfig::LookupOptions::DontPrependPrefix)
RETAIL_CONFIG_STRING_INFO_EX(EXTERNAL_CORECLR_PROFILER_PATH_ARM32, W("CORECLR_PROFILER_PATH_ARM32"), "CoreCLR only: Specifies the path to the DLL of profiler to load into currently running ARM32 process", CLRConfig::LookupOptions::DontPrependPrefix)
RETAIL_CONFIG_STRING_INFO_EX(EXTERNAL_CORECLR_PROFILER_PATH_ARM64, W("CORECLR_PROFILER_PATH_ARM64"), "CoreCLR only: Specifies the path to the DLL of profiler to load into currently running ARM64 process", CLRConfig::LookupOptions::DontPrependPrefix)
+RETAIL_CONFIG_DWORD_INFO_EX(EXTERNAL_CORECLR_ENABLE_NOTIFICATION_PROFILERS, W("CORECLR_ENABLE_NOTIFICATION_PROFILERS"), 0, "Set to 0 to disable loading notification profilers.", CLRConfig::LookupOptions::DontPrependPrefix)
+RETAIL_CONFIG_STRING_INFO_EX(EXTERNAL_CORECLR_NOTIFICATION_PROFILERS_64, W("CORECLR_NOTIFICATION_PROFILERS_64"), "A semi-colon separated list of notification profilers to load in the form \"path={guid}\"", CLRConfig::LookupOptions::DontPrependPrefix)
+RETAIL_CONFIG_STRING_INFO_EX(EXTERNAL_CORECLR_NOTIFICATION_PROFILERS_32, W("CORECLR_NOTIFICATION_PROFILERS_32"), "A semi-colon separated list of notification profilers to load in the form \"path={guid}\"", CLRConfig::LookupOptions::DontPrependPrefix)
+RETAIL_CONFIG_STRING_INFO_EX(EXTERNAL_CORECLR_NOTIFICATION_PROFILERS, W("CORECLR_NOTIFICATION_PROFILERS"), "A semi-colon separated list of notification profilers to load in the form \"path={guid}\"", CLRConfig::LookupOptions::DontPrependPrefix)
RETAIL_CONFIG_STRING_INFO_EX(EXTERNAL_ProfAPI_ProfilerCompatibilitySetting, W("ProfAPI_ProfilerCompatibilitySetting"), "Specifies the profiler loading policy (the default is not to load a V2 profiler in V4)", CLRConfig::LookupOptions::TrimWhiteSpaceFromStringValue)
RETAIL_CONFIG_DWORD_INFO(EXTERNAL_ProfAPI_DetachMinSleepMs, W("ProfAPI_DetachMinSleepMs"), 0, "The minimum time, in milliseconds, the CLR will wait before checking whether a profiler that is in the process of detaching is ready to be unloaded.")
RETAIL_CONFIG_DWORD_INFO(EXTERNAL_ProfAPI_DetachMaxSleepMs, W("ProfAPI_DetachMaxSleepMs"), 0, "The maximum time, in milliseconds, the CLR will wait before checking whether a profiler that is in the process of detaching is ready to be unloaded.")
diff --git a/src/coreclr/inc/corcompile.h b/src/coreclr/inc/corcompile.h
index 2fb5d6fabc7f96..283124c15b9509 100644
--- a/src/coreclr/inc/corcompile.h
+++ b/src/coreclr/inc/corcompile.h
@@ -702,6 +702,9 @@ enum CORCOMPILE_FIXUP_BLOB_KIND
ENCODE_VERIFY_FIELD_OFFSET, /* Used for the R2R compiler can generate a check against the real field offset used at runtime */
ENCODE_VERIFY_TYPE_LAYOUT, /* Used for the R2R compiler can generate a check against the real type layout used at runtime */
+ ENCODE_CHECK_VIRTUAL_FUNCTION_OVERRIDE, /* Generate a runtime check to ensure that virtual function resolution has equivalent behavior at runtime as at compile time. If not equivalent, code will not be used */
+ ENCODE_VERIFY_VIRTUAL_FUNCTION_OVERRIDE, /* Generate a runtime check to ensure that virtual function resolution has equivalent behavior at runtime as at compile time. If not equivalent, generate runtime failure. */
+
ENCODE_MODULE_HANDLE = 0x50, /* Module token */
ENCODE_STATIC_FIELD_ADDRESS, /* For accessing a static field */
ENCODE_MODULE_ID_FOR_STATICS, /* For accessing static fields */
@@ -724,6 +727,7 @@ enum EncodeMethodSigFlags
ENCODE_METHOD_SIG_MemberRefToken = 0x10,
ENCODE_METHOD_SIG_Constrained = 0x20,
ENCODE_METHOD_SIG_OwnerType = 0x40,
+ ENCODE_METHOD_SIG_UpdateContext = 0x80,
};
enum EncodeFieldSigFlags
diff --git a/src/coreclr/inc/corinfo.h b/src/coreclr/inc/corinfo.h
index c2fde45f84fc52..abf5527d0016cc 100644
--- a/src/coreclr/inc/corinfo.h
+++ b/src/coreclr/inc/corinfo.h
@@ -1604,18 +1604,24 @@ struct CORINFO_CALL_INFO
enum CORINFO_DEVIRTUALIZATION_DETAIL
{
- CORINFO_DEVIRTUALIZATION_UNKNOWN, // no details available
- CORINFO_DEVIRTUALIZATION_SUCCESS, // devirtualization was successful
- CORINFO_DEVIRTUALIZATION_FAILED_CANON, // object class was canonical
- CORINFO_DEVIRTUALIZATION_FAILED_COM, // object class was com
- CORINFO_DEVIRTUALIZATION_FAILED_CAST, // object class could not be cast to interface class
- CORINFO_DEVIRTUALIZATION_FAILED_LOOKUP, // interface method could not be found
- CORINFO_DEVIRTUALIZATION_FAILED_DIM, // interface method was default interface method
- CORINFO_DEVIRTUALIZATION_FAILED_SUBCLASS, // object not subclass of base class
- CORINFO_DEVIRTUALIZATION_FAILED_SLOT, // virtual method installed via explicit override
- CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE, // devirtualization crossed version bubble
- CORINFO_DEVIRTUALIZATION_MULTIPLE_IMPL, // object has multiple implementations of interface class
- CORINFO_DEVIRTUALIZATION_COUNT, // sentinel for maximum value
+ CORINFO_DEVIRTUALIZATION_UNKNOWN, // no details available
+ CORINFO_DEVIRTUALIZATION_SUCCESS, // devirtualization was successful
+ CORINFO_DEVIRTUALIZATION_FAILED_CANON, // object class was canonical
+ CORINFO_DEVIRTUALIZATION_FAILED_COM, // object class was com
+ CORINFO_DEVIRTUALIZATION_FAILED_CAST, // object class could not be cast to interface class
+ CORINFO_DEVIRTUALIZATION_FAILED_LOOKUP, // interface method could not be found
+ CORINFO_DEVIRTUALIZATION_FAILED_DIM, // interface method was default interface method
+ CORINFO_DEVIRTUALIZATION_FAILED_SUBCLASS, // object not subclass of base class
+ CORINFO_DEVIRTUALIZATION_FAILED_SLOT, // virtual method installed via explicit override
+ CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE, // devirtualization crossed version bubble
+ CORINFO_DEVIRTUALIZATION_MULTIPLE_IMPL, // object has multiple implementations of interface class
+ CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_CLASS_DECL, // decl method is defined on class and decl method not in version bubble, and decl method not in closest to version bubble
+ CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_INTERFACE_DECL, // decl method is defined on interface and not in version bubble, and implementation type not entirely defined in bubble
+ CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL, // object class not defined within version bubble
+ CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL_NOT_REFERENCEABLE, // object class cannot be referenced from R2R code due to missing tokens
+ CORINFO_DEVIRTUALIZATION_FAILED_DUPLICATE_INTERFACE, // crossgen2 virtual method algorithm and runtime algorithm differ in the presence of duplicate interface implementations
+ CORINFO_DEVIRTUALIZATION_FAILED_DECL_NOT_REPRESENTABLE, // Decl method cannot be represented in R2R image
+ CORINFO_DEVIRTUALIZATION_COUNT, // sentinel for maximum value
};
struct CORINFO_DEVIRTUALIZATION_INFO
@@ -1626,6 +1632,7 @@ struct CORINFO_DEVIRTUALIZATION_INFO
CORINFO_METHOD_HANDLE virtualMethod;
CORINFO_CLASS_HANDLE objClass;
CORINFO_CONTEXT_HANDLE context;
+ CORINFO_RESOLVED_TOKEN *pResolvedTokenVirtualMethod;
//
// [Out] results of resolveVirtualMethod.
@@ -1634,11 +1641,15 @@ struct CORINFO_DEVIRTUALIZATION_INFO
// - requiresInstMethodTableArg is set to TRUE if the devirtualized method requires a type handle arg.
// - exactContext is set to wrapped CORINFO_CLASS_HANDLE of devirt'ed method table.
// - details on the computation done by the jit host
+ // - If pResolvedTokenDevirtualizedMethod is not set to NULL and targeting an R2R image
+ // use it as the parameter to getCallInfo
//
CORINFO_METHOD_HANDLE devirtualizedMethod;
bool requiresInstMethodTableArg;
CORINFO_CONTEXT_HANDLE exactContext;
CORINFO_DEVIRTUALIZATION_DETAIL detail;
+ CORINFO_RESOLVED_TOKEN resolvedTokenDevirtualizedMethod;
+ CORINFO_RESOLVED_TOKEN resolvedTokenDevirtualizedUnboxedMethod;
};
//----------------------------------------------------------------------------
diff --git a/src/coreclr/inc/corprof.idl b/src/coreclr/inc/corprof.idl
index 74fb809928125f..8fc965a84f6b59 100644
--- a/src/coreclr/inc/corprof.idl
+++ b/src/coreclr/inc/corprof.idl
@@ -475,6 +475,7 @@ typedef enum
// and ThreadNameChanged callbacks.
COR_PRF_MONITOR_THREADS = 0x00000200,
+ // CORECLR DEPRECATION WARNING: Remoting no longer exists in coreclr
// MONITOR_REMOTING controls the Remoting*
// callbacks.
COR_PRF_MONITOR_REMOTING = 0x00000400,
@@ -492,10 +493,12 @@ typedef enum
// callbacks.
COR_PRF_MONITOR_CCW = 0x00002000,
+ // CORECLR DEPRECATION WARNING: Remoting no longer exists in coreclr
// MONITOR_REMOTING_COOKIE controls whether
// a cookie will be passed to the Remoting* callbacks
COR_PRF_MONITOR_REMOTING_COOKIE = 0x00004000 | COR_PRF_MONITOR_REMOTING,
+ // CORECLR DEPRECATION WARNING: Remoting no longer exists in coreclr
// MONITOR_REMOTING_ASYNC controls whether
// the Remoting* callbacks will monitor async events
COR_PRF_MONITOR_REMOTING_ASYNC = 0x00008000 | COR_PRF_MONITOR_REMOTING,
@@ -594,6 +597,28 @@ typedef enum
COR_PRF_MONITOR_JIT_COMPILATION |
COR_PRF_ENABLE_REJIT,
+ COR_PRF_ALLOWABLE_NOTIFICATION_PROFILER
+ = COR_PRF_MONITOR_FUNCTION_UNLOADS |
+ COR_PRF_MONITOR_CLASS_LOADS |
+ COR_PRF_MONITOR_MODULE_LOADS |
+ COR_PRF_MONITOR_ASSEMBLY_LOADS |
+ COR_PRF_MONITOR_APPDOMAIN_LOADS |
+ COR_PRF_MONITOR_JIT_COMPILATION |
+ COR_PRF_MONITOR_EXCEPTIONS |
+ COR_PRF_MONITOR_OBJECT_ALLOCATED |
+ COR_PRF_MONITOR_THREADS |
+ COR_PRF_MONITOR_CODE_TRANSITIONS |
+ COR_PRF_MONITOR_CCW |
+ COR_PRF_MONITOR_SUSPENDS |
+ COR_PRF_MONITOR_CACHE_SEARCHES |
+ COR_PRF_DISABLE_INLINING |
+ COR_PRF_DISABLE_OPTIMIZATIONS |
+ COR_PRF_ENABLE_OBJECT_ALLOCATED |
+ COR_PRF_MONITOR_CLR_EXCEPTIONS |
+ COR_PRF_ENABLE_STACK_SNAPSHOT |
+ COR_PRF_USE_PROFILE_IMAGES |
+ COR_PRF_DISABLE_ALL_NGEN_IMAGES,
+
// MONITOR_IMMUTABLE represents all flags that may only be set during initialization.
// Trying to change any of these flags elsewhere will result in a
// failed HRESULT.
@@ -621,6 +646,7 @@ typedef enum
{
COR_PRF_HIGH_MONITOR_NONE = 0x00000000,
+ // CORECLR DEPRECATION WARNING: This flag is no longer checked by the runtime
COR_PRF_HIGH_ADD_ASSEMBLY_REFERENCES = 0x00000001,
COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED = 0x00000002,
@@ -648,6 +674,15 @@ typedef enum
COR_PRF_HIGH_MONITOR_LARGEOBJECT_ALLOCATED |
COR_PRF_HIGH_MONITOR_EVENT_PIPE,
+ COR_PRF_HIGH_ALLOWABLE_NOTIFICATION_PROFILER
+ = COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED |
+ COR_PRF_HIGH_MONITOR_DYNAMIC_FUNCTION_UNLOADS |
+ COR_PRF_HIGH_DISABLE_TIERED_COMPILATION |
+ COR_PRF_HIGH_BASIC_GC |
+ COR_PRF_HIGH_MONITOR_GC_MOVED_OBJECTS |
+ COR_PRF_HIGH_MONITOR_LARGEOBJECT_ALLOCATED |
+ COR_PRF_HIGH_MONITOR_EVENT_PIPE,
+
// MONITOR_IMMUTABLE represents all flags that may only be set during initialization.
// Trying to change any of these flags elsewhere will result in a
// failed HRESULT.
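
An allow-list mask like COR_PRF_ALLOWABLE_NOTIFICATION_PROFILER is typically enforced by rejecting any requested bit that falls outside it. The sketch below shows the shape of such a check; the mask value and the validation function are made up, and the real runtime would presumably fail the corresponding SetEventMask call rather than print:

#include <cstdint>
#include <cstdio>

constexpr uint32_t ALLOWABLE = 0x0000F00Du; // stand-in for the allow-list mask

bool ValidateNotificationMask(uint32_t requested)
{
    uint32_t disallowed = requested & ~ALLOWABLE;
    if (disallowed != 0)
    {
        std::printf("rejected bits: 0x%08X\n", disallowed);
        return false; // a real runtime would fail the event-mask request
    }
    return true;
}

int main()
{
    (void)ValidateNotificationMask(0x0000100Du); // within the allow-list
    (void)ValidateNotificationMask(0x00010000u); // outside it, so rejected
    return 0;
}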
@@ -2433,6 +2468,7 @@ interface ICorProfilerCallback5 : ICorProfilerCallback4
]
interface ICorProfilerCallback6 : ICorProfilerCallback5
{
+ // CORECLR DEPRECATION WARNING: This callback does not occur on coreclr.
// Controlled by the COR_PRF_HIGH_ADD_ASSEMBLY_REFERENCES event mask flag.
// Notifies the profiler of a very early stage in the loading of an Assembly, where the CLR
// performs an assembly reference closure walk. This is useful ONLY if the profiler will need
@@ -2555,6 +2591,17 @@ interface ICorProfilerCallback10 : ICorProfilerCallback9
HRESULT EventPipeProviderCreated([in] EVENTPIPE_PROVIDER provider);
}
+[
+ object,
+ uuid(42350846-AAED-47F7-B128-FD0C98881CDE),
+ pointer_default(unique),
+ local
+]
+interface ICorProfilerCallback11 : ICorProfilerCallback10
+{
+ HRESULT LoadAsNotficationOnly(BOOL *pbNotificationOnly);
+}
+
/*
* COR_PRF_CODEGEN_FLAGS controls various flags and hooks for a specific
* method. A combination of COR_PRF_CODEGEN_FLAGS is provided by the
diff --git a/src/coreclr/inc/crosscomp.h b/src/coreclr/inc/crosscomp.h
index d5d8cc8bd70b0d..0874603640b3b0 100644
--- a/src/coreclr/inc/crosscomp.h
+++ b/src/coreclr/inc/crosscomp.h
@@ -422,6 +422,8 @@ enum
#define DAC_CS_NATIVE_DATA_SIZE 76
#elif defined(TARGET_LINUX) && defined(TARGET_AMD64)
#define DAC_CS_NATIVE_DATA_SIZE 96
+#elif defined(TARGET_LINUX) && defined(TARGET_S390X)
+#define DAC_CS_NATIVE_DATA_SIZE 96
#elif defined(TARGET_NETBSD) && defined(TARGET_AMD64)
#define DAC_CS_NATIVE_DATA_SIZE 96
#elif defined(TARGET_NETBSD) && defined(TARGET_ARM)
diff --git a/src/coreclr/inc/executableallocator.h b/src/coreclr/inc/executableallocator.h
new file mode 100644
index 00000000000000..ce0c6c22f890e6
--- /dev/null
+++ b/src/coreclr/inc/executableallocator.h
@@ -0,0 +1,81 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+//
+
+//
+// Allocator and holders for double mapped executable memory
+//
+
+#pragma once
+
+#include "utilcode.h"
+#include "ex.h"
+
+// Holder class to map read-execute memory as read-write so that it can be modified without using a read-write-execute mapping.
+// At the moment the implementation is a placeholder: it returns the same address for both cases and expects the memory to be read-write-execute.
+// The class uses move semantics to ensure proper unmapping when the holder value is re-assigned.
+template <typename T>
+class ExecutableWriterHolder
+{
+ T *m_addressRX;
+ T *m_addressRW;
+
+ void Move(ExecutableWriterHolder& other)
+ {
+ m_addressRX = other.m_addressRX;
+ m_addressRW = other.m_addressRW;
+ other.m_addressRX = NULL;
+ other.m_addressRW = NULL;
+ }
+
+ void Unmap()
+ {
+ if (m_addressRX != NULL)
+ {
+ // TODO: mapping / unmapping for targets using double memory mapping will be added with the double mapped allocator addition
+#if defined(HOST_OSX) && defined(HOST_ARM64) && !defined(DACCESS_COMPILE)
+ PAL_JitWriteProtect(false);
+#endif
+ }
+ }
+
+public:
+ ExecutableWriterHolder(const ExecutableWriterHolder& other) = delete;
+ ExecutableWriterHolder& operator=(const ExecutableWriterHolder& other) = delete;
+
+ ExecutableWriterHolder(ExecutableWriterHolder&& other)
+ {
+ Move(other);
+ }
+
+ ExecutableWriterHolder& operator=(ExecutableWriterHolder&& other)
+ {
+ Unmap();
+ Move(other);
+ return *this;
+ }
+
+ ExecutableWriterHolder() : m_addressRX(nullptr), m_addressRW(nullptr)
+ {
+ }
+
+ ExecutableWriterHolder(T* addressRX, size_t size)
+ {
+ m_addressRX = addressRX;
+ m_addressRW = addressRX;
+#if defined(HOST_OSX) && defined(HOST_ARM64) && !defined(DACCESS_COMPILE)
+ PAL_JitWriteProtect(true);
+#endif
+ }
+
+ ~ExecutableWriterHolder()
+ {
+ Unmap();
+ }
+
+ // Get the writeable address
+ inline T *GetRW() const
+ {
+ return m_addressRW;
+ }
+};
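
The move-only design above keeps Unmap() from running twice: copying is deleted, and a move transfers ownership so the moved-from holder destructs as a no-op. A compact self-contained model of that behavior (Holder is a stand-in; the printf marks where the real class would unmap the alias or drop write protection):

#include <cstdio>

template <typename T>
class Holder
{
    T* m_rw = nullptr;
    void Unmap()
    {
        if (m_rw) std::printf("unmap %p\n", (void*)m_rw); // real class: unmap here
        m_rw = nullptr;
    }
public:
    Holder() = default;
    explicit Holder(T* rx) : m_rw(rx) {}
    Holder(const Holder&) = delete;
    Holder& operator=(const Holder&) = delete;
    Holder(Holder&& o) : m_rw(o.m_rw) { o.m_rw = nullptr; }
    Holder& operator=(Holder&& o)
    {
        Unmap();               // release the current mapping exactly once
        m_rw = o.m_rw;
        o.m_rw = nullptr;
        return *this;
    }
    ~Holder() { Unmap(); }
    T* GetRW() const { return m_rw; }
};

int main()
{
    int a = 1, b = 2;
    Holder<int> h(&a);
    h = Holder<int>(&b);       // unmaps the alias for &a exactly once
    return 0;                  // destructor unmaps the alias for &b
}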
diff --git a/src/coreclr/inc/gcmsg.inl b/src/coreclr/inc/gcmsg.inl
new file mode 100644
index 00000000000000..f6e1f7227a1c45
--- /dev/null
+++ b/src/coreclr/inc/gcmsg.inl
@@ -0,0 +1,132 @@
+
+ static const char* gcStartMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "{ =========== BEGINGC %d, (requested generation = %lu, collect_classes = %lu) ==========\n";
+ }
+
+ static const char* gcEndMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "========== ENDGC %d (gen = %lu, collect_classes = %lu) ===========}\n";
+ }
+
+ static const char* gcRootMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return " GC Root %p RELOCATED %p -> %p MT = %pT\n";
+ }
+
+ static const char* gcRootPromoteMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return " IGCHeap::Promote: Promote GC Root *%p = %p MT = %pT\n";
+ }
+
+ static const char* gcPlugMoveMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "GC_HEAP RELOCATING Objects in heap within range [%p %p) by -0x%x bytes\n";
+ }
+
+ static const char* gcServerThread0StartMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "%d gc thread waiting...";
+ }
+
+ static const char* gcServerThreadNStartMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "%d gc thread waiting... Done";
+ }
+
+ static const char* gcDetailedStartMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "*GC* %d(gen0:%d)(%d)(alloc: %Id)(%s)(%d)";
+ }
+
+ static const char* gcDetailedEndMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "*EGC* %Id(gen0:%Id)(%Id)(%d)(%s)(%s)(%s)(ml: %d->%d)";
+ }
+
+ static const char* gcStartMarkMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "---- Mark Phase on heap %d condemning %d ----";
+ }
+
+ static const char* gcStartPlanMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "---- Plan Phase on heap %d ---- Condemned generation %d, promotion: %d";
+ }
+
+ static const char* gcStartRelocateMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "---- Relocate phase on heap %d -----";
+ }
+
+ static const char* gcEndRelocateMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "---- End of Relocate phase on heap %d ----";
+ }
+
+ static const char* gcStartCompactMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "---- Compact Phase on heap %d: %Ix(%Ix)----";
+ }
+
+ static const char* gcEndCompactMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "---- End of Compact phase on heap %d ----";
+ }
+
+ static const char* gcMemCopyMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return " mc: [%Ix->%Ix, %Ix->%Ix[";
+ }
+
+ static const char* gcPlanPlugMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "(%Ix)[%Ix->%Ix, NA: [%Ix(%Id), %Ix[: %Ix(%d), x: %Ix (%s)";
+ }
+
+ static const char* gcPlanPinnedPlugMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "(%Ix)PP: [%Ix, %Ix[%Ix](m:%d)";
+ }
+
+ static const char* gcDesiredNewAllocationMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "h%d g%d surv: %Id current: %Id alloc: %Id (%d%%) f: %d%% new-size: %Id new-alloc: %Id";
+ }
+
+ static const char* gcMakeUnusedArrayMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "Making unused array [%Ix, %Ix[";
+ }
+
+ static const char* gcStartBgcThread()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "beginning of bgc on heap %d: gen2 FL: %d, FO: %d, frag: %d";
+ }
+
+ static const char* gcRelocateReferenceMsg()
+ {
+ STATIC_CONTRACT_LEAF;
+ return "Relocating reference *(%p) from %p to %p";
+ }
diff --git a/src/coreclr/inc/holder.h b/src/coreclr/inc/holder.h
index 20f6aeb964614a..448610634a6129 100644
--- a/src/coreclr/inc/holder.h
+++ b/src/coreclr/inc/holder.h
@@ -934,11 +934,17 @@ using NonVMComHolder = SpecializedWrapper<_TYPE, DoTheRelease<_TYPE>>;
// } // foo->DecRef() on out of scope
//
//-----------------------------------------------------------------------------
+template <typename T>
+class ExecutableWriterHolder;
+
template <typename TYPE>
FORCEINLINE void StubRelease(TYPE* value)
{
if (value)
- value->DecRef();
+ {
+ ExecutableWriterHolder<TYPE> stubWriterHolder(value, sizeof(TYPE));
+ stubWriterHolder.GetRW()->DecRef();
+ }
}
template <typename TYPE>
diff --git a/src/coreclr/inc/jiteeversionguid.h b/src/coreclr/inc/jiteeversionguid.h
index 25746b45992942..225c73008bb297 100644
--- a/src/coreclr/inc/jiteeversionguid.h
+++ b/src/coreclr/inc/jiteeversionguid.h
@@ -43,11 +43,11 @@ typedef const GUID *LPCGUID;
#define GUID_DEFINED
#endif // !GUID_DEFINED
-constexpr GUID JITEEVersionIdentifier = { /* 1052f490-cad7-4610-99bb-6f2bd91a1d19 */
- 0x1052f490,
- 0xcad7,
- 0x4610,
- {0x99, 0xbb, 0x6f, 0x2b, 0xd9, 0x1a, 0x1d, 0x19}
+constexpr GUID JITEEVersionIdentifier = { /* de9a9a0e-c66a-4d97-a268-92a31f99d919 */
+ 0xde9a9a0e,
+ 0xc66a,
+ 0x4d97,
+ {0xa2, 0x68, 0x92, 0xa3, 0x1f, 0x99, 0xd9, 0x19}
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/src/coreclr/inc/loaderheap.h b/src/coreclr/inc/loaderheap.h
index 96fec35be0ce4c..ac0669a55867f3 100644
--- a/src/coreclr/inc/loaderheap.h
+++ b/src/coreclr/inc/loaderheap.h
@@ -16,6 +16,7 @@
#include "utilcode.h"
#include "ex.h"
+#include "executableallocator.h"
//==============================================================================
// Interface used to back out loader heap allocations.
@@ -553,10 +554,6 @@ class LoaderHeap : public UnlockedLoaderHeap, public ILoaderHeapBackout
{
WRAPPER_NO_CONTRACT;
-#if defined(HOST_OSX) && defined(HOST_ARM64)
- auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
-#endif // defined(HOST_OSX) && defined(HOST_ARM64)
-
void *pResult;
TaggedMemAllocPtr tmap;
@@ -633,10 +630,6 @@ class LoaderHeap : public UnlockedLoaderHeap, public ILoaderHeapBackout
{
WRAPPER_NO_CONTRACT;
-#if defined(HOST_OSX) && defined(HOST_ARM64)
- auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
-#endif // defined(HOST_OSX) && defined(HOST_ARM64)
-
CRITSEC_Holder csh(m_CriticalSection);
diff --git a/src/coreclr/inc/patchpointinfo.h b/src/coreclr/inc/patchpointinfo.h
index e01446beb42916..1445d8f79bce9c 100644
--- a/src/coreclr/inc/patchpointinfo.h
+++ b/src/coreclr/inc/patchpointinfo.h
@@ -66,6 +66,11 @@ struct PatchpointInfo
return m_genericContextArgOffset;
}
+ bool HasGenericContextArgOffset() const
+ {
+ return m_genericContextArgOffset != -1;
+ }
+
void SetGenericContextArgOffset(int offset)
{
m_genericContextArgOffset = offset;
diff --git a/src/coreclr/inc/pedecoder.h b/src/coreclr/inc/pedecoder.h
index b551bf02d62254..66f25747317e65 100644
--- a/src/coreclr/inc/pedecoder.h
+++ b/src/coreclr/inc/pedecoder.h
@@ -81,6 +81,8 @@ inline CHECK CheckOverflow(RVA value1, COUNT_T value2)
#define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_ARMNT
#elif defined(TARGET_ARM64)
#define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_ARM64
+#elif defined(TARGET_S390X)
+#define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_UNKNOWN
#else
#error "port me"
#endif
diff --git a/src/coreclr/inc/profilepriv.h b/src/coreclr/inc/profilepriv.h
index bd5533f12c6fc8..75d1c3dbcc29bb 100644
--- a/src/coreclr/inc/profilepriv.h
+++ b/src/coreclr/inc/profilepriv.h
@@ -16,8 +16,15 @@
// Forward declarations
class EEToProfInterfaceImpl;
+class ProfToEEInterfaceImpl;
class Object;
struct ScanContext;
+enum EtwGCRootFlags: int32_t;
+enum EtwGCRootKind: int32_t;
+struct IAssemblyBindingClosure;
+struct AssemblyReferenceClosureWalkContextForProfAPI;
+
+#include "eventpipeadaptertypes.h"
#if defined (PROFILING_SUPPORTED_DATA) || defined(PROFILING_SUPPORTED)
#ifndef PROFILING_SUPPORTED_DATA
@@ -25,6 +32,9 @@ struct ScanContext;
#endif // PROFILING_SUPPORTED_DATA
#include "corprof.h"
+#include "slist.h"
+
+#define MAX_NOTIFICATION_PROFILERS 32
//---------------------------------------------------------------------------------------
// Enumerates the various init states of profiling.
@@ -55,33 +65,183 @@ class CurrentProfilerStatus
void Set(ProfilerStatus profStatus);
};
-// ---------------------------------------------------------------------------------------
-// Global struct that lets the EE see the load status of the profiler, and provides a
-// pointer (pProfInterface) through which profiler calls can be made
-//
-// When you are adding new session, please refer to
-// code:ProfControlBlock::ResetPerSessionStatus#ProfileResetSessionStatus for more details.
-struct ProfControlBlock
+class EventMask
{
+ friend class ProfControlBlock;
+private:
+ const UINT64 EventMaskLowMask = 0x00000000FFFFFFFF;
+ const UINT64 EventMaskHighShiftAmount = 32;
+ const UINT64 EventMaskHighMask = 0xFFFFFFFF00000000;
+
+ Volatile<UINT64> m_eventMask;
+
+public:
+ EventMask() :
+ m_eventMask(0)
+ {
+
+ }
+
+ EventMask& operator=(const EventMask& other);
+
+ BOOL IsEventMaskSet(DWORD eventMask);
+ DWORD GetEventMask();
+ void SetEventMask(DWORD eventMask);
+ BOOL IsEventMaskHighSet(DWORD eventMaskHigh);
+ DWORD GetEventMaskHigh();
+ void SetEventMaskHigh(DWORD eventMaskHigh);
+};
+
+class ProfilerInfo
+{
+public:
// **** IMPORTANT!! ****
// All uses of pProfInterface must be properly synchronized to avoid the profiler
// from detaching while the EE attempts to call into it. The recommended way to do
- // this is to use the (lockless) BEGIN_PIN_PROFILER / END_PIN_PROFILER macros. See
- // code:BEGIN_PIN_PROFILER for instructions. For full details on how the
+ // this is to use the (lockless) BEGIN_PROFILER_CALLBACK / END_PROFILER_CALLBACK macros. See
+ // code:BEGIN_PROFILER_CALLBACK for instructions. For full details on how the
// synchronization works, see
// code:ProfilingAPIUtility::InitializeProfiling#LoadUnloadCallbackSynchronization
VolatilePtr<EEToProfInterfaceImpl> pProfInterface;
// **** IMPORTANT!! ****
- DWORD dwEventMask; // Original low event mask bits
- DWORD dwEventMaskHigh; // New high event mask bits
CurrentProfilerStatus curProfStatus;
+
+ EventMask eventMask;
+
+ //---------------------------------------------------------------
+ // dwProfilerEvacuationCounter keeps track of how many profiler
+ // callback calls remain on the stack
+ //---------------------------------------------------------------
+ // Why volatile?
+ // See code:ProfilingAPIUtility::InitializeProfiling#LoadUnloadCallbackSynchronization.
+ Volatile<DWORD> dwProfilerEvacuationCounter;
+
+ Volatile<BOOL> inUse;
+
+ // Reset the variables that are only valid for the current attach session
+ void ResetPerSessionStatus();
+ void Init();
+};
+
+enum class ProfilerCallbackType
+{
+ Active,
+ ActiveOrInitializing
+};
+
+// We need a way to track which profilers are in active calls, to synchronize with detach.
+// If we detached a profiler while it was actively in a callback there would be issues.
+// However, we don't want to pin all profilers, because then a chatty profiler could
+// cause another profiler to not be able to detach. We can't just check the event masks
+// before and after the call because it is legal for a profiler to change its event mask,
+// and then it would be possible for a profiler to permanently prevent itself from detaching.
+//
+// WHEN IS EvacuationCounterHolder REQUIRED?
+// Answer: any time you access a ProfilerInfo *. There is a specific sequence that must be followed:
+// - Do a dirty read of the Profiler interface
+// - Increment an evacuation counter by using EvacuationCounterHolder as a RAII guard class
+// - Now do a clean read of the ProfilerInfo's status - this will be changed during detach and
+// is always read with a memory barrier
+//
+// The DoProfilerCallback/IterateProfilers functions automate this process for you; use
+// them unless you are absolutely sure you know what you're doing
+class EvacuationCounterHolder
+{
+private:
+ ProfilerInfo *m_pProfilerInfo;
+
+public:
+ EvacuationCounterHolder(ProfilerInfo *pProfilerInfo) :
+ m_pProfilerInfo(pProfilerInfo)
+ {
+ _ASSERTE(m_pProfilerInfo != NULL);
+ InterlockedIncrement((LONG *)(m_pProfilerInfo->dwProfilerEvacuationCounter.GetPointer()));
+ }
+
+ ~EvacuationCounterHolder()
+ {
+ InterlockedDecrement((LONG *)(m_pProfilerInfo->dwProfilerEvacuationCounter.GetPointer()));
+ }
+};
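
The sequence the comment above prescribes condenses into a small model: dirty-read the interface pointer, pin with an RAII counter, then do the clean status check. The types below are stand-ins, not the runtime's; a detaching thread would wait for evacuationCounter to drain to zero before freeing the profiler.

#include <atomic>

struct InfoModel
{
    std::atomic<void*> pProfInterface{nullptr};
    std::atomic<int>   status{0};            // 1 == active
    std::atomic<long>  evacuationCounter{0};
};

struct EvacuationGuard                       // mirrors EvacuationCounterHolder
{
    InfoModel& info;
    explicit EvacuationGuard(InfoModel& i) : info(i) { info.evacuationCounter.fetch_add(1); }
    ~EvacuationGuard() { info.evacuationCounter.fetch_sub(1); }
};

template <typename Callback>
void DoOneIteration(InfoModel& info, Callback cb)
{
    if (info.pProfInterface.load(std::memory_order_relaxed) == nullptr)
        return;                              // the dirty read
    EvacuationGuard guard(info);             // detach must now wait for us
    if (info.status.load(std::memory_order_acquire) == 1)  // the clean read
        cb(info.pProfInterface.load());
}

int main()
{
    InfoModel info;
    DoOneIteration(info, [](void*) {});      // no profiler loaded: returns early
    return 0;
}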
+
+struct StoredProfilerNode
+{
+ CLSID guid;
+ SString path;
+ SLink m_Link;
+};
+
+typedef SList<StoredProfilerNode> STOREDPROFILERLIST;
+// ---------------------------------------------------------------------------------------
+// Global struct that lets the EE see the load status of the profiler, and provides a
+// pointer (pProfInterface) through which profiler calls can be made
+//
+// When you are adding a new session, please refer to
+// code:ProfControlBlock::ResetPerSessionStatus#ProfileResetSessionStatus for more details.
+class ProfControlBlock
+{
+private:
+ // IsProfilerPresent(pProfilerInfo) returns whether or not a CLR Profiler is actively loaded
+ // (meaning it's initialized and ready to receive callbacks).
+ FORCEINLINE BOOL IsProfilerPresent(ProfilerInfo *pProfilerInfo)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return pProfilerInfo->curProfStatus.Get() >= kProfStatusActive;
+ }
+
+ FORCEINLINE BOOL IsProfilerPresentOrInitializing(ProfilerInfo *pProfilerInfo)
+ {
+ return pProfilerInfo->curProfStatus.Get() > kProfStatusDetaching;
+ }
+
+ template<typename Func, typename... Args>
+ FORCEINLINE VOID DoOneProfilerIteration(ProfilerInfo *pProfilerInfo, ProfilerCallbackType callbackType, Func callback, Args... args)
+ {
+ // This is the dirty read
+ if (pProfilerInfo->pProfInterface.Load() != NULL)
+ {
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+ // Now indicate we are accessing the profiler
+ EvacuationCounterHolder evacuationCounter(pProfilerInfo);
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+ if ((callbackType == ProfilerCallbackType::Active && IsProfilerPresent(pProfilerInfo))
+ || (callbackType == ProfilerCallbackType::ActiveOrInitializing && IsProfilerPresentOrInitializing(pProfilerInfo)))
+ {
+ callback(pProfilerInfo, args...);
+ }
+ }
+ }
+
+ template<typename Func, typename... Args>
+ FORCEINLINE VOID IterateProfilers(ProfilerCallbackType callbackType, Func callback, Args... args)
+ {
+ DoOneProfilerIteration(&mainProfilerInfo, callbackType, callback, args...);
+
+ if (notificationProfilerCount > 0)
+ {
+ for (SIZE_T i = 0; i < MAX_NOTIFICATION_PROFILERS; ++i)
+ {
+ ProfilerInfo *current = &(notificationOnlyProfilers[i]);
+ DoOneProfilerIteration(current, callbackType, callback, args...);
+ }
+ }
+ }
+
+public:
BOOL fGCInProgress;
BOOL fBaseSystemClassesLoaded;
- BOOL fIsStoredProfilerRegistered;
- CLSID clsStoredProfilerGuid;
- SString sStoredProfilerPath;
+ STOREDPROFILERLIST storedProfilers;
+
+ ProfilerInfo mainProfilerInfo;
+
+ ProfilerInfo notificationOnlyProfilers[MAX_NOTIFICATION_PROFILERS];
+ Volatile<LONG> notificationProfilerCount;
+
+ EventMask globalEventMask;
#ifdef PROF_TEST_ONLY_FORCE_ELT_DATA
// #TestOnlyELT This implements a test-only (and debug-only) hook that allows a test
@@ -118,14 +278,128 @@ struct ProfControlBlock
#endif // _DEBUG
// Whether we've turned off concurrent GC during attach
- BOOL fConcurrentGCDisabledForAttach;
+ Volatile<BOOL> fConcurrentGCDisabledForAttach;
Volatile<BOOL> fProfControlBlockInitialized;
Volatile<BOOL> fProfilerRequestedRuntimeSuspend;
void Init();
- void ResetPerSessionStatus();
+ BOOL IsMainProfiler(EEToProfInterfaceImpl *pEEToProf);
+ BOOL IsMainProfiler(ProfToEEInterfaceImpl *pProfToEE);
+ ProfilerInfo *GetProfilerInfo(ProfToEEInterfaceImpl *pProfToEE);
+
+ template<typename ConditionFunc, typename Data, typename CallbackFunc, typename... Args>
+ FORCEINLINE HRESULT DoProfilerCallback(ProfilerCallbackType callbackType, ConditionFunc condition, Data *additionalData, CallbackFunc callback, Args... args)
+ {
+ HRESULT hr = S_OK;
+ IterateProfilers(callbackType,
+ [](ProfilerInfo *pProfilerInfo, ConditionFunc condition, Data *additionalData, CallbackFunc callback, HRESULT *pHR, Args... args)
+ {
+ if (condition(pProfilerInfo))
+ {
+ HRESULT innerHR = callback(additionalData, pProfilerInfo->pProfInterface, args...);
+ if (FAILED(innerHR))
+ {
+ *pHR = innerHR;
+ }
+ }
+ },
+ condition, additionalData, callback, &hr, args...);
+ return hr;
+ }
+
+#ifndef DACCESS_COMPILE
+ ProfilerInfo *FindNextFreeProfilerInfoSlot();
+ void DeRegisterProfilerInfo(ProfilerInfo *pProfilerInfo);
+ void UpdateGlobalEventMask();
+#endif // DACCESS_COMPILE
+
+ BOOL IsCallback3Supported();
+ BOOL IsCallback5Supported();
+ BOOL IsDisableTransparencySet();
+ BOOL RequiresGenericsContextForEnterLeave();
+ UINT_PTR EEFunctionIDMapper(FunctionID funcId, BOOL * pbHookFunction);
+
+ void ThreadCreated(ThreadID threadID);
+ void ThreadDestroyed(ThreadID threadID);
+ void ThreadAssignedToOSThread(ThreadID managedThreadId, DWORD osThreadId);
+ void ThreadNameChanged(ThreadID managedThreadId, ULONG cchName, WCHAR name[]);
+ void Shutdown();
+ void FunctionUnloadStarted(FunctionID functionId);
+ void JITCompilationFinished(FunctionID functionId, HRESULT hrStatus, BOOL fIsSafeToBlock);
+ void JITCompilationStarted(FunctionID functionId, BOOL fIsSafeToBlock);
+ void DynamicMethodJITCompilationStarted(FunctionID functionId, BOOL fIsSafeToBlock, LPCBYTE pILHeader, ULONG cbILHeader);
+ void DynamicMethodJITCompilationFinished(FunctionID functionId, HRESULT hrStatus, BOOL fIsSafeToBlock);
+ void DynamicMethodUnloaded(FunctionID functionId);
+ void JITCachedFunctionSearchStarted(FunctionID functionId, BOOL *pbUseCachedFunction);
+ void JITCachedFunctionSearchFinished(FunctionID functionId, COR_PRF_JIT_CACHE result);
+ HRESULT JITInlining(FunctionID callerId, FunctionID calleeId, BOOL *pfShouldInline);
+ void ReJITCompilationStarted(FunctionID functionId, ReJITID reJitId, BOOL fIsSafeToBlock);
+ HRESULT GetReJITParameters(ModuleID moduleId, mdMethodDef methodId, ICorProfilerFunctionControl *pFunctionControl);
+ void ReJITCompilationFinished(FunctionID functionId, ReJITID reJitId, HRESULT hrStatus, BOOL fIsSafeToBlock);
+ void ReJITError(ModuleID moduleId, mdMethodDef methodId, FunctionID functionId, HRESULT hrStatus);
+ void ModuleLoadStarted(ModuleID moduleId);
+ void ModuleLoadFinished(ModuleID moduleId, HRESULT hrStatus);
+ void ModuleUnloadStarted(ModuleID moduleId);
+ void ModuleUnloadFinished(ModuleID moduleId, HRESULT hrStatus);
+ void ModuleAttachedToAssembly(ModuleID moduleId, AssemblyID AssemblyId);
+ void ModuleInMemorySymbolsUpdated(ModuleID moduleId);
+ void ClassLoadStarted(ClassID classId);
+ void ClassLoadFinished(ClassID classId, HRESULT hrStatus);
+ void ClassUnloadStarted(ClassID classId);
+ void ClassUnloadFinished(ClassID classId, HRESULT hrStatus);
+ void AppDomainCreationStarted(AppDomainID appDomainId);
+ void AppDomainCreationFinished(AppDomainID appDomainId, HRESULT hrStatus);
+ void AppDomainShutdownStarted(AppDomainID appDomainId);
+ void AppDomainShutdownFinished(AppDomainID appDomainId, HRESULT hrStatus);
+ void AssemblyLoadStarted(AssemblyID assemblyId);
+ void AssemblyLoadFinished(AssemblyID assemblyId, HRESULT hrStatus);
+ void AssemblyUnloadStarted(AssemblyID assemblyId);
+ void AssemblyUnloadFinished(AssemblyID assemblyId, HRESULT hrStatus);
+ void UnmanagedToManagedTransition(FunctionID functionId, COR_PRF_TRANSITION_REASON reason);
+ void ManagedToUnmanagedTransition(FunctionID functionId, COR_PRF_TRANSITION_REASON reason);
+ void ExceptionThrown(ObjectID thrownObjectId);
+ void ExceptionSearchFunctionEnter(FunctionID functionId);
+ void ExceptionSearchFunctionLeave();
+ void ExceptionSearchFilterEnter(FunctionID funcId);
+ void ExceptionSearchFilterLeave();
+ void ExceptionSearchCatcherFound(FunctionID functionId);
+ void ExceptionOSHandlerEnter(FunctionID funcId);
+ void ExceptionOSHandlerLeave(FunctionID funcId);
+ void ExceptionUnwindFunctionEnter(FunctionID functionId);
+ void ExceptionUnwindFunctionLeave();
+ void ExceptionUnwindFinallyEnter(FunctionID functionId);
+ void ExceptionUnwindFinallyLeave();
+ void ExceptionCatcherEnter(FunctionID functionId, ObjectID objectId);
+ void ExceptionCatcherLeave();
+ void COMClassicVTableCreated(ClassID wrappedClassId, REFGUID implementedIID, void *pVTable, ULONG cSlots);
+ void RuntimeSuspendStarted(COR_PRF_SUSPEND_REASON suspendReason);
+ void RuntimeSuspendFinished();
+ void RuntimeSuspendAborted();
+ void RuntimeResumeStarted();
+ void RuntimeResumeFinished();
+ void RuntimeThreadSuspended(ThreadID suspendedThreadId);
+ void RuntimeThreadResumed(ThreadID resumedThreadId);
+ void ObjectAllocated(ObjectID objectId, ClassID classId);
+ void FinalizeableObjectQueued(BOOL isCritical, ObjectID objectID);
+ void MovedReference(BYTE *pbMemBlockStart, BYTE *pbMemBlockEnd, ptrdiff_t cbRelocDistance, void *pHeapId, BOOL fCompacting);
+ void EndMovedReferences(void *pHeapId);
+ void RootReference2(BYTE *objectId, EtwGCRootKind dwEtwRootKind, EtwGCRootFlags dwEtwRootFlags, void *rootID, void *pHeapId);
+ void EndRootReferences2(void *pHeapId);
+ void ConditionalWeakTableElementReference(BYTE *primaryObjectId, BYTE *secondaryObjectId, void *rootID, void *pHeapId);
+ void EndConditionalWeakTableElementReferences(void *pHeapId);
+ void AllocByClass(ObjectID objId, ClassID classId, void *pHeapId);
+ void EndAllocByClass(void *pHeapId);
+ HRESULT ObjectReference(ObjectID objId, ClassID classId, ULONG cNumRefs, ObjectID *arrObjRef);
+ void HandleCreated(UINT_PTR handleId, ObjectID initialObjectId);
+ void HandleDestroyed(UINT_PTR handleId);
+ void GarbageCollectionStarted(int cGenerations, BOOL generationCollected[], COR_PRF_GC_REASON reason);
+ void GarbageCollectionFinished();
+ void GetAssemblyReferences(LPCWSTR wszAssemblyPath, IAssemblyBindingClosure *pClosure, AssemblyReferenceClosureWalkContextForProfAPI *pContext);
+ void EventPipeEventDelivered(EventPipeProvider *provider, DWORD eventId, DWORD eventVersion, ULONG cbMetadataBlob, LPCBYTE metadataBlob, ULONG cbEventData,
+ LPCBYTE eventData, LPCGUID pActivityId, LPCGUID pRelatedActivityId, Thread *pEventThread, ULONG numStackFrames, UINT_PTR stackFrames[]);
+ void EventPipeProviderCreated(EventPipeProvider *provider);
};
diff --git a/src/coreclr/inc/profilepriv.inl b/src/coreclr/inc/profilepriv.inl
index 41e0bf3da96e9b..08ba58f5623f1c 100644
--- a/src/coreclr/inc/profilepriv.inl
+++ b/src/coreclr/inc/profilepriv.inl
@@ -17,7 +17,6 @@
#include "eetoprofinterfaceimpl.h"
#ifdef PROFILING_SUPPORTED
#include "profilinghelper.h"
-BOOL CORProfilerBypassSecurityChecks();
#endif // PROFILING_SUPPORTED
//---------------------------------------------------------------------------------------
@@ -36,11 +35,1260 @@ inline ProfilerStatus CurrentProfilerStatus::Get()
return m_profStatus;
}
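+
+// EventMask packs the low (COR_PRF_MONITOR_*) and high (COR_PRF_HIGH_MONITOR_*)
+// event flags into a single UINT64 so both halves can be read and updated as one value.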
+inline EventMask& EventMask::operator=(const EventMask& other)
+{
+ m_eventMask = other.m_eventMask;
+ return *this;
+}
+
+inline BOOL EventMask::IsEventMaskSet(DWORD eventMask)
+{
+ return (GetEventMask() & eventMask) != 0;
+}
+
+inline DWORD EventMask::GetEventMask()
+{
+ return (DWORD)(m_eventMask & EventMaskLowMask);
+}
+
+inline void EventMask::SetEventMask(DWORD eventMask)
+{
+ m_eventMask = (m_eventMask & EventMaskHighMask) | (UINT64)eventMask;
+}
+
+inline BOOL EventMask::IsEventMaskHighSet(DWORD eventMaskHigh)
+{
+ return (GetEventMaskHigh() & eventMaskHigh) != 0;
+}
+
+inline DWORD EventMask::GetEventMaskHigh()
+{
+ return (DWORD)((m_eventMask & EventMaskHighMask) >> EventMaskHighShiftAmount);
+}
+
+inline void EventMask::SetEventMaskHigh(DWORD eventMaskHigh)
+{
+ m_eventMask = (m_eventMask & EventMaskLowMask) | ((UINT64)eventMaskHigh << EventMaskHighShiftAmount);
+}
+
+// Reset the variables that are only valid for the current attach session
+inline void ProfilerInfo::ResetPerSessionStatus()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ pProfInterface = NULL;
+ eventMask.SetEventMask(COR_PRF_MONITOR_NONE);
+ eventMask.SetEventMaskHigh(COR_PRF_HIGH_MONITOR_NONE);
+}
+
+inline void ProfilerInfo::Init()
+{
+ curProfStatus.Init();
+ dwProfilerEvacuationCounter = 0;
+ ResetPerSessionStatus();
+ inUse = FALSE;
+}
+
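+// Returns TRUE if any loaded profiler (active or still initializing) satisfies the given
+// condition; used to fold per-profiler state into a single process-wide answer.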
+template<typename ConditionFunc>
+inline BOOL AnyProfilerPassesCondition(ConditionFunc condition)
+{
+ BOOL anyPassed = FALSE;
+ (&g_profControlBlock)->DoProfilerCallback(ProfilerCallbackType::ActiveOrInitializing,
+ condition,
+ &anyPassed,
+ [](BOOL *pAnyPassed, VolatilePtr<EEToProfInterfaceImpl> profInterface)
+ {
+ *pAnyPassed = TRUE;
+ return S_OK;
+ });
+
+ return anyPassed;
+}
+
//---------------------------------------------------------------------------------------
// ProfControlBlock
//---------------------------------------------------------------------------------------
-inline void ProfControlBlock::Init()
+inline void ProfControlBlock::Init()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ mainProfilerInfo.Init();
+
+ for (SIZE_T i = 0; i < MAX_NOTIFICATION_PROFILERS; ++i)
+ {
+ notificationOnlyProfilers[i].Init();
+ }
+
+ globalEventMask.SetEventMask(COR_PRF_MONITOR_NONE);
+ globalEventMask.SetEventMaskHigh(COR_PRF_HIGH_MONITOR_NONE);
+
+ fGCInProgress = FALSE;
+ fBaseSystemClassesLoaded = FALSE;
+#ifdef PROF_TEST_ONLY_FORCE_ELT
+ fTestOnlyForceEnterLeave = FALSE;
+#endif
+
+#ifdef PROF_TEST_ONLY_FORCE_OBJECT_ALLOCATED
+ fTestOnlyForceObjectAllocated = FALSE;
+#endif
+
+#ifdef _DEBUG
+ fTestOnlyEnableICorProfilerInfo = FALSE;
+#endif // _DEBUG
+
+ fConcurrentGCDisabledForAttach = FALSE;
+
+ mainProfilerInfo.ResetPerSessionStatus();
+
+ fProfControlBlockInitialized = TRUE;
+
+ fProfilerRequestedRuntimeSuspend = FALSE;
+}
+
+
+inline BOOL ProfControlBlock::IsMainProfiler(EEToProfInterfaceImpl *pEEToProf)
+{
+ EEToProfInterfaceImpl *pProfInterface = mainProfilerInfo.pProfInterface.Load();
+ return pProfInterface == pEEToProf;
+}
+
+inline BOOL ProfControlBlock::IsMainProfiler(ProfToEEInterfaceImpl *pProfToEE)
+{
+ EEToProfInterfaceImpl *pProfInterface = mainProfilerInfo.pProfInterface.Load();
+ return pProfInterface != NULL && pProfInterface->m_pProfToEE == pProfToEE;
+}
+
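+// Reverse lookup: finds the ProfilerInfo slot whose EEToProfInterfaceImpl owns the given
+// ProfToEEInterfaceImpl, or returns NULL if no active profiler matches.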
+inline ProfilerInfo *ProfControlBlock::GetProfilerInfo(ProfToEEInterfaceImpl *pProfToEE)
+{
+ ProfilerInfo *pProfilerInfo = NULL;
+ IterateProfilers(ProfilerCallbackType::Active,
+ [](ProfilerInfo *pProfilerInfo, ProfToEEInterfaceImpl *pProfToEE, ProfilerInfo **ppFoundProfilerInfo)
+ {
+ if (pProfilerInfo->pProfInterface->m_pProfToEE == pProfToEE)
+ {
+ *ppFoundProfilerInfo = pProfilerInfo;
+ }
+ },
+ pProfToEE,
+ &pProfilerInfo);
+
+ return pProfilerInfo;
+}
+
+#ifndef DACCESS_COMPILE
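+// Claims a free notification-only slot lock-free: the interlocked exchange on inUse
+// guarantees each slot is handed out to at most one attaching profiler.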
+inline ProfilerInfo *ProfControlBlock::FindNextFreeProfilerInfoSlot()
+{
+ for (SIZE_T i = 0; i < MAX_NOTIFICATION_PROFILERS; ++i)
+ {
+ if (InterlockedCompareExchange((LONG *)notificationOnlyProfilers[i].inUse.GetPointer(), TRUE, FALSE) == FALSE)
+ {
+ InterlockedIncrement(notificationProfilerCount.GetPointer());
+ return &(notificationOnlyProfilers[i]);
+ }
+ }
+
+ return NULL;
+}
+
+inline void ProfControlBlock::DeRegisterProfilerInfo(ProfilerInfo *pProfilerInfo)
+{
+ pProfilerInfo->inUse = FALSE;
+ InterlockedDecrement(notificationProfilerCount.GetPointer());
+}
+
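+// Recomputes the global event mask as the union of every profiler's mask; the CAS retry
+// loop ensures a concurrent update to any profiler's mask is not lost.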
+inline void ProfControlBlock::UpdateGlobalEventMask()
+{
+ while (true)
+ {
+ UINT64 originalEventMask = globalEventMask.m_eventMask;
+ UINT64 qwEventMask = 0;
+
+ IterateProfilers(ProfilerCallbackType::ActiveOrInitializing,
+ [](ProfilerInfo *pProfilerInfo, UINT64 *pEventMask)
+ {
+ *pEventMask |= pProfilerInfo->eventMask.m_eventMask;
+ },
+ &qwEventMask);
+
+ // We are relying on the memory barrier introduced by InterlockedCompareExchange64 to observe any
+ // change to the global event mask.
+ if ((UINT64)InterlockedCompareExchange64((LONG64 *)&(globalEventMask.m_eventMask), (LONG64)qwEventMask, (LONG64)originalEventMask) == originalEventMask)
+ {
+ break;
+ }
+ }
+}
+#endif // DACCESS_COMPILE
+
+inline BOOL ProfControlBlock::IsCallback3Supported()
+{
+ return AnyProfilerPassesCondition([](ProfilerInfo *pProfilerInfo) { return pProfilerInfo->pProfInterface->IsCallback3Supported(); });
+}
+
+inline BOOL ProfControlBlock::IsCallback5Supported()
+{
+ return AnyProfilerPassesCondition([](ProfilerInfo *pProfilerInfo) { return pProfilerInfo->pProfInterface->IsCallback5Supported(); });
+}
+
+inline BOOL ProfControlBlock::IsDisableTransparencySet()
+{
+ return AnyProfilerPassesCondition([](ProfilerInfo *pProfilerInfo) { return pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_DISABLE_TRANSPARENCY_CHECKS_UNDER_FULL_TRUST); });
+}
+
+inline BOOL ProfControlBlock::RequiresGenericsContextForEnterLeave()
+{
+ return AnyProfilerPassesCondition([](ProfilerInfo *pProfilerInfo) { return pProfilerInfo->pProfInterface->RequiresGenericsContextForEnterLeave(); });
+}
+
+inline bool DoesProfilerWantEEFunctionIDMapper(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return ((pProfilerInfo->pProfInterface->GetFunctionIDMapper() != NULL) ||
+ (pProfilerInfo->pProfInterface->GetFunctionIDMapper2() != NULL));
+}
+
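+// Only the main profiler may install a FunctionIDMapper, so this consults
+// mainProfilerInfo alone rather than iterating all profilers.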
+inline UINT_PTR ProfControlBlock::EEFunctionIDMapper(FunctionID funcId, BOOL *pbHookFunction)
+{
+ LIMITED_METHOD_CONTRACT;
+ UINT_PTR ptr = NULL;
+ DoOneProfilerIteration(&mainProfilerInfo,
+ ProfilerCallbackType::Active,
+ [](ProfilerInfo *pProfilerInfo, FunctionID funcId, BOOL *pbHookFunction, UINT_PTR *pPtr)
+ {
+ if (DoesProfilerWantEEFunctionIDMapper(pProfilerInfo))
+ {
+ *pPtr = pProfilerInfo->pProfInterface->EEFunctionIDMapper(funcId, pbHookFunction);
+ }
+ },
+ funcId, pbHookFunction, &ptr);
+
+ return ptr;
+}
+
+inline BOOL IsProfilerTrackingThreads(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_MONITOR_THREADS);
+}
+
+inline void ProfControlBlock::ThreadCreated(ThreadID threadID)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingThreads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ThreadID threadID)
+ {
+ return profInterface->ThreadCreated(threadID);
+ },
+ threadID);
+}
+
+inline void ProfControlBlock::ThreadDestroyed(ThreadID threadID)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingThreads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ThreadID threadID)
+ {
+ return profInterface->ThreadDestroyed(threadID);
+ },
+ threadID);
+}
+
+inline void ProfControlBlock::ThreadAssignedToOSThread(ThreadID managedThreadId, DWORD osThreadId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingThreads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ThreadID managedThreadId, DWORD osThreadId)
+ {
+ return profInterface->ThreadAssignedToOSThread(managedThreadId, osThreadId);
+ },
+ managedThreadId, osThreadId);
+}
+
+inline void ProfControlBlock::ThreadNameChanged(ThreadID managedThreadId, ULONG cchName, WCHAR name[])
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingThreads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ThreadID managedThreadId, ULONG cchName, WCHAR name[])
+ {
+ return profInterface->ThreadNameChanged(managedThreadId, cchName, name);
+ },
+ managedThreadId, cchName, name);
+}
+
+inline void ProfControlBlock::Shutdown()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ [](ProfilerInfo *pProfilerInfo) { return true; },
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface)
+ {
+ return profInterface->Shutdown();
+ });
+}
+
+inline BOOL IsProfilerTrackingJITInfo(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_MONITOR_JIT_COMPILATION);
+}
+
+inline void ProfControlBlock::JITCompilationFinished(FunctionID functionId, HRESULT hrStatus, BOOL fIsSafeToBlock)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingJITInfo,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID functionId, HRESULT hrStatus, BOOL fIsSafeToBlock)
+ {
+ return profInterface->JITCompilationFinished(functionId, hrStatus, fIsSafeToBlock);
+ },
+ functionId, hrStatus, fIsSafeToBlock);
+}
+
+inline void ProfControlBlock::JITCompilationStarted(FunctionID functionId, BOOL fIsSafeToBlock)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingJITInfo,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID functionId, BOOL fIsSafeToBlock)
+ {
+ return profInterface->JITCompilationStarted(functionId, fIsSafeToBlock);
+ },
+ functionId, fIsSafeToBlock);
+}
+
+inline void ProfControlBlock::DynamicMethodJITCompilationStarted(FunctionID functionId, BOOL fIsSafeToBlock, LPCBYTE pILHeader, ULONG cbILHeader)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingJITInfo,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID functionId, BOOL fIsSafeToBlock, LPCBYTE pILHeader, ULONG cbILHeader)
+ {
+ return profInterface->DynamicMethodJITCompilationStarted(functionId, fIsSafeToBlock, pILHeader, cbILHeader);
+ },
+ functionId, fIsSafeToBlock, pILHeader, cbILHeader);
+}
+
+inline void ProfControlBlock::DynamicMethodJITCompilationFinished(FunctionID functionId, HRESULT hrStatus, BOOL fIsSafeToBlock)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingJITInfo,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID functionId, HRESULT hrStatus, BOOL fIsSafeToBlock)
+ {
+ return profInterface->DynamicMethodJITCompilationFinished(functionId, hrStatus, fIsSafeToBlock);
+ },
+ functionId, hrStatus, fIsSafeToBlock);
+}
+
+inline BOOL IsProfilerMonitoringDynamicFunctionUnloads(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return pProfilerInfo->eventMask.IsEventMaskHighSet(COR_PRF_HIGH_MONITOR_DYNAMIC_FUNCTION_UNLOADS);
+}
+
+inline void ProfControlBlock::DynamicMethodUnloaded(FunctionID functionId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerMonitoringDynamicFunctionUnloads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID functionId)
+ {
+ return profInterface->DynamicMethodUnloaded(functionId);
+ },
+ functionId);
+}
+
+inline BOOL IsProfilerTrackingCacheSearches(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_MONITOR_CACHE_SEARCHES);
+}
+
+inline void ProfControlBlock::JITCachedFunctionSearchStarted(FunctionID functionId, BOOL *pbUseCachedFunction)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ BOOL allTrue = TRUE;
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingCacheSearches,
+ &allTrue,
+ [](BOOL *pAllTrue, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID functionId, BOOL *pbUseCachedFunction)
+ {
+ HRESULT hr = profInterface->JITCachedFunctionSearchStarted(functionId, pbUseCachedFunction);
+ *pAllTrue &= *pbUseCachedFunction;
+ return hr;
+ },
+ functionId, pbUseCachedFunction);
+
+ // If any profiler rejects the cached function, consider it rejected.
+ *pbUseCachedFunction = allTrue;
+}
+
+inline void ProfControlBlock::JITCachedFunctionSearchFinished(FunctionID functionId, COR_PRF_JIT_CACHE result)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingCacheSearches,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID functionId, COR_PRF_JIT_CACHE result)
+ {
+ return profInterface->JITCachedFunctionSearchFinished(functionId, result);
+ },
+ functionId, result);
+}
+
+inline HRESULT ProfControlBlock::JITInlining(FunctionID callerId, FunctionID calleeId, BOOL *pfShouldInline)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ BOOL allTrue = TRUE;
+ HRESULT hr = DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingJITInfo,
+ &allTrue,
+ [](BOOL *pAllTrue, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID callerId, FunctionID calleeId, BOOL *pfShouldInline)
+ {
+ HRESULT hr = profInterface->JITInlining(callerId, calleeId, pfShouldInline);
+ *pAllTrue &= *pfShouldInline;
+ return hr;
+ },
+ callerId, calleeId, pfShouldInline);
+
+ // If any profiler rejects the inline, consider it rejected.
+ *pfShouldInline = allTrue;
+ return hr;
+}
+
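+// ReJIT is reserved for the main profiler, so the ReJIT wrappers below use
+// DoOneProfilerIteration over mainProfilerInfo instead of iterating every profiler.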
+inline BOOL IsRejitEnabled(ProfilerInfo *pProfilerInfo)
+{
+ return pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_ENABLE_REJIT);
+}
+
+inline void ProfControlBlock::ReJITCompilationStarted(FunctionID functionId, ReJITID reJitId, BOOL fIsSafeToBlock)
+{
+ LIMITED_METHOD_CONTRACT;
+ DoOneProfilerIteration(&mainProfilerInfo,
+ ProfilerCallbackType::Active,
+ [](ProfilerInfo *pProfilerInfo, FunctionID functionId, ReJITID reJitId, BOOL fIsSafeToBlock)
+ {
+ if (IsRejitEnabled(pProfilerInfo))
+ {
+ pProfilerInfo->pProfInterface->ReJITCompilationStarted(functionId, reJitId, fIsSafeToBlock);
+ }
+ },
+ functionId, reJitId, fIsSafeToBlock);
+}
+
+inline HRESULT ProfControlBlock::GetReJITParameters(ModuleID moduleId, mdMethodDef methodId, ICorProfilerFunctionControl *pFunctionControl)
+{
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+ DoOneProfilerIteration(&mainProfilerInfo,
+ ProfilerCallbackType::Active,
+ [](ProfilerInfo *pProfilerInfo, ModuleID moduleId, mdMethodDef methodId, ICorProfilerFunctionControl *pFunctionControl, HRESULT *pHr)
+ {
+ if (IsRejitEnabled(pProfilerInfo))
+ {
+ *pHr = pProfilerInfo->pProfInterface->GetReJITParameters(moduleId, methodId, pFunctionControl);
+ }
+ },
+ moduleId, methodId, pFunctionControl, &hr);
+ return hr;
+}
+
+inline void ProfControlBlock::ReJITCompilationFinished(FunctionID functionId, ReJITID reJitId, HRESULT hrStatus, BOOL fIsSafeToBlock)
+{
+ LIMITED_METHOD_CONTRACT;
+ DoOneProfilerIteration(&mainProfilerInfo,
+ ProfilerCallbackType::Active,
+ [](ProfilerInfo *pProfilerInfo, FunctionID functionId, ReJITID reJitId, HRESULT hrStatus, BOOL fIsSafeToBlock)
+ {
+ if (IsRejitEnabled(pProfilerInfo))
+ {
+ pProfilerInfo->pProfInterface->ReJITCompilationFinished(functionId, reJitId, hrStatus, fIsSafeToBlock);
+ }
+ },
+ functionId, reJitId, hrStatus, fIsSafeToBlock);
+}
+
+inline void ProfControlBlock::ReJITError(ModuleID moduleId, mdMethodDef methodId, FunctionID functionId, HRESULT hrStatus)
+{
+ LIMITED_METHOD_CONTRACT;
+ DoOneProfilerIteration(&mainProfilerInfo,
+ ProfilerCallbackType::Active,
+ [](ProfilerInfo *pProfilerInfo, ModuleID moduleId, mdMethodDef methodId, FunctionID functionId, HRESULT hrStatus)
+ {
+ if (IsRejitEnabled(pProfilerInfo))
+ {
+ pProfilerInfo->pProfInterface->ReJITError(moduleId, methodId, functionId, hrStatus);
+ }
+ },
+ moduleId, methodId, functionId, hrStatus);
+}
+
+inline BOOL IsProfilerTrackingModuleLoads(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_MONITOR_MODULE_LOADS);
+}
+
+inline void ProfControlBlock::ModuleLoadStarted(ModuleID moduleId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingModuleLoads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ModuleID moduleId)
+ {
+ return profInterface->ModuleLoadStarted(moduleId);
+ },
+ moduleId);
+}
+
+inline void ProfControlBlock::ModuleLoadFinished(ModuleID moduleId, HRESULT hrStatus)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingModuleLoads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ModuleID moduleId, HRESULT hrStatus)
+ {
+ return profInterface->ModuleLoadFinished(moduleId, hrStatus);
+ },
+ moduleId, hrStatus);
+}
+
+inline void ProfControlBlock::ModuleUnloadStarted(ModuleID moduleId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingModuleLoads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ModuleID moduleId)
+ {
+ return profInterface->ModuleUnloadStarted(moduleId);
+ },
+ moduleId);
+}
+
+inline void ProfControlBlock::ModuleUnloadFinished(ModuleID moduleId, HRESULT hrStatus)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingModuleLoads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ModuleID moduleId, HRESULT hrStatus)
+ {
+ return profInterface->ModuleUnloadFinished(moduleId, hrStatus);
+ },
+ moduleId, hrStatus);
+}
+
+inline void ProfControlBlock::ModuleAttachedToAssembly(ModuleID moduleId, AssemblyID AssemblyId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingModuleLoads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ModuleID moduleId, AssemblyID AssemblyId)
+ {
+ return profInterface->ModuleAttachedToAssembly(moduleId, AssemblyId);
+ },
+ moduleId, AssemblyId);
+}
+
+inline BOOL IsProfilerTrackingInMemorySymbolsUpdatesEnabled(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return pProfilerInfo->eventMask.IsEventMaskHighSet(COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED);
+}
+
+inline void ProfControlBlock::ModuleInMemorySymbolsUpdated(ModuleID moduleId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingInMemorySymbolsUpdatesEnabled,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ModuleID moduleId)
+ {
+ return profInterface->ModuleInMemorySymbolsUpdated(moduleId);
+ },
+ moduleId);
+}
+
+inline BOOL IsProfilerTrackingClasses(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_MONITOR_CLASS_LOADS);
+}
+
+inline void ProfControlBlock::ClassLoadStarted(ClassID classId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingClasses,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ClassID classId)
+ {
+ return profInterface->ClassLoadStarted(classId);
+ },
+ classId);
+}
+
+inline void ProfControlBlock::ClassLoadFinished(ClassID classId, HRESULT hrStatus)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingClasses,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ClassID classId, HRESULT hrStatus)
+ {
+ return profInterface->ClassLoadFinished(classId, hrStatus);
+ },
+ classId, hrStatus);
+}
+
+inline void ProfControlBlock::ClassUnloadStarted(ClassID classId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingClasses,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ClassID classId)
+ {
+ return profInterface->ClassUnloadStarted(classId);
+ },
+ classId);
+}
+
+inline void ProfControlBlock::ClassUnloadFinished(ClassID classId, HRESULT hrStatus)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingClasses,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ClassID classId, HRESULT hrStatus)
+ {
+ return profInterface->ClassUnloadFinished(classId, hrStatus);
+ },
+ classId, hrStatus);
+}
+
+inline BOOL IsProfilerTrackingAppDomainLoads(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_MONITOR_APPDOMAIN_LOADS);
+}
+
+inline void ProfControlBlock::AppDomainCreationStarted(AppDomainID appDomainId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingAppDomainLoads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, AppDomainID appDomainId)
+ {
+ return profInterface->AppDomainCreationStarted(appDomainId);
+ },
+ appDomainId);
+}
+
+inline void ProfControlBlock::AppDomainCreationFinished(AppDomainID appDomainId, HRESULT hrStatus)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingAppDomainLoads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, AppDomainID appDomainId, HRESULT hrStatus)
+ {
+ return profInterface->AppDomainCreationFinished(appDomainId, hrStatus);
+ },
+ appDomainId, hrStatus);
+}
+
+inline void ProfControlBlock::AppDomainShutdownStarted(AppDomainID appDomainId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingAppDomainLoads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, AppDomainID appDomainId)
+ {
+ return profInterface->AppDomainShutdownStarted(appDomainId);
+ },
+ appDomainId);
+}
+
+inline void ProfControlBlock::AppDomainShutdownFinished(AppDomainID appDomainId, HRESULT hrStatus)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingAppDomainLoads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, AppDomainID appDomainId, HRESULT hrStatus)
+ {
+ return profInterface->AppDomainShutdownFinished(appDomainId, hrStatus);
+ },
+ appDomainId, hrStatus);
+}
+
+inline BOOL IsProfilerTrackingAssemblyLoads(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_MONITOR_ASSEMBLY_LOADS);
+}
+
+inline void ProfControlBlock::AssemblyLoadStarted(AssemblyID assemblyId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingAssemblyLoads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, AssemblyID assemblyId)
+ {
+ return profInterface->AssemblyLoadStarted(assemblyId);
+ },
+ assemblyId);
+}
+
+inline void ProfControlBlock::AssemblyLoadFinished(AssemblyID assemblyId, HRESULT hrStatus)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingAssemblyLoads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, AssemblyID assemblyId, HRESULT hrStatus)
+ {
+ return profInterface->AssemblyLoadFinished(assemblyId, hrStatus);
+ },
+ assemblyId, hrStatus);
+}
+
+inline void ProfControlBlock::AssemblyUnloadStarted(AssemblyID assemblyId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingAssemblyLoads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, AssemblyID assemblyId)
+ {
+ return profInterface->AssemblyUnloadStarted(assemblyId);
+ },
+ assemblyId);
+}
+
+inline void ProfControlBlock::AssemblyUnloadFinished(AssemblyID assemblyId, HRESULT hrStatus)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingAssemblyLoads,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, AssemblyID assemblyId, HRESULT hrStatus)
+ {
+ return profInterface->AssemblyUnloadFinished(assemblyId, hrStatus);
+ },
+ assemblyId, hrStatus);
+}
+
+inline BOOL IsProfilerTrackingTransitions(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_MONITOR_CODE_TRANSITIONS);
+}
+
+inline void ProfControlBlock::UnmanagedToManagedTransition(FunctionID functionId, COR_PRF_TRANSITION_REASON reason)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingTransitions,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID functionId, COR_PRF_TRANSITION_REASON reason)
+ {
+ return profInterface->UnmanagedToManagedTransition(functionId, reason);
+ },
+ functionId, reason);
+}
+
+inline void ProfControlBlock::ManagedToUnmanagedTransition(FunctionID functionId, COR_PRF_TRANSITION_REASON reason)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingTransitions,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID functionId, COR_PRF_TRANSITION_REASON reason)
+ {
+ return profInterface->ManagedToUnmanagedTransition(functionId, reason);
+ },
+ functionId, reason);
+}
+
+inline BOOL IsProfilerTrackingExceptions(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_MONITOR_EXCEPTIONS);
+}
+
+inline void ProfControlBlock::ExceptionThrown(ObjectID thrownObjectId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingExceptions,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ObjectID thrownObjectId)
+ {
+ return profInterface->ExceptionThrown(thrownObjectId);
+ },
+ thrownObjectId);
+}
+
+inline void ProfControlBlock::ExceptionSearchFunctionEnter(FunctionID functionId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingExceptions,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID functionId)
+ {
+ return profInterface->ExceptionSearchFunctionEnter(functionId);
+ },
+ functionId);
+}
+
+inline void ProfControlBlock::ExceptionSearchFunctionLeave()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingExceptions,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface)
+ {
+ return profInterface->ExceptionSearchFunctionLeave();
+ });
+}
+
+inline void ProfControlBlock::ExceptionSearchFilterEnter(FunctionID funcId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingExceptions,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID funcId)
+ {
+ return profInterface->ExceptionSearchFilterEnter(funcId);
+ },
+ funcId);
+}
+
+inline void ProfControlBlock::ExceptionSearchFilterLeave()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingExceptions,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface)
+ {
+ return profInterface->ExceptionSearchFilterLeave();
+ });
+}
+
+inline void ProfControlBlock::ExceptionSearchCatcherFound(FunctionID functionId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingExceptions,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID functionId)
+ {
+ return profInterface->ExceptionSearchCatcherFound(functionId);
+ },
+ functionId);
+}
+
+inline void ProfControlBlock::ExceptionOSHandlerEnter(FunctionID funcId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingExceptions,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID funcId)
+ {
+ return profInterface->ExceptionOSHandlerEnter(funcId);
+ },
+ funcId);
+}
+
+inline void ProfControlBlock::ExceptionOSHandlerLeave(FunctionID funcId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingExceptions,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID funcId)
+ {
+ return profInterface->ExceptionOSHandlerLeave(funcId);
+ },
+ funcId);
+}
+
+inline void ProfControlBlock::ExceptionUnwindFunctionEnter(FunctionID functionId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingExceptions,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID functionId)
+ {
+ return profInterface->ExceptionUnwindFunctionEnter(functionId);
+ },
+ functionId);
+}
+
+inline void ProfControlBlock::ExceptionUnwindFunctionLeave()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingExceptions,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface)
+ {
+ return profInterface->ExceptionUnwindFunctionLeave();
+ });
+}
+
+inline void ProfControlBlock::ExceptionUnwindFinallyEnter(FunctionID functionId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingExceptions,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID functionId)
+ {
+ return profInterface->ExceptionUnwindFinallyEnter(functionId);
+ },
+ functionId);
+}
+
+inline void ProfControlBlock::ExceptionUnwindFinallyLeave()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingExceptions,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface)
+ {
+ return profInterface->ExceptionUnwindFinallyLeave();
+ });
+}
+
+inline void ProfControlBlock::ExceptionCatcherEnter(FunctionID functionId, ObjectID objectId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingExceptions,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, FunctionID functionId, ObjectID objectId)
+ {
+ return profInterface->ExceptionCatcherEnter(functionId, objectId);
+ },
+ functionId, objectId);
+}
+
+inline void ProfControlBlock::ExceptionCatcherLeave()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingExceptions,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface)
+ {
+ return profInterface->ExceptionCatcherLeave();
+ });
+}
+
+inline BOOL IsProfilerTrackingCCW(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_MONITOR_CCW);
+}
+
+inline void ProfControlBlock::COMClassicVTableCreated(ClassID wrappedClassId, REFGUID implementedIID, void *pVTable, ULONG cSlots)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingCCW,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ClassID wrappedClassId, REFGUID implementedIID, void *pVTable, ULONG cSlots)
+ {
+ return profInterface->COMClassicVTableCreated(wrappedClassId, implementedIID, pVTable, cSlots);
+ },
+ wrappedClassId, implementedIID, pVTable, cSlots);
+}
+
+inline BOOL IsProfilerTrackingSuspends(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_MONITOR_SUSPENDS);
+}
+
+inline void ProfControlBlock::RuntimeSuspendStarted(COR_PRF_SUSPEND_REASON suspendReason)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingSuspends,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, COR_PRF_SUSPEND_REASON suspendReason)
+ {
+ return profInterface->RuntimeSuspendStarted(suspendReason);
+ },
+ suspendReason);
+}
+
+inline void ProfControlBlock::RuntimeSuspendFinished()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingSuspends,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface)
+ {
+ return profInterface->RuntimeSuspendFinished();
+ });
+}
+
+inline void ProfControlBlock::RuntimeSuspendAborted()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingSuspends,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface)
+ {
+ return profInterface->RuntimeSuspendAborted();
+ });
+}
+
+inline void ProfControlBlock::RuntimeResumeStarted()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingSuspends,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface)
+ {
+ return profInterface->RuntimeResumeStarted();
+ });
+}
+
+inline void ProfControlBlock::RuntimeResumeFinished()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingSuspends,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface)
+ {
+ return profInterface->RuntimeResumeFinished();
+ });
+}
+
+inline void ProfControlBlock::RuntimeThreadSuspended(ThreadID suspendedThreadId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingSuspends,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ThreadID suspendedThreadId)
+ {
+ return profInterface->RuntimeThreadSuspended(suspendedThreadId);
+ },
+ suspendedThreadId);
+}
+
+inline void ProfControlBlock::RuntimeThreadResumed(ThreadID resumedThreadId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingSuspends,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ThreadID resumedThreadId)
+ {
+ return profInterface->RuntimeThreadResumed(resumedThreadId);
+ },
+ resumedThreadId);
+}
+
+inline BOOL IsProfilerTrackingAllocations(ProfilerInfo *pProfilerInfo)
{
CONTRACTL
{
@@ -50,39 +1298,365 @@ inline void ProfControlBlock::Init()
}
CONTRACTL_END;
- curProfStatus.Init();
+ return (pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_ENABLE_OBJECT_ALLOCATED)
+ || pProfilerInfo->eventMask.IsEventMaskHighSet(COR_PRF_HIGH_MONITOR_LARGEOBJECT_ALLOCATED));
+}
- fGCInProgress = FALSE;
- fBaseSystemClassesLoaded = FALSE;
-#ifdef PROF_TEST_ONLY_FORCE_ELT
- fTestOnlyForceEnterLeave = FALSE;
-#endif
+inline void ProfControlBlock::ObjectAllocated(ObjectID objectId, ClassID classId)
+{
+ LIMITED_METHOD_CONTRACT;
-#ifdef PROF_TEST_ONLY_FORCE_OBJECT_ALLOCATED
- fTestOnlyForceObjectAllocated = FALSE;
-#endif
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingAllocations,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ObjectID objectId, ClassID classId)
+ {
+ return profInterface->ObjectAllocated(objectId, classId);
+ },
+ objectId, classId);
+}
-#ifdef _DEBUG
- fTestOnlyEnableICorProfilerInfo = FALSE;
-#endif // _DEBUG
- fConcurrentGCDisabledForAttach = FALSE;
+inline BOOL IsProfilerTrackingGC(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
- ResetPerSessionStatus();
+ return pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_MONITOR_GC);
+}
- fProfControlBlockInitialized = TRUE;
+inline void ProfControlBlock::FinalizeableObjectQueued(BOOL isCritical, ObjectID objectID)
+{
+ LIMITED_METHOD_CONTRACT;
- fProfilerRequestedRuntimeSuspend = FALSE;
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingGC,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, BOOL isCritical, ObjectID objectID)
+ {
+ return profInterface->FinalizeableObjectQueued(isCritical, objectID);
+ },
+ isCritical, objectID);
}
-// Reset those variables that is only for the current attach session
-inline void ProfControlBlock::ResetPerSessionStatus()
+inline void ProfControlBlock::MovedReference(BYTE *pbMemBlockStart, BYTE *pbMemBlockEnd, ptrdiff_t cbRelocDistance, void *pHeapId, BOOL fCompacting)
{
LIMITED_METHOD_CONTRACT;
- pProfInterface = NULL;
- dwEventMask = COR_PRF_MONITOR_NONE;
- dwEventMaskHigh = COR_PRF_HIGH_MONITOR_NONE;
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingGC,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, BYTE *pbMemBlockStart, BYTE *pbMemBlockEnd, ptrdiff_t cbRelocDistance, void *pHeapId, BOOL fCompacting)
+ {
+ return profInterface->MovedReference(pbMemBlockStart, pbMemBlockEnd, cbRelocDistance, pHeapId, fCompacting);
+ },
+ pbMemBlockStart, pbMemBlockEnd, cbRelocDistance, pHeapId, fCompacting);
+}
+
+inline void ProfControlBlock::EndMovedReferences(void *pHeapId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingGC,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, void *pHeapId)
+ {
+ return profInterface->EndMovedReferences(pHeapId);
+ },
+ pHeapId);
+}
+
+inline void ProfControlBlock::RootReference2(BYTE *objectId, EtwGCRootKind dwEtwRootKind, EtwGCRootFlags dwEtwRootFlags, void *rootID, void *pHeapId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingGC,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, BYTE *objectId, EtwGCRootKind dwEtwRootKind, EtwGCRootFlags dwEtwRootFlags, void *rootID, void *pHeapId)
+ {
+ return profInterface->RootReference2(objectId, dwEtwRootKind, dwEtwRootFlags, rootID, pHeapId);
+ },
+ objectId, dwEtwRootKind, dwEtwRootFlags, rootID, pHeapId);
+}
+
+inline void ProfControlBlock::EndRootReferences2(void *pHeapId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingGC,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, void *pHeapId)
+ {
+ return profInterface->EndRootReferences2(pHeapId);
+ },
+ pHeapId);
+}
+
+inline void ProfControlBlock::ConditionalWeakTableElementReference(BYTE *primaryObjectId, BYTE *secondaryObjectId, void *rootID, void *pHeapId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingGC,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, BYTE *primaryObjectId, BYTE *secondaryObjectId, void *rootID, void *pHeapId)
+ {
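+ // ConditionalWeakTableElementReference is an ICorProfilerCallback5 method;
+ // quietly succeed for profilers that do not implement that interface.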
+ if (!profInterface->IsCallback5Supported())
+ {
+ return S_OK;
+ }
+
+ return profInterface->ConditionalWeakTableElementReference(primaryObjectId, secondaryObjectId, rootID, pHeapId);
+ },
+ primaryObjectId, secondaryObjectId, rootID, pHeapId);
+}
+
+inline void ProfControlBlock::EndConditionalWeakTableElementReferences(void *pHeapId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingGC,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, void *pHeapId)
+ {
+ if (!profInterface->IsCallback5Supported())
+ {
+ return S_OK;
+ }
+
+ return profInterface->EndConditionalWeakTableElementReferences(pHeapId);
+ },
+ pHeapId);
+}
+
+inline void ProfControlBlock::AllocByClass(ObjectID objId, ClassID classId, void *pHeapId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingGC,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ObjectID objId, ClassID classId, void *pHeapId)
+ {
+ return profInterface->AllocByClass(objId, classId, pHeapId);
+ },
+ objId, classId, pHeapId);
+}
+
+inline void ProfControlBlock::EndAllocByClass(void *pHeapId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingGC,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, void *pHeapId)
+ {
+ return profInterface->EndAllocByClass(pHeapId);
+ },
+ pHeapId);
+}
+
+inline HRESULT ProfControlBlock::ObjectReference(ObjectID objId, ClassID classId, ULONG cNumRefs, ObjectID *arrObjRef)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingGC,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, ObjectID objId, ClassID classId, ULONG cNumRefs, ObjectID *arrObjRef)
+ {
+ return profInterface->ObjectReference(objId, classId, cNumRefs, arrObjRef);
+ },
+ objId, classId, cNumRefs, arrObjRef);
+}
+
+inline void ProfControlBlock::HandleCreated(UINT_PTR handleId, ObjectID initialObjectId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingGC,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, UINT_PTR handleId, ObjectID initialObjectId)
+ {
+ return profInterface->HandleCreated(handleId, initialObjectId);
+ },
+ handleId, initialObjectId);
+}
+
+inline void ProfControlBlock::HandleDestroyed(UINT_PTR handleId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingGC,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, UINT_PTR handleId)
+ {
+ return profInterface->HandleDestroyed(handleId);
+ },
+ handleId);
+}
+
+
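+// COR_PRF_HIGH_BASIC_GC opts a profiler into just the GarbageCollectionStarted/
+// GarbageCollectionFinished notifications without the full COR_PRF_MONITOR_GC surface.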
+inline BOOL IsProfilerTrackingBasicGC(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return pProfilerInfo->eventMask.IsEventMaskHighSet(COR_PRF_HIGH_BASIC_GC);
+}
+
+inline BOOL IsProfilerTrackingMovedObjects(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return pProfilerInfo->eventMask.IsEventMaskHighSet(COR_PRF_HIGH_MONITOR_GC_MOVED_OBJECTS);
+}
+
+inline BOOL IsProfilerTrackingGCOrBasicGC(ProfilerInfo *pProfilerInfo)
+{
+ return IsProfilerTrackingGC(pProfilerInfo) || IsProfilerTrackingBasicGC(pProfilerInfo);
+}
+
+inline BOOL IsProfilerTrackingGCOrMovedObjects(ProfilerInfo *pProfilerInfo)
+{
+ return IsProfilerTrackingGC(pProfilerInfo) || IsProfilerTrackingMovedObjects(pProfilerInfo);
+}
+
+inline void ProfControlBlock::GarbageCollectionStarted(int cGenerations, BOOL generationCollected[], COR_PRF_GC_REASON reason)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingGCOrBasicGC,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, int cGenerations, BOOL generationCollected[], COR_PRF_GC_REASON reason)
+ {
+ return profInterface->GarbageCollectionStarted(cGenerations, generationCollected, reason);
+ },
+ cGenerations, generationCollected, reason);
+}
+
+inline void ProfControlBlock::GarbageCollectionFinished()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerTrackingGCOrBasicGC,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface)
+ {
+ return profInterface->GarbageCollectionFinished();
+ });
+}
+
+
+inline BOOL IsProfilerMonitoringEventPipe(ProfilerInfo *pProfilerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return pProfilerInfo->eventMask.IsEventMaskHighSet(COR_PRF_HIGH_MONITOR_EVENT_PIPE);
+}
+
+
+inline void ProfControlBlock::EventPipeEventDelivered(EventPipeProvider *provider,
+ DWORD eventId,
+ DWORD eventVersion,
+ ULONG cbMetadataBlob,
+ LPCBYTE metadataBlob,
+ ULONG cbEventData,
+ LPCBYTE eventData,
+ LPCGUID pActivityId,
+ LPCGUID pRelatedActivityId,
+ Thread *pEventThread,
+ ULONG numStackFrames,
+ UINT_PTR stackFrames[])
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerMonitoringEventPipe,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, EventPipeProvider *provider,
+ DWORD eventId,
+ DWORD eventVersion,
+ ULONG cbMetadataBlob,
+ LPCBYTE metadataBlob,
+ ULONG cbEventData,
+ LPCBYTE eventData,
+ LPCGUID pActivityId,
+ LPCGUID pRelatedActivityId,
+ Thread *pEventThread,
+ ULONG numStackFrames,
+ UINT_PTR stackFrames[])
+ {
+ return profInterface->EventPipeEventDelivered(provider,
+ eventId,
+ eventVersion,
+ cbMetadataBlob,
+ metadataBlob,
+ cbEventData,
+ eventData,
+ pActivityId,
+ pRelatedActivityId,
+ pEventThread,
+ numStackFrames,
+ stackFrames);
+ },
+ provider,
+ eventId,
+ eventVersion,
+ cbMetadataBlob,
+ metadataBlob,
+ cbEventData,
+ eventData,
+ pActivityId,
+ pRelatedActivityId,
+ pEventThread,
+ numStackFrames,
+ stackFrames);
+}
+
+inline void ProfControlBlock::EventPipeProviderCreated(EventPipeProvider *provider)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DoProfilerCallback(ProfilerCallbackType::Active,
+ IsProfilerMonitoringEventPipe,
+ (void *)NULL,
+ [](void *additionalData, VolatilePtr<EEToProfInterfaceImpl> profInterface, EventPipeProvider *provider)
+ {
+ return profInterface->EventPipeProviderCreated(provider);
+ },
+ provider);
}
//---------------------------------------------------------------------------------------
@@ -90,14 +1664,18 @@ inline void ProfControlBlock::ResetPerSessionStatus()
// and what features it enabled callbacks for.
//---------------------------------------------------------------------------------------
-
-// CORProfilerPresent() returns whether or not a CLR Profiler is actively loaded
-// (meaning it's initialized and ready to receive callbacks).
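+// CORProfilerPresent() returns whether or not any CLR Profiler is actively loaded
+// (meaning it's initialized and ready to receive callbacks).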
inline BOOL CORProfilerPresent()
{
LIMITED_METHOD_DAC_CONTRACT;
- return ((&g_profControlBlock)->curProfStatus.Get() == kProfStatusActive);
+ return AnyProfilerPassesCondition([](ProfilerInfo *pProfilerInfo) { return pProfilerInfo->curProfStatus.Get() >= kProfStatusActive; });
+}
+
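+// CORMainProfilerPresent() considers only the main profiler, i.e. the one permitted to
+// use features limited to a single profiler (such as ReJIT and the FunctionIDMapper);
+// notification-only profilers are ignored.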
+inline BOOL CORMainProfilerPresent()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (&g_profControlBlock)->mainProfilerInfo.curProfStatus.Get() >= kProfStatusActive;
}
// These return whether a CLR Profiler is actively loaded AND has requested the
@@ -113,10 +1691,10 @@ inline BOOL CORProfilerFunctionIDMapperEnabled()
}
CONTRACTL_END;
- return (CORProfilerPresent() &&
+ return (CORMainProfilerPresent() &&
(
- ((&g_profControlBlock)->pProfInterface->GetFunctionIDMapper() != NULL) ||
- ((&g_profControlBlock)->pProfInterface->GetFunctionIDMapper2() != NULL)
+ ((&g_profControlBlock)->mainProfilerInfo.pProfInterface->GetFunctionIDMapper() != NULL) ||
+ ((&g_profControlBlock)->mainProfilerInfo.pProfInterface->GetFunctionIDMapper2() != NULL)
));
}
@@ -131,7 +1709,7 @@ inline BOOL CORProfilerTrackJITInfo()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_JIT_COMPILATION));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_MONITOR_JIT_COMPILATION));
}
inline BOOL CORProfilerTrackCacheSearches()
@@ -145,7 +1723,7 @@ inline BOOL CORProfilerTrackCacheSearches()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_CACHE_SEARCHES));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_MONITOR_CACHE_SEARCHES));
}
inline BOOL CORProfilerTrackModuleLoads()
@@ -159,7 +1737,7 @@ inline BOOL CORProfilerTrackModuleLoads()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_MODULE_LOADS));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_MONITOR_MODULE_LOADS));
}
inline BOOL CORProfilerTrackAssemblyLoads()
@@ -173,7 +1751,7 @@ inline BOOL CORProfilerTrackAssemblyLoads()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_ASSEMBLY_LOADS));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_MONITOR_ASSEMBLY_LOADS));
}
inline BOOL CORProfilerTrackAppDomainLoads()
@@ -187,7 +1765,7 @@ inline BOOL CORProfilerTrackAppDomainLoads()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_APPDOMAIN_LOADS));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_MONITOR_APPDOMAIN_LOADS));
}
inline BOOL CORProfilerTrackThreads()
@@ -201,7 +1779,7 @@ inline BOOL CORProfilerTrackThreads()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_THREADS));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_MONITOR_THREADS));
}
inline BOOL CORProfilerTrackClasses()
@@ -215,7 +1793,7 @@ inline BOOL CORProfilerTrackClasses()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_CLASS_LOADS));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_MONITOR_CLASS_LOADS));
}
inline BOOL CORProfilerTrackGC()
@@ -229,7 +1807,7 @@ inline BOOL CORProfilerTrackGC()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_GC));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_MONITOR_GC));
}
inline BOOL CORProfilerTrackAllocationsEnabled()
@@ -248,7 +1826,7 @@ inline BOOL CORProfilerTrackAllocationsEnabled()
(&g_profControlBlock)->fTestOnlyForceObjectAllocated ||
#endif
(CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_ENABLE_OBJECT_ALLOCATED))
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_ENABLE_OBJECT_ALLOCATED))
);
}
@@ -264,7 +1842,7 @@ inline BOOL CORProfilerTrackAllocations()
return
(CORProfilerTrackAllocationsEnabled() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_OBJECT_ALLOCATED));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_MONITOR_OBJECT_ALLOCATED));
}
inline BOOL CORProfilerTrackLargeAllocations()
@@ -279,7 +1857,7 @@ inline BOOL CORProfilerTrackLargeAllocations()
return
(CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMaskHigh & COR_PRF_HIGH_MONITOR_LARGEOBJECT_ALLOCATED));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskHighSet(COR_PRF_HIGH_MONITOR_LARGEOBJECT_ALLOCATED));
}
inline BOOL CORProfilerEnableRejit()
@@ -292,8 +1870,8 @@ inline BOOL CORProfilerEnableRejit()
}
CONTRACTL_END;
- return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_ENABLE_REJIT));
+ return (CORMainProfilerPresent() &&
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_ENABLE_REJIT));
}
inline BOOL CORProfilerTrackExceptions()
@@ -307,7 +1885,7 @@ inline BOOL CORProfilerTrackExceptions()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_EXCEPTIONS));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_MONITOR_EXCEPTIONS));
}
inline BOOL CORProfilerTrackTransitions()
@@ -321,7 +1899,7 @@ inline BOOL CORProfilerTrackTransitions()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_CODE_TRANSITIONS));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_MONITOR_CODE_TRANSITIONS));
}
inline BOOL CORProfilerTrackEnterLeave()
@@ -340,7 +1918,7 @@ inline BOOL CORProfilerTrackEnterLeave()
#endif // PROF_TEST_ONLY_FORCE_ELT
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_ENTERLEAVE));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_MONITOR_ENTERLEAVE));
}
inline BOOL CORProfilerTrackCCW()
@@ -354,53 +1932,7 @@ inline BOOL CORProfilerTrackCCW()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_CCW));
-}
-
-inline BOOL CORProfilerTrackRemoting()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- CANNOT_TAKE_LOCK;
- }
- CONTRACTL_END;
-
- return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_REMOTING));
-}
-
-inline BOOL CORProfilerTrackRemotingCookie()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- CANNOT_TAKE_LOCK;
- }
- CONTRACTL_END;
-
- return
- (CORProfilerPresent() &&
- (((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_REMOTING_COOKIE)
- == COR_PRF_MONITOR_REMOTING_COOKIE));
-}
-
-inline BOOL CORProfilerTrackRemotingAsync()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- CANNOT_TAKE_LOCK;
- }
- CONTRACTL_END;
-
- return
- (CORProfilerPresent() &&
- (((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_REMOTING_ASYNC)
- == COR_PRF_MONITOR_REMOTING_ASYNC));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_MONITOR_CCW));
}
inline BOOL CORProfilerTrackSuspends()
@@ -414,7 +1946,7 @@ inline BOOL CORProfilerTrackSuspends()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_MONITOR_SUSPENDS));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_MONITOR_SUSPENDS));
}
inline BOOL CORProfilerDisableInlining()
@@ -428,21 +1960,7 @@ inline BOOL CORProfilerDisableInlining()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_DISABLE_INLINING));
-}
-
-inline BOOL CORProfilerJITMapEnabled()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- CANNOT_TAKE_LOCK;
- }
- CONTRACTL_END;
-
- return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_ENABLE_JIT_MAPS));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_DISABLE_INLINING));
}
inline BOOL CORProfilerDisableOptimizations()
@@ -457,7 +1975,7 @@ inline BOOL CORProfilerDisableOptimizations()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_DISABLE_OPTIMIZATIONS));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_DISABLE_OPTIMIZATIONS));
}
inline BOOL CORProfilerUseProfileImages()
@@ -475,14 +1993,8 @@ inline BOOL CORProfilerUseProfileImages()
return TRUE;
#endif // PROF_TEST_ONLY_FORCE_ELT
- if (!CORProfilerPresent())
- return FALSE;
-
- if (((&g_profControlBlock)->dwEventMask &
- COR_PRF_REQUIRE_PROFILE_IMAGE) == 0)
- return FALSE;
-
- return TRUE;
+ return (CORProfilerPresent() &&
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_REQUIRE_PROFILE_IMAGE));
}
inline BOOL CORProfilerDisableAllNGenImages()
@@ -490,14 +2002,14 @@ inline BOOL CORProfilerDisableAllNGenImages()
LIMITED_METHOD_DAC_CONTRACT;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_DISABLE_ALL_NGEN_IMAGES));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_DISABLE_ALL_NGEN_IMAGES));
}
inline BOOL CORProfilerTrackConditionalWeakTableElements()
{
LIMITED_METHOD_DAC_CONTRACT;
- return CORProfilerTrackGC() && (&g_profControlBlock)->pProfInterface->IsCallback5Supported();
+ return CORProfilerTrackGC() && (&g_profControlBlock)->IsCallback5Supported();
}
// CORProfilerPresentOrInitializing() returns nonzero iff a CLR Profiler is actively
@@ -508,8 +2020,8 @@ inline BOOL CORProfilerTrackConditionalWeakTableElements()
// that may still be initializing, and this function is appropriate for that code.
inline BOOL CORProfilerPresentOrInitializing()
{
- LIMITED_METHOD_DAC_CONTRACT;
- return ((&g_profControlBlock)->curProfStatus.Get() > kProfStatusDetaching);
+ LIMITED_METHOD_CONTRACT;
+ return AnyProfilerPassesCondition([](ProfilerInfo *pProfilerInfo) { return pProfilerInfo->curProfStatus.Get() > kProfStatusDetaching; });
}
// These return whether a CLR Profiler has requested the specified functionality.
@@ -531,7 +2043,7 @@ inline BOOL CORProfilerELT3SlowPathEnabled()
CONTRACTL_END;
return (CORProfilerPresentOrInitializing() &&
- ((&g_profControlBlock)->dwEventMask & (COR_PRF_ENABLE_FUNCTION_ARGS | COR_PRF_ENABLE_FUNCTION_RETVAL | COR_PRF_ENABLE_FRAME_INFO)));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet((COR_PRF_ENABLE_FUNCTION_ARGS | COR_PRF_ENABLE_FUNCTION_RETVAL | COR_PRF_ENABLE_FRAME_INFO)));
}
inline BOOL CORProfilerELT3SlowPathEnterEnabled()
@@ -545,7 +2057,7 @@ inline BOOL CORProfilerELT3SlowPathEnterEnabled()
CONTRACTL_END;
return (CORProfilerPresentOrInitializing() &&
- ((&g_profControlBlock)->dwEventMask & (COR_PRF_ENABLE_FUNCTION_ARGS | COR_PRF_ENABLE_FRAME_INFO)));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet((COR_PRF_ENABLE_FUNCTION_ARGS | COR_PRF_ENABLE_FRAME_INFO)));
}
inline BOOL CORProfilerELT3SlowPathLeaveEnabled()
@@ -559,7 +2071,7 @@ inline BOOL CORProfilerELT3SlowPathLeaveEnabled()
CONTRACTL_END;
return (CORProfilerPresentOrInitializing() &&
- ((&g_profControlBlock)->dwEventMask & (COR_PRF_ENABLE_FUNCTION_RETVAL | COR_PRF_ENABLE_FRAME_INFO)));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet((COR_PRF_ENABLE_FUNCTION_RETVAL | COR_PRF_ENABLE_FRAME_INFO)));
}
inline BOOL CORProfilerELT3SlowPathTailcallEnabled()
@@ -573,7 +2085,7 @@ inline BOOL CORProfilerELT3SlowPathTailcallEnabled()
CONTRACTL_END;
return (CORProfilerPresentOrInitializing() &&
- ((&g_profControlBlock)->dwEventMask & (COR_PRF_ENABLE_FRAME_INFO)));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet((COR_PRF_ENABLE_FRAME_INFO)));
}
inline BOOL CORProfilerELT2FastPathEnterEnabled()
@@ -587,7 +2099,7 @@ inline BOOL CORProfilerELT2FastPathEnterEnabled()
CONTRACTL_END;
return (CORProfilerPresentOrInitializing() &&
- (!((&g_profControlBlock)->dwEventMask & (COR_PRF_ENABLE_STACK_SNAPSHOT | COR_PRF_ENABLE_FUNCTION_ARGS | COR_PRF_ENABLE_FRAME_INFO))));
+ !((&g_profControlBlock)->globalEventMask.IsEventMaskSet((COR_PRF_ENABLE_STACK_SNAPSHOT | COR_PRF_ENABLE_FUNCTION_ARGS | COR_PRF_ENABLE_FRAME_INFO))));
}
inline BOOL CORProfilerELT2FastPathLeaveEnabled()
@@ -601,7 +2113,7 @@ inline BOOL CORProfilerELT2FastPathLeaveEnabled()
CONTRACTL_END;
return (CORProfilerPresentOrInitializing() &&
- (!((&g_profControlBlock)->dwEventMask & (COR_PRF_ENABLE_STACK_SNAPSHOT | COR_PRF_ENABLE_FUNCTION_RETVAL | COR_PRF_ENABLE_FRAME_INFO))));
+ !((&g_profControlBlock)->globalEventMask.IsEventMaskSet((COR_PRF_ENABLE_STACK_SNAPSHOT | COR_PRF_ENABLE_FUNCTION_RETVAL | COR_PRF_ENABLE_FRAME_INFO))));
}
inline BOOL CORProfilerELT2FastPathTailcallEnabled()
@@ -615,7 +2127,7 @@ inline BOOL CORProfilerELT2FastPathTailcallEnabled()
CONTRACTL_END;
return (CORProfilerPresentOrInitializing() &&
- (!((&g_profControlBlock)->dwEventMask & (COR_PRF_ENABLE_STACK_SNAPSHOT | COR_PRF_ENABLE_FRAME_INFO))));
+ !((&g_profControlBlock)->globalEventMask.IsEventMaskSet((COR_PRF_ENABLE_STACK_SNAPSHOT | COR_PRF_ENABLE_FRAME_INFO))));
}
inline BOOL CORProfilerFunctionArgsEnabled()
@@ -629,7 +2141,7 @@ inline BOOL CORProfilerFunctionArgsEnabled()
CONTRACTL_END;
return (CORProfilerPresentOrInitializing() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_ENABLE_FUNCTION_ARGS));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_ENABLE_FUNCTION_ARGS));
}
inline BOOL CORProfilerFunctionReturnValueEnabled()
@@ -643,7 +2155,7 @@ inline BOOL CORProfilerFunctionReturnValueEnabled()
CONTRACTL_END;
return (CORProfilerPresentOrInitializing() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_ENABLE_FUNCTION_RETVAL));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_ENABLE_FUNCTION_RETVAL));
}
inline BOOL CORProfilerFrameInfoEnabled()
@@ -657,7 +2169,7 @@ inline BOOL CORProfilerFrameInfoEnabled()
CONTRACTL_END;
return (CORProfilerPresentOrInitializing() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_ENABLE_FRAME_INFO));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_ENABLE_FRAME_INFO));
}
inline BOOL CORProfilerStackSnapshotEnabled()
@@ -671,21 +2183,7 @@ inline BOOL CORProfilerStackSnapshotEnabled()
CONTRACTL_END;
return (CORProfilerPresentOrInitializing() &&
- ((&g_profControlBlock)->dwEventMask & COR_PRF_ENABLE_STACK_SNAPSHOT));
-}
-
-inline BOOL CORProfilerAddsAssemblyReferences()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- CANNOT_TAKE_LOCK;
- }
- CONTRACTL_END;
-
- return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMaskHigh & COR_PRF_HIGH_ADD_ASSEMBLY_REFERENCES));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskSet(COR_PRF_ENABLE_STACK_SNAPSHOT));
}
inline BOOL CORProfilerInMemorySymbolsUpdatesEnabled()
@@ -699,10 +2197,10 @@ inline BOOL CORProfilerInMemorySymbolsUpdatesEnabled()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMaskHigh & COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskHighSet(COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED));
}
-inline BOOL CORProfilerIsMonitoringDynamicFunctionUnloads()
+inline BOOL CORProfilerTrackDynamicFunctionUnloads()
{
CONTRACTL
{
@@ -713,7 +2211,7 @@ inline BOOL CORProfilerIsMonitoringDynamicFunctionUnloads()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMaskHigh & COR_PRF_HIGH_MONITOR_DYNAMIC_FUNCTION_UNLOADS));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskHighSet(COR_PRF_HIGH_MONITOR_DYNAMIC_FUNCTION_UNLOADS));
}
inline BOOL CORProfilerDisableTieredCompilation()
@@ -728,7 +2226,7 @@ inline BOOL CORProfilerDisableTieredCompilation()
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMaskHigh & COR_PRF_HIGH_DISABLE_TIERED_COMPILATION));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskHighSet(COR_PRF_HIGH_DISABLE_TIERED_COMPILATION));
}
inline BOOL CORProfilerTrackBasicGC()
@@ -742,7 +2240,7 @@ inline BOOL CORProfilerTrackBasicGC()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMaskHigh & COR_PRF_HIGH_BASIC_GC));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskHighSet(COR_PRF_HIGH_BASIC_GC));
}
inline BOOL CORProfilerTrackGCMovedObjects()
@@ -756,10 +2254,10 @@ inline BOOL CORProfilerTrackGCMovedObjects()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMaskHigh & COR_PRF_HIGH_MONITOR_GC_MOVED_OBJECTS));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskHighSet(COR_PRF_HIGH_MONITOR_GC_MOVED_OBJECTS));
}
-inline BOOL CORProfilerIsMonitoringEventPipe()
+inline BOOL CORProfilerTrackEventPipe()
{
CONTRACTL
{
@@ -770,55 +2268,38 @@ inline BOOL CORProfilerIsMonitoringEventPipe()
CONTRACTL_END;
return (CORProfilerPresent() &&
- ((&g_profControlBlock)->dwEventMaskHigh & COR_PRF_HIGH_MONITOR_EVENT_PIPE));
+ (&g_profControlBlock)->globalEventMask.IsEventMaskHighSet(COR_PRF_HIGH_MONITOR_EVENT_PIPE));
}
#if defined(PROFILING_SUPPORTED) && !defined(CROSSGEN_COMPILE)
-#if defined(FEATURE_PROFAPI_ATTACH_DETACH)
-
-//---------------------------------------------------------------------------------------
-// When EE calls into the profiler, an EvacuationCounterHolder object is instantiated on
-// the stack to increment the evacuation counter inside the EE Thread. Upon returning to
-// EE, this EvacuationCounterHolder object when being destroyed decreases the evacuation
-// counter by one.
-//
-// Do not use this object directly. Use BEGIN_PIN_PROFILER / END_PIN_PROFILER defined
-// below.
-//
-// See code:ProfilingAPIUtility::InitializeProfiling#LoadUnloadCallbackSynchronization.
-//
-typedef Wrapper> EvacuationCounterHolder;
-
-
//---------------------------------------------------------------------------------------
-// These macros must be placed around any access to g_profControlBlock.pProfInterface by
+// These macros must be placed around any callbacks to g_profControlBlock by
// the EE. Example:
// {
-// BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
-// g_profControlBlock.pProfInterface->AppDomainCreationStarted(MyAppDomainID);
-// END_PIN_PROFILER();
+// BEGIN_PROFILER_CALLBACK(CORProfilerTrackAppDomainLoads());
+// g_profControlBlock.AppDomainCreationStarted(MyAppDomainID);
+// END_PROFILER_CALLBACK();
// }
-// The parameter to the BEGIN_PIN_PROFILER is the condition you want to check for, to
+// The parameter to the BEGIN_PROFILER_CALLBACK is the condition you want to check for, to
// determine whether the profiler is loaded and requesting the callback you're about to
// issue. Typically, this will be a call to one of the inline functions in
// profilepriv.inl. If the condition is true, the macro will increment an evacuation
// counter that effectively pins the profiler, recheck the condition, and (if still
-// true), execute whatever code you place inside the BEGIN/END_PIN_PROFILER block. If
+// true), execute whatever code you place inside the BEGIN/END_PROFILER_CALLBACK block. If
// your condition is more complex than a simple profiler status check, then place the
// profiler status check as parameter to the macro, and add a separate if inside the
// block. Example:
//
// {
-// BEGIN_PIN_PROFILER(CORProfilerTrackTransitions());
+// BEGIN_PROFILER_CALLBACK(CORProfilerTrackTransitions());
// if (!pNSL->pMD->IsQCall())
// {
-// g_profControlBlock.pProfInterface->
+// g_profControlBlock.
// ManagedToUnmanagedTransition((FunctionID) pNSL->pMD,
// COR_PRF_TRANSITION_CALL);
// }
-// END_PIN_PROFILER();
+// END_PROFILER_CALLBACK();
// }
//
// This ensures that the extra condition check (in this case "if
// (!pNSL->pMD->IsQCall())") is only evaluated when a profiler is loaded and
// requesting the callback, so no extra work is done in the common case.
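For orientation, here is a purely illustrative sketch of what a guard pair with the documented semantics could expand to. These are hypothetical macro bodies, not the actual definitions (which live in the profiler headers and, after this change, must account for multiple registered profilers, per AnyProfilerPassesCondition above):

```cpp
// Illustrative only -- NOT the real BEGIN/END_PROFILER_CALLBACK definitions.
// Mirrors the documented behavior: test the condition, pin the profiler by
// bumping an evacuation counter (the RAII holder here is hypothetical),
// re-test, then run the callback body placed between the two macros.
#define BEGIN_PROFILER_CALLBACK(condition)                          \
    {                                                               \
        if (condition)                                              \
        {                                                           \
            EvacuationCounterHolder __pinProfiler; /* hypothetical */ \
            if (condition)                                          \
            {

#define END_PROFILER_CALLBACK()                                     \
            }                                                       \
        }                                                           \
    }
```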
@@ -827,35 +2308,19 @@ typedef Wrapper::Delete(const Iterator &i, COUNT_T cou
}
template <typename ELEMENT, BOOL BITWISE_COPY>
-inline void SArray<ELEMENT, BITWISE_COPY>:: Replace(const Iterator &i, COUNT_T deleteCount, COUNT_T insertCount)
+inline void SArray<ELEMENT, BITWISE_COPY>::Replace(const Iterator &i, COUNT_T deleteCount, COUNT_T insertCount)
{
WRAPPER_NO_CONTRACT;
DestructBuffer(i, deleteCount);
diff --git a/src/coreclr/inc/stresslog.h b/src/coreclr/inc/stresslog.h
index 8f1f81002b5abc..508bc91edb72a0 100644
--- a/src/coreclr/inc/stresslog.h
+++ b/src/coreclr/inc/stresslog.h
@@ -787,131 +787,7 @@ class ThreadStressLog {
}
#endif //STRESS_LOG_READONLY
- static const char* gcStartMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "{ =========== BEGINGC %d, (requested generation = %lu, collect_classes = %lu) ==========\n";
- }
-
- static const char* gcEndMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "========== ENDGC %d (gen = %lu, collect_classes = %lu) ===========}\n";
- }
-
- static const char* gcRootMsg()
- {
- STATIC_CONTRACT_LEAF;
- return " GC Root %p RELOCATED %p -> %p MT = %pT\n";
- }
-
- static const char* gcRootPromoteMsg()
- {
- STATIC_CONTRACT_LEAF;
- return " IGCHeap::Promote: Promote GC Root *%p = %p MT = %pT\n";
- }
-
- static const char* gcPlugMoveMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "GC_HEAP RELOCATING Objects in heap within range [%p %p) by -0x%x bytes\n";
- }
-
- static const char* gcServerThread0StartMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "%d gc thread waiting...";
- }
-
- static const char* gcServerThreadNStartMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "%d gc thread waiting... Done";
- }
-
- static const char* gcDetailedStartMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "*GC* %d(gen0:%d)(%d)(alloc: %Id)(%s)(%d)";
- }
-
- static const char* gcDetailedEndMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "*EGC* %Id(gen0:%Id)(%Id)(%d)(%s)(%s)(%s)(ml: %d->%d)";
- }
-
- static const char* gcStartMarkMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "---- Mark Phase on heap %d condemning %d ----";
- }
-
- static const char* gcStartPlanMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "---- Plan Phase on heap %d ---- Condemned generation %d, promotion: %d";
- }
-
- static const char* gcStartRelocateMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "---- Relocate phase on heap %d -----";
- }
-
- static const char* gcEndRelocateMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "---- End of Relocate phase on heap %d ----";
- }
-
- static const char* gcStartCompactMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "---- Compact Phase on heap %d: %Ix(%Ix)----";
- }
-
- static const char* gcEndCompactMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "---- End of Compact phase on heap %d ----";
- }
-
- static const char* gcMemCopyMsg()
- {
- STATIC_CONTRACT_LEAF;
- return " mc: [%Ix->%Ix, %Ix->%Ix[";
- }
-
- static const char* gcPlanPlugMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "(%Ix)[%Ix->%Ix, NA: [%Ix(%Id), %Ix[: %Ix(%d), x: %Ix (%s)";
- }
-
- static const char* gcPlanPinnedPlugMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "(%Ix)PP: [%Ix, %Ix[%Ix](m:%d)";
- }
-
- static const char* gcDesiredNewAllocationMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "h%d g%d surv: %Id current: %Id alloc: %Id (%d%%) f: %d%% new-size: %Id new-alloc: %Id";
- }
-
- static const char* gcMakeUnusedArrayMsg()
- {
- STATIC_CONTRACT_LEAF;
- return "Making unused array [%Ix, %Ix[";
- }
-
- static const char* gcStartBgcThread()
- {
- STATIC_CONTRACT_LEAF;
- return "beginning of bgc on heap %d: gen2 FL: %d, FO: %d, frag: %d";
- }
+ #include "gcmsg.inl"
static const char* TaskSwitchMsg()
{
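The twenty-odd deleted accessors above all share one three-line shape; presumably gcmsg.inl now supplies the same bodies from a single shared file. A representative entry, reconstructed from the removed code (the shared file's exact contents may differ):

```cpp
// One accessor in the pre-move shape; gcmsg.inl is expected to carry the
// equivalent definitions so they are no longer duplicated in this header.
static const char* gcStartMsg()
{
    STATIC_CONTRACT_LEAF;
    return "{ =========== BEGINGC %d, (requested generation = %lu, collect_classes = %lu) ==========\n";
}
```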
diff --git a/src/coreclr/inc/switches.h b/src/coreclr/inc/switches.h
index 322a71ea11f440..d0ccfad62797c0 100644
--- a/src/coreclr/inc/switches.h
+++ b/src/coreclr/inc/switches.h
@@ -53,7 +53,7 @@
#if defined(TARGET_X86) || defined(TARGET_ARM)
#define USE_UPPER_ADDRESS 0
-#elif defined(TARGET_AMD64) || defined(TARGET_ARM64)
+#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X)
#define UPPER_ADDRESS_MAPPING_FACTOR 2
#define CLR_UPPER_ADDRESS_MIN 0x64400000000
#define CODEHEAP_START_ADDRESS 0x64480000000
diff --git a/src/coreclr/inc/volatile.h b/src/coreclr/inc/volatile.h
index f5bb1f09f063d1..610a408f39fb6e 100644
--- a/src/coreclr/inc/volatile.h
+++ b/src/coreclr/inc/volatile.h
@@ -68,8 +68,8 @@
#error The Volatile type is currently only defined for Visual C++ and GNU C++
#endif
-#if defined(__GNUC__) && !defined(HOST_X86) && !defined(HOST_AMD64) && !defined(HOST_ARM) && !defined(HOST_ARM64)
-#error The Volatile type is currently only defined for GCC when targeting x86, AMD64, ARM or ARM64 CPUs
+#if defined(__GNUC__) && !defined(HOST_X86) && !defined(HOST_AMD64) && !defined(HOST_ARM) && !defined(HOST_ARM64) && !defined(HOST_S390X)
+#error The Volatile type is currently only defined for GCC when targeting x86, AMD64, ARM, ARM64, or S390X CPUs
#endif
#if defined(__GNUC__)
diff --git a/src/coreclr/jit/CMakeLists.txt b/src/coreclr/jit/CMakeLists.txt
index cd78586ed0db7d..f06c011e5b9639 100644
--- a/src/coreclr/jit/CMakeLists.txt
+++ b/src/coreclr/jit/CMakeLists.txt
@@ -31,6 +31,8 @@ function(create_standalone_jit)
set(JIT_ARCH_SOURCES ${JIT_I386_SOURCES})
elseif(TARGETDETAILS_ARCH STREQUAL "arm64")
set(JIT_ARCH_SOURCES ${JIT_ARM64_SOURCES})
+ elseif(TARGETDETAILS_ARCH STREQUAL "s390x")
+ set(JIT_ARCH_SOURCES ${JIT_S390X_SOURCES})
else()
clr_unknown_arch()
endif()
@@ -345,6 +347,10 @@ set( JIT_ARM64_SOURCES
hwintrinsiccodegenarm64.cpp
)
+set( JIT_S390X_SOURCES
+ # Not supported as JIT target
+)
+
if(CLR_CMAKE_TARGET_ARCH_AMD64)
set(JIT_ARCH_SOURCES ${JIT_AMD64_SOURCES})
elseif(CLR_CMAKE_TARGET_ARCH_ARM)
@@ -353,6 +359,8 @@ elseif(CLR_CMAKE_TARGET_ARCH_I386)
set(JIT_ARCH_SOURCES ${JIT_I386_SOURCES})
elseif(CLR_CMAKE_TARGET_ARCH_ARM64)
set(JIT_ARCH_SOURCES ${JIT_ARM64_SOURCES})
+elseif(CLR_CMAKE_TARGET_ARCH_S390X)
+ set(JIT_ARCH_SOURCES ${JIT_S390X_SOURCES})
else()
clr_unknown_arch()
endif()
@@ -508,8 +516,14 @@ target_compile_definitions(clrjit_unix_arm_${ARCH_HOST_NAME} PRIVATE ARM_SOFTFP
create_standalone_jit(TARGET clrjit_win_arm_${ARCH_HOST_NAME} OS win ARCH arm DESTINATIONS .)
create_standalone_jit(TARGET clrjit_win_x86_${ARCH_HOST_NAME} OS win ARCH x86 DESTINATIONS .)
+if (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX)
+ create_standalone_jit(TARGET clrjit_unix_x86_${ARCH_HOST_NAME} OS unix ARCH x86 DESTINATIONS .)
+endif (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX)
+
if (CLR_CMAKE_TARGET_UNIX)
- install_clr(TARGETS clrjit_unix_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME} DESTINATIONS . COMPONENT jit)
+ if (NOT ARCH_TARGET_NAME STREQUAL s390x)
+ install_clr(TARGETS clrjit_unix_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME} DESTINATIONS . COMPONENT jit)
+ endif(NOT ARCH_TARGET_NAME STREQUAL s390x)
if (ARCH_TARGET_NAME STREQUAL arm)
target_compile_definitions(clrjit_unix_arm_${ARCH_HOST_NAME} PRIVATE ARM_SOFTFP CONFIGURABLE_ARM_ABI)
endif (ARCH_TARGET_NAME STREQUAL arm)
diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp
index 82a578f6f754b4..981879142bf265 100644
--- a/src/coreclr/jit/assertionprop.cpp
+++ b/src/coreclr/jit/assertionprop.cpp
@@ -4818,15 +4818,15 @@ ASSERT_TP* Compiler::optComputeAssertionGen()
{
ASSERT_TP* jumpDestGen = fgAllocateTypeForEachBlk();
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
ASSERT_TP valueGen = BitVecOps::MakeEmpty(apTraits);
GenTree* jtrue = nullptr;
// Walk the statement trees in this basic block.
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (tree->gtOper == GT_JTRUE)
{
@@ -4930,7 +4930,7 @@ ASSERT_TP* Compiler::optInitAssertionDataflowFlags()
// Initially estimate the OUT sets to everything except killed expressions
// Also set the IN sets to 1, so that we can perform the intersection.
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbAssertionIn = BitVecOps::MakeCopy(apTraits, apValidFull);
block->bbAssertionGen = BitVecOps::MakeEmpty(apTraits);
@@ -5338,7 +5338,7 @@ void Compiler::optAssertionPropMain()
noway_assert(optAssertionCount == 0);
// First discover all value assignments and record them in the table.
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
compCurBB = block;
@@ -5375,7 +5375,7 @@ void Compiler::optAssertionPropMain()
}
// Perform assertion gen for control flow based assertions.
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
optAssertionGen(tree);
}
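The conversions in this file all follow one pattern: explicit pointer-chasing loops become the new range-based helpers. A minimal sketch of the resulting idiom, assuming it runs inside a Compiler member function so Blocks() is in scope:

```cpp
// Walk every GenTree node in the method using the new iterator adapters.
for (BasicBlock* const block : Blocks()) // Compiler::Blocks()
{
    for (Statement* const stmt : block->Statements())
    {
        for (GenTree* const tree : stmt->TreeList())
        {
            // Visit each node in linear order, e.g. optAssertionGen(tree);
        }
    }
}
```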
@@ -5390,7 +5390,7 @@ void Compiler::optAssertionPropMain()
// Zero out the bbAssertionIn values, as these can be referenced in RangeCheck::MergeAssertion
// and this is sharedstate with the CSE phase: bbCseIn
//
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbAssertionIn = BitVecOps::MakeEmpty(apTraits);
}
@@ -5410,7 +5410,7 @@ void Compiler::optAssertionPropMain()
AssertionPropFlowCallback ap(this, bbJtrueAssertionOut, jumpDestGen);
flow.ForwardAnalysis(ap);
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Compute any implied non-Null assertions for block->bbAssertionIn
optImpliedByTypeOfAssertions(block->bbAssertionIn);
@@ -5420,7 +5420,7 @@ void Compiler::optAssertionPropMain()
if (verbose)
{
printf("\n");
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
printf("\n" FMT_BB, block->bbNum);
printf(" valueIn = %s", BitVecOps::ToString(apTraits, block->bbAssertionIn));
@@ -5438,7 +5438,7 @@ void Compiler::optAssertionPropMain()
ASSERT_TP assertions = BitVecOps::MakeEmpty(apTraits);
// Perform assertion propagation (and constant folding)
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
BitVecOps::Assign(apTraits, assertions, block->bbAssertionIn);
diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp
index fed7b59d329e75..128c4e033d9fa5 100644
--- a/src/coreclr/jit/block.cpp
+++ b/src/coreclr/jit/block.cpp
@@ -163,10 +163,9 @@ flowList* Compiler::BlockPredsWithEH(BasicBlock* blk)
// Find the first block of the try.
EHblkDsc* ehblk = ehGetDsc(tryIndex);
BasicBlock* tryStart = ehblk->ebdTryBeg;
- for (flowList* tryStartPreds = tryStart->bbPreds; tryStartPreds != nullptr;
- tryStartPreds = tryStartPreds->flNext)
+ for (BasicBlock* const tryStartPredBlock : tryStart->PredBlocks())
{
- res = new (this, CMK_FlowList) flowList(tryStartPreds->getBlock(), res);
+ res = new (this, CMK_FlowList) flowList(tryStartPredBlock, res);
#if MEASURE_BLOCK_SIZE
genFlowNodeCnt += 1;
@@ -182,7 +181,7 @@ flowList* Compiler::BlockPredsWithEH(BasicBlock* blk)
// (plus adding in any filter blocks outside the try whose exceptions are handled here).
// That doesn't work, however: funclets have caused us to sometimes split the body of a try into
// more than one sequence of contiguous blocks. We need to find a better way to do this.
- for (BasicBlock* bb = fgFirstBB; bb != nullptr; bb = bb->bbNext)
+ for (BasicBlock* const bb : Blocks())
{
if (bbInExnFlowRegions(tryIndex, bb) && !bb->isBBCallAlwaysPairTail())
{
@@ -217,9 +216,9 @@ flowList* Compiler::BlockPredsWithEH(BasicBlock* blk)
bool BasicBlock::checkPredListOrder()
{
unsigned lastBBNum = 0;
- for (flowList* pred = bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : PredBlocks())
{
- const unsigned bbNum = pred->getBlock()->bbNum;
+ const unsigned bbNum = predBlock->bbNum;
if (bbNum <= lastBBNum)
{
assert(bbNum != lastBBNum);
@@ -261,7 +260,7 @@ void BasicBlock::reorderPredList(Compiler* compiler)
// Count number of entries.
//
int count = 0;
- for (flowList* pred = bbPreds; pred != nullptr; pred = pred->flNext)
+ for (flowList* const pred : PredEdges())
{
count++;
}
@@ -286,7 +285,7 @@ void BasicBlock::reorderPredList(Compiler* compiler)
// Fill in the vector from the list.
//
- for (flowList* pred = bbPreds; pred != nullptr; pred = pred->flNext)
+ for (flowList* const pred : PredEdges())
{
sortVector->push_back(pred);
}
@@ -516,7 +515,7 @@ void BasicBlock::dspFlags()
unsigned BasicBlock::dspPreds()
{
unsigned count = 0;
- for (flowList* pred = bbPreds; pred != nullptr; pred = pred->flNext)
+ for (flowList* const pred : PredEdges())
{
if (count != 0)
{
@@ -575,20 +574,16 @@ unsigned BasicBlock::dspCheapPreds()
/*****************************************************************************
*
* Display the basic block successors.
- * Returns the count of successors.
*/
-unsigned BasicBlock::dspSuccs(Compiler* compiler)
+void BasicBlock::dspSuccs(Compiler* compiler)
{
- unsigned numSuccs = NumSucc(compiler);
- unsigned count = 0;
- for (unsigned i = 0; i < numSuccs; i++)
+ bool first = true;
+ for (BasicBlock* const succ : Succs(compiler))
{
- printf("%s", (count == 0) ? "" : ",");
- printf(FMT_BB, GetSucc(i, compiler)->bbNum);
- count++;
+ printf("%s" FMT_BB, first ? "" : ",", succ->bbNum);
+ first = false;
}
- return count;
}
// Display a compact representation of the bbJumpKind, that is, where this block branches.
@@ -773,7 +768,7 @@ bool BasicBlock::CloneBlockState(
to->bbTgtStkDepth = from->bbTgtStkDepth;
#endif // DEBUG
- for (Statement* fromStmt : from->Statements())
+ for (Statement* const fromStmt : from->Statements())
{
auto newExpr = compiler->gtCloneExpr(fromStmt->GetRootNode(), GTF_EMPTY, varNum, varVal);
if (!newExpr)
@@ -800,7 +795,7 @@ void BasicBlock::MakeLIR(GenTree* firstNode, GenTree* lastNode)
bbFlags |= BBF_IS_LIR;
}
-bool BasicBlock::IsLIR()
+bool BasicBlock::IsLIR() const
{
assert(isValid());
const bool isLIR = ((bbFlags & BBF_IS_LIR) != 0);
@@ -845,7 +840,7 @@ Statement* BasicBlock::lastStmt() const
//------------------------------------------------------------------------
// BasicBlock::firstNode: Returns the first node in the block.
//
-GenTree* BasicBlock::firstNode()
+GenTree* BasicBlock::firstNode() const
{
return IsLIR() ? GetFirstLIRNode() : Compiler::fgGetFirstNode(firstStmt()->GetRootNode());
}
@@ -853,7 +848,7 @@ GenTree* BasicBlock::firstNode()
//------------------------------------------------------------------------
// BasicBlock::lastNode: Returns the last node in the block.
//
-GenTree* BasicBlock::lastNode()
+GenTree* BasicBlock::lastNode() const
{
return IsLIR() ? m_lastNode : lastStmt()->GetRootNode();
}
@@ -873,7 +868,7 @@ GenTree* BasicBlock::lastNode()
// a backedge), we never want to consider it "unique" because the prolog is an
// implicit predecessor.
-BasicBlock* BasicBlock::GetUniquePred(Compiler* compiler)
+BasicBlock* BasicBlock::GetUniquePred(Compiler* compiler) const
{
if ((bbPreds == nullptr) || (bbPreds->flNext != nullptr) || (this == compiler->fgFirstBB))
{
@@ -895,7 +890,7 @@ BasicBlock* BasicBlock::GetUniquePred(Compiler* compiler)
// Return Value:
// The unique successor of a block, or nullptr if there is no unique successor.
-BasicBlock* BasicBlock::GetUniqueSucc()
+BasicBlock* BasicBlock::GetUniqueSucc() const
{
if (bbJumpKind == BBJ_ALWAYS)
{
@@ -933,20 +928,16 @@ unsigned JitPtrKeyFuncs::GetHashCode(const BasicBlock* ptr)
// True if block is empty, or contains only PHI assignments,
// or contains zero or more PHI assignments followed by NOPs.
//
-bool BasicBlock::isEmpty()
+bool BasicBlock::isEmpty() const
{
if (!IsLIR())
{
- Statement* stmt = FirstNonPhiDef();
-
- while (stmt != nullptr)
+ for (Statement* const stmt : NonPhiStatements())
{
if (!stmt->GetRootNode()->OperIs(GT_NOP))
{
return false;
}
-
- stmt = stmt->GetNextStmt();
}
}
else
@@ -969,7 +960,7 @@ bool BasicBlock::isEmpty()
// Return Value:
// True if it a valid basic block.
//
-bool BasicBlock::isValid()
+bool BasicBlock::isValid() const
{
const bool isLIR = ((bbFlags & BBF_IS_LIR) != 0);
if (isLIR)
@@ -984,7 +975,7 @@ bool BasicBlock::isValid()
}
}
-Statement* BasicBlock::FirstNonPhiDef()
+Statement* BasicBlock::FirstNonPhiDef() const
{
Statement* stmt = firstStmt();
if (stmt == nullptr)
@@ -1005,7 +996,7 @@ Statement* BasicBlock::FirstNonPhiDef()
return stmt;
}
-Statement* BasicBlock::FirstNonPhiDefOrCatchArgAsg()
+Statement* BasicBlock::FirstNonPhiDefOrCatchArgAsg() const
{
Statement* stmt = FirstNonPhiDef();
if (stmt == nullptr)
@@ -1026,11 +1017,10 @@ Statement* BasicBlock::FirstNonPhiDefOrCatchArgAsg()
* Can a BasicBlock be inserted after this without altering the flowgraph
*/
-bool BasicBlock::bbFallsThrough()
+bool BasicBlock::bbFallsThrough() const
{
switch (bbJumpKind)
{
-
case BBJ_THROW:
case BBJ_EHFINALLYRET:
case BBJ_EHFILTERRET:
@@ -1063,7 +1053,7 @@ bool BasicBlock::bbFallsThrough()
// Return Value:
// Count of block successors.
//
-unsigned BasicBlock::NumSucc()
+unsigned BasicBlock::NumSucc() const
{
switch (bbJumpKind)
{
@@ -1107,7 +1097,7 @@ unsigned BasicBlock::NumSucc()
// Return Value:
// Requested successor block
//
-BasicBlock* BasicBlock::GetSucc(unsigned i)
+BasicBlock* BasicBlock::GetSucc(unsigned i) const
{
assert(i < NumSucc()); // Index bounds check.
switch (bbJumpKind)
@@ -1279,7 +1269,7 @@ void BasicBlock::InitVarSets(Compiler* comp)
}
// Returns true if the basic block ends with GT_JMP
-bool BasicBlock::endsWithJmpMethod(Compiler* comp)
+bool BasicBlock::endsWithJmpMethod(Compiler* comp) const
{
if (comp->compJmpOpUsed && (bbJumpKind == BBJ_RETURN) && (bbFlags & BBF_HAS_JMP))
{
@@ -1299,7 +1289,7 @@ bool BasicBlock::endsWithJmpMethod(Compiler* comp)
// comp - Compiler instance
// fastTailCallsOnly - Only consider fast tail calls excluding tail calls via helper.
//
-bool BasicBlock::endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly /*=false*/)
+bool BasicBlock::endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly /*=false*/) const
{
GenTree* tailCall = nullptr;
bool tailCallsConvertibleToLoopOnly = false;
@@ -1326,7 +1316,7 @@ bool BasicBlock::endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly /*
bool BasicBlock::endsWithTailCall(Compiler* comp,
bool fastTailCallsOnly,
bool tailCallsConvertibleToLoopOnly,
- GenTree** tailCall)
+ GenTree** tailCall) const
{
assert(!fastTailCallsOnly || !tailCallsConvertibleToLoopOnly);
*tailCall = nullptr;
@@ -1393,7 +1383,7 @@ bool BasicBlock::endsWithTailCall(Compiler* comp,
// Return Value:
// true if the block ends with a tail call convertible to loop.
//
-bool BasicBlock::endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall)
+bool BasicBlock::endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall) const
{
bool fastTailCallsOnly = false;
bool tailCallsConvertibleToLoopOnly = true;
@@ -1522,7 +1512,7 @@ BasicBlock* Compiler::bbNewBasicBlock(BBjumpKinds jumpKind)
//------------------------------------------------------------------------
// isBBCallAlwaysPair: Determine if this is the first block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair
-
+//
// Return Value:
// True iff "this" is the first block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair
// -- a block corresponding to an exit from the try of a try/finally.
@@ -1540,7 +1530,7 @@ BasicBlock* Compiler::bbNewBasicBlock(BBjumpKinds jumpKind)
// "retless" BBJ_CALLFINALLY blocks due to a requirement to use the BBJ_ALWAYS for
// generating code.
//
-bool BasicBlock::isBBCallAlwaysPair()
+bool BasicBlock::isBBCallAlwaysPair() const
{
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
if (this->bbJumpKind == BBJ_CALLFINALLY)
@@ -1576,7 +1566,7 @@ bool BasicBlock::isBBCallAlwaysPair()
// Notes:
// See notes on isBBCallAlwaysPair(), above.
//
-bool BasicBlock::isBBCallAlwaysPairTail()
+bool BasicBlock::isBBCallAlwaysPairTail() const
{
return (bbPrev != nullptr) && bbPrev->isBBCallAlwaysPair();
}
@@ -1597,7 +1587,7 @@ bool BasicBlock::isBBCallAlwaysPairTail()
// this block might be entered via flow that is not represented by an edge
// in the flowgraph.
//
-bool BasicBlock::hasEHBoundaryIn()
+bool BasicBlock::hasEHBoundaryIn() const
{
bool returnVal = (bbCatchTyp != BBCT_NONE);
if (!returnVal)
@@ -1621,7 +1611,7 @@ bool BasicBlock::hasEHBoundaryIn()
// live in registers if any successor is a normal flow edge. That's because the
// EH write-thru semantics ensure that we always have an up-to-date value on the stack.
//
-bool BasicBlock::hasEHBoundaryOut()
+bool BasicBlock::hasEHBoundaryOut() const
{
bool returnVal = false;
if (bbJumpKind == BBJ_EHFILTERRET)
diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h
index 042c376cd2f94d..0cbe51281b3536 100644
--- a/src/coreclr/jit/block.h
+++ b/src/coreclr/jit/block.h
@@ -319,6 +319,162 @@ class AllSuccessorIterPosition
}
};
+// PredEdgeList: adapter class for forward iteration of the predecessor edge linked list using range-based `for`,
+// normally used via BasicBlock::PredEdges(), e.g.:
+// for (flowList* const edge : block->PredEdges()) ...
+//
+class PredEdgeList
+{
+ flowList* m_begin;
+
+ // Forward iterator for the predecessor edges linked list.
+ // The caller can't make changes to the preds list when using this.
+ //
+ class iterator
+ {
+ flowList* m_pred;
+
+#ifdef DEBUG
+ // Try to guard against the user of the iterator making changes to the IR that would invalidate
+ // the iterator: cache the edge we think should be next, then check it when we actually do the `++`
+ // operation. This is a bit conservative, but attempts to protect against callers assuming too much about
+ // this iterator implementation.
+ flowList* m_next;
+#endif
+
+ public:
+ iterator(flowList* pred);
+
+ flowList* operator*() const
+ {
+ return m_pred;
+ }
+
+ iterator& operator++();
+
+ bool operator!=(const iterator& i) const
+ {
+ return m_pred != i.m_pred;
+ }
+ };
+
+public:
+ PredEdgeList(flowList* pred) : m_begin(pred)
+ {
+ }
+
+ iterator begin() const
+ {
+ return iterator(m_begin);
+ }
+
+ iterator end() const
+ {
+ return iterator(nullptr);
+ }
+};
+
+// PredBlockList: adapter class for forward iteration of the predecessor edge linked list yielding
+// predecessor blocks, using range-based `for`, normally used via BasicBlock::PredBlocks(), e.g.:
+// for (BasicBlock* const predBlock : block->PredBlocks()) ...
+//
+class PredBlockList
+{
+ flowList* m_begin;
+
+ // Forward iterator for the predecessor edges linked list, yielding the predecessor block, not the edge.
+ // The caller can't make changes to the preds list when using this.
+ //
+ class iterator
+ {
+ flowList* m_pred;
+
+#ifdef DEBUG
+ // Try to guard against the user of the iterator making changes to the IR that would invalidate
+ // the iterator: cache the edge we think should be next, then check it when we actually do the `++`
+ // operation. This is a bit conservative, but attempts to protect against callers assuming too much about
+ // this iterator implementation.
+ flowList* m_next;
+#endif
+
+ public:
+ iterator(flowList* pred);
+
+ BasicBlock* operator*() const;
+
+ iterator& operator++();
+
+ bool operator!=(const iterator& i) const
+ {
+ return m_pred != i.m_pred;
+ }
+ };
+
+public:
+ PredBlockList(flowList* pred) : m_begin(pred)
+ {
+ }
+
+ iterator begin() const
+ {
+ return iterator(m_begin);
+ }
+
+ iterator end() const
+ {
+ return iterator(nullptr);
+ }
+};
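A usage sketch contrasting the two adapters: PredEdges() when the edge object itself matters, PredBlocks() when only the predecessor block does. The flDupCount field below is assumed to be the duplicate-edge count on flowList:

```cpp
// Sum incoming flow multiplicity via the edge view ...
unsigned incoming = 0;
for (flowList* const edge : block->PredEdges())
{
    incoming += edge->flDupCount; // assumed field name for the dup count
}

// ... and visit predecessor blocks directly via the block view.
for (BasicBlock* const predBlock : block->PredBlocks())
{
    JITDUMP(FMT_BB " precedes " FMT_BB "\n", predBlock->bbNum, block->bbNum);
}
```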
+
+// BBArrayIterator: forward iterator for an array of BasicBlock*, such as the BBswtDesc->bbsDstTab.
+// It is an error (with assert) to yield a nullptr BasicBlock* in this array.
+// `m_bbEntry` can be nullptr, but it only makes sense if both the begin and end of an iteration range are nullptr
+// (meaning, no actual iteration will happen).
+//
+class BBArrayIterator
+{
+ BasicBlock* const* m_bbEntry;
+
+public:
+ BBArrayIterator(BasicBlock* const* bbEntry) : m_bbEntry(bbEntry)
+ {
+ }
+
+ BasicBlock* operator*() const
+ {
+ assert(m_bbEntry != nullptr);
+ BasicBlock* bTarget = *m_bbEntry;
+ assert(bTarget != nullptr);
+ return bTarget;
+ }
+
+ BBArrayIterator& operator++()
+ {
+ assert(m_bbEntry != nullptr);
+ ++m_bbEntry;
+ return *this;
+ }
+
+ bool operator!=(const BBArrayIterator& i) const
+ {
+ return m_bbEntry != i.m_bbEntry;
+ }
+};
+
+// BBSwitchTargetList: adapter class for forward iteration of switch targets, using range-based `for`,
+// normally used via BasicBlock::SwitchTargets(), e.g.:
+// for (BasicBlock* const target : block->SwitchTargets()) ...
+//
+class BBSwitchTargetList
+{
+ BBswtDesc* m_bbsDesc;
+
+public:
+ BBSwitchTargetList(BBswtDesc* bbsDesc);
+ BBArrayIterator begin() const;
+ BBArrayIterator end() const;
+};
+
//------------------------------------------------------------------------
// BasicBlockFlags: a bitmask of flags for BasicBlock
//
@@ -499,12 +655,12 @@ struct BasicBlock : private LIR::Range
}
#ifdef DEBUG
- void dspFlags(); // Print the flags
- unsigned dspCheapPreds(); // Print the predecessors (bbCheapPreds)
- unsigned dspPreds(); // Print the predecessors (bbPreds)
- unsigned dspSuccs(Compiler* compiler); // Print the successors. The 'compiler' argument determines whether EH
- // regions are printed: see NumSucc() for details.
- void dspJumpKind(); // Print the block jump kind (e.g., BBJ_NONE, BBJ_COND, etc.).
+ void dspFlags(); // Print the flags
+ unsigned dspCheapPreds(); // Print the predecessors (bbCheapPreds)
+ unsigned dspPreds(); // Print the predecessors (bbPreds)
+ void dspSuccs(Compiler* compiler); // Print the successors. The 'compiler' argument determines whether EH
+ // regions are printed: see NumSucc() for details.
+ void dspJumpKind(); // Print the block jump kind (e.g., BBJ_NONE, BBJ_COND, etc.).
// Print a simple basic block header for various output, including a list of predecessors and successors.
void dspBlockHeader(Compiler* compiler, bool showKind = true, bool showFlags = false, bool showPreds = true);
@@ -641,16 +797,17 @@ struct BasicBlock : private LIR::Range
// Returns "true" if the block is empty. Empty here means there are no statement
// trees *except* PHI definitions.
- bool isEmpty();
+ bool isEmpty() const;
- bool isValid();
+ bool isValid() const;
// Returns "true" iff "this" is the first block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair --
// a block corresponding to an exit from the try of a try/finally.
- bool isBBCallAlwaysPair();
+ bool isBBCallAlwaysPair() const;
+
// Returns "true" iff "this" is the last block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair --
// a block corresponding to an exit from the try of a try/finally.
- bool isBBCallAlwaysPairTail();
+ bool isBBCallAlwaysPairTail() const;
BBjumpKinds bbJumpKind; // jump (if any) at the end of this block
@@ -689,18 +846,27 @@ struct BasicBlock : private LIR::Range
// Note that for BBJ_COND, which has two successors (fall through and condition true branch target),
// only the unique targets are returned. Thus, if both targets are the same, NumSucc() will only return 1
// instead of 2.
-
+ //
// NumSucc: Returns the number of successors of "this".
- unsigned NumSucc();
+ unsigned NumSucc() const;
unsigned NumSucc(Compiler* comp);
// GetSucc: Returns the "i"th successor. Requires (0 <= i < NumSucc()).
- BasicBlock* GetSucc(unsigned i);
+ BasicBlock* GetSucc(unsigned i) const;
BasicBlock* GetSucc(unsigned i, Compiler* comp);
- BasicBlock* GetUniquePred(Compiler* comp);
+ // SwitchTargets: convenience methods for enabling range-based `for` iteration over a switch block's targets, e.g.:
+ // for (BasicBlock* const bTarget : block->SwitchTargets()) ...
+ //
+ BBSwitchTargetList SwitchTargets() const
+ {
+ assert(bbJumpKind == BBJ_SWITCH);
+ return BBSwitchTargetList(bbJumpSwt);
+ }
+
+ BasicBlock* GetUniquePred(Compiler* comp) const;
- BasicBlock* GetUniqueSucc();
+ BasicBlock* GetUniqueSucc() const;
unsigned countOfInEdges() const
{
@@ -837,8 +1003,8 @@ struct BasicBlock : private LIR::Range
return sameTryRegion(blk1, blk2) && sameHndRegion(blk1, blk2);
}
- bool hasEHBoundaryIn();
- bool hasEHBoundaryOut();
+ bool hasEHBoundaryIn() const;
+ bool hasEHBoundaryOut() const;
// Some non-zero value that will not collide with real tokens for bbCatchTyp
#define BBCT_NONE 0x00000000
@@ -875,6 +1041,22 @@ struct BasicBlock : private LIR::Range
flowList* bbPreds; // ptr to list of predecessors
};
+ // PredEdges: convenience method for enabling range-based `for` iteration over predecessor edges, e.g.:
+ // for (flowList* const edge : block->PredEdges()) ...
+ //
+ PredEdgeList PredEdges() const
+ {
+ return PredEdgeList(bbPreds);
+ }
+
+ // PredBlocks: convenience method for enabling range-based `for` iteration over predecessor blocks, e.g.:
+ // for (BasicBlock* const predBlock : block->PredBlocks()) ...
+ //
+ PredBlockList PredBlocks() const
+ {
+ return PredBlockList(bbPreds);
+ }
+
// Pred list maintenance
//
bool checkPredListOrder();
@@ -997,7 +1179,7 @@ struct BasicBlock : private LIR::Range
static size_t s_Count;
#endif // MEASURE_BLOCK_SIZE
- bool bbFallsThrough();
+ bool bbFallsThrough() const;
// Our slop fraction is 1/128 of the block weight rounded off
static weight_t GetSlopFraction(weight_t weightBlk)
@@ -1025,14 +1207,14 @@ struct BasicBlock : private LIR::Range
unsigned bbID;
#endif // DEBUG
- ThisInitState bbThisOnEntry();
- unsigned bbStackDepthOnEntry();
+ ThisInitState bbThisOnEntry() const;
+ unsigned bbStackDepthOnEntry() const;
void bbSetStack(void* stackBuffer);
- StackEntry* bbStackOnEntry();
+ StackEntry* bbStackOnEntry() const;
// "bbNum" is one-based (for unknown reasons); it is sometimes useful to have the corresponding
// zero-based number for use as an array index.
- unsigned bbInd()
+ unsigned bbInd() const
{
assert(bbNum > 0);
return bbNum - 1;
@@ -1041,29 +1223,41 @@ struct BasicBlock : private LIR::Range
Statement* firstStmt() const;
Statement* lastStmt() const;
+ // Statements: convenience method for enabling range-based `for` iteration over the statement list, e.g.:
+ // for (Statement* const stmt : block->Statements())
+ //
StatementList Statements() const
{
return StatementList(firstStmt());
}
- GenTree* firstNode();
- GenTree* lastNode();
+ // NonPhiStatements: convenience method for enabling range-based `for` iteration over the statement list,
+ // excluding any initial PHI statements, e.g.:
+ // for (Statement* const stmt : block->NonPhiStatements())
+ //
+ StatementList NonPhiStatements() const
+ {
+ return StatementList(FirstNonPhiDef());
+ }
+
+ GenTree* firstNode() const;
+ GenTree* lastNode() const;
- bool endsWithJmpMethod(Compiler* comp);
+ bool endsWithJmpMethod(Compiler* comp) const;
bool endsWithTailCall(Compiler* comp,
bool fastTailCallsOnly,
bool tailCallsConvertibleToLoopOnly,
- GenTree** tailCall);
+ GenTree** tailCall) const;
- bool endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly = false);
+ bool endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly = false) const;
- bool endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall);
+ bool endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall) const;
// Returns the first statement in the statement list of "this" that is
// not an SSA definition (a lcl = phi(...) assignment).
- Statement* FirstNonPhiDef();
- Statement* FirstNonPhiDefOrCatchArgAsg();
+ Statement* FirstNonPhiDef() const;
+ Statement* FirstNonPhiDefOrCatchArgAsg() const;
BasicBlock() : bbStmtList(nullptr), bbLiveIn(VarSetOps::UninitVal()), bbLiveOut(VarSetOps::UninitVal())
{
@@ -1138,6 +1332,103 @@ struct BasicBlock : private LIR::Range
return Successors(comp, this);
}
+ // BBSuccList: adapter class for forward iteration of block successors, using range-based `for`,
+ // normally used via BasicBlock::Succs(), e.g.:
+ // for (BasicBlock* const target : block->Succs()) ...
+ //
+ class BBSuccList
+ {
+ // For one or two successors, pre-compute and stash the successors inline, in m_succs[], so we don't
+ // need to call a function or execute another `switch` to get them. Also, pre-compute the begin and end
+ // points of the iteration, for use by BBArrayIterator. `m_begin` and `m_end` will either point at
+ // `m_succs` or at the switch table successor array.
+ BasicBlock* m_succs[2];
+ BasicBlock* const* m_begin;
+ BasicBlock* const* m_end;
+
+ public:
+ BBSuccList(const BasicBlock* block);
+ BBArrayIterator begin() const;
+ BBArrayIterator end() const;
+ };
+
+ // BBCompilerSuccList: adapter class for forward iteration of block successors, using range-based `for`,
+ // normally used via BasicBlock::Succs(), e.g.:
+ // for (BasicBlock* const target : block->Succs(compiler)) ...
+ //
+ // This version uses NumSucc(Compiler*)/GetSucc(Compiler*). See the documentation there for the explanation
+ // of the implications of this versus the version that does not take `Compiler*`.
+ class BBCompilerSuccList
+ {
+ Compiler* m_comp;
+ BasicBlock* m_block;
+
+ // iterator: forward iterator for an array of BasicBlock*, such as the BBswtDesc->bbsDstTab.
+ //
+ class iterator
+ {
+ Compiler* m_comp;
+ BasicBlock* m_block;
+ unsigned m_succNum;
+
+ public:
+ iterator(Compiler* comp, BasicBlock* block, unsigned succNum)
+ : m_comp(comp), m_block(block), m_succNum(succNum)
+ {
+ }
+
+ BasicBlock* operator*() const
+ {
+ assert(m_block != nullptr);
+ BasicBlock* bTarget = m_block->GetSucc(m_succNum, m_comp);
+ assert(bTarget != nullptr);
+ return bTarget;
+ }
+
+ iterator& operator++()
+ {
+ ++m_succNum;
+ return *this;
+ }
+
+ bool operator!=(const iterator& i) const
+ {
+ return m_succNum != i.m_succNum;
+ }
+ };
+
+ public:
+ BBCompilerSuccList(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block)
+ {
+ }
+
+ iterator begin() const
+ {
+ return iterator(m_comp, m_block, 0);
+ }
+
+ iterator end() const
+ {
+ return iterator(m_comp, m_block, m_block->NumSucc(m_comp));
+ }
+ };
+
+ // Succs: convenience methods for enabling range-based `for` iteration over a block's successors, e.g.:
+ // for (BasicBlock* const succ : block->Succs()) ...
+ //
+ // There are two options: one that takes a Compiler* and one that doesn't. These correspond to the
+ // NumSucc()/GetSucc() functions that do or do not take a Compiler*. See the comment for NumSucc()/GetSucc()
+ // for the distinction.
+ BBSuccList Succs() const
+ {
+ return BBSuccList(this);
+ }
+
+ BBCompilerSuccList Succs(Compiler* comp)
+ {
+ return BBCompilerSuccList(comp, this);
+ }
+
// Try to clone block state and statements from `from` block to `to` block (which must be new/empty),
// optionally replacing uses of local `varNum` with IntCns `varVal`. Return true if all statements
// in the block are cloned successfully, false (with partially-populated `to` block) if one fails.
@@ -1145,20 +1436,20 @@ struct BasicBlock : private LIR::Range
Compiler* compiler, BasicBlock* to, const BasicBlock* from, unsigned varNum = (unsigned)-1, int varVal = 0);
void MakeLIR(GenTree* firstNode, GenTree* lastNode);
- bool IsLIR();
+ bool IsLIR() const;
void SetDominatedByExceptionalEntryFlag()
{
bbFlags |= BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY;
}
- bool IsDominatedByExceptionalEntryFlag()
+ bool IsDominatedByExceptionalEntryFlag() const
{
return (bbFlags & BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY) != 0;
}
#ifdef DEBUG
- bool Contains(const GenTree* node)
+ bool Contains(const GenTree* node) const
{
assert(IsLIR());
for (Iterator iter = begin(); iter != end(); ++iter)
@@ -1196,6 +1487,96 @@ typedef JitHashTable, BlkVector> BlkToBl
// Map from Block to Block. Used for a variety of purposes.
typedef JitHashTable, BasicBlock*> BlockToBlockMap;
+// BasicBlockIterator: forward iterator for the BasicBlock linked list.
+// It is allowed to make changes to the BasicBlock list as long as the current block remains in the list.
+// E.g., the current block `m_bbNext` pointer can be altered (such as when inserting a following block),
+// as long as the current block is still in the list.
+// The block list is expected to be properly doubly-linked.
+//
+class BasicBlockIterator
+{
+ BasicBlock* m_block;
+
+public:
+ BasicBlockIterator(BasicBlock* block) : m_block(block)
+ {
+ }
+
+ BasicBlock* operator*() const
+ {
+ return m_block;
+ }
+
+ BasicBlockIterator& operator++()
+ {
+ assert(m_block != nullptr);
+ // Check that we haven't been spliced out of the list.
+ assert((m_block->bbNext == nullptr) || (m_block->bbNext->bbPrev == m_block));
+ assert((m_block->bbPrev == nullptr) || (m_block->bbPrev->bbNext == m_block));
+
+ m_block = m_block->bbNext;
+ return *this;
+ }
+
+ bool operator!=(const BasicBlockIterator& i) const
+ {
+ return m_block != i.m_block;
+ }
+};
+
+// BasicBlockSimpleList: adapter class for forward iteration of a lexically contiguous range of
+// BasicBlock, starting at `begin` and going to the end of the function, using range-based `for`,
+// normally used via Compiler::Blocks(), e.g.:
+// for (BasicBlock* const block : Blocks()) ...
+//
+class BasicBlockSimpleList
+{
+ BasicBlock* m_begin;
+
+public:
+ BasicBlockSimpleList(BasicBlock* begin) : m_begin(begin)
+ {
+ }
+
+ BasicBlockIterator begin() const
+ {
+ return BasicBlockIterator(m_begin);
+ }
+
+ BasicBlockIterator end() const
+ {
+ return BasicBlockIterator(nullptr);
+ }
+};
+
+// BasicBlockRangeList: adapter class for forward iteration of a lexically contiguous range of
+// BasicBlock specified with both `begin` and `end` blocks. `begin` and `end` are *inclusive*
+// and must be non-null. E.g.,
+// for (BasicBlock* const block : BasicBlockRangeList(startBlock, endBlock)) ...
+//
+class BasicBlockRangeList
+{
+ BasicBlock* m_begin;
+ BasicBlock* m_end;
+
+public:
+ BasicBlockRangeList(BasicBlock* begin, BasicBlock* end) : m_begin(begin), m_end(end)
+ {
+ assert(begin != nullptr);
+ assert(end != nullptr);
+ }
+
+ BasicBlockIterator begin() const
+ {
+ return BasicBlockIterator(m_begin);
+ }
+
+ BasicBlockIterator end() const
+ {
+ return BasicBlockIterator(m_end->bbNext); // walk until we see the block *following* the `m_end` block
+ }
+};
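Usage sketch for the two list adapters; `head` and `bottom` are hypothetical locals standing in for known range endpoints (e.g. a loop's head and bottom blocks):

```cpp
// Every block in the method (inside a Compiler member, where Blocks() exists):
for (BasicBlock* const block : Blocks())
{
    JITDUMP("visiting " FMT_BB "\n", block->bbNum);
}

// An inclusive lexical sub-range [head, bottom]:
for (BasicBlock* const block : BasicBlockRangeList(head, bottom))
{
    JITDUMP("range block " FMT_BB "\n", block->bbNum);
}
```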
+
// BBswtDesc -- descriptor for a switch block
//
// Things to know:
@@ -1241,6 +1622,98 @@ struct BBswtDesc
}
};
+// BBSwitchTargetList out-of-class-declaration implementations (here due to C++ ordering requirements).
+//
+
+inline BBSwitchTargetList::BBSwitchTargetList(BBswtDesc* bbsDesc) : m_bbsDesc(bbsDesc)
+{
+ assert(m_bbsDesc != nullptr);
+ assert(m_bbsDesc->bbsDstTab != nullptr);
+}
+
+inline BBArrayIterator BBSwitchTargetList::begin() const
+{
+ return BBArrayIterator(m_bbsDesc->bbsDstTab);
+}
+
+inline BBArrayIterator BBSwitchTargetList::end() const
+{
+ return BBArrayIterator(m_bbsDesc->bbsDstTab + m_bbsDesc->bbsCount);
+}
+
+// BBSuccList out-of-class-declaration implementations
+//
+inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block)
+{
+ assert(block != nullptr);
+ switch (block->bbJumpKind)
+ {
+ case BBJ_THROW:
+ case BBJ_RETURN:
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ // We don't need m_succs.
+ m_begin = nullptr;
+ m_end = nullptr;
+ break;
+
+ case BBJ_CALLFINALLY:
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
+ case BBJ_LEAVE:
+ m_succs[0] = block->bbJumpDest;
+ m_begin = &m_succs[0];
+ m_end = &m_succs[1];
+ break;
+
+ case BBJ_NONE:
+ m_succs[0] = block->bbNext;
+ m_begin = &m_succs[0];
+ m_end = &m_succs[1];
+ break;
+
+ case BBJ_COND:
+ m_succs[0] = block->bbNext;
+ m_begin = &m_succs[0];
+
+ // If both fall-through and branch successors are identical, then only include
+ // them once in the iteration (this is the same behavior as NumSucc()/GetSucc()).
+ if (block->bbJumpDest == block->bbNext)
+ {
+ m_end = &m_succs[1];
+ }
+ else
+ {
+ m_succs[1] = block->bbJumpDest;
+ m_end = &m_succs[2];
+ }
+ break;
+
+ case BBJ_SWITCH:
+ // We don't use the m_succs in-line data for switches; use the existing jump table in the block.
+ assert(block->bbJumpSwt != nullptr);
+ assert(block->bbJumpSwt->bbsDstTab != nullptr);
+ m_begin = block->bbJumpSwt->bbsDstTab;
+ m_end = block->bbJumpSwt->bbsDstTab + block->bbJumpSwt->bbsCount;
+ break;
+
+ default:
+ unreached();
+ }
+
+ assert(m_end >= m_begin);
+}
+
+inline BBArrayIterator BasicBlock::BBSuccList::begin() const
+{
+ return BBArrayIterator(m_begin);
+}
+
+inline BBArrayIterator BasicBlock::BBSuccList::end() const
+{
+ return BBArrayIterator(m_end);
+}
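A sketch of the two successor views side by side; the Compiler-taking form follows the NumSucc(Compiler*)/GetSucc(i, Compiler*) rules described above, so the two walks can yield different sets around some EH-related jump kinds. Here `compiler` is a stand-in for the containing Compiler*:

```cpp
// Plain successor walk, no Compiler* needed:
for (BasicBlock* const succ : block->Succs())
{
    JITDUMP(FMT_BB " -> " FMT_BB "\n", block->bbNum, succ->bbNum);
}

// EH-aware walk using the Compiler-parameterized semantics:
for (BasicBlock* const succ : block->Succs(compiler))
{
    // May differ from the walk above for some EH-related jump kinds.
}
```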
+
// In compiler terminology the control flow between two BasicBlocks
// is typically referred to as an "edge". Most well known are the
// backward branches for loops, which are often called "back-edges".
@@ -1354,6 +1827,55 @@ struct flowList
}
};
+// Pred list iterator implementations (that are required to be defined after the declaration of BasicBlock and flowList)
+
+inline PredEdgeList::iterator::iterator(flowList* pred) : m_pred(pred)
+{
+#ifdef DEBUG
+ m_next = (m_pred == nullptr) ? nullptr : m_pred->flNext;
+#endif
+}
+
+inline PredEdgeList::iterator& PredEdgeList::iterator::operator++()
+{
+ flowList* next = m_pred->flNext;
+
+#ifdef DEBUG
+ // Check that the next block is the one we expect to see.
+ assert(next == m_next);
+ m_next = (next == nullptr) ? nullptr : next->flNext;
+#endif // DEBUG
+
+ m_pred = next;
+ return *this;
+}
+
+inline PredBlockList::iterator::iterator(flowList* pred) : m_pred(pred)
+{
+#ifdef DEBUG
+ m_next = (m_pred == nullptr) ? nullptr : m_pred->flNext;
+#endif
+}
+
+inline BasicBlock* PredBlockList::iterator::operator*() const
+{
+ return m_pred->getBlock();
+}
+
+inline PredBlockList::iterator& PredBlockList::iterator::operator++()
+{
+ flowList* next = m_pred->flNext;
+
+#ifdef DEBUG
+ // Check that the next block is the one we expect to see.
+ assert(next == m_next);
+ m_next = (next == nullptr) ? nullptr : next->flNext;
+#endif // DEBUG
+
+ m_pred = next;
+ return *this;
+}
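The DEBUG-only m_next caching in these iterators exists to catch pred-list mutation mid-iteration; a sketch of the misuse it is meant to assert on (fgRemoveRefPred being the existing pred-list mutator):

```cpp
for (flowList* const edge : block->PredEdges())
{
    // Unlinking the current edge re-threads the list; the iterator's cached
    // m_next then disagrees with m_pred->flNext, and the next `++` fires
    // the assert (DEBUG builds only):
    // comp->fgRemoveRefPred(block, edge->getBlock()); // would assert
}
```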
+
// This enum represents a pre/post-visit action state to emulate a depth-first
// spanning tree traversal of a tree or graph.
enum DfsStackState
diff --git a/src/coreclr/jit/clrjit.natvis b/src/coreclr/jit/clrjit.natvis
index a6ba8f53b19165..90a9ff703a4716 100644
--- a/src/coreclr/jit/clrjit.natvis
+++ b/src/coreclr/jit/clrjit.natvis
@@ -24,7 +24,7 @@ The .NET Foundation licenses this file to you under the MIT license.
- [{gtOper,en}, {gtType,en}}]
+ [{gtOper,en}, {gtType,en}]
[IntCon={((GenTreeIntCon*)this)->gtIconVal, d}]
@@ -55,6 +55,10 @@ The .NET Foundation licenses this file to you under the MIT license.
[{gtOper,en}, {gtType,en} V{((GenTreeLclVar*)this)->_gtLclNum,u}]
+
+ [{gtOper,en}, {gtType,en} V{((GenTreeLclFld*)this)->_gtLclNum,u}[+{((GenTreeLclFld*)this)->m_lclOffs,u}]]
+
+
LinearScan
@@ -157,6 +161,14 @@ The .NET Foundation licenses this file to you under the MIT license.
IG{igNum,d}
+
+ {_idIns,en} {_idReg1,en}
+ {_idIns,en} {_idReg1,en}, {_idLargeCns,d}
+ {_idIns,en} {_idReg1,en}, {_idLargeCns,d}
+ {_idIns,en} {_idReg1,en}, {_idSmallCns,d}
+ {_idIns,en}
+
+
Size={m_nSize}
diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp
index cc572c663c242c..5993f7015ec2bd 100644
--- a/src/coreclr/jit/codegenarm.cpp
+++ b/src/coreclr/jit/codegenarm.cpp
@@ -460,7 +460,7 @@ void CodeGen::genLclHeap(GenTree* tree)
}
// regCnt will be the total number of bytes to locAlloc
- genSetRegToIcon(regCnt, amount, ((int)amount == amount) ? TYP_INT : TYP_LONG);
+ genSetRegToIcon(regCnt, amount, TYP_INT);
}
else
{
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index f6eaa8b044ccc9..e99f047c061a55 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -352,7 +352,7 @@ void CodeGen::genMarkLabelsForCodegen()
#ifdef DEBUG
// No label flags should be set before this.
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
assert((block->bbFlags & BBF_HAS_LABEL) == 0);
}
@@ -372,7 +372,7 @@ void CodeGen::genMarkLabelsForCodegen()
compiler->fgFirstBB->bbFlags |= BBF_HAS_LABEL;
}
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
switch (block->bbJumpKind)
{
@@ -384,15 +384,11 @@ void CodeGen::genMarkLabelsForCodegen()
break;
case BBJ_SWITCH:
- unsigned jumpCnt;
- jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab;
- jumpTab = block->bbJumpSwt->bbsDstTab;
- do
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- JITDUMP(" " FMT_BB " : branch target\n", (*jumpTab)->bbNum);
- (*jumpTab)->bbFlags |= BBF_HAS_LABEL;
- } while (++jumpTab, --jumpCnt);
+ JITDUMP(" " FMT_BB " : branch target\n", bTarget->bbNum);
+ bTarget->bbFlags |= BBF_HAS_LABEL;
+ }
break;
case BBJ_CALLFINALLY:
@@ -439,11 +435,7 @@ void CodeGen::genMarkLabelsForCodegen()
add->acdDstBlk->bbFlags |= BBF_HAS_LABEL;
}
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
-
- for (HBtab = compiler->compHndBBtab, HBtabEnd = compiler->compHndBBtab + compiler->compHndBBtabCount;
- HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(compiler))
{
HBtab->ebdTryBeg->bbFlags |= BBF_HAS_LABEL;
HBtab->ebdHndBeg->bbFlags |= BBF_HAS_LABEL;
@@ -2411,11 +2403,19 @@ void CodeGen::genEmitMachineCode()
if (compiler->opts.disAsm || verbose)
{
printf("\n; Total bytes of code %d, prolog size %d, PerfScore %.2f, instruction count %d, allocated bytes for "
- "code %d (MethodHash=%08x) for "
- "method %s\n",
+ "code %d",
codeSize, prologSize, compiler->info.compPerfScore, instrCount,
- GetEmitter()->emitTotalHotCodeSize + GetEmitter()->emitTotalColdCodeSize,
- compiler->info.compMethodHash(), compiler->info.compFullName);
+ GetEmitter()->emitTotalHotCodeSize + GetEmitter()->emitTotalColdCodeSize);
+
+#if TRACK_LSRA_STATS
+ if (JitConfig.DisplayLsraStats() == 3)
+ {
+ compiler->m_pLinearScan->dumpLsraStatsSummary(jitstdout);
+ }
+#endif // TRACK_LSRA_STATS
+
+ printf(" (MethodHash=%08x) for method %s\n", compiler->info.compMethodHash(), compiler->info.compFullName);
+
printf("; ============================================================\n\n");
printf(""); // in our logic this causes a flush
}
@@ -2623,9 +2623,7 @@ void CodeGen::genReportEH()
}
#endif // DEBUG
- unsigned XTnum;
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
+ unsigned XTnum;
bool isCoreRTABI = compiler->IsTargetAbi(CORINFO_CORERT_ABI);
@@ -2664,8 +2662,7 @@ void CodeGen::genReportEH()
// clauses. If there aren't, we don't need to look for BBJ_CALLFINALLY.
bool anyFinallys = false;
- for (HBtab = compiler->compHndBBtab, HBtabEnd = compiler->compHndBBtab + compiler->compHndBBtabCount;
- HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(compiler))
{
if (HBtab->HasFinallyHandler())
{
@@ -2675,7 +2672,7 @@ void CodeGen::genReportEH()
}
if (anyFinallys)
{
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbJumpKind == BBJ_CALLFINALLY)
{
@@ -2715,8 +2712,7 @@ void CodeGen::genReportEH()
XTnum = 0; // This is the index we pass to the VM
- for (HBtab = compiler->compHndBBtab, HBtabEnd = compiler->compHndBBtab + compiler->compHndBBtabCount;
- HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(compiler))
{
UNATIVE_OFFSET tryBeg, tryEnd, hndBeg, hndEnd, hndTyp;
@@ -2908,8 +2904,9 @@ void CodeGen::genReportEH()
if (duplicateClauseCount > 0)
{
- unsigned reportedDuplicateClauseCount = 0; // How many duplicated clauses have we reported?
- unsigned XTnum2;
+ unsigned reportedDuplicateClauseCount = 0; // How many duplicated clauses have we reported?
+ unsigned XTnum2;
+ EHblkDsc* HBtab;
for (XTnum2 = 0, HBtab = compiler->compHndBBtab; XTnum2 < compiler->compHndBBtabCount; XTnum2++, HBtab++)
{
unsigned enclosingTryIndex;
@@ -3001,7 +2998,7 @@ void CodeGen::genReportEH()
if (clonedFinallyCount > 0)
{
unsigned reportedClonedFinallyCount = 0;
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbJumpKind == BBJ_CALLFINALLY)
{
@@ -7457,7 +7454,7 @@ void CodeGen::genFnProlog()
// Establish the AMD64 frame pointer after the OS-reported prolog.
if (doubleAlignOrFramePointerUsed())
{
- bool reportUnwindData = compiler->compLocallocUsed || compiler->opts.compDbgEnC;
+ const bool reportUnwindData = compiler->compLocallocUsed || compiler->opts.compDbgEnC;
genEstablishFramePointer(compiler->codeGen->genSPtoFPdelta(), reportUnwindData);
}
#endif // TARGET_AMD64
@@ -8150,7 +8147,31 @@ void CodeGen::genFnEpilog(BasicBlock* block)
/* Compute the size in bytes we've pushed/popped */
- if (!doubleAlignOrFramePointerUsed())
+ bool removeEbpFrame = doubleAlignOrFramePointerUsed();
+
+#ifdef TARGET_AMD64
+ // We only remove the EBP frame via the frame pointer (i.e., using `lea rsp, [rbp + const]`)
+ // if we reported the frame pointer in the prolog. The Windows x64 unwinding ABI specifically
+ // disallows this `lea` form:
+ //
+ // See https://docs.microsoft.com/en-us/cpp/build/prolog-and-epilog?view=msvc-160#epilog-code
+ //
+ // "When a frame pointer is not used, the epilog must use add RSP,constant to deallocate the fixed part of the
+ // stack. It may not use lea RSP,constant[RSP] instead. This restriction exists so the unwind code has fewer
+ // patterns to recognize when searching for epilogs."
+ //
+ // Otherwise, we must use `add RSP, constant`, as stated. So, we need to use the same condition
+ // as genFnProlog() used in determining whether to report the frame pointer in the unwind data.
+ // This is a subset of the `doubleAlignOrFramePointerUsed()` cases.
+ //
+ if (removeEbpFrame)
+ {
+ const bool reportUnwindData = compiler->compLocallocUsed || compiler->opts.compDbgEnC;
+ removeEbpFrame = removeEbpFrame && reportUnwindData;
+ }
+#endif // TARGET_AMD64
+
+ if (!removeEbpFrame)
{
// We have an ESP frame */
@@ -8180,6 +8201,15 @@ void CodeGen::genFnEpilog(BasicBlock* block)
genPopCalleeSavedRegisters();
+#ifdef TARGET_AMD64
+ // In the case where we have an RSP frame, and no frame pointer reported in the OS unwind info,
+ // but we do have a pushed frame pointer and established frame chain, we do need to pop RBP.
+ if (doubleAlignOrFramePointerUsed())
+ {
+ inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
+ }
+#endif // TARGET_AMD64
+
// Extra OSR adjust to get to where RBP was saved by the original frame, and
// restore RBP.
//
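A minimal sketch of the epilog decision described above, under simplified inputs (`epilogMayUseLeaOfRbp` and its parameters are illustrative names, not part of this change); the condition mirrors the one genFnProlog() uses when deciding whether to report the frame pointer:

    // May this x64 epilog restore RSP with `lea rsp, [rbp + const]`?
    // Only if the prolog reported RBP in the unwind data; otherwise the
    // Windows x64 unwind ABI mandates `add rsp, constant`.
    bool epilogMayUseLeaOfRbp(bool framePointerUsed, bool locallocUsed, bool dbgEnC)
    {
        if (!framePointerUsed)
        {
            return false; // RSP frame: must deallocate with `add rsp, constant`
        }

        // Same condition as the prolog's unwind reporting decision.
        const bool reportUnwindData = locallocUsed || dbgEnC;
        return reportUnwindData;
    }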
@@ -10843,7 +10873,7 @@ void CodeGen::genIPmappingGen()
//
//It turns out that the only thing we really have to assert is that the first statement in each basic
//block has an IL offset and appears in eeBoundaries.
- for (BasicBlock * block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
Statement* stmt = block->firstStmt();
if ((block->bbRefs > 1) && (stmt != nullptr))
@@ -11703,7 +11733,8 @@ void CodeGenInterface::VariableLiveKeeper::VariableLiveRange::dumpVariableLiveRa
const CodeGenInterface* codeGen) const
{
codeGen->dumpSiVarLoc(&m_VarLocation);
- printf(" [ ");
+
+ printf(" [");
m_StartEmitLocation.Print(codeGen->GetCompiler()->compMethodID);
printf(", ");
if (m_EndEmitLocation.Valid())
@@ -11712,9 +11743,9 @@ void CodeGenInterface::VariableLiveKeeper::VariableLiveRange::dumpVariableLiveRa
}
else
{
- printf("NON_CLOSED_RANGE");
+ printf("...");
}
- printf(" ]; ");
+ printf("]");
}
// Dump "VariableLiveRange" when code has been generated and we have the assembly native offset of each "emitLocation"
@@ -11730,7 +11761,7 @@ void CodeGenInterface::VariableLiveKeeper::VariableLiveRange::dumpVariableLiveRa
// If this is an open "VariableLiveRange", "m_EndEmitLocation" is non-valid and print -1
UNATIVE_OFFSET endAssemblyOffset = m_EndEmitLocation.Valid() ? m_EndEmitLocation.CodeOffset(emit) : -1;
- printf(" [%X , %X )", m_StartEmitLocation.CodeOffset(emit), m_EndEmitLocation.CodeOffset(emit));
+ printf(" [%X, %X)", m_StartEmitLocation.CodeOffset(emit), m_EndEmitLocation.CodeOffset(emit));
}
//------------------------------------------------------------------------
@@ -11974,25 +12005,31 @@ void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::updateLiveRan
void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::dumpAllRegisterLiveRangesForBlock(
emitter* emit, const CodeGenInterface* codeGen) const
{
- printf("[");
+ bool first = true;
for (LiveRangeListIterator it = m_VariableLiveRanges->begin(); it != m_VariableLiveRanges->end(); it++)
{
+ if (!first)
+ {
+ printf("; ");
+ }
it->dumpVariableLiveRange(emit, codeGen);
+ first = false;
}
- printf("]\n");
}
void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::dumpRegisterLiveRangesForBlockBeforeCodeGenerated(
const CodeGenInterface* codeGen) const
{
- noway_assert(codeGen != nullptr);
-
- printf("[");
+ bool first = true;
for (LiveRangeListIterator it = m_VariableLifeBarrier->getStartForDump(); it != m_VariableLiveRanges->end(); it++)
{
+ if (!first)
+ {
+ printf("; ");
+ }
it->dumpVariableLiveRange(codeGen);
+ first = false;
}
- printf("]\n");
}
// Returns true if a live range for this variable has been recorded
@@ -12431,41 +12468,33 @@ void CodeGenInterface::VariableLiveKeeper::psiClosePrologVariableRanges()
#ifdef DEBUG
void CodeGenInterface::VariableLiveKeeper::dumpBlockVariableLiveRanges(const BasicBlock* block)
{
- // "block" will be dereferenced
- noway_assert(block != nullptr);
+ assert(block != nullptr);
bool hasDumpedHistory = false;
- if (m_Compiler->verbose)
- {
- printf("////////////////////////////////////////\n");
- printf("////////////////////////////////////////\n");
- printf("Variable Live Range History Dump for Block %d \n", block->bbNum);
+ printf("\nVariable Live Range History Dump for " FMT_BB "\n", block->bbNum);
- if (m_Compiler->opts.compDbgInfo)
+ if (m_Compiler->opts.compDbgInfo)
+ {
+ for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++)
{
- for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++)
- {
- VariableLiveDescriptor* varLiveDsc = m_vlrLiveDsc + varNum;
+ VariableLiveDescriptor* varLiveDsc = m_vlrLiveDsc + varNum;
- if (varLiveDsc->hasVarLiveRangesFromLastBlockToDump())
- {
- hasDumpedHistory = true;
- printf("IL Var Num %d:\n", m_Compiler->compMap2ILvarNum(varNum));
- varLiveDsc->dumpRegisterLiveRangesForBlockBeforeCodeGenerated(m_Compiler->codeGen);
- varLiveDsc->endBlockLiveRanges();
- }
+ if (varLiveDsc->hasVarLiveRangesFromLastBlockToDump())
+ {
+ hasDumpedHistory = true;
+ m_Compiler->gtDispLclVar(varNum, false);
+ printf(": ");
+ varLiveDsc->dumpRegisterLiveRangesForBlockBeforeCodeGenerated(m_Compiler->codeGen);
+ varLiveDsc->endBlockLiveRanges();
+ printf("\n");
}
}
+ }
- if (!hasDumpedHistory)
- {
- printf("..None..\n");
- }
-
- printf("////////////////////////////////////////\n");
- printf("////////////////////////////////////////\n");
- printf("End Generating code for Block %d \n", block->bbNum);
+ if (!hasDumpedHistory)
+ {
+ printf("..None..\n");
}
}
@@ -12473,34 +12502,28 @@ void CodeGenInterface::VariableLiveKeeper::dumpLvaVariableLiveRanges() const
{
bool hasDumpedHistory = false;
- if (m_Compiler->verbose)
- {
- printf("////////////////////////////////////////\n");
- printf("////////////////////////////////////////\n");
- printf("PRINTING VARIABLE LIVE RANGES:\n");
+ printf("VARIABLE LIVE RANGES:\n");
- if (m_Compiler->opts.compDbgInfo)
+ if (m_Compiler->opts.compDbgInfo)
+ {
+ for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++)
{
- for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++)
- {
- VariableLiveDescriptor* varLiveDsc = m_vlrLiveDsc + varNum;
+ VariableLiveDescriptor* varLiveDsc = m_vlrLiveDsc + varNum;
- if (varLiveDsc->hasVarLiveRangesToDump())
- {
- hasDumpedHistory = true;
- printf("IL Var Num %d:\n", m_Compiler->compMap2ILvarNum(varNum));
- varLiveDsc->dumpAllRegisterLiveRangesForBlock(m_Compiler->GetEmitter(), m_Compiler->codeGen);
- }
+ if (varLiveDsc->hasVarLiveRangesToDump())
+ {
+ hasDumpedHistory = true;
+ m_Compiler->gtDispLclVar(varNum, false);
+ printf(": ");
+ varLiveDsc->dumpAllRegisterLiveRangesForBlock(m_Compiler->GetEmitter(), m_Compiler->codeGen);
+ printf("\n");
}
}
+ }
- if (!hasDumpedHistory)
- {
- printf("..None..\n");
- }
-
- printf("////////////////////////////////////////\n");
- printf("////////////////////////////////////////\n");
+ if (!hasDumpedHistory)
+ {
+ printf("..None..\n");
}
}
#endif // DEBUG
diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp
index e830007e51b4f4..2bd0142381f628 100644
--- a/src/coreclr/jit/codegenxarch.cpp
+++ b/src/coreclr/jit/codegenxarch.cpp
@@ -2332,7 +2332,8 @@ void CodeGen::genLclHeap(GenTree* tree)
noway_assert(isFramePointerUsed()); // localloc requires Frame Pointer to be established since SP changes
noway_assert(genStackLevel == 0); // Can't have anything on the stack
- unsigned stackAdjustment = 0;
+ target_size_t stackAdjustment = 0;
+ target_size_t locAllocStackOffset = 0;
// compute the amount of memory to allocate to properly STACK_ALIGN.
size_t amount = 0;
@@ -2410,6 +2411,9 @@ void CodeGen::genLclHeap(GenTree* tree)
}
}
+ bool initMemOrLargeAlloc; // Declaration must be separate from initialization to avoid clang compiler error.
+ initMemOrLargeAlloc = compiler->info.compInitMem || (amount >= compiler->eeGetPageSize()); // must be >= not >
+
#if FEATURE_FIXED_OUT_ARGS
// If we have an outgoing arg area then we must adjust the SP by popping off the
// outgoing arg area. We will restore it right before we return from this method.
@@ -2425,8 +2429,22 @@ void CodeGen::genLclHeap(GenTree* tree)
{
assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0); // This must be true for the stack to remain
// aligned
+
+ // If the localloc amount is a small enough constant, and we're not initializing the allocated
+ // memory, then don't bother popping off the outgoing arg space first; just allocate the amount
+ // of space needed by the allocation, and call the bottom part the new outgoing arg space.
+
+ if ((amount > 0) && !initMemOrLargeAlloc)
+ {
+ lastTouchDelta = genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)amount, REG_NA);
+ stackAdjustment = 0;
+ locAllocStackOffset = (target_size_t)compiler->lvaOutgoingArgSpaceSize;
+ goto ALLOC_DONE;
+ }
+
inst_RV_IV(INS_add, REG_SPBASE, compiler->lvaOutgoingArgSpaceSize, EA_PTRSIZE);
- stackAdjustment += compiler->lvaOutgoingArgSpaceSize;
+ stackAdjustment += (target_size_t)compiler->lvaOutgoingArgSpaceSize;
+ locAllocStackOffset = stackAdjustment;
}
#endif
@@ -2451,9 +2469,6 @@ void CodeGen::genLclHeap(GenTree* tree)
goto ALLOC_DONE;
}
- bool initMemOrLargeAlloc =
- compiler->info.compInitMem || (amount >= compiler->eeGetPageSize()); // must be >= not >
-
#ifdef TARGET_X86
bool needRegCntRegister = true;
#else // !TARGET_X86
@@ -2552,7 +2567,7 @@ void CodeGen::genLclHeap(GenTree* tree)
assert(lastTouchDelta >= -1);
if ((lastTouchDelta == (target_ssize_t)-1) ||
- (stackAdjustment + (unsigned)lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES >
+ (stackAdjustment + (target_size_t)lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES >
compiler->eeGetPageSize()))
{
genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)stackAdjustment, REG_NA);
@@ -2564,8 +2579,8 @@ void CodeGen::genLclHeap(GenTree* tree)
}
// Return the stackalloc'ed address in result register.
- // TargetReg = RSP + stackAdjustment.
- GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, targetReg, REG_SPBASE, stackAdjustment);
+ // TargetReg = RSP + locAllocStackOffset
+ GetEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, targetReg, REG_SPBASE, (int)locAllocStackOffset);
if (endLabel != nullptr)
{
@@ -2737,26 +2752,47 @@ void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node)
src = src->AsUnOp()->gtGetOp1();
}
+ unsigned size = node->GetLayout()->GetSize();
+
+ // An SSE mov that accesses data larger than 8 bytes may be implemented using
+ // multiple memory accesses. Hence, the JIT must not use such stores when
+ // INITBLK zeroes a struct that contains GC pointers and can be observed by
+ // other threads (i.e. when dstAddr is not an address of a local).
+ // For example, this can happen when initializing a struct field of an object.
+ const bool canUse16BytesSimdMov = !node->IsOnHeapAndContainsReferences();
+
+#ifdef TARGET_AMD64
+ // On Amd64 the JIT will not use SIMD stores for such structs and instead
+ // will always allocate a GP register for src node.
+ const bool willUseSimdMov = canUse16BytesSimdMov && (size >= XMM_REGSIZE_BYTES);
+#else
+ // On X86 the JIT will use movq for structs that are 16 bytes or larger,
+ // since it is more beneficial than using two mov stores from a GP register.
+ const bool willUseSimdMov = (size >= 16);
+#endif
+
if (!src->isContained())
{
srcIntReg = genConsumeReg(src);
}
else
{
- // If src is contained then it must be 0 and the size must be a multiple
- // of XMM_REGSIZE_BYTES so initialization can use only SSE2 instructions.
+ // If src is contained then it must be 0.
assert(src->IsIntegralConst(0));
- assert((node->GetLayout()->GetSize() % XMM_REGSIZE_BYTES) == 0);
+ assert(willUseSimdMov);
+#ifdef TARGET_AMD64
+ assert(size % 16 == 0);
+#else
+ assert(size % 8 == 0);
+#endif
}
emitter* emit = GetEmitter();
- unsigned size = node->GetLayout()->GetSize();
assert(size <= INT32_MAX);
assert(dstOffset < (INT32_MAX - static_cast<int>(size)));
- // Fill as much as possible using SSE2 stores.
- if (size >= XMM_REGSIZE_BYTES)
+ if (willUseSimdMov)
{
regNumber srcXmmReg = node->GetSingleTempReg(RBM_ALLFLOAT);
@@ -2776,9 +2812,25 @@ void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node)
#endif
}
- instruction simdMov = simdUnalignedMovIns();
- for (unsigned regSize = XMM_REGSIZE_BYTES; size >= regSize; size -= regSize, dstOffset += regSize)
+ instruction simdMov = simdUnalignedMovIns();
+ unsigned regSize = XMM_REGSIZE_BYTES;
+ unsigned bytesWritten = 0;
+
+ while (bytesWritten < size)
{
+#ifdef TARGET_X86
+ if (!canUse16BytesSimdMov || (bytesWritten + regSize > size))
+ {
+ simdMov = INS_movq;
+ regSize = 8;
+ }
+#endif
+ if (bytesWritten + regSize > size)
+ {
+ assert(srcIntReg != REG_NA);
+ break;
+ }
+
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(simdMov, EA_ATTR(regSize), srcXmmReg, dstLclNum, dstOffset);
@@ -2788,11 +2840,12 @@ void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node)
emit->emitIns_ARX_R(simdMov, EA_ATTR(regSize), srcXmmReg, dstAddrBaseReg, dstAddrIndexReg,
dstAddrIndexScale, dstOffset);
}
+
+ dstOffset += regSize;
+ bytesWritten += regSize;
}
- // TODO-CQ-XArch: On x86 we could initialize 8 byte at once by using MOVQ instead of two 4 byte MOV stores.
- // On x64 it may also be worth zero initializing a 4/8 byte remainder using MOVD/MOVQ, that avoids the need
- // to allocate a GPR just for the remainder.
+ size -= bytesWritten;
}
// Fill the remainder using normal stores.
@@ -4589,7 +4642,7 @@ void CodeGen::genCodeForIndexAddr(GenTreeIndexAddr* node)
// The VM doesn't allow such large array elements but let's be sure.
noway_assert(scale <= INT32_MAX);
#else // !TARGET_64BIT
- tmpReg = node->GetSingleTempReg();
+ tmpReg = node->GetSingleTempReg();
#endif // !TARGET_64BIT
GetEmitter()->emitIns_R_I(emitter::inst3opImulForReg(tmpReg), EA_PTRSIZE, indexReg,
@@ -6035,8 +6088,7 @@ void CodeGen::genCompareInt(GenTree* treeNode)
// TYP_UINT and TYP_ULONG should not appear here, only small types can be unsigned
assert(!varTypeIsUnsigned(type) || varTypeIsSmall(type));
- bool needsOCFlags = !tree->OperIs(GT_EQ, GT_NE);
- if (canReuseFlags && emit->AreFlagsSetToZeroCmp(op1->GetRegNum(), emitTypeSize(type), needsOCFlags))
+ if (canReuseFlags && emit->AreFlagsSetToZeroCmp(op1->GetRegNum(), emitTypeSize(type), tree->OperGet()))
{
JITDUMP("Not emitting compare due to flags being already set\n");
}
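A hedged illustration of why genCodeForInitBlkUnroll above distinguishes heap-resident structs containing GC pointers (the types and functions below are stand-ins, not from this change): a 16-byte SIMD store may be split into multiple memory accesses, so a concurrent reader could observe a torn pointer.

    // Stand-in for a struct whose fields are GC references in managed code.
    struct TwoRefs
    {
        void* a;
        void* b;
    };

    // Zeroing a stack local: unobservable by other threads, so a single
    // 16-byte SIMD store (e.g. movdqu) is safe.
    void zeroLocal()
    {
        TwoRefs local = {};
        (void)local;
    }

    // Zeroing a field of a heap-visible object: each pointer must be written
    // with a pointer-sized store so it is never seen half-written.
    void zeroHeapField(TwoRefs* onHeap)
    {
        onHeap->a = nullptr;
        onHeap->b = nullptr;
    }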
diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index 5f086e1b8c5050..0093b0e0dbb41c 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -1466,7 +1466,19 @@ void Compiler::compShutdown()
#if defined(DEBUG) || defined(INLINE_DATA)
// Finish reading and/or writing inline xml
- InlineStrategy::FinalizeXml();
+ if (JitConfig.JitInlineDumpXmlFile() != nullptr)
+ {
+ FILE* file = _wfopen(JitConfig.JitInlineDumpXmlFile(), W("a"));
+ if (file != nullptr)
+ {
+ InlineStrategy::FinalizeXml(file);
+ fclose(file);
+ }
+ else
+ {
+ InlineStrategy::FinalizeXml();
+ }
+ }
#endif // defined(DEBUG) || defined(INLINE_DATA)
#if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS
@@ -5107,9 +5119,9 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl
{
compSizeEstimate = 0;
compCycleEstimate = 0;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
compSizeEstimate += stmt->GetCostSz();
compCycleEstimate += stmt->GetCostEx();
@@ -5295,12 +5307,12 @@ void Compiler::generatePatchpointInfo()
}
// Special offsets
-
- if (lvaReportParamTypeArg() || lvaKeepAliveAndReportThis())
+ //
+ if (lvaReportParamTypeArg())
{
- const int offset = lvaToCallerSPRelativeOffset(lvaCachedGenericContextArgOffset(), true);
+ const int offset = lvaCachedGenericContextArgOffset();
patchpointInfo->SetGenericContextArgOffset(offset);
- JITDUMP("--OSR-- cached generic context offset is CallerSP %d\n", patchpointInfo->GenericContextArgOffset());
+ JITDUMP("--OSR-- cached generic context offset is FP %d\n", patchpointInfo->GenericContextArgOffset());
}
if (lvaKeepAliveAndReportThis())
@@ -5342,11 +5354,11 @@ void Compiler::ResetOptAnnotations()
fgSsaPassesCompleted = 0;
fgVNPassesCompleted = 0;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
tree->ClearVN();
tree->ClearAssertion();
@@ -5371,7 +5383,7 @@ void Compiler::RecomputeLoopInfo()
// Recompute reachability sets, dominators, and loops.
optLoopCount = 0;
fgDomsComputed = false;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbFlags &= ~BBF_LOOP_FLAGS;
}
@@ -5898,7 +5910,24 @@ void Compiler::compCompileFinish()
#if defined(DEBUG) || defined(INLINE_DATA)
m_inlineStrategy->DumpData();
- m_inlineStrategy->DumpXml();
+
+ if (JitConfig.JitInlineDumpXmlFile() != nullptr)
+ {
+ FILE* file = _wfopen(JitConfig.JitInlineDumpXmlFile(), W("a"));
+ if (file != nullptr)
+ {
+ m_inlineStrategy->DumpXml(file);
+ fclose(file);
+ }
+ else
+ {
+ m_inlineStrategy->DumpXml();
+ }
+ }
+ else
+ {
+ m_inlineStrategy->DumpXml();
+ }
#endif
@@ -7366,11 +7395,11 @@ Compiler::NodeToIntMap* Compiler::FindReachableNodesInNodeTestData()
// Otherwise, iterate.
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt = block->FirstNonPhiDef(); stmt != nullptr; stmt = stmt->GetNextStmt())
+ for (Statement* const stmt : block->NonPhiStatements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
TestLabelAndNum tlAndN;
@@ -7494,22 +7523,17 @@ void Compiler::compCallArgStats()
assert(fgStmtListThreaded);
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
- for (GenTree* call = stmt->GetTreeList(); call != nullptr; call = call->gtNext)
+ for (GenTree* const call : stmt->TreeList())
{
if (call->gtOper != GT_CALL)
continue;
- argNum =
-
- regArgNum = regArgDeferred = regArgTemp =
-
- regArgConst = regArgLclVar =
-
- argDWordNum = argLngNum = argFltNum = argDblNum = 0;
+ argNum = regArgNum = regArgDeferred = regArgTemp = regArgConst = regArgLclVar = argDWordNum =
+ argLngNum = argFltNum = argDblNum = 0;
argTotalCalls++;
@@ -8919,16 +8943,15 @@ GenTree* dFindTree(GenTree* tree, unsigned id)
GenTree* dFindTree(unsigned id)
{
- Compiler* comp = JitTls::GetCompiler();
- BasicBlock* block;
- GenTree* tree;
+ Compiler* comp = JitTls::GetCompiler();
+ GenTree* tree;
dbTreeBlock = nullptr;
dbTree = nullptr;
- for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : comp->Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
tree = dFindTree(stmt->GetRootNode(), id);
if (tree != nullptr)
@@ -8944,15 +8967,14 @@ GenTree* dFindTree(unsigned id)
Statement* dFindStmt(unsigned id)
{
- Compiler* comp = JitTls::GetCompiler();
- BasicBlock* block;
+ Compiler* comp = JitTls::GetCompiler();
dbStmt = nullptr;
unsigned stmtId = 0;
- for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : comp->Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
stmtId++;
if (stmtId == id)
@@ -9721,6 +9743,21 @@ const char* Compiler::devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DE
return "devirtualization crossed version bubble";
case CORINFO_DEVIRTUALIZATION_MULTIPLE_IMPL:
return "object class has multiple implementations of interface";
+ case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_CLASS_DECL:
+ return "decl method is defined on class and decl method not in version bubble, and decl method not in "
+ "type closest to version bubble";
+ case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_INTERFACE_DECL:
+ return "decl method is defined on interface and not in version bubble, and implementation type not "
+ "entirely defined in bubble";
+ case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL:
+ return "object class not defined within version bubble";
+ case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL_NOT_REFERENCEABLE:
+ return "object class cannot be referenced from R2R code due to missing tokens";
+ case CORINFO_DEVIRTUALIZATION_FAILED_DUPLICATE_INTERFACE:
+ return "crossgen2 virtual method algorithm and runtime algorithm differ in the presence of duplicate "
+ "interface implementations";
+ case CORINFO_DEVIRTUALIZATION_FAILED_DECL_NOT_REPRESENTABLE:
+ return "Decl method cannot be represented in R2R image";
default:
return "undefined";
}
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index 58340e00922036..117d7d28a167a4 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -1131,7 +1131,8 @@ class LinearScanInterface
virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0;
virtual bool willEnregisterLocalVars() const = 0;
#if TRACK_LSRA_STATS
- virtual void dumpLsraStatsCsv(FILE* file) = 0;
+ virtual void dumpLsraStatsCsv(FILE* file) = 0;
+ virtual void dumpLsraStatsSummary(FILE* file) = 0;
#endif // TRACK_LSRA_STATS
};
@@ -3568,7 +3569,7 @@ class Compiler
unsigned lvaFrameSize(FrameLayoutState curState);
// Returns the caller-SP-relative offset for the SP/FP relative offset determined by FP based.
- int lvaToCallerSPRelativeOffset(int offs, bool isFpBased) const;
+ int lvaToCallerSPRelativeOffset(int offs, bool isFpBased, bool forRootFrame = true) const;
// Returns the caller-SP-relative offset for the local variable "varNum."
int lvaGetCallerSPRelativeOffset(unsigned varNum);
@@ -3947,6 +3948,7 @@ class Compiler
}
void impDevirtualizeCall(GenTreeCall* call,
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_METHOD_HANDLE* method,
unsigned* methodFlags,
CORINFO_CONTEXT_HANDLE* contextHandle,
@@ -3988,7 +3990,10 @@ class Compiler
GenTree* impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
- int impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp);
+ int impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ const BYTE* codeAddr,
+ const BYTE* codeEndp,
+ bool makeInlineObservation = false);
void impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken);
void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
@@ -4978,6 +4983,28 @@ class Compiler
void fgInterBlockLocalVarLiveness();
+ // Blocks: convenience methods for enabling range-based `for` iteration over the function's blocks, e.g.:
+ // 1. for (BasicBlock* const block : compiler->Blocks()) ...
+ // 2. for (BasicBlock* const block : compiler->Blocks(startBlock)) ...
+ // 3. for (BasicBlock* const block : compiler->Blocks(startBlock, endBlock)) ...
+ // In case (1), the block list can be empty. In case (2), `startBlock` can be nullptr. In case (3),
+ // both `startBlock` and `endBlock` must be non-null.
+ //
+ BasicBlockSimpleList Blocks() const
+ {
+ return BasicBlockSimpleList(fgFirstBB);
+ }
+
+ BasicBlockSimpleList Blocks(BasicBlock* startBlock) const
+ {
+ return BasicBlockSimpleList(startBlock);
+ }
+
+ BasicBlockRangeList Blocks(BasicBlock* startBlock, BasicBlock* endBlock) const
+ {
+ return BasicBlockRangeList(startBlock, endBlock);
+ }
+
// The presence of a partial definition presents some difficulties for SSA: this is both a use of some SSA name
// of "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose
// whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us
@@ -6479,6 +6506,18 @@ class Compiler
lpEntry->bbNum <= lpBottom->bbNum &&
(lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum);
}
+
+ // LoopBlocks: convenience method for enabling range-based `for` iteration over all the
+ // blocks in a loop, e.g.:
+ // for (BasicBlock* const block : loop->LoopBlocks()) ...
+ // Currently, the loop blocks are expected to be in linear, lexical, `bbNext` order
+ // from `lpFirst` through `lpBottom`, inclusive. All blocks in this range are considered
+ // to be part of the loop.
+ //
+ BasicBlockRangeList LoopBlocks() const
+ {
+ return BasicBlockRangeList(lpFirst, lpBottom);
+ }
};
protected:
@@ -11256,6 +11295,59 @@ class DomTreeVisitor
}
};
+// EHClauses: adapter class for forward iteration of the exception handling table using range-based `for`, e.g.:
+// for (EHblkDsc* const ehDsc : EHClauses(compiler))
+//
+class EHClauses
+{
+ EHblkDsc* m_begin;
+ EHblkDsc* m_end;
+
+ // Forward iterator for the exception handling table entries. Iteration is in table order.
+ //
+ class iterator
+ {
+ EHblkDsc* m_ehDsc;
+
+ public:
+ iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc)
+ {
+ }
+
+ EHblkDsc* operator*() const
+ {
+ return m_ehDsc;
+ }
+
+ iterator& operator++()
+ {
+ ++m_ehDsc;
+ return *this;
+ }
+
+ bool operator!=(const iterator& i) const
+ {
+ return m_ehDsc != i.m_ehDsc;
+ }
+ };
+
+public:
+ EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount)
+ {
+ assert((m_begin != nullptr) || (m_begin == m_end));
+ }
+
+ iterator begin() const
+ {
+ return iterator(m_begin);
+ }
+
+ iterator end() const
+ {
+ return iterator(m_end);
+ }
+};
+
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -11471,16 +11563,6 @@ extern const BYTE genActualTypes[];
/*****************************************************************************/
-// foreach_block: An iterator over all blocks in the function.
-// __compiler: the Compiler* object
-// __block : a BasicBlock*, already declared, that gets updated each iteration.
-
-#define foreach_block(__compiler, __block) \
- for ((__block) = (__compiler)->fgFirstBB; (__block); (__block) = (__block)->bbNext)
-
-/*****************************************************************************/
-/*****************************************************************************/
-
#ifdef DEBUG
void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars);
#endif // DEBUG
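The new Blocks() methods return BasicBlockSimpleList/BasicBlockRangeList adapters that this diff does not show. A minimal sketch of the shape such an adapter takes (the real definitions live elsewhere in the JIT sources; member names here are assumptions), following the same pattern as the EHClauses class above:

    class BasicBlockSimpleList
    {
        BasicBlock* m_begin;

        class iterator
        {
            BasicBlock* m_block;

        public:
            iterator(BasicBlock* block) : m_block(block)
            {
            }

            BasicBlock* operator*() const
            {
                return m_block;
            }

            iterator& operator++()
            {
                m_block = m_block->bbNext; // simple bbNext-order traversal
                return *this;
            }

            bool operator!=(const iterator& i) const
            {
                return m_block != i.m_block;
            }
        };

    public:
        BasicBlockSimpleList(BasicBlock* begin) : m_begin(begin)
        {
        }

        iterator begin() const
        {
            return iterator(m_begin);
        }

        iterator end() const
        {
            return iterator(nullptr); // the block list is nullptr-terminated
        }
    };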
diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp
index 091758bcdeeca3..e898605c7b6a5c 100644
--- a/src/coreclr/jit/compiler.hpp
+++ b/src/coreclr/jit/compiler.hpp
@@ -1832,7 +1832,7 @@ inline VARSET_VALRET_TP Compiler::lvaStmtLclMask(Statement* stmt)
assert(fgStmtListThreaded);
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (tree->gtOper != GT_LCL_VAR)
{
diff --git a/src/coreclr/jit/copyprop.cpp b/src/coreclr/jit/copyprop.cpp
index ca01e90250f654..c0d72123e1a11d 100644
--- a/src/coreclr/jit/copyprop.cpp
+++ b/src/coreclr/jit/copyprop.cpp
@@ -28,9 +28,9 @@
*/
void Compiler::optBlockCopyPropPopStacks(BasicBlock* block, LclNumToGenTreePtrStack* curSsaName)
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (!tree->IsLocal())
{
@@ -97,16 +97,6 @@ int Compiler::optCopyProp_LclVarScore(LclVarDsc* lclVarDsc, LclVarDsc* copyVarDs
score -= 4;
}
- if (lclVarDsc->lvDoNotEnregister)
- {
- score += 4;
- }
-
- if (copyVarDsc->lvDoNotEnregister)
- {
- score -= 4;
- }
-
#ifdef TARGET_X86
// For doubles we also prefer to change parameters into non-parameter local variables
if (lclVarDsc->lvType == TYP_DOUBLE)
@@ -363,12 +353,12 @@ void Compiler::optBlockCopyProp(BasicBlock* block, LclNumToGenTreePtrStack* curS
// There are no definitions at the start of the block. So clear it.
compCurLifeTree = nullptr;
VarSetOps::Assign(this, compCurLife, block->bbLiveIn);
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
VarSetOps::ClearD(this, optCopyPropKillSet);
// Walk the tree to find if any local variable can be replaced with current live definitions.
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
treeLifeUpdater.UpdateLife(tree);
@@ -396,7 +386,7 @@ void Compiler::optBlockCopyProp(BasicBlock* block, LclNumToGenTreePtrStack* curS
}
// This logic must be in sync with SSA renaming process.
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
const unsigned lclNum = optIsSsaLocal(tree);
if (lclNum == BAD_VAR_NUM)
diff --git a/src/coreclr/jit/cpp.hint b/src/coreclr/jit/cpp.hint
index b2fe33cc9815a0..297be6b8d49324 100644
--- a/src/coreclr/jit/cpp.hint
+++ b/src/coreclr/jit/cpp.hint
@@ -7,21 +7,9 @@
// See the article on hints in MSDN for more information on their necessity and use:
// http://msdn.microsoft.com/en-us/library/dd997977.aspx
-#define foreach_treenode_execution_order(__node, __stmt) for (;;)
-
-#define foreach_block(__compiler, __block) for (;;)
-
#define FOREACH_REGISTER_FILE(file) for (;;)
// From jit.h
-#define DECLARE_TYPED_ENUM(tag,baseType) enum tag : baseType
-
-#define END_DECLARE_TYPED_ENUM(tag,baseType) ;
-
#define INDEBUG(x) x
-#define INDEBUG_COMMA(x) x,
#define DEBUGARG(x) , x
-
-#define PROTO_ARG(x) x ,
-#define PROTO_ARGL(x) , x
\ No newline at end of file
diff --git a/src/coreclr/jit/earlyprop.cpp b/src/coreclr/jit/earlyprop.cpp
index fbdc6881ea0155..f30b33eb1f3fdb 100644
--- a/src/coreclr/jit/earlyprop.cpp
+++ b/src/coreclr/jit/earlyprop.cpp
@@ -242,7 +242,7 @@ void Compiler::optEarlyProp()
assert(fgSsaPassesCompleted == 1);
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
#ifndef DEBUG
if (!optDoEarlyPropForBlock(block))
diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp
index da7febee92b0d8..6578232a92996e 100644
--- a/src/coreclr/jit/emitxarch.cpp
+++ b/src/coreclr/jit/emitxarch.cpp
@@ -149,6 +149,98 @@ bool emitter::IsDstSrcSrcAVXInstruction(instruction ins)
return ((CodeGenInterface::instInfo[ins] & INS_Flags_IsDstSrcSrcAVXInstruction) != 0) && IsAVXInstruction(ins);
}
+//------------------------------------------------------------------------
+// DoesWriteZeroFlag: check if the instruction writes the
+// ZF flag.
+//
+// Arguments:
+// ins - instruction to test
+//
+// Return Value:
+// true if instruction writes the ZF flag, false otherwise.
+//
+bool emitter::DoesWriteZeroFlag(instruction ins)
+{
+ return (CodeGenInterface::instInfo[ins] & Writes_ZF) != 0;
+}
+
+//------------------------------------------------------------------------
+// DoesResetOverflowAndCarryFlags: check if the instruction resets the
+// OF and CF flags to 0.
+//
+// Arguments:
+// ins - instruction to test
+//
+// Return Value:
+// true if the instruction resets the OF and CF flags, false otherwise.
+//
+bool emitter::DoesResetOverflowAndCarryFlags(instruction ins)
+{
+ return (CodeGenInterface::instInfo[ins] & (Resets_OF | Resets_CF)) == (Resets_OF | Resets_CF);
+}
+
+//------------------------------------------------------------------------
+// IsFlagsAlwaysModified: check if the instruction is guaranteed to modify any flags.
+//
+// Arguments:
+// id - instruction to test
+//
+// Return Value:
+// false, if the instruction may leave all flags unmodified.
+// true, if the instruction is guaranteed to modify some flag.
+//
+bool emitter::IsFlagsAlwaysModified(instrDesc* id)
+{
+ instruction ins = id->idIns();
+ insFormat fmt = id->idInsFmt();
+
+ if (fmt == IF_RRW_SHF)
+ {
+ if (id->idIsLargeCns())
+ {
+ return true;
+ }
+ else if (id->idSmallCns() == 0)
+ {
+ switch (ins)
+ {
+ // If the shift amount for the instructions below is 0, the flags are unaffected.
+ case INS_rcl_N:
+ case INS_rcr_N:
+ case INS_rol_N:
+ case INS_ror_N:
+ case INS_shl_N:
+ case INS_shr_N:
+ case INS_sar_N:
+ return false;
+ default:
+ return true;
+ }
+ }
+ }
+ else if (fmt == IF_RRW)
+ {
+ switch (ins)
+ {
+ // If the shift amount for the instructions below is 0, the flags are unaffected.
+ // So, to be conservative, do not optimize when the instruction takes the
+ // shift-amount operand from a register.
+ case INS_rcl:
+ case INS_rcr:
+ case INS_rol:
+ case INS_ror:
+ case INS_shl:
+ case INS_shr:
+ case INS_sar:
+ return false;
+ default:
+ return true;
+ }
+ }
+
+ return true;
+}
+
//------------------------------------------------------------------------
// AreUpper32BitsZero: check if some previously emitted
// instruction set the upper 32 bits of reg to zero.
@@ -225,9 +317,9 @@ bool emitter::AreUpper32BitsZero(regNumber reg)
// the same values as if there were a compare to 0
//
// Arguments:
-// reg - register of interest
-// opSize - size of register
-// needsOCFlags - additionally check the overflow and carry flags
+// reg - register of interest
+// opSize - size of register
+// treeOps - type of tree node operation
//
// Return Value:
// true if the previous instruction set the flags for reg
@@ -235,17 +327,19 @@ bool emitter::AreUpper32BitsZero(regNumber reg)
//
// Notes:
// Currently only looks back one instruction.
-bool emitter::AreFlagsSetToZeroCmp(regNumber reg, emitAttr opSize, bool needsOCFlags)
+bool emitter::AreFlagsSetToZeroCmp(regNumber reg, emitAttr opSize, genTreeOps treeOps)
{
assert(reg != REG_NA);
+
// Don't look back across IG boundaries (possible control flow)
if (emitCurIGinsCnt == 0 && ((emitCurIG->igFlags & IGF_EXTEND) == 0))
{
return false;
}
- instrDesc* id = emitLastIns;
- insFormat fmt = id->idInsFmt();
+ instrDesc* id = emitLastIns;
+ instruction lastIns = id->idIns();
+ insFormat fmt = id->idInsFmt();
// make sure op1 is a reg
switch (fmt)
@@ -264,7 +358,6 @@ bool emitter::AreFlagsSetToZeroCmp(regNumber reg, emitAttr opSize, bool needsOCF
case IF_RRD:
case IF_RRW:
break;
-
default:
return false;
}
@@ -274,34 +367,20 @@ bool emitter::AreFlagsSetToZeroCmp(regNumber reg, emitAttr opSize, bool needsOCF
return false;
}
- switch (id->idIns())
+ // Certain instructions like and, or, and xor modify exactly the same flags
+ // as the "test" instruction:
+ // they reset OF and CF to 0 and modify SF, ZF, and PF.
+ if (DoesResetOverflowAndCarryFlags(lastIns))
{
- case INS_adc:
- case INS_add:
- case INS_dec:
- case INS_dec_l:
- case INS_inc:
- case INS_inc_l:
- case INS_neg:
- case INS_shr_1:
- case INS_shl_1:
- case INS_sar_1:
- case INS_sbb:
- case INS_sub:
- case INS_xadd:
- if (needsOCFlags)
- {
- return false;
- }
- FALLTHROUGH;
- // these always set OC to 0
- case INS_and:
- case INS_or:
- case INS_xor:
- return id->idOpSize() == opSize;
+ return id->idOpSize() == opSize;
+ }
- default:
- break;
+ if ((treeOps == GT_EQ) || (treeOps == GT_NE))
+ {
+ if (DoesWriteZeroFlag(lastIns) && IsFlagsAlwaysModified(id))
+ {
+ return id->idOpSize() == opSize;
+ }
}
return false;
@@ -4164,6 +4243,20 @@ void emitter::emitIns_C(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fld
// Arguments:
// ins -- The instruction being checked
//
+// Return Value:
+// true if the instruction is a qualifying move instruction; otherwise, false
+//
+// Remarks:
+// This method covers most kinds of two-operand move instructions that copy a
+// value between two registers. It does not cover all move-like instructions
+// and so doesn't currently cover things like movsb/movsw/movsd/movsq or cmovcc
+// and doesn't currently cover cases where a value is read/written from memory.
+//
+// It doesn't cover all instructions because the initial change was deliberately
+// scoped to the cases that matter for move elision, so the logic could be
+// centrally managed and optimized. It may be beneficial to support the other
+// move instructions in the future, but that may require more extensive changes
+// to ensure the relevant codegen/emit paths flow and check things correctly.
bool emitter::IsMovInstruction(instruction ins)
{
switch (ins)
@@ -4174,7 +4267,6 @@ bool emitter::IsMovInstruction(instruction ins)
case INS_movd:
case INS_movdqa:
case INS_movdqu:
- case INS_movsd:
case INS_movsdsse2:
case INS_movss:
case INS_movsx:
@@ -4200,6 +4292,183 @@ bool emitter::IsMovInstruction(instruction ins)
}
}
+//----------------------------------------------------------------------------------------
+// IsRedundantMov:
+// Check if the current `mov` instruction is redundant and can be omitted.
+// A `mov` is redundant in following 3 cases:
+//
+// 1. Move to same register on TARGET_AMD64
+// (except a 4-byte move like "mov eax, eax", which zeros out the upper 32 bits of rax)
+//
+// mov rax, rax
+//
+// 2. Move that is identical to last instruction emitted.
+//
+// mov rax, rbx # <-- last instruction
+// mov rax, rbx # <-- current instruction can be omitted.
+//
+// 3. Opposite Move as that of last instruction emitted.
+//
+// mov rax, rbx # <-- last instruction
+// mov rbx, rax # <-- current instruction can be omitted.
+//
+// Arguments:
+// ins - The current instruction
+// fmt - The current format
+// size - Operand size of current instruction
+// dst - The current destination
+// src - The current source
+// canIgnoreSideEffects - true if the move can be skipped because it carries no special semantics
+//
+// Return Value:
+// true if the move instruction is redundant; otherwise, false.
+
+bool emitter::IsRedundantMov(
+ instruction ins, insFormat fmt, emitAttr size, regNumber dst, regNumber src, bool canIgnoreSideEffects)
+{
+ assert(IsMovInstruction(ins));
+
+ if (canIgnoreSideEffects && (dst == src))
+ {
+ // These elisions used to be explicit even when optimizations were disabled
+
+ // Some instructions have side effects and shouldn't be skipped;
+ // however, existing codepaths were skipping these instructions in
+ // certain scenarios, so we skip them as well for back-compat
+ // when canIgnoreSideEffects is true (see below for which
+ // instructions have side effects).
+ //
+ // Long term, these paths should be audited and should likely be
+ // replaced with copies rather than extensions.
+ return true;
+ }
+
+ if (!emitComp->opts.OptimizationEnabled())
+ {
+ // The remaining move elisions should only happen if optimizations are enabled
+ return false;
+ }
+
+ // TODO-XArch-CQ: There are places where the fact that an instruction zero-extends
+ // is not an important detail, such as when "regular" floating-point code is generated
+ //
+ // This differs from cases like HWIntrinsics that deal with the entire vector and so
+ // they need to be "aware" that a given move impacts the upper-bits.
+ //
+ // Ideally we can detect this difference, likely via canIgnoreSideEffects, and allow
+ // the below optimizations for those scenarios as well.
+
+ // Track whether the instruction has a zero/sign-extension or clearing of the upper-bits as a side-effect
+ bool hasSideEffect = false;
+
+ switch (ins)
+ {
+ case INS_mov:
+ {
+ // non EA_PTRSIZE moves may zero-extend the source
+ hasSideEffect = (size != EA_PTRSIZE);
+ break;
+ }
+
+ case INS_movapd:
+ case INS_movaps:
+ case INS_movdqa:
+ case INS_movdqu:
+ case INS_movupd:
+ case INS_movups:
+ {
+ // non EA_32BYTE moves clear the upper bits under VEX encoding
+ hasSideEffect = UseVEXEncoding() && (size != EA_32BYTE);
+ break;
+ }
+
+ case INS_movd:
+ {
+ // Clears the upper bits
+ hasSideEffect = true;
+ break;
+ }
+
+ case INS_movsdsse2:
+ case INS_movss:
+ {
+ // Clears the upper bits under VEX encoding
+ hasSideEffect = UseVEXEncoding();
+ break;
+ }
+
+ case INS_movsx:
+ case INS_movzx:
+ {
+ // Sign/Zero-extends the source
+ hasSideEffect = true;
+ break;
+ }
+
+#if defined(TARGET_AMD64)
+ case INS_movq:
+ {
+ // Clears the upper bits
+ hasSideEffect = true;
+ break;
+ }
+
+ case INS_movsxd:
+ {
+ // Sign-extends the source
+ hasSideEffect = true;
+ break;
+ }
+#endif // TARGET_AMD64
+
+ default:
+ {
+ unreached();
+ }
+ }
+
+ // Check if we are already in the correct register and don't have a side effect
+ if ((dst == src) && !hasSideEffect)
+ {
+ JITDUMP("\n -- suppressing mov because src and dst is same register and the mov has no side-effects.\n");
+ return true;
+ }
+
+ bool isFirstInstrInBlock = (emitCurIGinsCnt == 0) && ((emitCurIG->igFlags & IGF_EXTEND) == 0);
+
+ // TODO-XArch-CQ: Certain instructions, such as movaps vs movups, are equivalent in
+ // functionality even if their actual identifier differs and we should optimize these
+
+ if (isFirstInstrInBlock || // Don't optimize if instruction is the first instruction in IG.
+ (emitLastIns == nullptr) || // or if a last instruction doesn't exist
+ (emitLastIns->idIns() != ins) || // or if the instruction is different from the last instruction
+ (emitLastIns->idOpSize() != size) || // or if the operand size is different from the last instruction
+ (emitLastIns->idInsFmt() != fmt)) // or if the format is different from the last instruction
+ {
+ return false;
+ }
+
+ regNumber lastDst = emitLastIns->idReg1();
+ regNumber lastSrc = emitLastIns->idReg2();
+
+ // Check if we did same move in last instruction, side effects don't matter since they already happened
+ if ((lastDst == dst) && (lastSrc == src))
+ {
+ JITDUMP("\n -- suppressing mov because last instruction already moved from src to dst register.\n");
+ return true;
+ }
+
+ // Check if we did a switched mov in the last instruction and don't have a side effect
+ if ((lastDst == src) && (lastSrc == dst) && !hasSideEffect)
+ {
+ JITDUMP("\n -- suppressing mov because last instruction already moved from dst to src register and the mov has "
+ "no side-effects.\n");
+ return true;
+ }
+
+ return false;
+}
+
//------------------------------------------------------------------------
// emitIns_Mov: Emits a move instruction
//
@@ -4230,7 +4499,6 @@ void emitter::emitIns_Mov(instruction ins, emitAttr attr, regNumber dstReg, regN
case INS_movaps:
case INS_movdqa:
case INS_movdqu:
- case INS_movsd:
case INS_movsdsse2:
case INS_movss:
case INS_movupd:
@@ -4272,67 +4540,14 @@ void emitter::emitIns_Mov(instruction ins, emitAttr attr, regNumber dstReg, regN
assert(size <= EA_32BYTE);
noway_assert(emitVerifyEncodable(ins, size, dstReg, srcReg));
- if (canSkip && (dstReg == srcReg))
- {
- switch (ins)
- {
- case INS_mov:
- {
- // These instructions have no side effect and can be skipped
- return;
- }
-
- case INS_movapd:
- case INS_movaps:
- case INS_movdqa:
- case INS_movdqu:
- case INS_movupd:
- case INS_movups:
- {
- // These instructions have no side effect and can be skipped
- return;
- }
-
- case INS_movd:
- case INS_movsd:
- case INS_movsdsse2:
- case INS_movss:
- case INS_movsx:
- case INS_movzx:
- {
- // These instructions have a side effect and shouldn't be skipped
- // however existing codepaths were skipping these instructions in
- // certain scenarios and so we skip them as well for back-compat.
- //
- // Long term, these paths should be audited and should likely be
- // replaced with copies rather than extensions.
- return;
- }
-
-#if defined(TARGET_AMD64)
- case INS_movq:
- case INS_movsxd:
- {
- // These instructions have a side effect and shouldn't be skipped
- // however existing codepaths were skipping these instructions in
- // certain scenarios and so we skip them as well for back-compat.
- //
- // Long term, these paths should be audited and should likely be
- // replaced with copies rather than extensions.
- return;
- }
-#endif // TARGET_AMD64
-
- default:
- {
- unreached();
- }
- }
- }
-
UNATIVE_OFFSET sz = emitInsSizeRR(ins, dstReg, srcReg, attr);
insFormat fmt = emitInsModeFormat(ins, IF_RRD_RRD);
+ if (IsRedundantMov(ins, fmt, attr, dstReg, srcReg, canSkip))
+ {
+ return;
+ }
+
instrDesc* id = emitNewInstrSmall(attr);
id->idIns(ins);
id->idInsFmt(fmt);
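A small example (illustrative only, not from this change) of the source pattern the AreFlagsSetToZeroCmp rework broadens. For EQ/NE compares against zero, any preceding instruction that writes ZF and is guaranteed to modify the flags now lets the JIT elide the `test`; shift-by-zero cases are excluded because they leave the flags untouched.

    // The `and` emitted for the mask below sets ZF (and resets OF/CF), so the
    // zero compare for the GT_NE can reuse the flags instead of emitting
    // `test masked, masked`.
    bool anyCommonBits(int x, int y)
    {
        int masked = x & y;
        return masked != 0;
    }

A register-count shift such as `shl eax, cl` cannot participate: with cl == 0 the flags are unchanged, which is exactly the case IsFlagsAlwaysModified() guards against.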
diff --git a/src/coreclr/jit/emitxarch.h b/src/coreclr/jit/emitxarch.h
index d395a29ec9b138..8260445686be09 100644
--- a/src/coreclr/jit/emitxarch.h
+++ b/src/coreclr/jit/emitxarch.h
@@ -95,10 +95,12 @@ code_t AddRexPrefix(instruction ins, code_t code);
bool EncodedBySSE38orSSE3A(instruction ins);
bool Is4ByteSSEInstruction(instruction ins);
static bool IsMovInstruction(instruction ins);
+bool IsRedundantMov(
+ instruction ins, insFormat fmt, emitAttr size, regNumber dst, regNumber src, bool canIgnoreSideEffects);
bool AreUpper32BitsZero(regNumber reg);
-bool AreFlagsSetToZeroCmp(regNumber reg, emitAttr opSize, bool needsOCFlags);
+bool AreFlagsSetToZeroCmp(regNumber reg, emitAttr opSize, genTreeOps treeOps);
bool hasRexPrefix(code_t code)
{
@@ -171,6 +173,10 @@ void SetContains256bitAVX(bool value)
bool IsDstDstSrcAVXInstruction(instruction ins);
bool IsDstSrcSrcAVXInstruction(instruction ins);
+bool DoesWriteZeroFlag(instruction ins);
+bool DoesResetOverflowAndCarryFlags(instruction ins);
+bool IsFlagsAlwaysModified(instrDesc* id);
+
bool IsThreeOperandAVXInstruction(instruction ins)
{
return (IsDstDstSrcAVXInstruction(ins) || IsDstSrcSrcAVXInstruction(ins));
diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp
index ca15afbff50925..0807b69d7eee13 100644
--- a/src/coreclr/jit/fgbasic.cpp
+++ b/src/coreclr/jit/fgbasic.cpp
@@ -382,15 +382,9 @@ void Compiler::fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSw
noway_assert(newSwitchBlock != nullptr);
noway_assert(oldSwitchBlock->bbJumpKind == BBJ_SWITCH);
- unsigned jumpCnt = oldSwitchBlock->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab = oldSwitchBlock->bbJumpSwt->bbsDstTab;
-
- unsigned i;
-
// Walk the switch's jump table, updating the predecessor for each branch.
- for (i = 0; i < jumpCnt; i++)
+ for (BasicBlock* const bJump : oldSwitchBlock->SwitchTargets())
{
- BasicBlock* bJump = jumpTab[i];
noway_assert(bJump != nullptr);
// Note that if there are duplicate branch targets in the switch jump table,
@@ -623,7 +617,7 @@ void Compiler::fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock*
bool modified = false;
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (flowList* const pred : block->PredEdges())
{
if (oldPred == pred->getBlock())
{
@@ -659,7 +653,6 @@ BasicBlock* Compiler::fgFirstBlockOfHandler(BasicBlock* block)
void Compiler::fgInitBBLookup()
{
BasicBlock** dscBBptr;
- BasicBlock* tmpBBdesc;
/* Allocate the basic block table */
@@ -667,9 +660,9 @@ void Compiler::fgInitBBLookup()
/* Walk all the basic blocks, filling in the table */
- for (tmpBBdesc = fgFirstBB; tmpBBdesc; tmpBBdesc = tmpBBdesc->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- *dscBBptr++ = tmpBBdesc;
+ *dscBBptr++ = block;
}
noway_assert(dscBBptr == fgBBs + fgBBcount);
@@ -771,7 +764,7 @@ class FgStack
}
void PushArgument(unsigned arg)
{
- Push(SLOT_ARGUMENT + arg);
Push(static_cast<FgSlot>(SLOT_ARGUMENT + arg));
}
unsigned GetSlot0() const
{
@@ -814,7 +807,7 @@ class FgStack
}
private:
- enum
+ enum FgSlot
{
SLOT_INVALID = UINT_MAX,
SLOT_UNKNOWN = 0,
@@ -823,7 +816,7 @@ class FgStack
SLOT_ARGUMENT = 3
};
- void Push(int type)
+ void Push(FgSlot type)
{
switch (depth)
{
@@ -840,8 +833,8 @@ class FgStack
}
}
- unsigned slot0;
- unsigned slot1;
+ FgSlot slot0;
+ FgSlot slot1;
unsigned depth;
};
@@ -878,6 +871,12 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, Fixed
compInlineResult->Note(InlineObservation::CALLSITE_IN_TRY_REGION);
}
+ // Determine if the call site is in a no-return block
+ if (isInlining && (impInlineInfo->iciBlock->bbJumpKind == BBJ_THROW))
+ {
+ compInlineResult->Note(InlineObservation::CALLSITE_IN_NORETURN_REGION);
+ }
+
// Determine if the call site is in a loop.
if (isInlining && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0))
{
@@ -948,6 +947,32 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, Fixed
BADCODE3("Illegal opcode", ": %02X", (int)opcode);
}
+ case CEE_THROW:
+ {
+ if (makeInlineObservations)
+ {
+ compInlineResult->Note(InlineObservation::CALLEE_THROW_BLOCK);
+ }
+ break;
+ }
+
+ case CEE_BOX:
+ {
+ if (makeInlineObservations)
+ {
+ int toSkip = impBoxPatternMatch(nullptr, codeAddr + sz, codeEndp, true);
+ if (toSkip > 0)
+ {
+ // toSkip > 0 means we most likely will hit a pattern (e.g. box+isinst+brtrue) that
+ // will be folded into a const
+
+ // TODO: uncomment later
+ // codeAddr += toSkip;
+ }
+ }
+ break;
+ }
+
case CEE_CALL:
case CEE_CALLVIRT:
{
@@ -1781,7 +1806,7 @@ void Compiler::fgMarkBackwardJump(BasicBlock* targetBlock, BasicBlock* sourceBlo
{
noway_assert(targetBlock->bbNum <= sourceBlock->bbNum);
- for (BasicBlock* block = targetBlock; block != sourceBlock->bbNext; block = block->bbNext)
+ for (BasicBlock* const block : Blocks(targetBlock, sourceBlock))
{
if (((block->bbFlags & BBF_BACKWARD_JUMP) == 0) && (block->bbJumpKind != BBJ_RETURN))
{
@@ -1810,7 +1835,7 @@ void Compiler::fgLinkBasicBlocks()
/* Walk all the basic blocks, filling in the target addresses */
- for (BasicBlock* curBBdesc = fgFirstBB; curBBdesc; curBBdesc = curBBdesc->bbNext)
+ for (BasicBlock* const curBBdesc : Blocks())
{
switch (curBBdesc->bbJumpKind)
{
@@ -2938,8 +2963,7 @@ void Compiler::fgFindBasicBlocks()
#if !defined(FEATURE_EH_FUNCLETS)
- EHblkDsc* HBtabEnd;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if (ehMaxHndNestingCount <= HBtab->ebdHandlerNestingLevel)
ehMaxHndNestingCount = HBtab->ebdHandlerNestingLevel + 1;
@@ -2990,7 +3014,7 @@ void Compiler::fgCheckBasicBlockControlFlow()
EHblkDsc* HBtab;
- for (BasicBlock* blk = fgFirstBB; blk; blk = blk->bbNext)
+ for (BasicBlock* const blk : Blocks())
{
if (blk->bbFlags & BBF_INTERNAL)
{
@@ -3072,18 +3096,10 @@ void Compiler::fgCheckBasicBlockControlFlow()
break;
case BBJ_SWITCH: // block ends with a switch statement
-
- BBswtDesc* swtDesc;
- swtDesc = blk->bbJumpSwt;
-
- assert(swtDesc);
-
- unsigned i;
- for (i = 0; i < swtDesc->bbsCount; i++)
+ for (BasicBlock* const bTarget : blk->SwitchTargets())
{
- fgControlFlowPermitted(blk, swtDesc->bbsDstTab[i]);
+ fgControlFlowPermitted(blk, bTarget);
}
-
break;
case BBJ_EHCATCHRET: // block ends with a leave out of a catch (only #if defined(FEATURE_EH_FUNCLETS))
@@ -3473,7 +3489,7 @@ IL_OFFSET Compiler::fgFindBlockILOffset(BasicBlock* block)
// could have a similar function for LIR that searches for GT_IL_OFFSET nodes.
assert(!block->IsLIR());
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
if (stmt->GetILOffsetX() != BAD_IL_OFFSET)
{
@@ -3504,10 +3520,8 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr)
// Without these arcs, a block 'b' may not be a member of succs(preds(b))
if (curr->bbJumpKind != BBJ_SWITCH)
{
- unsigned numSuccs = curr->NumSucc(this);
- for (unsigned i = 0; i < numSuccs; i++)
+ for (BasicBlock* const succ : curr->Succs(this))
{
- BasicBlock* succ = curr->GetSucc(i, this);
if (succ != newBlock)
{
JITDUMP(FMT_BB " previous predecessor was " FMT_BB ", now is " FMT_BB "\n", succ->bbNum, curr->bbNum,
@@ -4131,7 +4145,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
fgRemoveRefPred(succBlock, block);
- for (flowList* pred = block->bbPreds; pred; pred = pred->flNext)
+ for (flowList* const pred : block->PredEdges())
{
BasicBlock* predBlock = pred->getBlock();
@@ -4481,7 +4495,7 @@ bool Compiler::fgRenumberBlocks()
//
if (renumbered && fgComputePredsDone)
{
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->ensurePredListOrder(this);
}
@@ -4982,7 +4996,7 @@ bool Compiler::fgMightHaveLoop()
BitVecTraits blockVecTraits(fgBBNumMax + 1, this);
BitVec blocksSeen(BitVecOps::MakeEmpty(&blockVecTraits));
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
BitVecOps::AddElemD(&blockVecTraits, blocksSeen, block->bbNum);
diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp
index f9f9d3c86b6d00..d284f075bd30d9 100644
--- a/src/coreclr/jit/fgdiagnostic.cpp
+++ b/src/coreclr/jit/fgdiagnostic.cpp
@@ -14,19 +14,15 @@
#ifdef DEBUG
void Compiler::fgPrintEdgeWeights()
{
- BasicBlock* bSrc;
- BasicBlock* bDst;
- flowList* edge;
-
// Print out all of the edge weights
- for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext)
+ for (BasicBlock* const bDst : Blocks())
{
if (bDst->bbPreds != nullptr)
{
printf(" Edge weights into " FMT_BB " :", bDst->bbNum);
- for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
+ for (flowList* const edge : bDst->PredEdges())
{
- bSrc = edge->getBlock();
+ BasicBlock* bSrc = edge->getBlock();
// This is the control flow edge (bSrc -> bDst)
printf(FMT_BB " ", bSrc->bbNum);
@@ -845,7 +841,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
unsigned blockOrdinal = 1;
unsigned* blkMap = new (this, CMK_DebugOnly) unsigned[blkMapSize];
memset(blkMap, 0, sizeof(unsigned) * blkMapSize);
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
assert(block->bbNum < blkMapSize);
blkMap[block->bbNum] = blockOrdinal++;
@@ -989,8 +985,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
targetWeightDivisor = (double)bTarget->bbWeight;
}
- flowList* edge;
- for (edge = bTarget->bbPreds; edge != nullptr; edge = edge->flNext, edgeNum++)
+ for (flowList* const edge : bTarget->PredEdges())
{
BasicBlock* bSource = edge->getBlock();
double sourceWeightDivisor;
@@ -1084,6 +1079,8 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
fprintf(fgxFile, ">");
fprintf(fgxFile, "\n ");
}
+
+ ++edgeNum;
}
}
}
@@ -1093,7 +1090,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
//
if (createDotFile)
{
- for (BasicBlock* bSource = fgFirstBB; bSource != nullptr; bSource = bSource->bbNext)
+ for (BasicBlock* const bSource : Blocks())
{
if (constrained)
{
@@ -1115,11 +1112,8 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
// Emit successor edges
//
- const unsigned numSuccs = bSource->NumSucc();
-
- for (unsigned i = 0; i < numSuccs; i++)
+ for (BasicBlock* const bTarget : bSource->Succs())
{
- BasicBlock* const bTarget = bSource->GetSucc(i);
fprintf(fgxFile, " " FMT_BB " -> " FMT_BB, bSource->bbNum, bTarget->bbNum);
if (blkMap[bSource->bbNum] > blkMap[bTarget->bbNum])
{
@@ -1696,7 +1690,7 @@ void Compiler::fgDispReach()
printf("BBnum Reachable by \n");
printf("------------------------------------------------\n");
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
printf(FMT_BB " : ", block->bbNum);
BlockSetOps::Iter iter(this, block->bbReach);
@@ -2044,10 +2038,7 @@ void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 *
{
// Output a brace for every try region that this block opens
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
-
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if (HBtab->ebdTryBeg == block)
{
@@ -2058,10 +2049,7 @@ void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 *
}
}
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
-
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if (HBtab->ebdTryLast == block)
{
@@ -2242,7 +2230,7 @@ void Compiler::fgDumpBlock(BasicBlock* block)
if (!block->IsLIR())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
fgDumpStmtTree(stmt, block->bbNum);
}
@@ -2264,7 +2252,7 @@ void Compiler::fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock)
{
// Note that typically we have already called fgDispBasicBlocks()
// so we don't need to print the preds and succs again here.
- for (BasicBlock* block = firstBlock; block; block = block->bbNext)
+ for (BasicBlock* block = firstBlock; block != nullptr; block = block->bbNext)
{
fgDumpBlock(block);
@@ -2369,7 +2357,7 @@ unsigned BBPredsChecker::CheckBBPreds(BasicBlock* block, unsigned curTraversalSt
}
unsigned blockRefs = 0;
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (flowList* const pred : block->PredEdges())
{
blockRefs += pred->flDupCount;
@@ -2504,22 +2492,15 @@ bool BBPredsChecker::CheckJump(BasicBlock* blockPred, BasicBlock* block)
break;
case BBJ_SWITCH:
- {
- unsigned jumpCnt = blockPred->bbJumpSwt->bbsCount;
-
- for (unsigned i = 0; i < jumpCnt; ++i)
+ for (BasicBlock* const bTarget : blockPred->SwitchTargets())
{
- BasicBlock* jumpTab = blockPred->bbJumpSwt->bbsDstTab[i];
- assert(jumpTab != nullptr);
- if (block == jumpTab)
+ if (block == bTarget)
{
return true;
}
}
-
assert(!"SWITCH in the predecessor list with no jump label to BLOCK!");
- }
- break;
+ break;
default:
assert(!"Unexpected bbJumpKind");
@@ -2568,7 +2549,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block)
// we find a potential 'hit' we check if the funclet we're looking at is
// from the correct try region.
- for (BasicBlock* bcall = comp->fgFirstFuncletBB; bcall != nullptr; bcall = bcall->bbNext)
+ for (BasicBlock* const bcall : comp->Blocks(comp->fgFirstFuncletBB))
{
if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg)
{
@@ -2646,12 +2627,12 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef
/* Check bbNum, bbRefs and bbPreds */
// First, pick a traversal stamp, and label all the blocks with it.
unsigned curTraversalStamp = unsigned(InterlockedIncrement((LONG*)&bbTraverseLabel));
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbTraversalStamp = curTraversalStamp;
}
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (checkBBNum)
{
@@ -2737,8 +2718,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef
{
// Check to see if this block is the beginning of a filter or a handler and adjust the ref count
// appropriately.
- for (EHblkDsc *HBtab = compHndBBtab, *HBtabEnd = &compHndBBtab[compHndBBtabCount]; HBtab != HBtabEnd;
- HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if (HBtab->ebdHndBeg == block)
{
@@ -3466,7 +3446,7 @@ void Compiler::fgDebugCheckLinks(bool morphTrees)
fgDebugCheckBlockLinks();
// For each block check the links between the trees.
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->IsLIR())
{
@@ -3496,7 +3476,7 @@ void Compiler::fgDebugCheckLinks(bool morphTrees)
void Compiler::fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees)
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
// Verify that bbStmtList is threaded correctly.
// Note that for the statements list, the GetPrevStmt() list is circular.
@@ -3560,7 +3540,7 @@ void Compiler::fgDebugCheckBlockLinks()
{
assert(fgFirstBB->bbPrev == nullptr);
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->bbNext)
{
@@ -3592,11 +3572,9 @@ void Compiler::fgDebugCheckBlockLinks()
// about the BlockSet epoch.
BitVecTraits bitVecTraits(fgBBNumMax + 1, this);
BitVec succBlocks(BitVecOps::MakeEmpty(&bitVecTraits));
- BasicBlock** jumpTable = block->bbJumpSwt->bbsDstTab;
- unsigned jumpCount = block->bbJumpSwt->bbsCount;
- for (unsigned i = 0; i < jumpCount; i++)
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- BitVecOps::AddElemD(&bitVecTraits, succBlocks, jumpTable[i]->bbNum);
+ BitVecOps::AddElemD(&bitVecTraits, succBlocks, bTarget->bbNum);
}
// Now we should have a set of unique successors that matches what's in the switchMap.
// First, check the number of entries, then make sure all the blocks in uniqueSuccSet
@@ -3675,7 +3653,7 @@ void Compiler::fgDebugCheckNodesUniqueness()
{
UniquenessCheckWalker walker(this);
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->IsLIR())
{
@@ -3686,7 +3664,7 @@ void Compiler::fgDebugCheckNodesUniqueness()
}
else
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
GenTree* root = stmt->GetRootNode();
fgWalkTreePre(&root, UniquenessCheckWalker::MarkTreeId, &walker);
@@ -3709,7 +3687,7 @@ void Compiler::fgDebugCheckLoopTable()
assert(optLoopTable != nullptr);
}
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (optLoopCount == 0)
{
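The `EHClauses(this)` loops introduced above walk the same contiguous `compHndBBtab` array of `compHndBBtabCount` descriptors that the removed pointer loops did. A plausible sketch under that assumption (names are illustrative):

class EHClauses
{
    EHblkDsc* m_begin;
    EHblkDsc* m_end;

    // Iterator that yields EHblkDsc* (not a reference), so call sites like
    // HBtab->ebdTryBeg read exactly as they did before.
    class iterator
    {
        EHblkDsc* m_ehDsc;

    public:
        iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc)
        {
        }

        EHblkDsc* operator*() const
        {
            return m_ehDsc;
        }

        iterator& operator++()
        {
            ++m_ehDsc;
            return *this;
        }

        bool operator!=(const iterator& i) const
        {
            return m_ehDsc != i.m_ehDsc;
        }
    };

public:
    EHClauses(Compiler* comp)
        : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount)
    {
    }

    iterator begin() const
    {
        return iterator(m_begin);
    }

    iterator end() const
    {
        return iterator(m_end);
    }
};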
diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp
index 0a5e1602857404..06abf455ac2df1 100644
--- a/src/coreclr/jit/fgehopt.cpp
+++ b/src/coreclr/jit/fgehopt.cpp
@@ -102,7 +102,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally()
// Limit for now to finallys that contain only a GT_RETFILT.
bool isEmpty = true;
- for (Statement* stmt : firstBlock->Statements())
+ for (Statement* const stmt : firstBlock->Statements())
{
GenTree* stmtExpr = stmt->GetRootNode();
@@ -197,7 +197,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally()
BasicBlock* const lastTryBlock = HBtab->ebdTryLast;
assert(firstTryBlock->getTryIndex() == XTnum);
- for (BasicBlock* block = firstTryBlock; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks(firstTryBlock))
{
// Look for blocks directly contained in this try, and
// update the try region appropriately.
@@ -349,7 +349,6 @@ PhaseStatus Compiler::fgRemoveEmptyTry()
BasicBlock* const lastTryBlock = HBtab->ebdTryLast;
BasicBlock* const firstHandlerBlock = HBtab->ebdHndBeg;
BasicBlock* const lastHandlerBlock = HBtab->ebdHndLast;
- BasicBlock* const endHandlerBlock = lastHandlerBlock->bbNext;
assert(firstTryBlock->getTryIndex() == XTnum);
@@ -474,7 +473,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry()
// handler region (if any) won't change.
//
// Kind of overkill to loop here, but hey.
- for (BasicBlock* block = firstTryBlock; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks(firstTryBlock))
{
// Look for blocks directly contained in this try, and
// update the try region appropriately.
@@ -511,7 +510,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry()
// remove the EH table entry. Change handler exits to jump to
// the continuation. Clear catch type on handler entry.
// Decrement nesting level of enclosed GT_END_LFINs.
- for (BasicBlock* block = firstHandlerBlock; block != endHandlerBlock; block = block->bbNext)
+ for (BasicBlock* const block : Blocks(firstHandlerBlock, lastHandlerBlock))
{
if (block == firstHandlerBlock)
{
@@ -545,7 +544,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry()
// If we're in a non-funclet model, decrement the nesting
// level of any GT_END_LFIN we find in the handler region,
// since we're removing the enclosing handler.
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
GenTree* expr = stmt->GetRootNode();
if (expr->gtOper == GT_END_LFIN)
@@ -733,7 +732,7 @@ PhaseStatus Compiler::fgCloneFinally()
// Should we compute statement cost here, or is it
// premature...? For now just count statements I guess.
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
regionStmtCount++;
}
@@ -1244,7 +1243,7 @@ PhaseStatus Compiler::fgCloneFinally()
JITDUMP("Profile scale factor (" FMT_WT "/" FMT_WT ") => clone " FMT_WT " / original " FMT_WT "\n",
retargetedWeight, originalWeight, clonedScale, originalScale);
- for (BasicBlock* block = firstBlock; block != lastBlock->bbNext; block = block->bbNext)
+ for (BasicBlock* const block : Blocks(firstBlock, lastBlock))
{
if (block->hasProfileWeight())
{
@@ -1343,10 +1342,9 @@ void Compiler::fgDebugCheckTryFinallyExits()
BasicBlock* const lastTryBlock = HBtab->ebdTryLast;
assert(firstTryBlock->getTryIndex() <= XTnum);
assert(lastTryBlock->getTryIndex() <= XTnum);
- BasicBlock* const afterTryBlock = lastTryBlock->bbNext;
- BasicBlock* const finallyBlock = isFinally ? HBtab->ebdHndBeg : nullptr;
+ BasicBlock* const finallyBlock = isFinally ? HBtab->ebdHndBeg : nullptr;
- for (BasicBlock* block = firstTryBlock; block != afterTryBlock; block = block->bbNext)
+ for (BasicBlock* const block : Blocks(firstTryBlock, lastTryBlock))
{
// Only check the directly contained blocks.
assert(block->hasTryIndex());
@@ -1357,12 +1355,8 @@ void Compiler::fgDebugCheckTryFinallyExits()
}
// Look at each of the normal control flow possibilities.
- const unsigned numSuccs = block->NumSucc();
-
- for (unsigned i = 0; i < numSuccs; i++)
+ for (BasicBlock* const succBlock : block->Succs())
{
- BasicBlock* const succBlock = block->GetSucc(i);
-
if (succBlock->hasTryIndex() && succBlock->getTryIndex() <= XTnum)
{
// Successor does not exit this try region.
@@ -1514,7 +1508,7 @@ void Compiler::fgCleanupContinuation(BasicBlock* continuation)
// Remove the GT_END_LFIN from the continuation,
// Note we only expect to see one such statement.
bool foundEndLFin = false;
- for (Statement* stmt : continuation->Statements())
+ for (Statement* const stmt : continuation->Statements())
{
GenTree* expr = stmt->GetRootNode();
if (expr->gtOper == GT_END_LFIN)
@@ -1566,7 +1560,7 @@ void Compiler::fgClearAllFinallyTargetBits()
// in case bits are left over from EH clauses being deleted.
// Walk all blocks, and reset the target bits.
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbFlags &= ~BBF_FINALLY_TARGET;
}
@@ -1585,7 +1579,7 @@ void Compiler::fgAddFinallyTargetFlags()
return;
}
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->isBBCallAlwaysPair())
{
@@ -1682,10 +1676,8 @@ PhaseStatus Compiler::fgMergeFinallyChains()
// Look for finallys.
bool hasFinally = false;
- for (unsigned XTnum = 0; XTnum < compHndBBtabCount; XTnum++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
- EHblkDsc* const HBtab = &compHndBBtab[XTnum];
-
// Check if this is a try/finally.
if (HBtab->HasFinallyHandler())
{
diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp
index fda7f901451818..c267dc558ed688 100644
--- a/src/coreclr/jit/fgflow.cpp
+++ b/src/coreclr/jit/fgflow.cpp
@@ -29,9 +29,7 @@ flowList* Compiler::fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred)
assert(blockPred);
assert(!fgCheapPredsValid);
- flowList* pred;
-
- for (pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (flowList* const pred : block->PredEdges())
{
if (blockPred == pred->getBlock())
{
@@ -461,17 +459,11 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
break;
case BBJ_SWITCH:
- {
- unsigned jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- fgRemoveRefPred(*jumpTab, block);
- } while (++jumpTab, --jumpCnt);
-
+ fgRemoveRefPred(bTarget, block);
+ }
break;
- }
default:
noway_assert(!"Block doesn't have a valid bbJumpKind!!!!");
@@ -499,8 +491,6 @@ void Compiler::fgComputeCheapPreds()
noway_assert(!fgComputePredsDone); // We can't do this if we've got the full preds.
noway_assert(fgFirstBB != nullptr);
- BasicBlock* block;
-
#ifdef DEBUG
if (verbose)
{
@@ -513,7 +503,7 @@ void Compiler::fgComputeCheapPreds()
// Clear out the cheap preds lists.
fgRemovePreds();
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
switch (block->bbJumpKind)
{
@@ -546,16 +536,10 @@ void Compiler::fgComputeCheapPreds()
break;
case BBJ_SWITCH:
- unsigned jumpCnt;
- jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab;
- jumpTab = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- fgAddCheapPred(*jumpTab, block);
- } while (++jumpTab, --jumpCnt);
-
+ fgAddCheapPred(bTarget, block);
+ }
break;
case BBJ_EHFINALLYRET: // It's expensive to compute the preds for this case, so we don't for the cheap
@@ -659,7 +643,7 @@ void Compiler::fgRemovePreds()
// and are the same size. So, this function removes both.
static_assert_no_msg(sizeof(((BasicBlock*)nullptr)->bbPreds) == sizeof(((BasicBlock*)nullptr)->bbCheapPreds));
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbPreds = nullptr;
}
@@ -688,8 +672,6 @@ void Compiler::fgComputePreds()
{
noway_assert(fgFirstBB != nullptr);
- BasicBlock* block;
-
#ifdef DEBUG
if (verbose)
{
@@ -700,7 +682,7 @@ void Compiler::fgComputePreds()
// Check that the block numbers are increasing order.
unsigned lastBBnum = fgFirstBB->bbNum;
- for (BasicBlock* block = fgFirstBB->bbNext; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks(fgFirstBB->bbNext))
{
assert(lastBBnum < block->bbNum);
lastBBnum = block->bbNum;
@@ -708,7 +690,7 @@ void Compiler::fgComputePreds()
#endif // DEBUG
// Reset everything pred related
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbPreds = nullptr;
block->bbLastPred = nullptr;
@@ -726,7 +708,7 @@ void Compiler::fgComputePreds()
fgEntryBB->bbRefs = 1;
}
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
switch (block->bbJumpKind)
{
@@ -823,16 +805,10 @@ void Compiler::fgComputePreds()
break;
case BBJ_SWITCH:
- unsigned jumpCnt;
- jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab;
- jumpTab = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- fgAddRefPred(*jumpTab, block, nullptr, true);
- } while (++jumpTab, --jumpCnt);
-
+ fgAddRefPred(bTarget, block, nullptr, true);
+ }
break;
default:
@@ -841,10 +817,8 @@ void Compiler::fgComputePreds()
}
}
- for (unsigned EHnum = 0; EHnum < compHndBBtabCount; EHnum++)
+ for (EHblkDsc* const ehDsc : EHClauses(this))
{
- EHblkDsc* ehDsc = ehGetDsc(EHnum);
-
if (ehDsc->HasFilter())
{
// The first block of a filter has an artificial extra refcount.
@@ -949,11 +923,8 @@ Compiler::SwitchUniqueSuccSet Compiler::GetDescriptorForSwitch(BasicBlock* switc
BitVecTraits blockVecTraits(fgBBNumMax + 1, this);
BitVec uniqueSuccBlocks(BitVecOps::MakeEmpty(&blockVecTraits));
- BasicBlock** jumpTable = switchBlk->bbJumpSwt->bbsDstTab;
- unsigned jumpCount = switchBlk->bbJumpSwt->bbsCount;
- for (unsigned i = 0; i < jumpCount; i++)
+ for (BasicBlock* const targ : switchBlk->SwitchTargets())
{
- BasicBlock* targ = jumpTable[i];
BitVecOps::AddElemD(&blockVecTraits, uniqueSuccBlocks, targ->bbNum);
}
// Now we have a set of unique successors.
@@ -964,9 +935,8 @@ Compiler::SwitchUniqueSuccSet Compiler::GetDescriptorForSwitch(BasicBlock* switc
unsigned nonDupInd = 0;
// At this point, all unique targets are in "uniqueSuccBlocks". As we encounter each,
// add to nonDups, remove from "uniqueSuccBlocks".
- for (unsigned i = 0; i < jumpCount; i++)
+ for (BasicBlock* const targ : switchBlk->SwitchTargets())
{
- BasicBlock* targ = jumpTable[i];
if (BitVecOps::IsMember(&blockVecTraits, uniqueSuccBlocks, targ->bbNum))
{
nonDups[nonDupInd] = targ;
@@ -990,14 +960,12 @@ void Compiler::SwitchUniqueSuccSet::UpdateTarget(CompAllocator alloc,
BasicBlock* to)
{
assert(switchBlk->bbJumpKind == BBJ_SWITCH); // Precondition.
- unsigned jmpTabCnt = switchBlk->bbJumpSwt->bbsCount;
- BasicBlock** jmpTab = switchBlk->bbJumpSwt->bbsDstTab;
// Is "from" still in the switch table (because it had more than one entry before?)
bool fromStillPresent = false;
- for (unsigned i = 0; i < jmpTabCnt; i++)
+ for (BasicBlock* const bTarget : switchBlk->SwitchTargets())
{
- if (jmpTab[i] == from)
+ if (bTarget == from)
{
fromStillPresent = true;
break;
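The `SwitchTargets()` conversions in this file are more than cosmetic: the removed `do { ... } while (++jumpTab, --jumpCnt);` loops rely on the jump table being non-empty (which these loops assume always holds for BBJ_SWITCH, since the table carries at least the default case), whereas a begin()/end() pair does not need that invariant. A sketch assuming `bbJumpSwt` keeps its `bbsDstTab`/`bbsCount` layout (the wrapper name is illustrative):

class BBSwitchTargetList
{
    BBswtDesc* m_bbsDesc;

public:
    BBSwitchTargetList(BBswtDesc* bbsDesc) : m_bbsDesc(bbsDesc)
    {
    }

    // Raw BasicBlock** pointers serve as the iterators: dereferencing one
    // yields the BasicBlock* that the loop variable binds to.
    BasicBlock** begin() const
    {
        return m_bbsDesc->bbsDstTab;
    }

    BasicBlock** end() const
    {
        return m_bbsDesc->bbsDstTab + m_bbsDesc->bbsCount;
    }
};

`BasicBlock::SwitchTargets()` would then just assert `bbJumpKind == BBJ_SWITCH` and return `BBSwitchTargetList(bbJumpSwt)`.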
diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp
index 06d750eb9c1a7d..29aabbaf40a902 100644
--- a/src/coreclr/jit/fginline.cpp
+++ b/src/coreclr/jit/fginline.cpp
@@ -99,30 +99,28 @@ PhaseStatus Compiler::fgInline()
&info.compMethodInfo->args);
#endif // DEBUG
- BasicBlock* block = fgFirstBB;
- bool madeChanges = false;
- noway_assert(block != nullptr);
+ noway_assert(fgFirstBB != nullptr);
// Set the root inline context on all statements
InlineContext* rootContext = m_inlineStrategy->GetRootContext();
- for (; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
stmt->SetInlineContext(rootContext);
}
}
- // Reset block back to start for inlining
- block = fgFirstBB;
+ BasicBlock* block = fgFirstBB;
+ bool madeChanges = false;
do
{
// Make the current basic block address available globally
compCurBB = block;
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
#ifdef DEBUG
@@ -212,7 +210,7 @@ PhaseStatus Compiler::fgInline()
do
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
// Call Compiler::fgDebugCheckInlineCandidates on each node
fgWalkTreePre(stmt->GetRootNodePointer(), fgDebugCheckInlineCandidates);
@@ -731,7 +729,7 @@ Compiler::fgWalkResult Compiler::fgLateDevirtualization(GenTree** pTree, fgWalkD
CORINFO_CONTEXT_HANDLE context = nullptr;
const bool isLateDevirtualization = true;
bool explicitTailCall = (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) != 0;
- comp->impDevirtualizeCall(call, &method, &methodFlags, &context, nullptr, isLateDevirtualization,
+ comp->impDevirtualizeCall(call, nullptr, &method, &methodFlags, &context, nullptr, isLateDevirtualization,
explicitTailCall);
}
}
@@ -1095,7 +1093,6 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
GenTreeCall* iciCall = pInlineInfo->iciCall;
Statement* iciStmt = pInlineInfo->iciStmt;
BasicBlock* iciBlock = pInlineInfo->iciBlock;
- BasicBlock* block;
noway_assert(iciBlock->bbStmtList != nullptr);
noway_assert(iciStmt->GetRootNode() != nullptr);
@@ -1118,9 +1115,9 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
// Create a new inline context and mark the inlined statements with it
InlineContext* calleeContext = m_inlineStrategy->NewSuccess(pInlineInfo);
- for (block = InlineeCompiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : InlineeCompiler->Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
stmt->SetInlineContext(calleeContext);
}
@@ -1272,7 +1269,7 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
//
// Set the try and handler index and fix the jump types of inlinee's blocks.
//
- for (block = InlineeCompiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : InlineeCompiler->Blocks())
{
noway_assert(!block->hasTryIndex());
noway_assert(!block->hasHndIndex());
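At this point all three block-range forms used by this change have appeared: `Blocks()`, `Blocks(start)` (including on another compiler instance, as in `InlineeCompiler->Blocks()` above), and `Blocks(start, end)`. Presumably the Compiler-side accessors are thin wrappers along these lines, reusing the list types sketched earlier (signatures assumed):

BasicBlockSimpleList Compiler::Blocks() const
{
    return BasicBlockSimpleList(fgFirstBB);
}

BasicBlockSimpleList Compiler::Blocks(BasicBlock* begin) const
{
    return BasicBlockSimpleList(begin);
}

BasicBlockRangeList Compiler::Blocks(BasicBlock* begin, BasicBlock* end) const
{
    return BasicBlockRangeList(begin, end);
}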
diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp
index 5f029de9a8f2b6..3b1b1739919d54 100644
--- a/src/coreclr/jit/fgopt.cpp
+++ b/src/coreclr/jit/fgopt.cpp
@@ -48,9 +48,9 @@ bool Compiler::fgDominate(BasicBlock* b1, BasicBlock* b2)
return true;
}
- for (flowList* pred = b2->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : b2->PredBlocks())
{
- if (!fgDominate(b1, pred->getBlock()))
+ if (!fgDominate(b1, predBlock))
{
return false;
}
@@ -132,9 +132,9 @@ bool Compiler::fgReachable(BasicBlock* b1, BasicBlock* b2)
return true;
}
- for (flowList* pred = b2->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : b2->PredBlocks())
{
- if (fgReachable(b1, pred->getBlock()))
+ if (fgReachable(b1, predBlock))
{
return true;
}
@@ -220,9 +220,7 @@ void Compiler::fgComputeReachabilitySets()
fgReachabilitySetsValid = false;
#endif // DEBUG
- BasicBlock* block;
-
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Initialize the per-block bbReach sets. It creates a new empty set,
// because the block epoch could change since the previous initialization
@@ -241,16 +239,14 @@ void Compiler::fgComputeReachabilitySets()
{
change = false;
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
BlockSetOps::Assign(this, newReach, block->bbReach);
bool predGcSafe = (block->bbPreds != nullptr); // Do all of our predecessor blocks have a GC safe bit?
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* predBlock = pred->getBlock();
-
/* Union the predecessor's reachability set into newReach */
BlockSetOps::UnionD(this, newReach, predBlock->bbReach);
@@ -308,9 +304,7 @@ void Compiler::fgComputeEnterBlocksSet()
if (compHndBBtabCount > 0)
{
/* Also 'or' in the handler basic blocks */
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if (HBtab->HasFilter())
{
@@ -325,7 +319,7 @@ void Compiler::fgComputeEnterBlocksSet()
// to the enter blocks is a bit of a compromise, because sometimes the blocks are already reachable,
// and it messes up DFS ordering to have them marked as enter block. We should prevent the
// creation of retless calls some other way.
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->bbJumpKind == BBJ_CALLFINALLY)
{
@@ -380,12 +374,11 @@ bool Compiler::fgRemoveUnreachableBlocks()
assert(!fgCheapPredsValid);
assert(fgReachabilitySetsValid);
- bool hasLoops = false;
- bool hasUnreachableBlocks = false;
- BasicBlock* block;
+ bool hasLoops = false;
+ bool hasUnreachableBlocks = false;
/* Record unreachable blocks */
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
/* Internal throw blocks are also reachable */
if (fgIsThrowHlpBlk(block))
@@ -458,9 +451,8 @@ bool Compiler::fgRemoveUnreachableBlocks()
// Set BBF_LOOP_HEAD if we have backwards branches to this block.
unsigned blockNum = block->bbNum;
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* predBlock = pred->getBlock();
if (blockNum <= predBlock->bbNum)
{
if (predBlock->bbJumpKind == BBJ_CALLFINALLY)
@@ -486,7 +478,7 @@ bool Compiler::fgRemoveUnreachableBlocks()
if (hasUnreachableBlocks)
{
// Now remove the unreachable blocks
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
{
// If we mark the block with BBF_REMOVED then
// we need to call fgRemovedBlock() on it
@@ -541,7 +533,7 @@ void Compiler::fgComputeReachability()
/* Create a list of all BBJ_RETURN blocks. The head of the list is 'fgReturnBlocks'. */
fgReturnBlocks = nullptr;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// If this is a BBJ_RETURN block, add it to our list of all BBJ_RETURN blocks. This list is only
// used to find return blocks.
@@ -660,7 +652,7 @@ void Compiler::fgDfsInvPostOrder()
// Call the flowgraph DFS traversal helper.
unsigned postIndex = 1;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// If the block has no predecessors, and we haven't already visited it (because it's in fgEnterBlks but also
// reachable from the first block), go ahead and traverse starting from this block.
@@ -699,17 +691,12 @@ void Compiler::fgDfsInvPostOrder()
//
BlockSet_ValRet_T Compiler::fgDomFindStartNodes()
{
- unsigned j;
- BasicBlock* block;
-
BlockSet startNodes(BlockSetOps::MakeFull(this));
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- unsigned cSucc = block->NumSucc(this);
- for (j = 0; j < cSucc; ++j)
+ for (BasicBlock* const succ : block->Succs(this))
{
- BasicBlock* succ = block->GetSucc(j, this);
BlockSetOps::RemoveElemD(this, startNodes, succ->bbNum);
}
}
@@ -773,11 +760,8 @@ void Compiler::fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, uns
// pre and post actions are processed.
stack.Push(DfsBlockEntry(DSS_Post, currentBlock));
- unsigned cSucc = currentBlock->NumSucc(this);
- for (unsigned j = 0; j < cSucc; ++j)
+ for (BasicBlock* const succ : currentBlock->Succs(this))
{
- BasicBlock* succ = currentBlock->GetSucc(j, this);
-
// If this is a node we haven't seen before, go ahead and process
if (!BlockSetOps::IsMember(this, visited, succ->bbNum))
{
@@ -894,9 +878,7 @@ void Compiler::fgComputeDoms()
// Mark the EH blocks as entry blocks and also flag them as processed.
if (compHndBBtabCount > 0)
{
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if (HBtab->HasFilter())
{
@@ -973,7 +955,7 @@ void Compiler::fgComputeDoms()
// As stated before, once we have computed immediate dominance we need to clear
// all the basic blocks whose predecessor list was set to flRoot. This
// reverts that and leaves the blocks the same as before.
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->bbPreds == &flRoot)
{
@@ -1037,7 +1019,7 @@ DomTreeNode* Compiler::fgBuildDomTree()
// Traverse the entire block list to build the dominator tree. Skip fgFirstBB
// as it is always a root of the dominator forest.
- for (BasicBlock* block = fgFirstBB->bbNext; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks(fgFirstBB->bbNext))
{
BasicBlock* parent = block->bbIDom;
@@ -1231,7 +1213,7 @@ BlockSet_ValRet_T Compiler::fgGetDominatorSet(BasicBlock* block)
//
void Compiler::fgInitBlockVarSets()
{
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->InitVarSets(this);
}
@@ -1568,9 +1550,9 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext)
// If there is a switch predecessor don't bother because we'd have to update the uniquesuccs as well
// (if they are valid).
- for (flowList* pred = bNext->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : bNext->PredBlocks())
{
- if (pred->getBlock()->bbJumpKind == BBJ_SWITCH)
+ if (predBlock->bbJumpKind == BBJ_SWITCH)
{
return false;
}
@@ -1628,13 +1610,13 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext)
JITDUMP("Second block has multiple incoming edges\n");
assert(block->isEmpty());
- for (flowList* pred = bNext->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const predBlock : bNext->PredBlocks())
{
- fgReplaceJumpTarget(pred->getBlock(), block, bNext);
+ fgReplaceJumpTarget(predBlock, block, bNext);
- if (pred->getBlock() != block)
+ if (predBlock != block)
{
- fgAddRefPred(block, pred->getBlock());
+ fgAddRefPred(block, predBlock);
}
}
bNext->bbPreds = nullptr;
@@ -1999,7 +1981,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext)
// and we avoid most of the work if pred lists are already in order,
// we'll just ensure everything is properly ordered.
//
- for (BasicBlock* checkBlock = fgFirstBB; checkBlock != nullptr; checkBlock = checkBlock->bbNext)
+ for (BasicBlock* const checkBlock : Blocks())
{
checkBlock->ensurePredListOrder(this);
}
@@ -2154,7 +2136,7 @@ void Compiler::fgUnreachableBlock(BasicBlock* block)
block->bbStmtList = firstNonPhi;
}
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
fgRemoveStmt(block, stmt);
}
@@ -2557,11 +2539,11 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block)
// EH regions. Is this a case where they can't be merged?
bool okToMerge = true; // assume it's ok
- for (flowList* pred = block->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- if (pred->getBlock()->bbJumpKind == BBJ_EHCATCHRET)
+ if (predBlock->bbJumpKind == BBJ_EHCATCHRET)
{
- assert(pred->getBlock()->bbJumpDest == block);
+ assert(predBlock->bbJumpDest == block);
okToMerge = false; // we can't get rid of the empty block
break;
}
@@ -3466,7 +3448,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
assert(!bDest->IsLIR());
unsigned estDupCostSz = 0;
- for (Statement* stmt : bDest->Statements())
+ for (Statement* const stmt : bDest->Statements())
{
// We want to compute the costs of the statement. Unfortunately, gtPrepareCost() / gtSetStmtInfo()
// call gtSetEvalOrder(), which can reorder nodes. If it does so, we need to re-thread the gtNext/gtPrev
@@ -3576,7 +3558,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
/* Visit all the statements in bDest */
- for (Statement* curStmt : bDest->Statements())
+ for (Statement* const curStmt : bDest->Statements())
{
// Clone/substitute the expression.
Statement* stmt = gtCloneStmt(curStmt);
@@ -3730,7 +3712,7 @@ bool Compiler::fgOptimizeSwitchJumps()
bool modified = false;
- for (BasicBlock* block = fgFirstBB; block != NULL; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Lowering expands switches, so calling this method on lowered IR
// does not make sense.
@@ -4087,10 +4069,10 @@ bool Compiler::fgExpandRarelyRunBlocks()
bool rare = true;
/* Make sure that block has at least one normal predecessor */
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
/* Find the fall through predecessor, if any */
- if (!pred->getBlock()->isRunRarely())
+ if (!predBlock->isRunRarely())
{
rare = false;
break;
@@ -4354,7 +4336,7 @@ bool Compiler::fgReorderBlocks()
noway_assert(edgeFromPrev != nullptr);
// Examine all of the other edges into bDest
- for (flowList* edge = bDest->bbPreds; edge != nullptr; edge = edge->flNext)
+ for (flowList* const edge : bDest->PredEdges())
{
if (edge != edgeFromPrev)
{
@@ -4374,11 +4356,9 @@ bool Compiler::fgReorderBlocks()
//
// Examine all of the other edges into bDest
- for (flowList* edge = bDest->bbPreds; edge != nullptr; edge = edge->flNext)
+ for (BasicBlock* const predBlock : bDest->PredBlocks())
{
- BasicBlock* bTemp = edge->getBlock();
-
- if ((bTemp != bPrev) && (bTemp->bbWeight >= bPrev->bbWeight))
+ if ((predBlock != bPrev) && (predBlock->bbWeight >= bPrev->bbWeight))
{
moveDestUp = false;
break;
@@ -5704,11 +5684,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication)
// we will need to update ebdTryLast or ebdHndLast.
//
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
-
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd;
- HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if ((HBtab->ebdTryLast == bNext) || (HBtab->ebdHndLast == bNext))
{
@@ -5888,7 +5864,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication)
if (compRationalIRForm)
{
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
LIR::AsRange(block).CheckLIR(this);
}
@@ -5952,7 +5928,7 @@ unsigned Compiler::fgGetCodeEstimate(BasicBlock* block)
break;
}
- for (Statement* stmt : StatementList(block->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
unsigned char cost = stmt->GetCostSz();
costSz += cost;
@@ -5970,11 +5946,11 @@ unsigned Compiler::fgMeasureIR()
{
unsigned nodeCount = 0;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (!block->IsLIR())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
fgWalkTreePre(stmt->GetRootNodePointer(),
[](GenTree** slot, fgWalkData* data) -> Compiler::fgWalkResult {
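fgopt.cpp shows the split between the two predecessor ranges: `PredEdges()` yields the `flowList*` edge (kept where the code needs edge weights or `flDupCount`), while `PredBlocks()` yields the predecessor block directly and removes the `pred->getBlock()` boilerplate. The latter is presumably a small wrapper over the same list (sketch; names assumed):

class PredBlockList
{
    flowList* m_preds;

    class iterator
    {
        flowList* m_pred;

    public:
        iterator(flowList* pred) : m_pred(pred)
        {
        }

        // Dereference through the edge to the predecessor block.
        BasicBlock* operator*() const
        {
            return m_pred->getBlock();
        }

        iterator& operator++()
        {
            m_pred = m_pred->flNext;
            return *this;
        }

        bool operator!=(const iterator& i) const
        {
            return m_pred != i.m_pred;
        }
    };

public:
    PredBlockList(flowList* preds) : m_preds(preds)
    {
    }

    iterator begin() const
    {
        return iterator(m_preds);
    }

    iterator end() const
    {
        return iterator(nullptr);
    }
};

Iteration order is unchanged, so the conversions in fgDominate, fgReachable, and fgCompactBlocks above are behavior-preserving.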
diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp
index 1039d303a0d2e3..1659b2d3905293 100644
--- a/src/coreclr/jit/fgprofile.cpp
+++ b/src/coreclr/jit/fgprofile.cpp
@@ -131,9 +131,9 @@ void Compiler::fgApplyProfileScale()
calleeWeight, scale);
JITDUMP("Scaling inlinee blocks\n");
- for (BasicBlock* bb = fgFirstBB; bb != nullptr; bb = bb->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- bb->scaleBBWeight(scale);
+ block->scaleBBWeight(scale);
}
}
@@ -314,7 +314,7 @@ void BlockCountInstrumentor::Prepare(bool preImport)
#ifdef DEBUG
// Set schema index to invalid value
//
- for (BasicBlock* block = m_comp->fgFirstBB; (block != nullptr); block = block->bbNext)
+ for (BasicBlock* const block : m_comp->Blocks())
{
block->bbCountSchemaIndex = -1;
}
@@ -563,10 +563,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor)
//
if (!compIsForInlining())
{
- EHblkDsc* HBtab = compHndBBtab;
- unsigned XTnum = 0;
-
- for (; XTnum < compHndBBtabCount; XTnum++, HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
BasicBlock* hndBegBB = HBtab->ebdHndBeg;
stack.Push(hndBegBB);
@@ -1125,11 +1122,14 @@ void EfficientEdgeCountInstrumentor::Instrument(BasicBlock* block, Schema& schem
#ifdef DEBUG
// Verify the edge still exists.
//
- const unsigned numSucc = block->NumSucc(comp);
- bool found = false;
- for (unsigned i = 0; i < numSucc && !found; i++)
+ bool found = false;
+ for (BasicBlock* const succ : block->Succs(comp))
{
- found = (target == block->GetSucc(i, comp));
+ if (target == succ)
+ {
+ found = true;
+ break;
+ }
}
assert(found);
#endif
@@ -1391,7 +1391,7 @@ void ClassProbeInstrumentor::Prepare(bool isPreImport)
#ifdef DEBUG
// Set schema index to invalid value
//
- for (BasicBlock* block = m_comp->fgFirstBB; (block != nullptr); block = block->bbNext)
+ for (BasicBlock* const block : m_comp->Blocks())
{
block->bbClassSchemaIndex = -1;
}
@@ -1420,7 +1420,7 @@ void ClassProbeInstrumentor::BuildSchemaElements(BasicBlock* block, Schema& sche
//
BuildClassProbeSchemaGen schemaGen(schema, m_schemaCount);
ClassProbeVisitor visitor(m_comp, schemaGen);
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
visitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
}
@@ -1453,7 +1453,7 @@ void ClassProbeInstrumentor::Instrument(BasicBlock* block, Schema& schema, BYTE*
ClassProbeInserter insertProbes(schema, profileMemory, &classSchemaIndex, m_instrCount);
ClassProbeVisitor visitor(m_comp, insertProbes);
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
visitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
}
@@ -1474,14 +1474,14 @@ void ClassProbeInstrumentor::SuppressProbes()
SuppressProbesFunctor suppressProbes(cleanupCount);
ClassProbeVisitor visitor(m_comp, suppressProbes);
- for (BasicBlock* block = m_comp->fgFirstBB; (block != nullptr); block = block->bbNext)
+ for (BasicBlock* const block : m_comp->Blocks())
{
if ((block->bbFlags & BBF_HAS_CLASS_PROFILE) == 0)
{
continue;
}
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
visitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
}
@@ -1589,7 +1589,7 @@ PhaseStatus Compiler::fgInstrumentMethod()
// Walk the flow graph to build up the instrumentation schema.
//
Schema schema(getAllocator(CMK_Pgo));
- for (BasicBlock* block = fgFirstBB; (block != nullptr); block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (fgCountInstrumentor->ShouldProcess(block))
{
@@ -1670,7 +1670,7 @@ PhaseStatus Compiler::fgInstrumentMethod()
// Add the instrumentation code
//
- for (BasicBlock* block = fgFirstBB; (block != nullptr); block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (fgCountInstrumentor->ShouldProcess(block))
{
@@ -1852,7 +1852,7 @@ void Compiler::fgSetProfileWeight(BasicBlock* block, BasicBlock::weight_t profil
//
void Compiler::fgIncorporateBlockCounts()
{
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
BasicBlock::weight_t profileWeight;
@@ -2172,7 +2172,7 @@ void EfficientEdgeCountReconstructor::Prepare()
{
// Create per-block info, and set up the key to block map.
//
- for (BasicBlock* block = m_comp->fgFirstBB; (block != nullptr); block = block->bbNext)
+ for (BasicBlock* const block : m_comp->Blocks())
{
m_keyToBlockMap.Set(BlockToKey(block), block);
BlockInfo* const info = new (m_allocator) BlockInfo();
@@ -2536,7 +2536,7 @@ void EfficientEdgeCountReconstructor::Propagate()
// Set weight on all blocks.
//
- for (BasicBlock* block = m_comp->fgFirstBB; (block != nullptr); block = block->bbNext)
+ for (BasicBlock* const block : m_comp->Blocks())
{
BlockInfo* const info = BlockToInfo(block);
assert(info->m_weightKnown);
@@ -3222,7 +3222,6 @@ void Compiler::fgComputeEdgeWeights()
BasicBlock* bSrc;
BasicBlock* bDst;
- flowList* edge;
BasicBlock::weight_t slop;
unsigned goodEdgeCountCurrent = 0;
unsigned goodEdgeCountPrevious = 0;
@@ -3247,7 +3246,7 @@ void Compiler::fgComputeEdgeWeights()
bDstWeight -= fgCalledCount;
}
- for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
+ for (flowList* const edge : bDst->PredEdges())
{
bool assignOK = true;
@@ -3327,7 +3326,7 @@ void Compiler::fgComputeEdgeWeights()
JITDUMP("\n -- step 1 --\n");
for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext)
{
- for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
+ for (flowList* const edge : bDst->PredEdges())
{
bool assignOK = true;
@@ -3432,11 +3431,8 @@ void Compiler::fgComputeEdgeWeights()
BasicBlock::weight_t maxEdgeWeightSum = 0;
// Calculate the sums of the minimum and maximum edge weights
- for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
+ for (flowList* const edge : bDst->PredEdges())
{
- // We are processing the control flow edge (bSrc -> bDst)
- bSrc = edge->getBlock();
-
maxEdgeWeightSum += edge->edgeWeightMax();
minEdgeWeightSum += edge->edgeWeightMin();
}
@@ -3444,7 +3440,7 @@ void Compiler::fgComputeEdgeWeights()
// maxEdgeWeightSum is the sum of all flEdgeWeightMax values into bDst
// minEdgeWeightSum is the sum of all flEdgeWeightMin values into bDst
- for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
+ for (flowList* const edge : bDst->PredEdges())
{
bool assignOK = true;
@@ -3567,14 +3563,13 @@ EARLY_EXIT:;
// See if any edge weight are expressed in [min..max] form
- for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext)
+ for (BasicBlock* const bDst : Blocks())
{
if (bDst->bbPreds != nullptr)
{
- for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
+ for (flowList* const edge : bDst->PredEdges())
{
- bSrc = edge->getBlock();
- // This is the control flow edge (bSrc -> bDst)
+ // This is the control flow edge (edge->getBlock() -> bDst)
if (edge->edgeWeightMin() != edge->edgeWeightMax())
{
@@ -3666,7 +3661,7 @@ void Compiler::fgDebugCheckProfileData()
// Verify each profiled block.
//
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (!block->hasProfileWeight())
{
@@ -3801,7 +3796,7 @@ bool Compiler::fgDebugCheckIncomingProfileData(BasicBlock* block)
BasicBlock::weight_t incomingWeightMax = 0;
bool foundPreds = false;
- for (flowList* predEdge = block->bbPreds; predEdge != nullptr; predEdge = predEdge->flNext)
+ for (flowList* const predEdge : block->PredEdges())
{
incomingWeightMin += predEdge->edgeWeightMin();
incomingWeightMax += predEdge->edgeWeightMax();
@@ -3883,16 +3878,7 @@ bool Compiler::fgDebugCheckOutgoingProfileData(BasicBlock* block)
for (unsigned i = 0; i < numSuccs; i++)
{
BasicBlock* succBlock = block->GetSucc(i, this);
- flowList* succEdge = nullptr;
-
- for (flowList* edge = succBlock->bbPreds; edge != nullptr; edge = edge->flNext)
- {
- if (edge->getBlock() == block)
- {
- succEdge = edge;
- break;
- }
- }
+ flowList* succEdge = fgGetPredForBlock(succBlock, block);
if (succEdge == nullptr)
{
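The final fgprofile.cpp hunk replaces a hand-rolled scan of `succBlock->bbPreds` with `fgGetPredForBlock`, whose own loop (converted to `PredEdges()` in the fgflow.cpp portion of this diff) performs the identical search. The call site reduces to:

// Find the flow edge for (block -> succBlock); nullptr means no such edge.
flowList* const succEdge = fgGetPredForBlock(succBlock, block);

if (succEdge == nullptr)
{
    // ...report the missing edge, as the surrounding checker does...
}

Using the helper also keeps the "edge not found" semantics in one place instead of two.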
diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp
index a76cc93dc40cbf..e7a281cdecaa18 100644
--- a/src/coreclr/jit/flowgraph.cpp
+++ b/src/coreclr/jit/flowgraph.cpp
@@ -25,11 +25,11 @@
static bool blockNeedsGCPoll(BasicBlock* block)
{
bool blockMayNeedGCPoll = false;
- for (Statement* stmt = block->FirstNonPhiDef(); stmt != nullptr; stmt = stmt->GetNextStmt())
+ for (Statement* const stmt : block->NonPhiStatements())
{
if ((stmt->GetRootNode()->gtFlags & GTF_CALL) != 0)
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (tree->OperGet() == GT_CALL)
{
@@ -91,7 +91,7 @@ PhaseStatus Compiler::fgInsertGCPolls()
BasicBlock* block;
// Walk through the blocks and hunt for a block that needs a GC Poll
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (block = fgFirstBB; block != nullptr; block = block->bbNext)
{
// When optimizations are enabled, we can't rely on BBF_HAS_SUPPRESSGC_CALL flag:
// the call could've been moved, e.g., hoisted from a loop, CSE'd, etc.
@@ -629,7 +629,7 @@ PhaseStatus Compiler::fgImport()
// Note this includes (to some extent) the impact of importer folded
// branches, provided the folded tree covered the entire block's IL.
unsigned importedILSize = 0;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if ((block->bbFlags & BBF_IMPORTED) != 0)
{
@@ -1395,8 +1395,6 @@ inline void Compiler::fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB)
void Compiler::fgLoopCallMark()
{
- BasicBlock* block;
-
/* If we've already marked all the blocks, bail */
if (fgLoopCallMarked)
@@ -1408,7 +1406,7 @@ void Compiler::fgLoopCallMark()
/* Walk the blocks, looking for backward edges */
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
switch (block->bbJumpKind)
{
@@ -1420,17 +1418,10 @@ void Compiler::fgLoopCallMark()
break;
case BBJ_SWITCH:
-
- unsigned jumpCnt;
- jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpPtr;
- jumpPtr = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- fgLoopCallTest(block, *jumpPtr);
- } while (++jumpPtr, --jumpCnt);
-
+ fgLoopCallTest(block, bTarget);
+ }
break;
default:
@@ -1849,7 +1840,7 @@ void Compiler::fgAddSyncMethodEnterExit()
fgCreateMonitorTree(lvaMonAcquired, lvaCopyThis, faultBB, false /*exit*/);
// non-exceptional cases
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->bbJumpKind == BBJ_RETURN)
{
@@ -2091,7 +2082,7 @@ bool Compiler::fgMoreThanOneReturnBlock()
{
unsigned retCnt = 0;
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->bbJumpKind == BBJ_RETURN)
{
@@ -2920,10 +2911,10 @@ void Compiler::fgFindOperOrder()
/* Walk the basic blocks and for each statement determine
* the evaluation order, cost, FP levels, etc... */
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
compCurBB = block;
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
/* Recursively process the statement */
@@ -2953,7 +2944,7 @@ void Compiler::fgSimpleLowering()
unsigned outgoingArgSpaceSize = 0;
#endif // FEATURE_FIXED_OUT_ARGS
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Walk the statement trees in this basic block.
compCurBB = block; // Used in fgRngChkTarget.
@@ -3187,9 +3178,8 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block)
// the handler go to the prolog. Edges coming from within the handler are back-edges, and
// go to the existing 'block'.
- for (flowList* pred = block->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* predBlock = pred->getBlock();
if (!fgIsIntraHandlerPred(predBlock, block))
{
// It's a jump from outside the handler; add it to the newHead preds list and remove
@@ -3235,11 +3225,9 @@ void Compiler::fgCreateFuncletPrologBlocks()
noway_assert(!fgDomsComputed); // this function doesn't maintain the dom sets
assert(!fgFuncletsCreated);
- bool prologBlocksCreated = false;
- EHblkDsc* HBtabEnd;
- EHblkDsc* HBtab;
+ bool prologBlocksCreated = false;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
BasicBlock* head = HBtab->ebdHndBeg;
@@ -4295,7 +4283,7 @@ void Compiler::fgSetBlockOrder()
/* If we don't compute the doms, then we never mark blocks as loops. */
if (fgDomsComputed)
{
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
/* If this block is a loop header, mark it appropriately */
@@ -4310,11 +4298,7 @@ void Compiler::fgSetBlockOrder()
/* If we don't have the dominators, use an abbreviated test for fully interruptible. If there are
* any back edges, check the source and destination blocks to see if they're GC Safe. If not, then
* go fully interruptible. */
-
- /* XXX Mon 1/21/2008
- * Wouldn't it be nice to have a block iterator that can do this loop?
- */
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// true if the edge is forward, or if it is a back edge and either the source and dest are GC safe.
#define EDGE_IS_GC_SAFE(src, dst) \
@@ -4329,17 +4313,10 @@ void Compiler::fgSetBlockOrder()
break;
case BBJ_SWITCH:
-
- unsigned jumpCnt;
- jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpPtr;
- jumpPtr = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- partiallyInterruptible &= EDGE_IS_GC_SAFE(block, *jumpPtr);
- } while (++jumpPtr, --jumpCnt);
-
+ partiallyInterruptible &= EDGE_IS_GC_SAFE(block, bTarget);
+ }
break;
default:
@@ -4363,7 +4340,7 @@ void Compiler::fgSetBlockOrder()
}
}
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
#if FEATURE_FASTTAILCALL
@@ -4482,7 +4459,7 @@ void Compiler::fgSetStmtSeq(Statement* stmt)
void Compiler::fgSetBlockOrder(BasicBlock* block)
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
fgSetStmtSeq(stmt);
diff --git a/src/coreclr/jit/gcencode.cpp b/src/coreclr/jit/gcencode.cpp
index 8c5fcd65b046d5..60011aade54995 100644
--- a/src/coreclr/jit/gcencode.cpp
+++ b/src/coreclr/jit/gcencode.cpp
@@ -127,9 +127,8 @@ ReturnKind GCInfo::getReturnKind()
void GCInfo::gcMarkFilterVarsPinned()
{
assert(compiler->ehAnyFunclets());
- const EHblkDsc* endHBtab = &(compiler->compHndBBtab[compiler->compHndBBtabCount]);
- for (EHblkDsc* HBtab = compiler->compHndBBtab; HBtab < endHBtab; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(compiler))
{
if (HBtab->HasFilter())
{
@@ -3907,19 +3906,22 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder, unsigned methodSiz
assert(false);
}
- int offset = 0;
+ const int offset = compiler->lvaToCallerSPRelativeOffset(compiler->lvaCachedGenericContextArgOffset(),
+ compiler->isFramePointerUsed());
+#ifdef DEBUG
if (compiler->opts.IsOSR())
{
- PatchpointInfo* ppInfo = compiler->info.compPatchpointInfo;
- offset = ppInfo->GenericContextArgOffset();
- assert(offset != -1);
- }
- else
- {
- offset = compiler->lvaToCallerSPRelativeOffset(compiler->lvaCachedGenericContextArgOffset(),
- compiler->isFramePointerUsed());
+ // Sanity check the offset vs saved patchpoint info.
+ //
+        // PP info has an FP-relative offset; to get to caller SP we need to
+ // subtract off 2 register slots (saved FP, saved RA).
+ //
+ const PatchpointInfo* const ppInfo = compiler->info.compPatchpointInfo;
+ const int osrOffset = ppInfo->GenericContextArgOffset() - 2 * REGSIZE_BYTES;
+ assert(offset == osrOffset);
}
+#endif
gcInfoEncoderWithLog->SetGenericsInstContextStackSlot(offset, ctxtParamType);
}
@@ -3929,30 +3931,33 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder, unsigned methodSiz
{
assert(compiler->info.compThisArg != BAD_VAR_NUM);
- int offset = 0;
-
// OSR can report the root method's frame slot, if that method reported context.
+ // If not, the OSR frame will have saved the needed context.
//
- bool isOsrAndUsingRootFrameSlot = false;
+ bool useRootFrameSlot = true;
if (compiler->opts.IsOSR())
{
- PatchpointInfo* const ppInfo = compiler->info.compPatchpointInfo;
+ const PatchpointInfo* const ppInfo = compiler->info.compPatchpointInfo;
- if (ppInfo->HasKeptAliveThis())
- {
- offset = ppInfo->GenericContextArgOffset();
- assert(offset != -1);
- isOsrAndUsingRootFrameSlot = true;
- }
+ useRootFrameSlot = ppInfo->HasKeptAliveThis();
}
- // If not OSR, or OSR but newly reporting context, use the current frame offset.
- //
- if (!isOsrAndUsingRootFrameSlot)
+ const int offset = compiler->lvaToCallerSPRelativeOffset(compiler->lvaCachedGenericContextArgOffset(),
+ compiler->isFramePointerUsed(), useRootFrameSlot);
+
+#ifdef DEBUG
+ if (compiler->opts.IsOSR() && useRootFrameSlot)
{
- offset = compiler->lvaToCallerSPRelativeOffset(compiler->lvaCachedGenericContextArgOffset(),
- compiler->isFramePointerUsed());
+ // Sanity check the offset vs saved patchpoint info.
+ //
+        // PP info has an FP-relative offset; to get to caller SP we need to
+ // subtract off 2 register slots (saved FP, saved RA).
+ //
+ const PatchpointInfo* const ppInfo = compiler->info.compPatchpointInfo;
+ const int osrOffset = ppInfo->KeptAliveThisOffset() - 2 * REGSIZE_BYTES;
+ assert(offset == osrOffset);
}
+#endif
gcInfoEncoderWithLog->SetGenericsInstContextStackSlot(offset, GENERIC_CONTEXTPARAM_THIS);
}
@@ -3962,22 +3967,7 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder, unsigned methodSiz
assert(compiler->lvaGSSecurityCookie != BAD_VAR_NUM);
// The lv offset is FP-relative, and the using code expects caller-sp relative, so translate.
- int offset = compiler->lvaGetCallerSPRelativeOffset(compiler->lvaGSSecurityCookie);
-
- if (compiler->opts.IsOSR())
- {
- // The offset computed above already includes the OSR frame adjustment, plus the
- // pop of the "pseudo return address" from the OSR frame.
- //
- // To get to caller-SP, we need to subtract off the original frame size and the
- // pushed RA and RBP for that frame. But ppInfo's FpToSpDelta also accounts for the
- // pseudo RA between the original method frame and the OSR frame. So the net adjustment
- // is simply FpToSpDelta plus one register.
- PatchpointInfo* ppInfo = compiler->info.compPatchpointInfo;
- int adjustment = ppInfo->FpToSpDelta() + REGSIZE_BYTES;
- offset -= adjustment;
- JITDUMP("OSR cookie adjustment %d, final caller-SP offset %d\n", adjustment, offset);
- }
+ const int offset = compiler->lvaGetCallerSPRelativeOffset(compiler->lvaGSSecurityCookie);
// The code offset ranges assume that the GS Cookie slot is initialized in the prolog, and is valid
// through the remainder of the method. We will not query for the GS Cookie while we're in an epilog,
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index ca0ef63b174887..3596de621d664b 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -569,9 +569,9 @@ void GenTree::DumpNodeSizes(FILE* fp)
void Compiler::fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData)
{
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
fgWalkTreePre(stmt->GetRootNodePointer(), visitor, pCallBackData);
}
@@ -7137,8 +7137,8 @@ bool GenTreeOp::UsesDivideByConstOptimized(Compiler* comp)
else
{
// If the divisor is greater than or equal to 2^(N - 1) then the result is either 0 or 1
- if (((divType == TYP_INT) && (divisorValue > (UINT32_MAX / 2))) ||
- ((divType == TYP_LONG) && (divisorValue > (UINT64_MAX / 2))))
+ if (((divType == TYP_INT) && ((UINT32)divisorValue > (UINT32_MAX / 2))) ||
+ ((divType == TYP_LONG) && ((UINT64)divisorValue > (UINT64_MAX / 2))))
{
return true;
}
@@ -12417,7 +12417,7 @@ void Compiler::gtDispStmt(Statement* stmt, const char* msg /* = nullptr */)
//
void Compiler::gtDispBlockStmts(BasicBlock* block)
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
gtDispStmt(stmt);
printf("\n");
diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h
index b476837739a850..e2a5045e9069b4 100644
--- a/src/coreclr/jit/gentree.h
+++ b/src/coreclr/jit/gentree.h
@@ -5686,6 +5686,13 @@ struct GenTreeBlk : public GenTreeIndir
bool gtBlkOpGcUnsafe;
#endif
+#ifdef TARGET_XARCH
+ bool IsOnHeapAndContainsReferences()
+ {
+ return (m_layout != nullptr) && m_layout->HasGCPtr() && !Addr()->OperIsLocalAddr();
+ }
+#endif
+
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, nullptr)
, m_layout(layout)
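The new xarch-only `IsOnHeapAndContainsReferences` helper bundles the conditions under which a block copy has to stay GC-aware. A hypothetical call site — the surrounding logic is illustrative, not part of this change — might look like:

// In block-op lowering/codegen: pick a copy strategy for a struct store.
if (blkNode->IsOnHeapAndContainsReferences())
{
    // The destination may be on the GC heap and the layout has GC pointers,
    // so the copy must use a barrier-aware sequence (no raw rep movs).
}
else
{
    // Local or pointer-free destination: an unrolled or rep-mov copy is fine.
}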
@@ -5946,6 +5953,58 @@ struct GenTreeILOffset : public GenTree
#endif
};
+// GenTreeList: adapter class for forward iteration of the execution order GenTree linked list
+// using range-based `for`, normally used via Statement::TreeList(), e.g.:
+// for (GenTree* const tree : stmt->TreeList()) ...
+//
+class GenTreeList
+{
+ GenTree* m_trees;
+
+ // Forward iterator for the execution order GenTree linked list (using `gtNext` pointer).
+ //
+ class iterator
+ {
+ GenTree* m_tree;
+
+ public:
+ iterator(GenTree* tree) : m_tree(tree)
+ {
+ }
+
+ GenTree* operator*() const
+ {
+ return m_tree;
+ }
+
+ iterator& operator++()
+ {
+ m_tree = m_tree->gtNext;
+ return *this;
+ }
+
+ bool operator!=(const iterator& i) const
+ {
+ return m_tree != i.m_tree;
+ }
+ };
+
+public:
+ GenTreeList(GenTree* trees) : m_trees(trees)
+ {
+ }
+
+ iterator begin() const
+ {
+ return iterator(m_trees);
+ }
+
+ iterator end() const
+ {
+ return iterator(nullptr);
+ }
+};
+
// We use the following format when printing the Statement number: Statement->GetID()
// This define is used with string concatenation to put this in printf format strings (Note that %u means unsigned int)
#define FMT_STMT "STMT%05u"
@@ -5993,6 +6052,15 @@ struct Statement
m_treeList = treeHead;
}
+ // TreeList: convenience method for enabling range-based `for` iteration over the
+ // execution order of the GenTree linked list, e.g.:
+ // for (GenTree* const tree : stmt->TreeList()) ...
+ //
+ GenTreeList TreeList() const
+ {
+ return GenTreeList(GetTreeList());
+ }
+
InlineContext* GetInlineContext() const
{
return m_inlineContext;
@@ -6103,49 +6171,57 @@ struct Statement
bool m_compilerAdded; // Was the statement created by optimizer?
};
-class StatementIterator
+// StatementList: adapter class for forward iteration of the statement linked list using range-based `for`,
+// normally used via BasicBlock::Statements(), e.g.:
+// for (Statement* const stmt : block->Statements()) ...
+// or:
+// for (Statement* const stmt : block->NonPhiStatements()) ...
+//
+class StatementList
{
- Statement* m_stmt;
+ Statement* m_stmts;
-public:
- StatementIterator(Statement* stmt) : m_stmt(stmt)
+ // Forward iterator for the statement linked list.
+ //
+ class iterator
{
- }
+ Statement* m_stmt;
- Statement* operator*() const
- {
- return m_stmt;
- }
+ public:
+ iterator(Statement* stmt) : m_stmt(stmt)
+ {
+ }
- StatementIterator& operator++()
- {
- m_stmt = m_stmt->GetNextStmt();
- return *this;
- }
+ Statement* operator*() const
+ {
+ return m_stmt;
+ }
- bool operator!=(const StatementIterator& i) const
- {
- return m_stmt != i.m_stmt;
- }
-};
+ iterator& operator++()
+ {
+ m_stmt = m_stmt->GetNextStmt();
+ return *this;
+ }
-class StatementList
-{
- Statement* m_stmts;
+ bool operator!=(const iterator& i) const
+ {
+ return m_stmt != i.m_stmt;
+ }
+ };
public:
StatementList(Statement* stmts) : m_stmts(stmts)
{
}
- StatementIterator begin() const
+ iterator begin() const
{
- return StatementIterator(m_stmts);
+ return iterator(m_stmts);
}
- StatementIterator end() const
+ iterator end() const
{
- return StatementIterator(nullptr);
+ return iterator(nullptr);
}
};
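
For orientation, the adapters above compose with the existing `Compiler::Blocks()` iterator; a minimal sketch (illustrative only, assuming `comp` is a `Compiler*`) of a whole-method walk in execution order:

    // Visit every tree of every statement of every block.
    for (BasicBlock* const block : comp->Blocks())
    {
        for (Statement* const stmt : block->Statements())
        {
            for (GenTree* const tree : stmt->TreeList())
            {
                // `tree` follows the gtNext links (execution order).
            }
        }
    }
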
diff --git a/src/coreclr/jit/gschecks.cpp b/src/coreclr/jit/gschecks.cpp
index 3dc60f8e12d9a8..23d02f82bd57b5 100644
--- a/src/coreclr/jit/gschecks.cpp
+++ b/src/coreclr/jit/gschecks.cpp
@@ -480,9 +480,9 @@ void Compiler::gsParamsToShadows()
}
};
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
ReplaceShadowParamsVisitor replaceShadowParamsVisitor(this);
replaceShadowParamsVisitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
@@ -530,7 +530,7 @@ void Compiler::gsParamsToShadows()
{
// There could be more than one basic block ending with a "Jmp" type tail call.
// We would have to insert assignments in all such blocks, just before GT_JMP stmnt.
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->bbJumpKind != BBJ_RETURN)
{
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 4521a68735910f..fc8544f9138ba6 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -6442,7 +6442,10 @@ GenTree* Compiler::impImportLdvirtftn(GenTree* thisPtr,
// pResolvedToken is known to be a value type; ref type boxing
// is handled in the CEE_BOX clause.
-int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp)
+int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ const BYTE* codeAddr,
+ const BYTE* codeEndp,
+ bool makeInlineObservation)
{
if (codeAddr >= codeEndp)
{
@@ -6455,6 +6458,12 @@ int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const B
// box + unbox.any
if (codeAddr + 1 + sizeof(mdToken) <= codeEndp)
{
+ if (makeInlineObservation)
+ {
+ compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
+ return 1 + sizeof(mdToken);
+ }
+
CORINFO_RESOLVED_TOKEN unboxResolvedToken;
impResolveToken(codeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class);
@@ -6480,6 +6489,12 @@ int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const B
// box + br_true/false
if ((codeAddr + ((codeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp)
{
+ if (makeInlineObservation)
+ {
+ compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
+ return 0;
+ }
+
GenTree* const treeToBox = impStackTop().val;
bool canOptimize = true;
GenTree* treeToNullcheck = nullptr;
@@ -6542,6 +6557,12 @@ int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const B
case CEE_BRFALSE_S:
if ((nextCodeAddr + ((nextCodeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp)
{
+ if (makeInlineObservation)
+ {
+ compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
+ return 1 + sizeof(mdToken);
+ }
+
if (!(impStackTop().val->gtFlags & GTF_SIDE_EFFECT))
{
CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
@@ -6618,6 +6639,12 @@ int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const B
case CEE_UNBOX_ANY:
if ((nextCodeAddr + 1 + sizeof(mdToken)) <= codeEndp)
{
+ if (makeInlineObservation)
+ {
+ compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
+ return 2 + sizeof(mdToken) * 2;
+ }
+
// See if the resolved tokens in box, isinst and unbox.any describe types that are equal.
CORINFO_RESOLVED_TOKEN isinstResolvedToken = {};
impResolveToken(codeAddr + 1, &isinstResolvedToken, CORINFO_TOKENKIND_Class);
@@ -8929,8 +8956,9 @@ var_types Compiler::impImportCall(OPCODE opcode,
const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
const bool isLateDevirtualization = false;
- impDevirtualizeCall(call->AsCall(), &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle,
- &exactContextHnd, isLateDevirtualization, isExplicitTailCall, rawILOffset);
+ impDevirtualizeCall(call->AsCall(), pResolvedToken, &callInfo->hMethod, &callInfo->methodFlags,
+ &callInfo->contextHandle, &exactContextHnd, isLateDevirtualization, isExplicitTailCall,
+ rawILOffset);
}
if (impIsThis(obj))
@@ -16258,11 +16286,25 @@ void Compiler::impImportBlockCode(BasicBlock* block)
"type operand incompatible with type of address");
}
- size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
- op2 = gtNewIconNode(0); // Value
- op1 = impPopStack().val; // Dest
- op1 = gtNewBlockVal(op1, size);
- op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false);
+ op2 = gtNewIconNode(0); // Value
+ op1 = impPopStack().val; // Dest
+
+ if (eeIsValueClass(resolvedToken.hClass))
+ {
+ op1 = gtNewStructVal(resolvedToken.hClass, op1);
+ if (op1->OperIs(GT_OBJ))
+ {
+ gtSetObjGcInfo(op1->AsObj());
+ }
+ }
+ else
+ {
+ size = info.compCompHnd->getClassSize(resolvedToken.hClass);
+ assert(size == TARGET_POINTER_SIZE);
+ op1 = gtNewBlockVal(op1, size);
+ }
+
+ op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false);
goto SPILL_APPEND;
case CEE_INITBLK:
@@ -17376,10 +17418,9 @@ inline void Compiler::impReimportMarkBlock(BasicBlock* block)
void Compiler::impReimportMarkSuccessors(BasicBlock* block)
{
- const unsigned numSuccs = block->NumSucc();
- for (unsigned i = 0; i < numSuccs; i++)
+ for (BasicBlock* const succBlock : block->Succs())
{
- impReimportMarkBlock(block->GetSucc(i));
+ impReimportMarkBlock(succBlock);
}
}
@@ -17562,10 +17603,9 @@ void Compiler::impImportBlock(BasicBlock* block)
JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum);
block->bbFlags |= BBF_IMPORTED;
- const unsigned numSuccs = block->NumSucc();
- for (unsigned i = 0; i < numSuccs; i++)
+ for (BasicBlock* const succBlock : block->Succs())
{
- impImportBlockPending(block->GetSucc(i));
+ impImportBlockPending(succBlock);
}
return;
@@ -17722,20 +17762,11 @@ void Compiler::impImportBlock(BasicBlock* block)
break;
case BBJ_SWITCH:
-
- BasicBlock** jmpTab;
- unsigned jmpCnt;
-
addStmt = impExtractLastStmt();
assert(addStmt->GetRootNode()->gtOper == GT_SWITCH);
- jmpCnt = block->bbJumpSwt->bbsCount;
- jmpTab = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const tgtBlock : block->SwitchTargets())
{
- tgtBlock = (*jmpTab);
-
multRef |= tgtBlock->bbRefs;
// Thanks to spill cliques, we should have assigned all or none
@@ -17745,8 +17776,7 @@ void Compiler::impImportBlock(BasicBlock* block)
{
break;
}
- } while (++jmpTab, --jmpCnt);
-
+ }
break;
case BBJ_CALLFINALLY:
@@ -17964,10 +17994,8 @@ void Compiler::impImportBlock(BasicBlock* block)
impReimportSpillClique(block);
// For blocks that haven't been imported yet, we still need to mark them as pending import.
- const unsigned numSuccs = block->NumSucc();
- for (unsigned i = 0; i < numSuccs; i++)
+ for (BasicBlock* const succ : block->Succs())
{
- BasicBlock* succ = block->GetSucc(i);
if ((succ->bbFlags & BBF_IMPORTED) == 0)
{
impImportBlockPending(succ);
@@ -17979,10 +18007,9 @@ void Compiler::impImportBlock(BasicBlock* block)
// otherwise just import the successors of block
/* Does this block jump to any other blocks? */
- const unsigned numSuccs = block->NumSucc();
- for (unsigned i = 0; i < numSuccs; i++)
+ for (BasicBlock* const succ : block->Succs())
{
- impImportBlockPending(block->GetSucc(i));
+ impImportBlockPending(succ);
}
}
}
@@ -18237,10 +18264,8 @@ void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker*
BasicBlock* blk = node->m_blk;
FreeBlockListNode(node);
- const unsigned numSuccs = blk->NumSucc();
- for (unsigned succNum = 0; succNum < numSuccs; succNum++)
+ for (BasicBlock* const succ : blk->Succs())
{
- BasicBlock* succ = blk->GetSucc(succNum);
// If it's not already in the clique, add it, and also add it
// as a member of the successor "toDo" set.
if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
@@ -18491,12 +18516,12 @@ void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
return;
}
-ThisInitState BasicBlock::bbThisOnEntry()
+ThisInitState BasicBlock::bbThisOnEntry() const
{
return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
}
-unsigned BasicBlock::bbStackDepthOnEntry()
+unsigned BasicBlock::bbStackDepthOnEntry() const
{
return (bbEntryState ? bbEntryState->esStackDepth : 0);
}
@@ -18508,7 +18533,7 @@ void BasicBlock::bbSetStack(void* stackBuffer)
bbEntryState->esStack = (StackEntry*)stackBuffer;
}
-StackEntry* BasicBlock::bbStackOnEntry()
+StackEntry* BasicBlock::bbStackOnEntry() const
{
assert(bbEntryState);
return bbEntryState->esStack;
@@ -18753,7 +18778,7 @@ void Compiler::impImport()
}
// Used in impImportBlockPending() for STRESS_CHK_REIMPORT
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbFlags &= ~BBF_VISITED;
}
@@ -18885,6 +18910,86 @@ void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, I
}
}
+ bool callsiteIsGeneric = (rootCompiler->info.compMethodInfo->args.sigInst.methInstCount != 0) ||
+ (rootCompiler->info.compMethodInfo->args.sigInst.classInstCount != 0);
+
+ bool calleeIsGeneric = (info.compMethodInfo->args.sigInst.methInstCount != 0) ||
+ (info.compMethodInfo->args.sigInst.classInstCount != 0);
+
+ if (!callsiteIsGeneric && calleeIsGeneric)
+ {
+ inlineResult->Note(InlineObservation::CALLSITE_NONGENERIC_CALLS_GENERIC);
+ }
+
+ if (pInlineInfo != nullptr)
+ {
+ // Inspect callee's arguments (and the actual values at the callsite for them)
+ CORINFO_SIG_INFO sig = info.compMethodInfo->args;
+ CORINFO_ARG_LIST_HANDLE sigArg = sig.args;
+
+ GenTreeCall::Use* argUse = pInlineInfo->iciCall->AsCall()->gtCallArgs;
+
+ for (unsigned i = 0; i < info.compMethodInfo->args.numArgs; i++)
+ {
+ assert(argUse != nullptr);
+
+ CORINFO_CLASS_HANDLE sigClass;
+ CorInfoType corType = strip(info.compCompHnd->getArgType(&sig, sigArg, &sigClass));
+ GenTree* argNode = argUse->GetNode()->gtSkipPutArgType();
+
+ if (corType == CORINFO_TYPE_CLASS)
+ {
+ sigClass = info.compCompHnd->getArgClass(&sig, sigArg);
+ }
+ else if (corType == CORINFO_TYPE_VALUECLASS)
+ {
+ inlineResult->Note(InlineObservation::CALLEE_ARG_STRUCT);
+ }
+ else if (corType == CORINFO_TYPE_BYREF)
+ {
+ sigClass = info.compCompHnd->getArgClass(&sig, sigArg);
+ corType = info.compCompHnd->getChildType(sigClass, &sigClass);
+ }
+
+ bool isExact = false;
+ bool isNonNull = false;
+ CORINFO_CLASS_HANDLE argCls = gtGetClassHandle(argNode, &isExact, &isNonNull);
+ if (argCls != nullptr)
+ {
+ const bool isArgValueType = eeIsValueClass(argCls);
+ // Exact class of the arg is known
+ if (isExact && !isArgValueType)
+ {
+ inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS);
+ if ((argCls != sigClass) && (sigClass != nullptr))
+ {
+ // .. but the signature accepts a less concrete type.
+ inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS_SIG_IS_NOT);
+ }
+ }
+ // Arg is a reference type in the signature and a boxed value type was passed.
+ else if (isArgValueType && (corType == CORINFO_TYPE_CLASS))
+ {
+ inlineResult->Note(InlineObservation::CALLSITE_ARG_BOXED);
+ }
+ }
+
+ if (argNode->OperIsConst())
+ {
+ inlineResult->Note(InlineObservation::CALLSITE_ARG_CONST);
+ }
+
+ sigArg = info.compCompHnd->getArgNext(sigArg);
+ argUse = argUse->GetNext();
+ }
+ }
+
+ // Note if the callee's return type is a value type
+ if (info.compMethodInfo->args.retType == CORINFO_TYPE_VALUECLASS)
+ {
+ inlineResult->Note(InlineObservation::CALLEE_RETURNS_STRUCT);
+ }
+
// Note if the callee's class is a promotable struct
if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
{
@@ -20643,6 +20748,7 @@ bool Compiler::IsMathIntrinsic(GenTree* tree)
//
// Arguments:
// call -- the call node to examine/modify
+// pResolvedToken -- [IN] the resolved token used to create the call. Used for R2R.
// method -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
// methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
// pContextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
@@ -20681,8 +20787,8 @@ bool Compiler::IsMathIntrinsic(GenTree* tree)
// When guarded devirtualization is enabled, this method will mark
// calls as guarded devirtualization candidates, if the type of `this`
// is not exactly known, and there is a plausible guess for the type.
-//
void Compiler::impDevirtualizeCall(GenTreeCall* call,
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_METHOD_HANDLE* method,
unsigned* methodFlags,
CORINFO_CONTEXT_HANDLE* pContextHandle,
@@ -20881,16 +20987,18 @@ void Compiler::impDevirtualizeCall(GenTreeCall* call,
// and prepare to fetch the method attributes.
//
CORINFO_DEVIRTUALIZATION_INFO dvInfo;
- dvInfo.virtualMethod = baseMethod;
- dvInfo.objClass = objClass;
- dvInfo.context = *pContextHandle;
- dvInfo.detail = CORINFO_DEVIRTUALIZATION_UNKNOWN;
+ dvInfo.virtualMethod = baseMethod;
+ dvInfo.objClass = objClass;
+ dvInfo.context = *pContextHandle;
+ dvInfo.detail = CORINFO_DEVIRTUALIZATION_UNKNOWN;
+ dvInfo.pResolvedTokenVirtualMethod = pResolvedToken;
info.compCompHnd->resolveVirtualMethod(&dvInfo);
- CORINFO_METHOD_HANDLE derivedMethod = dvInfo.devirtualizedMethod;
- CORINFO_CONTEXT_HANDLE exactContext = dvInfo.exactContext;
- CORINFO_CLASS_HANDLE derivedClass = NO_CLASS_HANDLE;
+ CORINFO_METHOD_HANDLE derivedMethod = dvInfo.devirtualizedMethod;
+ CORINFO_CONTEXT_HANDLE exactContext = dvInfo.exactContext;
+ CORINFO_CLASS_HANDLE derivedClass = NO_CLASS_HANDLE;
+ CORINFO_RESOLVED_TOKEN* pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedMethod;
if (derivedMethod != nullptr)
{
@@ -21104,14 +21212,6 @@ void Compiler::impDevirtualizeCall(GenTreeCall* call,
{
JITDUMP("Have a direct explicit tail call to boxed entry point; can't optimize further\n");
}
- else if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT))
- {
- // Per https://github.com/dotnet/runtime/issues/52483, crossgen2 seemingly gets
- // confused about whether the unboxed entry requires an extra arg.
- // So defer further optimization for now.
- //
- JITDUMP("Have a direct boxed entry point, prejitting. Can't optimize further yet.\n");
- }
else
{
JITDUMP("Have a direct call to boxed entry point. Trying to optimize to call an unboxed entry point\n");
@@ -21189,8 +21289,9 @@ void Compiler::impDevirtualizeCall(GenTreeCall* call,
beforeArg->SetNext(gtNewCallArgs(methodTableArg));
}
- call->gtCallMethHnd = unboxedEntryMethod;
- derivedMethod = unboxedEntryMethod;
+ call->gtCallMethHnd = unboxedEntryMethod;
+ derivedMethod = unboxedEntryMethod;
+ pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod;
// Method attributes will differ because unboxed entry point is shared
//
@@ -21222,7 +21323,8 @@ void Compiler::impDevirtualizeCall(GenTreeCall* call,
call->gtCallThisArg = gtNewCallArgs(localCopyThis);
call->gtCallMethHnd = unboxedEntryMethod;
call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
- derivedMethod = unboxedEntryMethod;
+ derivedMethod = unboxedEntryMethod;
+ pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod;
optimizedTheBox = true;
}
@@ -21290,8 +21392,9 @@ void Compiler::impDevirtualizeCall(GenTreeCall* call,
const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs,
unboxedMethodAttribs);
- derivedMethod = unboxedEntryMethod;
- derivedMethodAttribs = unboxedMethodAttribs;
+ derivedMethod = unboxedEntryMethod;
+ pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod;
+ derivedMethodAttribs = unboxedMethodAttribs;
// Add the method table argument.
//
@@ -21334,7 +21437,8 @@ void Compiler::impDevirtualizeCall(GenTreeCall* call,
call->gtCallThisArg = gtNewCallArgs(boxPayload);
call->gtCallMethHnd = unboxedEntryMethod;
call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
- derivedMethod = unboxedEntryMethod;
+ derivedMethod = unboxedEntryMethod;
+ pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod;
}
}
}
@@ -21370,21 +21474,11 @@ void Compiler::impDevirtualizeCall(GenTreeCall* call,
if (opts.IsReadyToRun())
{
// For R2R, getCallInfo triggers bookkeeping on the zap
- // side so we need to call it here.
- //
- // First, cons up a suitable resolved token.
- CORINFO_RESOLVED_TOKEN derivedResolvedToken = {};
-
- derivedResolvedToken.tokenScope = info.compCompHnd->getMethodModule(derivedMethod);
- derivedResolvedToken.tokenContext = *pContextHandle;
- derivedResolvedToken.token = info.compCompHnd->getMethodDefFromMethod(derivedMethod);
- derivedResolvedToken.tokenType = CORINFO_TOKENKIND_DevirtualizedMethod;
- derivedResolvedToken.hClass = derivedClass;
- derivedResolvedToken.hMethod = derivedMethod;
+    // side and acquires the actual symbol to call, so we need to call it here.
// Look up the new call info.
CORINFO_CALL_INFO derivedCallInfo;
- eeGetCallInfo(&derivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);
+ eeGetCallInfo(pDerivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);
// Update the call.
call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
@@ -21666,9 +21760,11 @@ void Compiler::considerGuardedDevirtualization(
// Figure out which method will be called.
//
CORINFO_DEVIRTUALIZATION_INFO dvInfo;
- dvInfo.virtualMethod = baseMethod;
- dvInfo.objClass = likelyClass;
- dvInfo.context = *pContextHandle;
+ dvInfo.virtualMethod = baseMethod;
+ dvInfo.objClass = likelyClass;
+ dvInfo.context = *pContextHandle;
+ dvInfo.exactContext = *pContextHandle;
+ dvInfo.pResolvedTokenVirtualMethod = nullptr;
const bool canResolve = info.compCompHnd->resolveVirtualMethod(&dvInfo);
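
With `makeInlineObservation == true`, `impBoxPatternMatch` becomes a pure pattern scan: it notes CALLEE_FOLDABLE_BOX and returns how many extra IL bytes the matched pattern occupies, without resolving tokens or touching the import stack. A hypothetical caller sketch (variable names are assumptions; the intended call site is the inliner's IL pre-scan):

    // Sketch: at a BOX opcode during an IL pre-scan; `codeAddr` points just
    // past the BOX instruction and its token. No resolved token is needed in
    // observation mode, since the early returns fire before it is used.
    const int toSkip = impBoxPatternMatch(nullptr, codeAddr, codeEndp,
                                          /* makeInlineObservation */ true);
    codeAddr += toSkip; // 0 means nothing extra to skip
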
diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp
index 5b6b663e7e035d..f36a5a2c1691c4 100644
--- a/src/coreclr/jit/indirectcalltransformer.cpp
+++ b/src/coreclr/jit/indirectcalltransformer.cpp
@@ -80,7 +80,7 @@ class IndirectCallTransformer
{
int count = 0;
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
count += TransformBlock(block);
}
@@ -100,7 +100,7 @@ class IndirectCallTransformer
{
int count = 0;
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
if (compiler->doesMethodHaveFatPointer() && ContainsFatCalli(stmt))
{
@@ -754,8 +754,8 @@ class IndirectCallTransformer
CORINFO_CONTEXT_HANDLE context = inlineInfo->exactContextHnd;
const bool isLateDevirtualization = true;
const bool explicitTailCall = (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) != 0;
- compiler->impDevirtualizeCall(call, &methodHnd, &methodFlags, &context, nullptr, isLateDevirtualization,
- explicitTailCall);
+ compiler->impDevirtualizeCall(call, nullptr, &methodHnd, &methodFlags, &context, nullptr,
+ isLateDevirtualization, explicitTailCall);
// We know this call can devirtualize or we would not have set up GDV here.
// So impDevirtualizeCall should succeed in devirtualizing.
@@ -1012,7 +1012,7 @@ class IndirectCallTransformer
}
};
- for (Statement* nextStmt : remainderBlock->Statements())
+ for (Statement* const nextStmt : remainderBlock->Statements())
{
JITDUMP(" Scouting " FMT_STMT "\n", nextStmt->GetID());
@@ -1273,9 +1273,9 @@ void Compiler::CheckNoTransformableIndirectCallsRemain()
assert(!doesMethodHaveGuardedDevirtualization());
assert(!doesMethodHaveExpRuntimeLookup());
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
fgWalkTreePre(stmt->GetRootNodePointer(), fgDebugCheckForTransformableIndirectCalls);
}
diff --git a/src/coreclr/jit/inline.cpp b/src/coreclr/jit/inline.cpp
index b2db02dcd25c63..4e86755491b8f0 100644
--- a/src/coreclr/jit/inline.cpp
+++ b/src/coreclr/jit/inline.cpp
@@ -466,6 +466,36 @@ void InlineContext::DumpData(unsigned indent)
}
}
+//------------------------------------------------------------------------
+// EscapeNameForXml: Cheap xml quoting for values. Only < and & are
+// troublemakers, but change > for symmetry.
+//
+// Arguments:
+//    name     - string to escape (modified in place)
+
+static void EscapeNameForXml(char* name)
+{
+ int i = 0;
+ while (name[i] != '\0')
+ {
+ switch (name[i])
+ {
+ case '<':
+ name[i] = '[';
+ break;
+ case '>':
+ name[i] = ']';
+ break;
+ case '&':
+ name[i] = '#';
+ break;
+ default:
+ break;
+ }
+ i++;
+ }
+}
+
//------------------------------------------------------------------------
// DumpXml: Dump an InlineContext entry and all descendants in xml format
//
@@ -499,6 +529,12 @@ void InlineContext::DumpXml(FILE* file, unsigned indent)
mdMethodDef calleeToken = compiler->info.compCompHnd->getMethodDefFromMethod(m_Callee);
unsigned calleeHash = compiler->compMethodHash(m_Callee);
const char* inlineReason = InlGetObservationString(m_Observation);
+ const char* name = compiler->eeGetMethodFullName(m_Callee);
+
+ char buf[1024];
+ strncpy(buf, name, sizeof(buf));
+ buf[sizeof(buf) - 1] = 0;
+ EscapeNameForXml(buf);
int offset = -1;
if (m_Offset != BAD_IL_OFFSET)
@@ -511,6 +547,17 @@ void InlineContext::DumpXml(FILE* file, unsigned indent)
fprintf(file, "%*s%08x\n", indent + 2, "", calleeHash);
fprintf(file, "%*s%u\n", indent + 2, "", offset);
fprintf(file, "%*s%s\n", indent + 2, "", inlineReason);
+ fprintf(file, "%*s%s\n", indent + 2, "", buf);
+ fprintf(file, "%*s%d\n", indent + 2, "", m_ILSize);
+ fprintf(file, "%*s%s\n", indent + 2, "", m_Devirtualized ? "True" : "False");
+ fprintf(file, "%*s%s\n", indent + 2, "", m_Guarded ? "True" : "False");
+ fprintf(file, "%*s%s\n", indent + 2, "", m_Unboxed ? "True" : "False");
+
+ // Ask InlinePolicy if it has anything to dump as well:
+ if ((m_Policy != nullptr) && (JitConfig.JitInlinePolicyDumpXml() != 0))
+ {
+ m_Policy->DumpXml(file, indent + 2);
+ }
// Optionally, dump data about the inline
const int dumpDataSetting = JitConfig.JitInlineDumpData();
@@ -1548,8 +1595,7 @@ void InlineStrategy::DumpXml(FILE* file, unsigned indent)
const Compiler::Info& info = m_Compiler->info;
const Compiler::Options& opts = m_Compiler->opts;
- const bool isPrejitRoot = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT);
- const bool isForceInline = (info.compFlags & CORINFO_FLG_FORCEINLINE) != 0;
+ const bool isPrejitRoot = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT);
// We'd really like the method identifier to be unique and
// durable across crossgen invocations. Not clear how to
@@ -1573,44 +1619,23 @@ void InlineStrategy::DumpXml(FILE* file, unsigned indent)
// Get method name just for root method, to make it a bit easier
// to search for things in the inline xml.
- const char* methodName = info.compCompHnd->getMethodName(info.compMethodHnd, nullptr);
+ const char* methodName = m_Compiler->eeGetMethodFullName(info.compMethodHnd);
- // Cheap xml quoting for values. Only < and & are troublemakers,
- // but change > for symmetry.
- //
- // Ok to truncate name, just ensure it's null terminated.
- char buf[64];
+ char buf[1024];
strncpy(buf, methodName, sizeof(buf));
buf[sizeof(buf) - 1] = 0;
-
- for (size_t i = 0; i < _countof(buf); i++)
- {
- switch (buf[i])
- {
- case '<':
- buf[i] = '[';
- break;
- case '>':
- buf[i] = ']';
- break;
- case '&':
- buf[i] = '#';
- break;
- default:
- break;
- }
- }
+ EscapeNameForXml(buf);
fprintf(file, "%*s\n", indent, "");
fprintf(file, "%*s%08x\n", indent + 2, "", currentMethodToken);
fprintf(file, "%*s%08x\n", indent + 2, "", hash);
- fprintf(file, "%*s%s\n", indent + 2, "", buf);
fprintf(file, "%*s%u\n", indent + 2, "", m_InlineCount);
fprintf(file, "%*s%u\n", indent + 2, "", info.compTotalHotCodeSize);
fprintf(file, "%*s%u\n", indent + 2, "", info.compTotalColdCodeSize);
fprintf(file, "%*s%u\n", indent + 2, "", microsecondsSpentJitting);
fprintf(file, "%*s%u\n", indent + 2, "", m_CurrentSizeEstimate / 10);
fprintf(file, "%*s%u\n", indent + 2, "", m_CurrentTimeEstimate);
+ fprintf(file, "%*s%s\n", indent + 2, "", buf);
// For prejit roots also propagate out the assessment of the root method
if (isPrejitRoot)
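
The escaping above is deliberately lossy substitution rather than XML entity encoding. A self-contained sketch of the same transform (mirrors `EscapeNameForXml`; runnable outside the JIT) showing its effect on a typical generic method name:

    #include <cstdio>

    // In-place substitution: '<' -> '[', '>' -> ']', '&' -> '#'.
    static void EscapeNameForXml(char* name)
    {
        for (int i = 0; name[i] != '\0'; i++)
        {
            if (name[i] == '<')      name[i] = '[';
            else if (name[i] == '>') name[i] = ']';
            else if (name[i] == '&') name[i] = '#';
        }
    }

    int main()
    {
        char buf[] = "System.Collections.Generic.List`1<System.Int32>&";
        EscapeNameForXml(buf);
        printf("%s\n", buf); // System.Collections.Generic.List`1[System.Int32]#
    }
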
diff --git a/src/coreclr/jit/inline.def b/src/coreclr/jit/inline.def
index 3c3ef462a150f7..001cb415491928 100644
--- a/src/coreclr/jit/inline.def
+++ b/src/coreclr/jit/inline.def
@@ -68,10 +68,20 @@ INLINE_OBSERVATION(TOO_MUCH_IL, bool, "too many il bytes",
INLINE_OBSERVATION(ARG_FEEDS_CONSTANT_TEST, bool, "argument feeds constant test", INFORMATION, CALLEE)
INLINE_OBSERVATION(ARG_FEEDS_TEST, bool, "argument feeds test", INFORMATION, CALLEE)
+INLINE_OBSERVATION(ARG_FEEDS_CAST, int, "argument feeds castclass or isinst", INFORMATION, CALLEE)
INLINE_OBSERVATION(ARG_FEEDS_RANGE_CHECK, bool, "argument feeds range check", INFORMATION, CALLEE)
+INLINE_OBSERVATION(ARG_STRUCT, int, "arg is a struct passed by value", INFORMATION, CALLEE)
+INLINE_OBSERVATION(RETURNS_STRUCT, bool, "returns a struct by value", INFORMATION, CALLEE)
+INLINE_OBSERVATION(ARG_STRUCT_FIELD_ACCESS, int, "ldfld/stfld over arg (struct)", INFORMATION, CALLEE)
+INLINE_OBSERVATION(BINARY_EXRP_WITH_CNS, int, "'X op CNS' pattern", INFORMATION, CALLEE)
INLINE_OBSERVATION(BEGIN_OPCODE_SCAN, bool, "prepare to look at opcodes", INFORMATION, CALLEE)
INLINE_OBSERVATION(BELOW_ALWAYS_INLINE_SIZE, bool, "below ALWAYS_INLINE size", INFORMATION, CALLEE)
INLINE_OBSERVATION(CLASS_PROMOTABLE, bool, "promotable value class", INFORMATION, CALLEE)
+INLINE_OBSERVATION(CLASS_VALUETYPE, bool, "value class", INFORMATION, CALLEE)
+INLINE_OBSERVATION(FOLDABLE_BOX, int, "foldable box/unbox operation", INFORMATION, CALLEE)
+INLINE_OBSERVATION(INTRINSIC, int, "call marked as intrinsic", INFORMATION, CALLEE)
+INLINE_OBSERVATION(BACKWARD_JUMP, int, "backward jump", INFORMATION, CALLEE)
+INLINE_OBSERVATION(THROW_BLOCK, int, "throw block", INFORMATION, CALLEE)
INLINE_OBSERVATION(DOES_NOT_RETURN, bool, "does not return", INFORMATION, CALLEE)
INLINE_OBSERVATION(END_OPCODE_SCAN, bool, "done looking at opcodes", INFORMATION, CALLEE)
INLINE_OBSERVATION(HAS_GC_STRUCT, bool, "has gc field in struct local", INFORMATION, CALLEE)
@@ -162,12 +172,23 @@ INLINE_OBSERVATION(RARE_GC_STRUCT, bool, "rarely called, has gc str
// ------ Call Site Information -------
+INLINE_OBSERVATION(NONGENERIC_CALLS_GENERIC, bool, "callee is generic and caller is not", INFORMATION, CALLSITE)
+INLINE_OBSERVATION(ARG_EXACT_CLS, int, "arg is of an exact class", INFORMATION, CALLSITE)
+INLINE_OBSERVATION(ARG_EXACT_CLS_SIG_IS_NOT, int, "arg is more concrete than in sig.", INFORMATION, CALLSITE)
+INLINE_OBSERVATION(ARG_CONST, int, "arg is a constant", INFORMATION, CALLSITE)
+INLINE_OBSERVATION(ARG_BOXED, int, "arg is boxed at call site", INFORMATION, CALLSITE)
+INLINE_OBSERVATION(FOLDABLE_INTRINSIC, int, "foldable intrinsic", INFORMATION, CALLSITE)
+INLINE_OBSERVATION(FOLDABLE_EXPR, int, "foldable binary expression", INFORMATION, CALLSITE)
+INLINE_OBSERVATION(FOLDABLE_EXPR_UN, int, "foldable unary expression", INFORMATION, CALLSITE)
+INLINE_OBSERVATION(FOLDABLE_BRANCH, int, "foldable branch", INFORMATION, CALLSITE)
+INLINE_OBSERVATION(DIV_BY_CNS,                 int,    "divide by const",                 INFORMATION, CALLSITE)
INLINE_OBSERVATION(CONSTANT_ARG_FEEDS_TEST, bool, "constant argument feeds test", INFORMATION, CALLSITE)
INLINE_OBSERVATION(DEPTH, int, "depth", INFORMATION, CALLSITE)
INLINE_OBSERVATION(FREQUENCY, int, "rough call site frequency", INFORMATION, CALLSITE)
INLINE_OBSERVATION(HAS_PROFILE, bool, "profile data is available", INFORMATION, CALLSITE)
INLINE_OBSERVATION(IN_LOOP, bool, "call site is in a loop", INFORMATION, CALLSITE)
INLINE_OBSERVATION(IN_TRY_REGION, bool, "call site is in a try region", INFORMATION, CALLSITE)
+INLINE_OBSERVATION(IN_NORETURN_REGION, bool, "call site is in a no-return region", INFORMATION, CALLSITE)
INLINE_OBSERVATION(IS_PROFITABLE_INLINE, bool, "profitable inline", INFORMATION, CALLSITE)
INLINE_OBSERVATION(IS_SAME_THIS, bool, "same this as root caller", INFORMATION, CALLSITE)
INLINE_OBSERVATION(IS_SIZE_DECREASING_INLINE, bool, "size decreasing inline", INFORMATION, CALLSITE)
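
For readers new to this table: inline.def is an X-macro file; consumers define INLINE_OBSERVATION and include it. A sketch of the enum expansion (this mirrors how inline.h builds `InlineObservation`; shown for orientation):

    enum class InlineObservation
    {
    // Each row becomes an enumerator TARGET_NAME, e.g. the new DIV_BY_CNS row
    // above yields InlineObservation::CALLSITE_DIV_BY_CNS.
    #define INLINE_OBSERVATION(name, type, description, impact, target) target##_##name,
    #include "inline.def"
    #undef INLINE_OBSERVATION
    };
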
diff --git a/src/coreclr/jit/inline.h b/src/coreclr/jit/inline.h
index cfa8de36cabffe..f74f9e53109681 100644
--- a/src/coreclr/jit/inline.h
+++ b/src/coreclr/jit/inline.h
@@ -263,6 +263,10 @@ class InlinePolicy
virtual void DumpSchema(FILE* file) const
{
}
+ // Detailed data value dump as XML
+ virtual void DumpXml(FILE* file, unsigned indent = 0) const
+ {
+ }
// True if this is the inline targeted by data collection
bool IsDataCollectionTarget()
{
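
The empty default above means policies opt in to XML dumping. A hypothetical override (sketch only; `MyPolicy` and `m_MyCounter` are invented for illustration):

    class MyPolicy : public DefaultPolicy
    {
    public:
        using DefaultPolicy::DefaultPolicy;

        // Emitted inside the per-inline element when JitInlinePolicyDumpXml
        // is enabled (see InlineContext::DumpXml in this patch).
        void DumpXml(FILE* file, unsigned indent = 0) const override
        {
            fprintf(file, "%*s<MyPolicy m_MyCounter=\"%u\" />\n", indent, "", m_MyCounter);
        }

    private:
        unsigned m_MyCounter = 0; // invented counter
    };
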
diff --git a/src/coreclr/jit/inlinepolicy.cpp b/src/coreclr/jit/inlinepolicy.cpp
index bd8b363e977f82..25cd23cf522f67 100644
--- a/src/coreclr/jit/inlinepolicy.cpp
+++ b/src/coreclr/jit/inlinepolicy.cpp
@@ -288,10 +288,98 @@ void DefaultPolicy::NoteBool(InlineObservation obs, bool value)
m_IsInstanceCtor = value;
break;
+ case InlineObservation::CALLEE_RETURNS_STRUCT:
+ m_ReturnsStructByValue = value;
+ break;
+
+ case InlineObservation::CALLEE_CLASS_VALUETYPE:
+ m_IsFromValueClass = value;
+ break;
+
+ case InlineObservation::CALLSITE_NONGENERIC_CALLS_GENERIC:
+ m_NonGenericCallsGeneric = value;
+ break;
+
case InlineObservation::CALLEE_CLASS_PROMOTABLE:
m_IsFromPromotableValueClass = value;
break;
+ case InlineObservation::CALLEE_BINARY_EXRP_WITH_CNS:
+ m_BinaryExprWithCns++;
+ break;
+
+ case InlineObservation::CALLEE_ARG_STRUCT:
+ m_ArgIsStructByValue++;
+ break;
+
+ case InlineObservation::CALLEE_ARG_STRUCT_FIELD_ACCESS:
+ m_FldAccessOverArgStruct++;
+ break;
+
+ case InlineObservation::CALLEE_ARG_FEEDS_CAST:
+ m_ArgCasted++;
+ break;
+
+ case InlineObservation::CALLEE_FOLDABLE_BOX:
+ m_FoldableBox++;
+ break;
+
+ case InlineObservation::CALLEE_INTRINSIC:
+ m_Intrinsic++;
+ break;
+
+ case InlineObservation::CALLEE_BACKWARD_JUMP:
+ m_BackwardJump++;
+ break;
+
+ case InlineObservation::CALLEE_THROW_BLOCK:
+ m_ThrowBlock++;
+ break;
+
+ case InlineObservation::CALLSITE_ARG_EXACT_CLS:
+ m_ArgIsExactCls++;
+ break;
+
+ case InlineObservation::CALLSITE_ARG_BOXED:
+ m_ArgIsBoxedAtCallsite++;
+ break;
+
+ case InlineObservation::CALLSITE_ARG_CONST:
+ m_ArgIsConst++;
+ break;
+
+ case InlineObservation::CALLSITE_ARG_EXACT_CLS_SIG_IS_NOT:
+ m_ArgIsExactClsSigIsNot++;
+ break;
+
+ case InlineObservation::CALLSITE_FOLDABLE_INTRINSIC:
+ m_FoldableIntrinsic++;
+ break;
+
+ case InlineObservation::CALLSITE_FOLDABLE_EXPR:
+ m_FoldableExpr++;
+ break;
+
+ case InlineObservation::CALLSITE_FOLDABLE_EXPR_UN:
+ m_FoldableExprUn++;
+ break;
+
+ case InlineObservation::CALLSITE_FOLDABLE_BRANCH:
+ m_FoldableBranch++;
+ break;
+
+ case InlineObservation::CALLSITE_DIV_BY_CNS:
+ m_DivByCns++;
+ break;
+
+ case InlineObservation::CALLSITE_HAS_PROFILE:
+ m_HasProfile = value;
+ break;
+
+ case InlineObservation::CALLSITE_IN_TRY_REGION:
+ m_CallsiteIsInTryRegion = value;
+ break;
+
case InlineObservation::CALLEE_HAS_SIMD:
m_HasSimd = value;
break;
@@ -389,8 +477,8 @@ void DefaultPolicy::NoteBool(InlineObservation obs, bool value)
break;
}
- case InlineObservation::CALLSITE_IN_TRY_REGION:
- m_CallsiteIsInTryRegion = true;
+ case InlineObservation::CALLSITE_IN_NORETURN_REGION:
+ m_IsCallsiteInNoReturnRegion = value;
break;
case InlineObservation::CALLSITE_IN_LOOP:
@@ -449,6 +537,85 @@ void DefaultPolicy::NoteBool(InlineObservation obs, bool value)
}
}
+#if defined(DEBUG) || defined(INLINE_DATA)
+//------------------------------------------------------------------------
+// DumpXml: Dump DefaultPolicy data as XML
+//
+// Arguments:
+// file - stream to output to
+// indent - indent level
+
+void DefaultPolicy::DumpXml(FILE* file, unsigned indent) const
+{
+ fprintf(file, "%*s 0.01) \
+ { \
+ fprintf(file, " " #x "=\"%.2lf\"", x); \
+ }
+#define XATTR_B(x) \
+ if (x) \
+ { \
+ fprintf(file, " " #x "=\"True\""); \
+ }
+
+ XATTR_R8(m_Multiplier);
+ XATTR_R8(m_ProfileFrequency);
+ XATTR_I4(m_CodeSize);
+ XATTR_I4(m_CallsiteFrequency);
+ XATTR_I4(m_CallsiteDepth);
+ XATTR_I4(m_InstructionCount);
+ XATTR_I4(m_LoadStoreCount);
+ XATTR_I4(m_ArgFeedsTest);
+ XATTR_I4(m_ArgFeedsConstantTest);
+ XATTR_I4(m_ArgFeedsRangeCheck);
+ XATTR_I4(m_ConstantArgFeedsConstantTest);
+ XATTR_I4(m_BinaryExprWithCns);
+ XATTR_I4(m_ArgCasted);
+ XATTR_I4(m_ArgIsStructByValue);
+ XATTR_I4(m_FldAccessOverArgStruct);
+ XATTR_I4(m_FoldableBox);
+ XATTR_I4(m_Intrinsic);
+ XATTR_I4(m_BackwardJump);
+ XATTR_I4(m_ThrowBlock);
+ XATTR_I4(m_ArgIsExactCls);
+ XATTR_I4(m_ArgIsExactClsSigIsNot);
+ XATTR_I4(m_ArgIsConst);
+ XATTR_I4(m_ArgIsBoxedAtCallsite);
+ XATTR_I4(m_FoldableIntrinsic);
+ XATTR_I4(m_FoldableExpr);
+ XATTR_I4(m_FoldableExprUn);
+ XATTR_I4(m_FoldableBranch);
+ XATTR_I4(m_DivByCns);
+ XATTR_I4(m_CalleeNativeSizeEstimate);
+ XATTR_I4(m_CallsiteNativeSizeEstimate);
+ XATTR_B(m_IsForceInline);
+ XATTR_B(m_IsForceInlineKnown);
+ XATTR_B(m_IsInstanceCtor);
+ XATTR_B(m_IsFromPromotableValueClass);
+ XATTR_B(m_HasSimd);
+ XATTR_B(m_LooksLikeWrapperMethod);
+ XATTR_B(m_MethodIsMostlyLoadStore);
+ XATTR_B(m_CallsiteIsInTryRegion);
+ XATTR_B(m_CallsiteIsInLoop);
+ XATTR_B(m_IsNoReturn);
+ XATTR_B(m_IsNoReturnKnown);
+ XATTR_B(m_ReturnsStructByValue);
+ XATTR_B(m_IsFromValueClass);
+ XATTR_B(m_NonGenericCallsGeneric);
+ XATTR_B(m_IsCallsiteInNoReturnRegion);
+ XATTR_B(m_HasProfile);
+ fprintf(file, " />\n");
+}
+#endif
+
//------------------------------------------------------------------------
// BudgetCheck: see if this inline would exceed the current budget
//
@@ -654,9 +821,8 @@ void DefaultPolicy::NoteInt(InlineObservation obs, int value)
void DefaultPolicy::NoteDouble(InlineObservation obs, double value)
{
- // By default, ignore this observation.
- //
assert(obs == InlineObservation::CALLSITE_PROFILE_FREQUENCY);
+ m_ProfileFrequency = value;
}
//------------------------------------------------------------------------
@@ -761,6 +927,174 @@ double DefaultPolicy::DetermineMultiplier()
break;
}
+ if (m_ReturnsStructByValue)
+ {
+ // For structs-passed-by-value we might avoid expensive copy operations if we inline
+ JITDUMP("\nInline candidate returns a struct by value.");
+ }
+
+ if (m_ArgIsStructByValue > 0)
+ {
+ // Same here
+ JITDUMP("\n%d arguments are structs passed by value.", m_ArgIsStructByValue);
+ }
+
+ if (m_NonGenericCallsGeneric)
+ {
+        // Especially profitable if such a callee has many foldable branches like 'typeof(T) == typeof(T2)'.
+ JITDUMP("\nInline candidate is generic and caller is not.");
+ }
+
+ if (m_IsCallsiteInNoReturnRegion)
+ {
+ // E.g.
+ //
+ // throw new ArgumentException(SR.GetMessage());
+ //
+ // ^ Here we have two calls inside a BBJ_THROW block
+        // Unfortunately, we're not able to detect ThrowHelper calls yet.
+ JITDUMP("\nCallsite is in a no-return region.");
+ }
+
+ if (m_FoldableBranch > 0)
+ {
+ // Examples:
+ //
+ // if (typeof(T) == typeof(int)) {
+ // if (Avx2.IsSupported) {
+ // if (arg0 / 10 > 100) { // where arg0 is a constant at the callsite
+ // if (Math.Abs(arg0) > 10) { // same here
+ // etc.
+ //
+ JITDUMP("\nInline candidate has %d foldable branches.", m_FoldableBranch);
+ }
+
+ if (m_ArgCasted > 0)
+ {
+ JITDUMP("\nArgument feeds ISINST/CASTCLASS %d times.", m_ArgCasted);
+ }
+
+ if (m_FldAccessOverArgStruct > 0)
+ {
+ // Such ldfld/stfld are cheap for promotable structs
+ JITDUMP("\n%d ldfld or stfld over arguments which are structs", m_ArgIsStructByValue);
+ }
+
+ if (m_FoldableBox > 0)
+ {
+        // We saw some BOX+ISINST+BR or BOX+UNBOX patterns (see impBoxPatternMatch).
+        // Especially useful in combination with m_NonGenericCallsGeneric.
+ JITDUMP("\nInline has %d foldable BOX ops.", m_FoldableBox);
+ }
+
+ if (m_Intrinsic > 0)
+ {
+ // In most cases such intrinsics are lowered as single CPU instructions
+ JITDUMP("\nInline has %d intrinsics.", m_Intrinsic);
+ }
+
+ if (m_BinaryExprWithCns > 0)
+ {
+ // In some cases we're not able to detect potentially foldable expressions, e.g.:
+ //
+ // ldc.i4.0
+ // call int SomeFoldableNonIntrinsicCall
+ // ceq
+ //
+ // so at least we can note potential constant tests
+ JITDUMP("\nInline candidate has %d binary expressions with constants.", m_BinaryExprWithCns);
+ }
+
+ if (m_ThrowBlock > 0)
+ {
+        // The 'throw' opcode and its friends (the Exception ctor, its message, etc.) significantly increase
+        // NativeSizeEstimate. However, such basic blocks won't hurt us since they are always moved to
+        // the end of the function and don't impact register allocation.
+ // NOTE: Unfortunately, we're not able to recognize ThrowHelper calls here yet.
+ JITDUMP("\nInline has %d throw blocks.", m_ThrowBlock);
+ }
+
+ if (m_ArgIsBoxedAtCallsite > 0)
+ {
+ // Callsite is going to box n arguments. We might avoid boxing after inlining.
+ // Example:
+ //
+ // void DoNothing(object o) {} // o is unused, so the boxing is redundant
+ //
+ // void Caller() => DoNothing(42); // 42 is going to be boxed at the call site.
+ //
+ JITDUMP("\nCallsite is going to box %d arguments.", m_ArgIsBoxedAtCallsite);
+ }
+
+ if (m_ArgIsExactClsSigIsNot > 0)
+ {
+        // If we inline such a callee, we'll be able to devirtualize all the calls on such arguments.
+ // Example:
+ //
+ // int Callee(object o) => o.GetHashCode(); // virtual call
+ //
+ // int Caller(string s) => Callee(s); // String is 'exact' (sealed)
+ //
+ JITDUMP("\nCallsite passes %d arguments of exact classes while callee accepts non-exact ones.",
+ m_ArgIsExactClsSigIsNot);
+ }
+
+ if (m_ArgIsExactCls > 0)
+ {
+ JITDUMP("\nCallsite passes %d arguments of exact classes.", m_ArgIsExactCls);
+ }
+
+ if (m_ArgIsConst > 0)
+ {
+        // Normally we try to note the specific places where constant arguments fold expressions or
+        // feed tests, but count them here just in case:
+ JITDUMP("\n%d arguments are constants at the callsite.", m_ArgIsConst);
+ }
+
+ if (m_FoldableIntrinsic > 0)
+ {
+ // Examples:
+ //
+ // typeof(T1) == typeof(T2)
+ // Math.Abs(constArg)
+ // BitOperation.PopCount(10)
+ JITDUMP("\nInline has %d foldable intrinsics.", m_FoldableIntrinsic);
+ }
+
+ if (m_FoldableExpr > 0)
+ {
+ // E.g. add/mul/ceq, etc. over constant/constant arguments
+ JITDUMP("\nInline has %d foldable binary expressions.", m_FoldableExpr);
+ }
+
+ if (m_FoldableExprUn > 0)
+ {
+ // E.g. casts, negations, etc. over constants/constant arguments
+ JITDUMP("\nInline has %d foldable unary expressions.", m_FoldableExprUn);
+ }
+
+ if (m_DivByCns > 0)
+ {
+ // E.g. callee has "x / arg0" where arg0 is a const at the call site -
+ // we'll avoid a very expensive DIV instruction after inlining.
+ JITDUMP("\nInline has %d Div-by-constArg expressions.", m_DivByCns);
+ }
+
+    if (m_BackwardJump > 0)
+ {
+ const bool callSiteIsInLoop = m_CallsiteFrequency == InlineCallsiteFrequency::LOOP;
+ JITDUMP("\nInline has %d backward jumps (loops?).", m_BackwardJump);
+ if (callSiteIsInLoop)
+ {
+ JITDUMP(" And is inlined into a loop.")
+ }
+ }
+
+ if (m_HasProfile)
+ {
+ JITDUMP("\nCallsite has profile data: %g.", m_ProfileFrequency);
+ }
+
#ifdef DEBUG
int additionalMultiplier = JitConfig.JitInlineAdditionalMultiplier();
@@ -2028,6 +2362,28 @@ void DiscretionaryPolicy::DumpData(FILE* file) const
fprintf(file, ",%u", m_IsNoReturn ? 1 : 0);
fprintf(file, ",%u", m_CalleeHasGCStruct ? 1 : 0);
fprintf(file, ",%u", m_CallsiteDepth);
+ fprintf(file, ",%u", m_BinaryExprWithCns);
+ fprintf(file, ",%u", m_ArgCasted);
+ fprintf(file, ",%u", m_ArgIsStructByValue);
+ fprintf(file, ",%u", m_FldAccessOverArgStruct);
+ fprintf(file, ",%u", m_FoldableBox);
+ fprintf(file, ",%u", m_Intrinsic);
+ fprintf(file, ",%u", m_BackwardJump);
+ fprintf(file, ",%u", m_ThrowBlock);
+ fprintf(file, ",%u", m_ArgIsExactCls);
+ fprintf(file, ",%u", m_ArgIsExactClsSigIsNot);
+ fprintf(file, ",%u", m_ArgIsConst);
+ fprintf(file, ",%u", m_ArgIsBoxedAtCallsite);
+ fprintf(file, ",%u", m_FoldableIntrinsic);
+ fprintf(file, ",%u", m_FoldableExpr);
+ fprintf(file, ",%u", m_FoldableExprUn);
+ fprintf(file, ",%u", m_FoldableBranch);
+ fprintf(file, ",%u", m_DivByCns);
+ fprintf(file, ",%u", m_ReturnsStructByValue ? 1 : 0);
+ fprintf(file, ",%u", m_IsFromValueClass ? 1 : 0);
+ fprintf(file, ",%u", m_NonGenericCallsGeneric ? 1 : 0);
+ fprintf(file, ",%u", m_IsCallsiteInNoReturnRegion ? 1 : 0);
+ fprintf(file, ",%u", m_HasProfile ? 1 : 0);
}
#endif // defined(DEBUG) || defined(INLINE_DATA)
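
To make the XATTR_* mechanics concrete, a standalone sketch (the macro bodies mirror the ones in DumpXml above; only nonzero counters and true flags become attributes, which keeps the XML compact):

    #include <cstdio>

    #define XATTR_I4(x) if ((x) != 0) { printf(" " #x "=\"%d\"", (x)); }
    #define XATTR_B(x)  if (x)        { printf(" " #x "=\"True\""); }

    int main()
    {
        int  m_FoldableBranch = 2;
        int  m_DivByCns       = 0;    // zero => attribute omitted
        bool m_HasProfile     = true;

        printf("<DefaultPolicy");
        XATTR_I4(m_FoldableBranch);
        XATTR_I4(m_DivByCns);
        XATTR_B(m_HasProfile);
        printf(" />\n"); // <DefaultPolicy m_FoldableBranch="2" m_HasProfile="True" />
    }
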
diff --git a/src/coreclr/jit/inlinepolicy.h b/src/coreclr/jit/inlinepolicy.h
index 3e0d3440a8e851..9c107f480c241a 100644
--- a/src/coreclr/jit/inlinepolicy.h
+++ b/src/coreclr/jit/inlinepolicy.h
@@ -87,6 +87,7 @@ class DefaultPolicy : public LegalPolicy
, m_RootCompiler(compiler)
, m_StateMachine(nullptr)
, m_Multiplier(0.0)
+ , m_ProfileFrequency(0.0)
, m_CodeSize(0)
, m_CallsiteFrequency(InlineCallsiteFrequency::UNUSED)
, m_CallsiteDepth(0)
@@ -96,6 +97,23 @@ class DefaultPolicy : public LegalPolicy
, m_ArgFeedsConstantTest(0)
, m_ArgFeedsRangeCheck(0)
, m_ConstantArgFeedsConstantTest(0)
+ , m_BinaryExprWithCns(0)
+ , m_ArgCasted(0)
+ , m_ArgIsStructByValue(0)
+ , m_FldAccessOverArgStruct(0)
+ , m_FoldableBox(0)
+ , m_Intrinsic(0)
+ , m_BackwardJump(0)
+ , m_ThrowBlock(0)
+ , m_ArgIsExactCls(0)
+ , m_ArgIsExactClsSigIsNot(0)
+ , m_ArgIsConst(0)
+ , m_ArgIsBoxedAtCallsite(0)
+ , m_FoldableIntrinsic(0)
+ , m_FoldableExpr(0)
+ , m_FoldableExprUn(0)
+ , m_FoldableBranch(0)
+ , m_DivByCns(0)
, m_CalleeNativeSizeEstimate(0)
, m_CallsiteNativeSizeEstimate(0)
, m_IsForceInline(false)
@@ -109,6 +127,11 @@ class DefaultPolicy : public LegalPolicy
, m_CallsiteIsInLoop(false)
, m_IsNoReturn(false)
, m_IsNoReturnKnown(false)
+ , m_ReturnsStructByValue(false)
+ , m_IsFromValueClass(false)
+ , m_NonGenericCallsGeneric(false)
+ , m_IsCallsiteInNoReturnRegion(false)
+ , m_HasProfile(false)
{
// empty
}
@@ -136,6 +159,8 @@ class DefaultPolicy : public LegalPolicy
return "DefaultPolicy";
}
+ void DumpXml(FILE* file, unsigned indent = 0) const override;
+
#endif // (DEBUG) || defined(INLINE_DATA)
protected:
@@ -155,6 +180,7 @@ class DefaultPolicy : public LegalPolicy
Compiler* m_RootCompiler; // root compiler instance
CodeSeqSM* m_StateMachine;
double m_Multiplier;
+ double m_ProfileFrequency;
unsigned m_CodeSize;
InlineCallsiteFrequency m_CallsiteFrequency;
unsigned m_CallsiteDepth;
@@ -164,6 +190,23 @@ class DefaultPolicy : public LegalPolicy
unsigned m_ArgFeedsConstantTest;
unsigned m_ArgFeedsRangeCheck;
unsigned m_ConstantArgFeedsConstantTest;
+ unsigned m_BinaryExprWithCns;
+ unsigned m_ArgCasted;
+ unsigned m_ArgIsStructByValue;
+ unsigned m_FldAccessOverArgStruct;
+ unsigned m_FoldableBox;
+ unsigned m_Intrinsic;
+ unsigned m_BackwardJump;
+ unsigned m_ThrowBlock;
+ unsigned m_ArgIsExactCls;
+ unsigned m_ArgIsExactClsSigIsNot;
+ unsigned m_ArgIsConst;
+ unsigned m_ArgIsBoxedAtCallsite;
+ unsigned m_FoldableIntrinsic;
+ unsigned m_FoldableExpr;
+ unsigned m_FoldableExprUn;
+ unsigned m_FoldableBranch;
+ unsigned m_DivByCns;
int m_CalleeNativeSizeEstimate;
int m_CallsiteNativeSizeEstimate;
bool m_IsForceInline : 1;
@@ -177,6 +220,11 @@ class DefaultPolicy : public LegalPolicy
bool m_CallsiteIsInLoop : 1;
bool m_IsNoReturn : 1;
bool m_IsNoReturnKnown : 1;
+ bool m_ReturnsStructByValue : 1;
+ bool m_IsFromValueClass : 1;
+ bool m_NonGenericCallsGeneric : 1;
+ bool m_IsCallsiteInNoReturnRegion : 1;
+ bool m_HasProfile : 1;
};
// DiscretionaryPolicy is a variant of the default policy. It
diff --git a/src/coreclr/jit/instr.h b/src/coreclr/jit/instr.h
index ed001fdc1bc722..d9e2b9319ee4c2 100644
--- a/src/coreclr/jit/instr.h
+++ b/src/coreclr/jit/instr.h
@@ -86,18 +86,56 @@ enum GCtype : unsigned
};
#if defined(TARGET_XARCH)
-enum insFlags: uint8_t
+
+enum insFlags : uint32_t
{
- INS_FLAGS_None = 0x00,
- INS_FLAGS_ReadsFlags = 0x01,
- INS_FLAGS_WritesFlags = 0x02,
- INS_FLAGS_x87Instr = 0x04,
- INS_Flags_IsDstDstSrcAVXInstruction = 0x08,
- INS_Flags_IsDstSrcSrcAVXInstruction = 0x10,
+ INS_FLAGS_None = 0,
+
+ // Reads
+ Reads_OF = 1 << 0,
+ Reads_SF = 1 << 1,
+ Reads_ZF = 1 << 2,
+ Reads_PF = 1 << 3,
+ Reads_CF = 1 << 4,
+ Reads_DF = 1 << 5,
+
+ // Writes
+ Writes_OF = 1 << 6,
+ Writes_SF = 1 << 7,
+ Writes_ZF = 1 << 8,
+ Writes_AF = 1 << 9,
+ Writes_PF = 1 << 10,
+ Writes_CF = 1 << 11,
+
+ // Resets
+ Resets_OF = 1 << 12,
+ Resets_SF = 1 << 13,
+ Resets_AF = 1 << 14,
+ Resets_PF = 1 << 15,
+ Resets_CF = 1 << 16,
+
+ // Undefined
+ Undefined_OF = 1 << 17,
+ Undefined_SF = 1 << 18,
+ Undefined_ZF = 1 << 19,
+ Undefined_AF = 1 << 20,
+ Undefined_PF = 1 << 21,
+ Undefined_CF = 1 << 22,
+
+ // Restore
+ Restore_SF_ZF_AF_PF_CF = 1 << 23,
+
+ // x87 instruction
+ INS_FLAGS_x87Instr = 1 << 24,
+
+ // Avx
+ INS_Flags_IsDstDstSrcAVXInstruction = 1 << 25,
+ INS_Flags_IsDstSrcSrcAVXInstruction = 1 << 26,
// TODO-Cleanup: Remove this flag and its usage from TARGET_XARCH
INS_FLAGS_DONT_CARE = 0x00,
};
+
#elif defined(TARGET_ARM) || defined(TARGET_ARM64)
// TODO-Cleanup: Move 'insFlags' under TARGET_ARM
enum insFlags: unsigned
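
The payoff of splitting the old coarse ReadsFlags/WritesFlags bits is that flag dependencies can now be reasoned about per flag. An invented helper (not part of this patch; the Resets_*/Undefined_* interactions and DF are ignored for brevity) showing the kind of query this enables:

    // Does `producer` deterministically write every status flag `consumer` reads?
    inline bool WritesCoverReads(insFlags producer, insFlags consumer)
    {
        unsigned needed = 0;
        if ((consumer & Reads_OF) != 0) needed |= Writes_OF;
        if ((consumer & Reads_SF) != 0) needed |= Writes_SF;
        if ((consumer & Reads_ZF) != 0) needed |= Writes_ZF;
        if ((consumer & Reads_PF) != 0) needed |= Writes_PF;
        if ((consumer & Reads_CF) != 0) needed |= Writes_CF;
        return ((unsigned)producer & needed) == needed;
    }
    // e.g. `and` (which writes ZF among others) covers a following `cmove`
    // (which only reads ZF), so a redundant compare feeding the cmove could
    // potentially be dropped.
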
diff --git a/src/coreclr/jit/instrsxarch.h b/src/coreclr/jit/instrsxarch.h
index 750f1b215036b1..262e0c052fc6a6 100644
--- a/src/coreclr/jit/instrsxarch.h
+++ b/src/coreclr/jit/instrsxarch.h
@@ -51,69 +51,71 @@
// id nm um mr mi rm a4 rr flags
INST5(invalid, "INVALID", IUM_RD, BAD_CODE, BAD_CODE, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None)
-INST5(push, "push", IUM_RD, 0x0030FE, 0x000068, BAD_CODE, BAD_CODE, 0x000050, INS_FLAGS_None)
-INST5(pop, "pop", IUM_WR, 0x00008E, BAD_CODE, BAD_CODE, BAD_CODE, 0x000058, INS_FLAGS_None)
+INST5(push, "push", IUM_RD, 0x0030FE, 0x000068, BAD_CODE, BAD_CODE, 0x000050, INS_FLAGS_None )
+INST5(pop, "pop", IUM_WR, 0x00008E, BAD_CODE, BAD_CODE, BAD_CODE, 0x000058, INS_FLAGS_None )
// Does not affect the stack tracking in the emitter
-INST5(push_hide, "push", IUM_RD, 0x0030FE, 0x000068, BAD_CODE, BAD_CODE, 0x000050, INS_FLAGS_None)
-INST5(pop_hide, "pop", IUM_WR, 0x00008E, BAD_CODE, BAD_CODE, BAD_CODE, 0x000058, INS_FLAGS_None)
+INST5(push_hide, "push", IUM_RD, 0x0030FE, 0x000068, BAD_CODE, BAD_CODE, 0x000050, INS_FLAGS_None )
+INST5(pop_hide, "pop", IUM_WR, 0x00008E, BAD_CODE, BAD_CODE, BAD_CODE, 0x000058, INS_FLAGS_None )
-INST5(inc, "inc", IUM_RW, 0x0000FE, BAD_CODE, BAD_CODE, BAD_CODE, 0x000040, INS_FLAGS_WritesFlags)
-INST5(inc_l, "inc", IUM_RW, 0x0000FE, BAD_CODE, BAD_CODE, BAD_CODE, 0x00C0FE, INS_FLAGS_WritesFlags)
-INST5(dec, "dec", IUM_RW, 0x0008FE, BAD_CODE, BAD_CODE, BAD_CODE, 0x000048, INS_FLAGS_WritesFlags)
-INST5(dec_l, "dec", IUM_RW, 0x0008FE, BAD_CODE, BAD_CODE, BAD_CODE, 0x00C8FE, INS_FLAGS_WritesFlags)
+INST5(inc, "inc", IUM_RW, 0x0000FE, BAD_CODE, BAD_CODE, BAD_CODE, 0x000040, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF )
+INST5(inc_l, "inc", IUM_RW, 0x0000FE, BAD_CODE, BAD_CODE, BAD_CODE, 0x00C0FE, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF )
+INST5(dec, "dec", IUM_RW, 0x0008FE, BAD_CODE, BAD_CODE, BAD_CODE, 0x000048, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF )
+INST5(dec_l, "dec", IUM_RW, 0x0008FE, BAD_CODE, BAD_CODE, BAD_CODE, 0x00C8FE, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF )
// Multi-byte opcodes without modrm are represented in mixed endian fashion.
// See comment around quarter way through this file for more information.
-INST5(bswap, "bswap", IUM_RW, 0x0F00C8, BAD_CODE, BAD_CODE, BAD_CODE, 0x00C80F, INS_FLAGS_None)
+INST5(bswap, "bswap", IUM_RW, 0x0F00C8, BAD_CODE, BAD_CODE, BAD_CODE, 0x00C80F, INS_FLAGS_None )
// id nm um mr mi rm a4 flags
-INST4(add, "add", IUM_RW, 0x000000, 0x000080, 0x000002, 0x000004, INS_FLAGS_WritesFlags)
-INST4(or, "or", IUM_RW, 0x000008, 0x000880, 0x00000A, 0x00000C, INS_FLAGS_WritesFlags)
-INST4(adc, "adc", IUM_RW, 0x000010, 0x001080, 0x000012, 0x000014, INS_FLAGS_ReadsFlags | INS_FLAGS_WritesFlags)
-INST4(sbb, "sbb", IUM_RW, 0x000018, 0x001880, 0x00001A, 0x00001C, INS_FLAGS_ReadsFlags | INS_FLAGS_WritesFlags)
-INST4(and, "and", IUM_RW, 0x000020, 0x002080, 0x000022, 0x000024, INS_FLAGS_WritesFlags)
-INST4(sub, "sub", IUM_RW, 0x000028, 0x002880, 0x00002A, 0x00002C, INS_FLAGS_WritesFlags)
-INST4(xor, "xor", IUM_RW, 0x000030, 0x003080, 0x000032, 0x000034, INS_FLAGS_WritesFlags)
-INST4(cmp, "cmp", IUM_RD, 0x000038, 0x003880, 0x00003A, 0x00003C, INS_FLAGS_WritesFlags)
-INST4(test, "test", IUM_RD, 0x000084, 0x0000F6, 0x000084, 0x0000A8, INS_FLAGS_WritesFlags)
-INST4(mov, "mov", IUM_WR, 0x000088, 0x0000C6, 0x00008A, 0x0000B0, INS_FLAGS_None)
-
-INST4(lea, "lea", IUM_WR, BAD_CODE, BAD_CODE, 0x00008D, BAD_CODE, INS_FLAGS_None)
+INST4(add, "add", IUM_RW, 0x000000, 0x000080, 0x000002, 0x000004, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF )
+INST4(or, "or", IUM_RW, 0x000008, 0x000880, 0x00000A, 0x00000C, Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Resets_CF )
+INST4(adc, "adc", IUM_RW, 0x000010, 0x001080, 0x000012, 0x000014, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF
+ | Reads_CF )
+INST4(sbb, "sbb", IUM_RW, 0x000018, 0x001880, 0x00001A, 0x00001C, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF
+ | Reads_CF )
+INST4(and, "and", IUM_RW, 0x000020, 0x002080, 0x000022, 0x000024, Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Resets_CF )
+INST4(sub, "sub", IUM_RW, 0x000028, 0x002880, 0x00002A, 0x00002C, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF )
+INST4(xor, "xor", IUM_RW, 0x000030, 0x003080, 0x000032, 0x000034, Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Resets_CF )
+INST4(cmp, "cmp", IUM_RD, 0x000038, 0x003880, 0x00003A, 0x00003C, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF )
+INST4(test, "test", IUM_RD, 0x000084, 0x0000F6, 0x000084, 0x0000A8, Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Resets_CF )
+INST4(mov, "mov", IUM_WR, 0x000088, 0x0000C6, 0x00008A, 0x0000B0, INS_FLAGS_None )
+
+INST4(lea, "lea", IUM_WR, BAD_CODE, BAD_CODE, 0x00008D, BAD_CODE, INS_FLAGS_None )
// id nm um mr mi rm flags
// Note that emitter has only partial support for BT. It can only emit the reg,reg form
// and the registers need to be reversed to get the correct encoding.
-INST3(bt, "bt", IUM_RD, 0x0F00A3, BAD_CODE, 0x0F00A3, INS_FLAGS_WritesFlags)
+INST3(bt, "bt", IUM_RD, 0x0F00A3, BAD_CODE, 0x0F00A3, Undefined_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
-INST3(bsf, "bsf", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00BC, INS_FLAGS_WritesFlags)
-INST3(bsr, "bsr", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00BD, INS_FLAGS_WritesFlags)
+INST3(bsf, "bsf", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00BC, Undefined_OF | Undefined_SF | Writes_ZF | Undefined_AF | Undefined_PF | Undefined_CF )
+INST3(bsr, "bsr", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00BD, Undefined_OF | Undefined_SF | Writes_ZF | Undefined_AF | Undefined_PF | Undefined_CF )
-INST3(movsx, "movsx", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00BE, INS_FLAGS_None)
+INST3(movsx, "movsx", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00BE, INS_FLAGS_None )
#ifdef TARGET_AMD64
-INST3(movsxd, "movsxd", IUM_WR, BAD_CODE, BAD_CODE, 0x4800000063, INS_FLAGS_None)
+INST3(movsxd, "movsxd", IUM_WR, BAD_CODE, BAD_CODE, 0x4800000063, INS_FLAGS_None )
#endif
-INST3(movzx, "movzx", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00B6, INS_FLAGS_None)
-
-INST3(cmovo, "cmovo", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0040, INS_FLAGS_ReadsFlags)
-INST3(cmovno, "cmovno", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0041, INS_FLAGS_ReadsFlags)
-INST3(cmovb, "cmovb", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0042, INS_FLAGS_ReadsFlags)
-INST3(cmovae, "cmovae", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0043, INS_FLAGS_ReadsFlags)
-INST3(cmove, "cmove", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0044, INS_FLAGS_ReadsFlags)
-INST3(cmovne, "cmovne", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0045, INS_FLAGS_ReadsFlags)
-INST3(cmovbe, "cmovbe", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0046, INS_FLAGS_ReadsFlags)
-INST3(cmova, "cmova", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0047, INS_FLAGS_ReadsFlags)
-INST3(cmovs, "cmovs", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0048, INS_FLAGS_ReadsFlags)
-INST3(cmovns, "cmovns", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0049, INS_FLAGS_ReadsFlags)
-INST3(cmovp, "cmovp", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004A, INS_FLAGS_ReadsFlags)
-INST3(cmovnp, "cmovnp", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004B, INS_FLAGS_ReadsFlags)
-INST3(cmovl, "cmovl", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004C, INS_FLAGS_ReadsFlags)
-INST3(cmovge, "cmovge", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004D, INS_FLAGS_ReadsFlags)
-INST3(cmovle, "cmovle", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004E, INS_FLAGS_ReadsFlags)
-INST3(cmovg, "cmovg", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004F, INS_FLAGS_ReadsFlags)
-
-INST3(xchg, "xchg", IUM_RW, 0x000086, BAD_CODE, 0x000086, INS_FLAGS_None)
-INST3(imul, "imul", IUM_RW, 0x0F00AC, BAD_CODE, 0x0F00AF, INS_FLAGS_WritesFlags) // op1 *= op2
+INST3(movzx, "movzx", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00B6, INS_FLAGS_None )
+
+INST3(cmovo, "cmovo", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0040, Reads_OF )
+INST3(cmovno, "cmovno", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0041, Reads_OF )
+INST3(cmovb, "cmovb", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0042, Reads_CF )
+INST3(cmovae, "cmovae", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0043, Reads_CF )
+INST3(cmove, "cmove", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0044, Reads_ZF )
+INST3(cmovne, "cmovne", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0045, Reads_ZF )
+INST3(cmovbe, "cmovbe", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0046, Reads_ZF | Reads_CF )
+INST3(cmova, "cmova", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0047, Reads_ZF | Reads_CF )
+INST3(cmovs, "cmovs", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0048, Reads_SF )
+INST3(cmovns, "cmovns", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0049, Reads_SF )
+INST3(cmovp, "cmovp", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004A, Reads_PF )
+INST3(cmovnp, "cmovnp", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004B, Reads_PF )
+INST3(cmovl, "cmovl", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004C, Reads_OF | Reads_SF )
+INST3(cmovge, "cmovge", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004D, Reads_OF | Reads_SF )
+INST3(cmovle, "cmovle", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004E, Reads_OF | Reads_SF | Reads_ZF )
+INST3(cmovg, "cmovg", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004F, Reads_OF | Reads_SF | Reads_ZF )
+
+INST3(xchg, "xchg", IUM_RW, 0x000086, BAD_CODE, 0x000086, INS_FLAGS_None )
+INST3(imul, "imul", IUM_RW, 0x0F00AC, BAD_CODE, 0x0F00AF, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
// id nm um mr mi rm flags
@@ -121,25 +123,25 @@ INST3(imul, "imul", IUM_RW, 0x0F00AC, BAD_CODE,
// as 2-operand instructions with the target register being implicit
// implicit_reg = op1*op2_icon
#define INSTMUL INST3
-INSTMUL(imul_AX, "imul", IUM_RD, BAD_CODE, 0x000068, BAD_CODE, INS_FLAGS_WritesFlags)
-INSTMUL(imul_CX, "imul", IUM_RD, BAD_CODE, 0x000868, BAD_CODE, INS_FLAGS_WritesFlags)
-INSTMUL(imul_DX, "imul", IUM_RD, BAD_CODE, 0x001068, BAD_CODE, INS_FLAGS_WritesFlags)
-INSTMUL(imul_BX, "imul", IUM_RD, BAD_CODE, 0x001868, BAD_CODE, INS_FLAGS_WritesFlags)
-INSTMUL(imul_SP, "imul", IUM_RD, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_WritesFlags)
-INSTMUL(imul_BP, "imul", IUM_RD, BAD_CODE, 0x002868, BAD_CODE, INS_FLAGS_WritesFlags)
-INSTMUL(imul_SI, "imul", IUM_RD, BAD_CODE, 0x003068, BAD_CODE, INS_FLAGS_WritesFlags)
-INSTMUL(imul_DI, "imul", IUM_RD, BAD_CODE, 0x003868, BAD_CODE, INS_FLAGS_WritesFlags)
+INSTMUL(imul_AX, "imul", IUM_RD, BAD_CODE, 0x000068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
+INSTMUL(imul_CX, "imul", IUM_RD, BAD_CODE, 0x000868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
+INSTMUL(imul_DX, "imul", IUM_RD, BAD_CODE, 0x001068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
+INSTMUL(imul_BX, "imul", IUM_RD, BAD_CODE, 0x001868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
+INSTMUL(imul_SP, "imul", IUM_RD, BAD_CODE, BAD_CODE, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
+INSTMUL(imul_BP, "imul", IUM_RD, BAD_CODE, 0x002868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
+INSTMUL(imul_SI, "imul", IUM_RD, BAD_CODE, 0x003068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
+INSTMUL(imul_DI, "imul", IUM_RD, BAD_CODE, 0x003868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
#ifdef TARGET_AMD64
-INSTMUL(imul_08, "imul", IUM_RD, BAD_CODE, 0x4400000068, BAD_CODE, INS_FLAGS_WritesFlags)
-INSTMUL(imul_09, "imul", IUM_RD, BAD_CODE, 0x4400000868, BAD_CODE, INS_FLAGS_WritesFlags)
-INSTMUL(imul_10, "imul", IUM_RD, BAD_CODE, 0x4400001068, BAD_CODE, INS_FLAGS_WritesFlags)
-INSTMUL(imul_11, "imul", IUM_RD, BAD_CODE, 0x4400001868, BAD_CODE, INS_FLAGS_WritesFlags)
-INSTMUL(imul_12, "imul", IUM_RD, BAD_CODE, 0x4400002068, BAD_CODE, INS_FLAGS_WritesFlags)
-INSTMUL(imul_13, "imul", IUM_RD, BAD_CODE, 0x4400002868, BAD_CODE, INS_FLAGS_WritesFlags)
-INSTMUL(imul_14, "imul", IUM_RD, BAD_CODE, 0x4400003068, BAD_CODE, INS_FLAGS_WritesFlags)
-INSTMUL(imul_15, "imul", IUM_RD, BAD_CODE, 0x4400003868, BAD_CODE, INS_FLAGS_WritesFlags)
+INSTMUL(imul_08, "imul", IUM_RD, BAD_CODE, 0x4400000068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
+INSTMUL(imul_09, "imul", IUM_RD, BAD_CODE, 0x4400000868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
+INSTMUL(imul_10, "imul", IUM_RD, BAD_CODE, 0x4400001068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
+INSTMUL(imul_11, "imul", IUM_RD, BAD_CODE, 0x4400001868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
+INSTMUL(imul_12, "imul", IUM_RD, BAD_CODE, 0x4400002068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
+INSTMUL(imul_13, "imul", IUM_RD, BAD_CODE, 0x4400002868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
+INSTMUL(imul_14, "imul", IUM_RD, BAD_CODE, 0x4400003068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
+INSTMUL(imul_15, "imul", IUM_RD, BAD_CODE, 0x4400003868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
#endif // TARGET_AMD64
@@ -301,10 +303,10 @@ INST3(cvttpd2dq, "cvttpd2dq", IUM_WR, BAD_CODE, BAD_CODE,
INST3(cvtdq2pd, "cvtdq2pd", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xE6), INS_FLAGS_None) // cvt packed DWORDs to doubles
// SSE2 comparison instructions
-INST3(comiss, "comiss", IUM_RD, BAD_CODE, BAD_CODE, PCKFLT(0x2F), INS_FLAGS_None) // ordered compare singles
-INST3(comisd, "comisd", IUM_RD, BAD_CODE, BAD_CODE, PCKDBL(0x2F), INS_FLAGS_None) // ordered compare doubles
-INST3(ucomiss, "ucomiss", IUM_RD, BAD_CODE, BAD_CODE, PCKFLT(0x2E), INS_FLAGS_None) // unordered compare singles
-INST3(ucomisd, "ucomisd", IUM_RD, BAD_CODE, BAD_CODE, PCKDBL(0x2E), INS_FLAGS_None) // unordered compare doubles
+INST3(comiss, "comiss", IUM_RD, BAD_CODE, BAD_CODE, PCKFLT(0x2F), Resets_OF | Resets_SF | Writes_ZF | Resets_AF | Writes_PF | Writes_CF ) // ordered compare singles
+INST3(comisd, "comisd", IUM_RD, BAD_CODE, BAD_CODE, PCKDBL(0x2F), Resets_OF | Resets_SF | Writes_ZF | Resets_AF | Writes_PF | Writes_CF ) // ordered compare doubles
+INST3(ucomiss, "ucomiss", IUM_RD, BAD_CODE, BAD_CODE, PCKFLT(0x2E), Resets_OF | Resets_SF | Writes_ZF | Resets_AF | Writes_PF | Writes_CF ) // unordered compare singles
+INST3(ucomisd, "ucomisd", IUM_RD, BAD_CODE, BAD_CODE, PCKDBL(0x2E), Resets_OF | Resets_SF | Writes_ZF | Resets_AF | Writes_PF | Writes_CF ) // unordered compare doubles
// SSE2 packed single/double comparison operations.
// Note that these instructions not only compare but also overwrite the first source.
@@ -600,10 +602,10 @@ INST3(bextr, "bextr", IUM_WR, BAD_CODE, BAD_CODE,
// BMI2
INST3(rorx, "rorx", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0xF0), INS_FLAGS_None)
-INST3(pdep, "pdep", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF5), INS_Flags_IsDstDstSrcAVXInstruction) // Parallel Bits Deposit
-INST3(pext, "pext", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF5), INS_Flags_IsDstDstSrcAVXInstruction) // Parallel Bits Extract
-INST3(bzhi, "bzhi", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF5), INS_Flags_IsDstDstSrcAVXInstruction) // Zero High Bits Starting with Specified Bit Position
-INST3(mulx, "mulx", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF6), INS_Flags_IsDstDstSrcAVXInstruction) // Unsigned Multiply Without Affecting Flags
+INST3(pdep, "pdep", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF5), INS_Flags_IsDstDstSrcAVXInstruction) // Parallel Bits Deposit
+INST3(pext, "pext", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF5), INS_Flags_IsDstDstSrcAVXInstruction) // Parallel Bits Extract
+INST3(bzhi, "bzhi", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF5), Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_Flags_IsDstDstSrcAVXInstruction) // Zero High Bits Starting with Specified Bit Position
+INST3(mulx, "mulx", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF6), INS_Flags_IsDstDstSrcAVXInstruction) // Unsigned Multiply Without Affecting Flags
INST3(LAST_BMI_INSTRUCTION, "LAST_BMI_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None)
@@ -613,88 +615,94 @@ INST3(LAST_AVX_INSTRUCTION, "LAST_AVX_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE,
INST3(crc32, "crc32", IUM_WR, BAD_CODE, BAD_CODE, PACK4(0xF2, 0x0F, 0x38, 0xF0), INS_FLAGS_None)
// BMI1
-INST3(tzcnt, "tzcnt", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xBC), INS_FLAGS_None) // Count the Number of Trailing Zero Bits
+INST3(tzcnt, "tzcnt", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xBC), Undefined_OF | Undefined_SF | Writes_ZF | Undefined_AF | Undefined_PF | Writes_CF ) // Count the Number of Trailing Zero Bits
// LZCNT
-INST3(lzcnt, "lzcnt", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xBD), INS_FLAGS_None)
+INST3(lzcnt, "lzcnt", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xBD), Undefined_OF | Undefined_SF | Writes_ZF | Undefined_AF | Undefined_PF | Writes_CF )
// POPCNT
-INST3(popcnt, "popcnt", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xB8), INS_FLAGS_None)
+INST3(popcnt, "popcnt", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xB8), Resets_OF | Resets_SF | Writes_ZF | Resets_AF | Resets_PF | Resets_CF )
// id nm um mr mi flags
-INST2(ret, "ret", IUM_RD, 0x0000C3, 0x0000C2, INS_FLAGS_None)
-INST2(loop, "loop", IUM_RD, BAD_CODE, 0x0000E2, INS_FLAGS_None)
-INST2(call, "call", IUM_RD, 0x0010FF, 0x0000E8, INS_FLAGS_WritesFlags)
-
-INST2(rol, "rol", IUM_RW, 0x0000D2, BAD_CODE, INS_FLAGS_WritesFlags)
-INST2(rol_1, "rol", IUM_RW, 0x0000D0, 0x0000D0, INS_FLAGS_WritesFlags)
-INST2(rol_N, "rol", IUM_RW, 0x0000C0, 0x0000C0, INS_FLAGS_WritesFlags)
-INST2(ror, "ror", IUM_RW, 0x0008D2, BAD_CODE, INS_FLAGS_WritesFlags)
-INST2(ror_1, "ror", IUM_RW, 0x0008D0, 0x0008D0, INS_FLAGS_WritesFlags)
-INST2(ror_N, "ror", IUM_RW, 0x0008C0, 0x0008C0, INS_FLAGS_WritesFlags)
-
-INST2(rcl, "rcl", IUM_RW, 0x0010D2, BAD_CODE, INS_FLAGS_ReadsFlags | INS_FLAGS_WritesFlags)
-INST2(rcl_1, "rcl", IUM_RW, 0x0010D0, 0x0010D0, INS_FLAGS_ReadsFlags | INS_FLAGS_WritesFlags)
-INST2(rcl_N, "rcl", IUM_RW, 0x0010C0, 0x0010C0, INS_FLAGS_ReadsFlags | INS_FLAGS_WritesFlags)
-INST2(rcr, "rcr", IUM_RW, 0x0018D2, BAD_CODE, INS_FLAGS_ReadsFlags | INS_FLAGS_WritesFlags)
-INST2(rcr_1, "rcr", IUM_RW, 0x0018D0, 0x0018D0, INS_FLAGS_ReadsFlags | INS_FLAGS_WritesFlags)
-INST2(rcr_N, "rcr", IUM_RW, 0x0018C0, 0x0018C0, INS_FLAGS_ReadsFlags | INS_FLAGS_WritesFlags)
-INST2(shl, "shl", IUM_RW, 0x0020D2, BAD_CODE, INS_FLAGS_WritesFlags)
-INST2(shl_1, "shl", IUM_RW, 0x0020D0, 0x0020D0, INS_FLAGS_WritesFlags)
-INST2(shl_N, "shl", IUM_RW, 0x0020C0, 0x0020C0, INS_FLAGS_WritesFlags)
-INST2(shr, "shr", IUM_RW, 0x0028D2, BAD_CODE, INS_FLAGS_WritesFlags)
-INST2(shr_1, "shr", IUM_RW, 0x0028D0, 0x0028D0, INS_FLAGS_WritesFlags)
-INST2(shr_N, "shr", IUM_RW, 0x0028C0, 0x0028C0, INS_FLAGS_WritesFlags)
-INST2(sar, "sar", IUM_RW, 0x0038D2, BAD_CODE, INS_FLAGS_WritesFlags)
-INST2(sar_1, "sar", IUM_RW, 0x0038D0, 0x0038D0, INS_FLAGS_WritesFlags)
-INST2(sar_N, "sar", IUM_RW, 0x0038C0, 0x0038C0, INS_FLAGS_WritesFlags)
+INST2(ret, "ret", IUM_RD, 0x0000C3, 0x0000C2, INS_FLAGS_None )
+INST2(loop, "loop", IUM_RD, BAD_CODE, 0x0000E2, INS_FLAGS_None )
+INST2(call, "call", IUM_RD, 0x0010FF, 0x0000E8, INS_FLAGS_None )
+
+INST2(rol, "rol", IUM_RW, 0x0000D2, BAD_CODE, Undefined_OF | Writes_CF )
+INST2(rol_1, "rol", IUM_RW, 0x0000D0, 0x0000D0, Writes_OF | Writes_CF )
+INST2(rol_N, "rol", IUM_RW, 0x0000C0, 0x0000C0, Undefined_OF | Writes_CF )
+INST2(ror, "ror", IUM_RW, 0x0008D2, BAD_CODE, Undefined_OF | Writes_CF )
+INST2(ror_1, "ror", IUM_RW, 0x0008D0, 0x0008D0, Writes_OF | Writes_CF )
+INST2(ror_N, "ror", IUM_RW, 0x0008C0, 0x0008C0, Undefined_OF | Writes_CF )
+
+INST2(rcl, "rcl", IUM_RW, 0x0010D2, BAD_CODE, Undefined_OF | Writes_CF
+ | Reads_CF )
+INST2(rcl_1, "rcl", IUM_RW, 0x0010D0, 0x0010D0, Writes_OF | Writes_CF
+ | Reads_CF )
+INST2(rcl_N, "rcl", IUM_RW, 0x0010C0, 0x0010C0, Undefined_OF | Writes_CF
+ | Reads_CF )
+INST2(rcr, "rcr", IUM_RW, 0x0018D2, BAD_CODE, Undefined_OF | Writes_CF
+ | Reads_CF )
+INST2(rcr_1, "rcr", IUM_RW, 0x0018D0, 0x0018D0, Writes_OF | Writes_CF
+ | Reads_CF )
+INST2(rcr_N, "rcr", IUM_RW, 0x0018C0, 0x0018C0, Undefined_OF | Writes_CF
+ | Reads_CF )
+INST2(shl, "shl", IUM_RW, 0x0020D2, BAD_CODE, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF )
+INST2(shl_1, "shl", IUM_RW, 0x0020D0, 0x0020D0, Writes_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF )
+INST2(shl_N, "shl", IUM_RW, 0x0020C0, 0x0020C0, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF )
+INST2(shr, "shr", IUM_RW, 0x0028D2, BAD_CODE, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF )
+INST2(shr_1, "shr", IUM_RW, 0x0028D0, 0x0028D0, Writes_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF )
+INST2(shr_N, "shr", IUM_RW, 0x0028C0, 0x0028C0, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF )
+INST2(sar, "sar", IUM_RW, 0x0038D2, BAD_CODE, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF )
+INST2(sar_1, "sar", IUM_RW, 0x0038D0, 0x0038D0, Writes_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF )
+INST2(sar_N, "sar", IUM_RW, 0x0038C0, 0x0038C0, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF )
// id nm um mr flags
-INST1(r_movsb, "rep movsb", IUM_RD, 0x00A4F3, INS_FLAGS_None)
-INST1(r_movsd, "rep movsd", IUM_RD, 0x00A5F3, INS_FLAGS_None)
+INST1(r_movsb, "rep movsb", IUM_RD, 0x00A4F3, Reads_DF )
+INST1(r_movsd, "rep movsd", IUM_RD, 0x00A5F3, Reads_DF )
#if defined(TARGET_AMD64)
-INST1(r_movsq, "rep movsq", IUM_RD, 0xF3A548, INS_FLAGS_None)
+INST1(r_movsq, "rep movsq", IUM_RD, 0xF3A548, Reads_DF )
#endif // defined(TARGET_AMD64)
-INST1(movsb, "movsb", IUM_RD, 0x0000A4, INS_FLAGS_None)
-INST1(movsd, "movsd", IUM_RD, 0x0000A5, INS_FLAGS_None)
+INST1(movsb, "movsb", IUM_RD, 0x0000A4, Reads_DF )
+INST1(movsd, "movsd", IUM_RD, 0x0000A5, Reads_DF )
#if defined(TARGET_AMD64)
-INST1(movsq, "movsq", IUM_RD, 0x00A548, INS_FLAGS_None)
+INST1(movsq, "movsq", IUM_RD, 0x00A548, Reads_DF )
#endif // defined(TARGET_AMD64)
-INST1(r_stosb, "rep stosb", IUM_RD, 0x00AAF3, INS_FLAGS_None)
-INST1(r_stosd, "rep stosd", IUM_RD, 0x00ABF3, INS_FLAGS_None)
+INST1(r_stosb, "rep stosb", IUM_RD, 0x00AAF3, Reads_DF )
+INST1(r_stosd, "rep stosd", IUM_RD, 0x00ABF3, Reads_DF )
#if defined(TARGET_AMD64)
-INST1(r_stosq, "rep stosq", IUM_RD, 0xF3AB48, INS_FLAGS_None)
+INST1(r_stosq, "rep stosq", IUM_RD, 0xF3AB48, Reads_DF )
#endif // defined(TARGET_AMD64)
-INST1(stosb, "stosb", IUM_RD, 0x0000AA, INS_FLAGS_None)
-INST1(stosd, "stosd", IUM_RD, 0x0000AB, INS_FLAGS_None)
+INST1(stosb, "stosb", IUM_RD, 0x0000AA, Reads_DF )
+INST1(stosd, "stosd", IUM_RD, 0x0000AB, Reads_DF )
#if defined(TARGET_AMD64)
-INST1(stosq, "stosq", IUM_RD, 0x00AB48, INS_FLAGS_None)
+INST1(stosq, "stosq", IUM_RD, 0x00AB48, Reads_DF )
#endif // defined(TARGET_AMD64)
-INST1(int3, "int3", IUM_RD, 0x0000CC, INS_FLAGS_None)
-INST1(nop, "nop", IUM_RD, 0x000090, INS_FLAGS_None)
-INST1(lock, "lock", IUM_RD, 0x0000F0, INS_FLAGS_None)
-INST1(leave, "leave", IUM_RD, 0x0000C9, INS_FLAGS_None)
+INST1(int3, "int3", IUM_RD, 0x0000CC, INS_FLAGS_None )
+INST1(nop, "nop", IUM_RD, 0x000090, INS_FLAGS_None )
+INST1(lock, "lock", IUM_RD, 0x0000F0, INS_FLAGS_None )
+INST1(leave, "leave", IUM_RD, 0x0000C9, INS_FLAGS_None )
-INST1(neg, "neg", IUM_RW, 0x0018F6, INS_FLAGS_WritesFlags)
-INST1(not, "not", IUM_RW, 0x0010F6, INS_FLAGS_WritesFlags)
+INST1(neg, "neg", IUM_RW, 0x0018F6, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF )
+INST1(not, "not", IUM_RW, 0x0010F6, INS_FLAGS_None )
-INST1(cdq, "cdq", IUM_RD, 0x000099, INS_FLAGS_WritesFlags)
-INST1(idiv, "idiv", IUM_RD, 0x0038F6, INS_FLAGS_WritesFlags)
-INST1(imulEAX, "imul", IUM_RD, 0x0028F6, INS_FLAGS_WritesFlags) // edx:eax = eax*op1
-INST1(div, "div", IUM_RD, 0x0030F6, INS_FLAGS_WritesFlags)
-INST1(mulEAX, "mul", IUM_RD, 0x0020F6, INS_FLAGS_WritesFlags)
+INST1(cdq, "cdq", IUM_RD, 0x000099, INS_FLAGS_None )
+INST1(idiv, "idiv", IUM_RD, 0x0038F6, Undefined_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Undefined_CF )
+INST1(imulEAX, "imul", IUM_RD, 0x0028F6, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
+INST1(div, "div", IUM_RD, 0x0030F6, Undefined_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Undefined_CF )
+INST1(mulEAX, "mul", IUM_RD, 0x0020F6, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF )
-INST1(sahf, "sahf", IUM_RD, 0x00009E, INS_FLAGS_WritesFlags)
+INST1(sahf, "sahf", IUM_RD, 0x00009E, Restore_SF_ZF_AF_PF_CF )
-INST1(xadd, "xadd", IUM_RW, 0x0F00C0, INS_FLAGS_WritesFlags)
-INST1(cmpxchg, "cmpxchg", IUM_RW, 0x0F00B0, INS_FLAGS_WritesFlags)
+INST1(xadd, "xadd", IUM_RW, 0x0F00C0, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF )
+INST1(cmpxchg, "cmpxchg", IUM_RW, 0x0F00B0, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF )
-INST1(shld, "shld", IUM_RW, 0x0F00A4, INS_FLAGS_WritesFlags)
-INST1(shrd, "shrd", IUM_RW, 0x0F00AC, INS_FLAGS_WritesFlags)
+INST1(shld, "shld", IUM_RW, 0x0F00A4, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF )
+INST1(shrd, "shrd", IUM_RW, 0x0F00AC, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF )
// For RyuJIT/x86, we follow the x86 calling convention that requires
// us to return floating point value on the x87 FP stack, so we need
@@ -704,22 +712,22 @@ INST1(fld, "fld", IUM_WR, 0x0000D9,
INST1(fstp, "fstp", IUM_WR, 0x0018D9, INS_FLAGS_x87Instr)
#endif // TARGET_X86
-INST1(seto, "seto", IUM_WR, 0x0F0090, INS_FLAGS_ReadsFlags)
-INST1(setno, "setno", IUM_WR, 0x0F0091, INS_FLAGS_ReadsFlags)
-INST1(setb, "setb", IUM_WR, 0x0F0092, INS_FLAGS_ReadsFlags)
-INST1(setae, "setae", IUM_WR, 0x0F0093, INS_FLAGS_ReadsFlags)
-INST1(sete, "sete", IUM_WR, 0x0F0094, INS_FLAGS_ReadsFlags)
-INST1(setne, "setne", IUM_WR, 0x0F0095, INS_FLAGS_ReadsFlags)
-INST1(setbe, "setbe", IUM_WR, 0x0F0096, INS_FLAGS_ReadsFlags)
-INST1(seta, "seta", IUM_WR, 0x0F0097, INS_FLAGS_ReadsFlags)
-INST1(sets, "sets", IUM_WR, 0x0F0098, INS_FLAGS_ReadsFlags)
-INST1(setns, "setns", IUM_WR, 0x0F0099, INS_FLAGS_ReadsFlags)
-INST1(setp, "setp", IUM_WR, 0x0F009A, INS_FLAGS_ReadsFlags)
-INST1(setnp, "setnp", IUM_WR, 0x0F009B, INS_FLAGS_ReadsFlags)
-INST1(setl, "setl", IUM_WR, 0x0F009C, INS_FLAGS_ReadsFlags)
-INST1(setge, "setge", IUM_WR, 0x0F009D, INS_FLAGS_ReadsFlags)
-INST1(setle, "setle", IUM_WR, 0x0F009E, INS_FLAGS_ReadsFlags)
-INST1(setg, "setg", IUM_WR, 0x0F009F, INS_FLAGS_ReadsFlags)
+INST1(seto, "seto", IUM_WR, 0x0F0090, Reads_OF )
+INST1(setno, "setno", IUM_WR, 0x0F0091, Reads_OF )
+INST1(setb, "setb", IUM_WR, 0x0F0092, Reads_CF )
+INST1(setae, "setae", IUM_WR, 0x0F0093, Reads_CF )
+INST1(sete, "sete", IUM_WR, 0x0F0094, Reads_ZF )
+INST1(setne, "setne", IUM_WR, 0x0F0095, Reads_ZF )
+INST1(setbe, "setbe", IUM_WR, 0x0F0096, Reads_ZF | Reads_CF )
+INST1(seta, "seta", IUM_WR, 0x0F0097, Reads_ZF | Reads_CF )
+INST1(sets, "sets", IUM_WR, 0x0F0098, Reads_SF )
+INST1(setns, "setns", IUM_WR, 0x0F0099, Reads_SF )
+INST1(setp, "setp", IUM_WR, 0x0F009A, Reads_PF )
+INST1(setnp, "setnp", IUM_WR, 0x0F009B, Reads_PF )
+INST1(setl, "setl", IUM_WR, 0x0F009C, Reads_OF | Reads_SF )
+INST1(setge, "setge", IUM_WR, 0x0F009D, Reads_OF | Reads_SF )
+INST1(setle, "setle", IUM_WR, 0x0F009E, Reads_OF | Reads_SF | Reads_ZF )
+INST1(setg, "setg", IUM_WR, 0x0F009F, Reads_OF | Reads_SF | Reads_ZF )
#ifdef TARGET_AMD64
// A jump with rex prefix. This is used for register indirect
@@ -727,43 +735,43 @@ INST1(setg, "setg", IUM_WR, 0x0F009F,
INST1(rex_jmp, "rex.jmp", IUM_RD, 0x0020FE, INS_FLAGS_None)
#endif
-INST1(i_jmp, "jmp", IUM_RD, 0x0020FE, INS_FLAGS_None)
-
-INST0(jmp, "jmp", IUM_RD, 0x0000EB, INS_FLAGS_None)
-INST0(jo, "jo", IUM_RD, 0x000070, INS_FLAGS_ReadsFlags)
-INST0(jno, "jno", IUM_RD, 0x000071, INS_FLAGS_ReadsFlags)
-INST0(jb, "jb", IUM_RD, 0x000072, INS_FLAGS_ReadsFlags)
-INST0(jae, "jae", IUM_RD, 0x000073, INS_FLAGS_ReadsFlags)
-INST0(je, "je", IUM_RD, 0x000074, INS_FLAGS_ReadsFlags)
-INST0(jne, "jne", IUM_RD, 0x000075, INS_FLAGS_ReadsFlags)
-INST0(jbe, "jbe", IUM_RD, 0x000076, INS_FLAGS_ReadsFlags)
-INST0(ja, "ja", IUM_RD, 0x000077, INS_FLAGS_ReadsFlags)
-INST0(js, "js", IUM_RD, 0x000078, INS_FLAGS_ReadsFlags)
-INST0(jns, "jns", IUM_RD, 0x000079, INS_FLAGS_ReadsFlags)
-INST0(jp, "jp", IUM_RD, 0x00007A, INS_FLAGS_ReadsFlags)
-INST0(jnp, "jnp", IUM_RD, 0x00007B, INS_FLAGS_ReadsFlags)
-INST0(jl, "jl", IUM_RD, 0x00007C, INS_FLAGS_ReadsFlags)
-INST0(jge, "jge", IUM_RD, 0x00007D, INS_FLAGS_ReadsFlags)
-INST0(jle, "jle", IUM_RD, 0x00007E, INS_FLAGS_ReadsFlags)
-INST0(jg, "jg", IUM_RD, 0x00007F, INS_FLAGS_ReadsFlags)
-
-INST0(l_jmp, "jmp", IUM_RD, 0x0000E9, INS_FLAGS_None)
-INST0(l_jo, "jo", IUM_RD, 0x00800F, INS_FLAGS_ReadsFlags)
-INST0(l_jno, "jno", IUM_RD, 0x00810F, INS_FLAGS_ReadsFlags)
-INST0(l_jb, "jb", IUM_RD, 0x00820F, INS_FLAGS_ReadsFlags)
-INST0(l_jae, "jae", IUM_RD, 0x00830F, INS_FLAGS_ReadsFlags)
-INST0(l_je, "je", IUM_RD, 0x00840F, INS_FLAGS_ReadsFlags)
-INST0(l_jne, "jne", IUM_RD, 0x00850F, INS_FLAGS_ReadsFlags)
-INST0(l_jbe, "jbe", IUM_RD, 0x00860F, INS_FLAGS_ReadsFlags)
-INST0(l_ja, "ja", IUM_RD, 0x00870F, INS_FLAGS_ReadsFlags)
-INST0(l_js, "js", IUM_RD, 0x00880F, INS_FLAGS_ReadsFlags)
-INST0(l_jns, "jns", IUM_RD, 0x00890F, INS_FLAGS_ReadsFlags)
-INST0(l_jp, "jp", IUM_RD, 0x008A0F, INS_FLAGS_ReadsFlags)
-INST0(l_jnp, "jnp", IUM_RD, 0x008B0F, INS_FLAGS_ReadsFlags)
-INST0(l_jl, "jl", IUM_RD, 0x008C0F, INS_FLAGS_ReadsFlags)
-INST0(l_jge, "jge", IUM_RD, 0x008D0F, INS_FLAGS_ReadsFlags)
-INST0(l_jle, "jle", IUM_RD, 0x008E0F, INS_FLAGS_ReadsFlags)
-INST0(l_jg, "jg", IUM_RD, 0x008F0F, INS_FLAGS_ReadsFlags)
+INST1(i_jmp, "jmp", IUM_RD, 0x0020FE, INS_FLAGS_None )
+
+INST0(jmp, "jmp", IUM_RD, 0x0000EB, INS_FLAGS_None )
+INST0(jo, "jo", IUM_RD, 0x000070, Reads_OF )
+INST0(jno, "jno", IUM_RD, 0x000071, Reads_OF )
+INST0(jb, "jb", IUM_RD, 0x000072, Reads_CF )
+INST0(jae, "jae", IUM_RD, 0x000073, Reads_CF )
+INST0(je, "je", IUM_RD, 0x000074, Reads_ZF )
+INST0(jne, "jne", IUM_RD, 0x000075, Reads_ZF )
+INST0(jbe, "jbe", IUM_RD, 0x000076, Reads_ZF | Reads_CF )
+INST0(ja, "ja", IUM_RD, 0x000077, Reads_ZF | Reads_CF )
+INST0(js, "js", IUM_RD, 0x000078, Reads_SF )
+INST0(jns, "jns", IUM_RD, 0x000079, Reads_SF )
+INST0(jp, "jp", IUM_RD, 0x00007A, Reads_PF )
+INST0(jnp, "jnp", IUM_RD, 0x00007B, Reads_PF )
+INST0(jl, "jl", IUM_RD, 0x00007C, Reads_OF | Reads_SF )
+INST0(jge, "jge", IUM_RD, 0x00007D, Reads_OF | Reads_SF )
+INST0(jle, "jle", IUM_RD, 0x00007E, Reads_OF | Reads_SF | Reads_ZF )
+INST0(jg, "jg", IUM_RD, 0x00007F, Reads_OF | Reads_SF | Reads_ZF )
+
+INST0(l_jmp, "jmp", IUM_RD, 0x0000E9, INS_FLAGS_None )
+INST0(l_jo, "jo", IUM_RD, 0x00800F, Reads_OF )
+INST0(l_jno, "jno", IUM_RD, 0x00810F, Reads_OF )
+INST0(l_jb, "jb", IUM_RD, 0x00820F, Reads_CF )
+INST0(l_jae, "jae", IUM_RD, 0x00830F, Reads_CF )
+INST0(l_je, "je", IUM_RD, 0x00840F, Reads_ZF )
+INST0(l_jne, "jne", IUM_RD, 0x00850F, Reads_ZF )
+INST0(l_jbe, "jbe", IUM_RD, 0x00860F, Reads_ZF | Reads_CF )
+INST0(l_ja, "ja", IUM_RD, 0x00870F, Reads_ZF | Reads_CF )
+INST0(l_js, "js", IUM_RD, 0x00880F, Reads_SF )
+INST0(l_jns, "jns", IUM_RD, 0x00890F, Reads_SF )
+INST0(l_jp, "jp", IUM_RD, 0x008A0F, Reads_PF )
+INST0(l_jnp, "jnp", IUM_RD, 0x008B0F, Reads_PF )
+INST0(l_jl, "jl", IUM_RD, 0x008C0F, Reads_OF | Reads_SF )
+INST0(l_jge, "jge", IUM_RD, 0x008D0F, Reads_OF | Reads_SF )
+INST0(l_jle, "jle", IUM_RD, 0x008E0F, Reads_OF | Reads_SF | Reads_ZF )
+INST0(l_jg, "jg", IUM_RD, 0x008F0F, Reads_OF | Reads_SF | Reads_ZF )
INST0(align, "align", IUM_RD, BAD_CODE, INS_FLAGS_None)
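
The table above replaces the old coarse INS_FLAGS_ReadsFlags / INS_FLAGS_WritesFlags bits with per-flag semantics: Reads_x, Writes_x, Resets_x (forces the flag to a known value, so it counts as a write), and Undefined_x (leaves the flag unspecified). A minimal, self-contained sketch of the kind of query this enables follows; the bit values and the FlagsAreUsable helper are illustrative stand-ins, not the JIT's actual definitions.

    #include <cassert>

    // Hypothetical per-flag bits, mirroring the names used in the table above
    // (Resets_x is folded into Writes_x here for brevity).
    enum insFlags : unsigned
    {
        INS_FLAGS_None = 0,
        Reads_OF     = 1u << 0,  Reads_SF     = 1u << 1,  Reads_ZF     = 1u << 2,
        Reads_PF     = 1u << 3,  Reads_CF     = 1u << 4,
        Writes_OF    = 1u << 5,  Writes_SF    = 1u << 6,  Writes_ZF    = 1u << 7,
        Writes_AF    = 1u << 8,  Writes_PF    = 1u << 9,  Writes_CF    = 1u << 10,
        Undefined_OF = 1u << 11, Undefined_SF = 1u << 12, Undefined_ZF = 1u << 13,
        Undefined_AF = 1u << 14, Undefined_PF = 1u << 15, Undefined_CF = 1u << 16,
    };

    // A flags consumer (jcc/setcc/cmovcc) may reuse the flags left by a
    // producer only if every flag it reads is definitely written by that
    // producer, rather than left untouched or undefined.
    bool FlagsAreUsable(unsigned producer, unsigned consumer)
    {
        const struct { unsigned read, written, undefined; } map[] = {
            { Reads_OF, Writes_OF, Undefined_OF }, { Reads_SF, Writes_SF, Undefined_SF },
            { Reads_ZF, Writes_ZF, Undefined_ZF }, { Reads_PF, Writes_PF, Undefined_PF },
            { Reads_CF, Writes_CF, Undefined_CF },
        };
        for (const auto& m : map)
        {
            if ((consumer & m.read) == 0)
                continue; // consumer does not look at this flag
            if (((producer & m.written) == 0) || ((producer & m.undefined) != 0))
                return false; // flag is untouched or undefined after the producer
        }
        return true;
    }

    int main()
    {
        // Per the table, tzcnt writes ZF and CF but leaves OF undefined:
        unsigned tzcnt = Undefined_OF | Undefined_SF | Writes_ZF | Undefined_AF | Undefined_PF | Writes_CF;
        assert(FlagsAreUsable(tzcnt, Reads_ZF));  // je after tzcnt is safe
        assert(!FlagsAreUsable(tzcnt, Reads_OF)); // jo after tzcnt is not
    }
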
diff --git a/src/coreclr/jit/jit.h b/src/coreclr/jit/jit.h
index 1a5c80c3218aa1..2dd39559c282ee 100644
--- a/src/coreclr/jit/jit.h
+++ b/src/coreclr/jit/jit.h
@@ -86,9 +86,6 @@
#if defined(TARGET_ARM64)
#error Cannot define both TARGET_X86 and TARGET_ARM64
#endif
-#if !defined(HOST_X86)
-#define _CROSS_COMPILER_
-#endif
#elif defined(TARGET_AMD64)
#if defined(TARGET_X86)
#error Cannot define both TARGET_AMD64 and TARGET_X86
@@ -99,9 +96,6 @@
#if defined(TARGET_ARM64)
#error Cannot define both TARGET_AMD64 and TARGET_ARM64
#endif
-#if !defined(HOST_AMD64)
-#define _CROSS_COMPILER_
-#endif
#elif defined(TARGET_ARM)
#if defined(TARGET_X86)
#error Cannot define both TARGET_ARM and TARGET_X86
@@ -112,9 +106,6 @@
#if defined(TARGET_ARM64)
#error Cannot define both TARGET_ARM and TARGET_ARM64
#endif
-#if !defined(HOST_ARM)
-#define _CROSS_COMPILER_
-#endif
#elif defined(TARGET_ARM64)
#if defined(TARGET_X86)
#error Cannot define both TARGET_ARM64 and TARGET_X86
@@ -125,9 +116,6 @@
#if defined(TARGET_ARM)
#error Cannot define both TARGET_ARM64 and TARGET_ARM
#endif
-#if !defined(HOST_ARM64)
-#define _CROSS_COMPILER_
-#endif
#else
#error Unsupported or unset target architecture
#endif
@@ -198,11 +186,9 @@
#ifdef DEBUG
#define INDEBUG(x) x
-#define INDEBUG_COMMA(x) x,
#define DEBUGARG(x) , x
#else
#define INDEBUG(x)
-#define INDEBUG_COMMA(x)
#define DEBUGARG(x)
#endif
@@ -223,7 +209,7 @@
#if defined(DEBUG) && !defined(OSX_ARM64_ABI)
// On all platforms except Arm64 OSX arguments on the stack are taking
// register size slots. On these platforms we could check that stack slots count
-// matchs out new byte size calculations.
+// matches our new byte size calculations.
#define DEBUG_ARG_SLOTS
#endif
@@ -239,11 +225,6 @@
#if defined(UNIX_AMD64_ABI) || !defined(TARGET_64BIT) || defined(TARGET_ARM64)
#define FEATURE_PUT_STRUCT_ARG_STK 1
-#define PUT_STRUCT_ARG_STK_ONLY_ARG(x) , x
-#define PUT_STRUCT_ARG_STK_ONLY(x) x
-#else
-#define PUT_STRUCT_ARG_STK_ONLY_ARG(x)
-#define PUT_STRUCT_ARG_STK_ONLY(x)
#endif
#if defined(UNIX_AMD64_ABI)
@@ -297,8 +278,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#define INFO6 LL_INFO10000 // Did Jit or Inline succeeded?
#define INFO7 LL_INFO100000 // NYI stuff
#define INFO8 LL_INFO1000000 // Weird failures
-#define INFO9 LL_EVERYTHING // Info about incoming settings
-#define INFO10 LL_EVERYTHING // Totally verbose
#endif // DEBUG
@@ -308,11 +287,6 @@ const CORINFO_CLASS_HANDLE NO_CLASS_HANDLE = (CORINFO_CLASS_HANDLE) nullptr;
/*****************************************************************************/
-inline bool False()
-{
- return false;
-} // Use to disable code while keeping prefast happy
-
// We define two IL offset types, as follows:
//
// IL_OFFSET: either a distinguished value, or an IL offset.
@@ -496,21 +470,17 @@ class GlobalJitOptions
#endif
/*****************************************************************************/
-#ifdef DEBUG
-/*****************************************************************************/
-
-#define DUMPER
-
-#else // !DEBUG
+#if !defined(DEBUG)
#if DUMP_GC_TABLES
#pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!")
-const bool dspGCtbls = true;
+const bool dspGCtbls = true;
#endif
-/*****************************************************************************/
#endif // !DEBUG
+/*****************************************************************************/
+
#ifdef DEBUG
#define JITDUMP(...) \
{ \
@@ -591,21 +561,8 @@ inline bool IsUninitialized(T data);
/*****************************************************************************/
-enum accessLevel
-{
- ACL_NONE,
- ACL_PRIVATE,
- ACL_DEFAULT,
- ACL_PROTECTED,
- ACL_PUBLIC,
-};
-
-/*****************************************************************************/
-
#define castto(var, typ) (*(typ*)&var)
-#define sizeto(typ, mem) (offsetof(typ, mem) + sizeof(((typ*)0)->mem))
-
/*****************************************************************************/
#ifdef NO_MISALIGNED_ACCESS
@@ -645,23 +602,11 @@ inline size_t roundUp(size_t size, size_t mult = sizeof(size_t))
return (size + (mult - 1)) & ~(mult - 1);
}
-inline size_t roundDn(size_t size, size_t mult = sizeof(size_t))
-{
- assert(mult && ((mult & (mult - 1)) == 0)); // power of two test
-
- return (size) & ~(mult - 1);
-}
-
#ifdef HOST_64BIT
inline unsigned int roundUp(unsigned size, unsigned mult)
{
return (unsigned int)roundUp((size_t)size, (size_t)mult);
}
-
-inline unsigned int roundDn(unsigned size, unsigned mult)
-{
- return (unsigned int)roundDn((size_t)size, (size_t)mult);
-}
#endif // HOST_64BIT
inline unsigned int unsigned_abs(int x)
@@ -698,12 +643,6 @@ class Histogram
#endif // CALL_ARG_STATS || COUNT_BASIC_BLOCKS || COUNT_LOOPS || EMITTER_STATS || MEASURE_NODE_SIZE
-/*****************************************************************************/
-#ifdef ICECAP
-#include "icapexp.h"
-#include "icapctrl.h"
-#endif
-
/*****************************************************************************/
#include "error.h"
@@ -783,10 +722,6 @@ extern int jitNativeCode(CORINFO_METHOD_HANDLE methodHnd,
const size_t MAX_USHORT_SIZE_T = static_cast<size_t>(static_cast<unsigned short>(-1));
const size_t MAX_UNSIGNED_SIZE_T = static_cast<size_t>(static_cast<unsigned>(-1));
-// These assume 2's complement...
-const int MAX_SHORT_AS_INT = 32767;
-const int MIN_SHORT_AS_INT = -32768;
-
/*****************************************************************************/
class Compiler;
diff --git a/src/coreclr/jit/jitconfigvalues.h b/src/coreclr/jit/jitconfigvalues.h
index 5d1cd06faaad9d..b1c9e522883209 100644
--- a/src/coreclr/jit/jitconfigvalues.h
+++ b/src/coreclr/jit/jitconfigvalues.h
@@ -26,10 +26,12 @@ CONFIG_INTEGER(DiffableDasm, W("JitDiffableDasm"), 0) // Make the disas
CONFIG_INTEGER(JitDasmWithAddress, W("JitDasmWithAddress"), 0) // Print the process address next to each instruction of
// the disassembly
CONFIG_INTEGER(DisplayLoopHoistStats, W("JitLoopHoistStats"), 0) // Display JIT loop hoisting statistics
-CONFIG_INTEGER(DisplayLsraStats, W("JitLsraStats"), 0) // Display JIT Linear Scan Register Allocator statistics
- // if set to 1. If set to "2", display the stats in csv format.
- // Recommended to use with JitStdOutFile flag.
-CONFIG_STRING(JitLsraOrdering, W("JitLsraOrdering")) // LSRA heuristics ordering
+CONFIG_INTEGER(DisplayLsraStats, W("JitLsraStats"), 0) // Display JIT Linear Scan Register Allocator statistics
+ // If set to "1", display the stats in textual format.
+ // If set to "2", display the stats in csv format.
+ // If set to "3", display the stats in summarize format.
+ // Recommended to use with JitStdOutFile flag.
+CONFIG_STRING(JitLsraOrdering, W("JitLsraOrdering")) // LSRA heuristics ordering
CONFIG_INTEGER(DumpJittedMethods, W("DumpJittedMethods"), 0) // Prints all jitted methods to the console
CONFIG_INTEGER(EnablePCRelAddr, W("JitEnablePCRelAddr"), 1) // Whether absolute addr be encoded as PC-rel offset by
// RyuJIT where possible
@@ -439,6 +441,8 @@ CONFIG_INTEGER(JitInlineDumpData, W("JitInlineDumpData"), 0)
CONFIG_INTEGER(JitInlineDumpXml, W("JitInlineDumpXml"), 0) // 1 = full xml (+ failures in DEBUG)
// 2 = only methods with inlines (+ failures in DEBUG)
// 3 = only methods with inlines, no failures
+CONFIG_STRING(JitInlineDumpXmlFile, W("JitInlineDumpXmlFile"))
+CONFIG_INTEGER(JitInlinePolicyDumpXml, W("JitInlinePolicyDumpXml"), 0)
CONFIG_INTEGER(JitInlineLimit, W("JitInlineLimit"), -1)
CONFIG_INTEGER(JitInlinePolicyDiscretionary, W("JitInlinePolicyDiscretionary"), 0)
CONFIG_INTEGER(JitInlinePolicyFull, W("JitInlinePolicyFull"), 0)
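
Like the rest of this header, the entries above are consumed through the X-macro pattern: jitconfigvalues.h is included more than once with CONFIG_INTEGER / CONFIG_STRING redefined each time, so a single list generates both storage and accessors. A self-contained sketch of that pattern with made-up entry names (it does not reproduce the JIT's actual JitConfig machinery):

    #include <cstdio>

    // One list of entries, consumed under different definitions of the macro.
    #define MY_CONFIG_VALUES            \
        CONFIG_INTEGER(LsraStats, 0)    \
        CONFIG_INTEGER(DumpMethods, 0)

    struct ConfigModel
    {
        // First expansion: one field per entry, initialized to its default.
    #define CONFIG_INTEGER(name, def) int m_##name = def;
        MY_CONFIG_VALUES
    #undef CONFIG_INTEGER

        // Second expansion: one read accessor per entry.
    #define CONFIG_INTEGER(name, def) int name() const { return m_##name; }
        MY_CONFIG_VALUES
    #undef CONFIG_INTEGER
    };

    int main()
    {
        ConfigModel cfg;
        cfg.m_LsraStats = 2; // as if the JitLsraStats knob above were set to 2
        if (cfg.LsraStats() == 2)
            printf("emit LSRA statistics in csv format\n");
    }
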
diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp
index f0adcfce6be185..da431dd82e2de9 100644
--- a/src/coreclr/jit/jiteh.cpp
+++ b/src/coreclr/jit/jiteh.cpp
@@ -870,10 +870,7 @@ bool Compiler::ehCanDeleteEmptyBlock(BasicBlock* block)
*/
void Compiler::ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast)
{
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
-
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if (HBtab->ebdTryLast == oldLast)
{
@@ -1004,11 +1001,9 @@ bool Compiler::ehAnyFunclets()
unsigned Compiler::ehFuncletCount()
{
- unsigned funcletCnt = 0;
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
+ unsigned funcletCnt = 0;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if (HBtab->HasFilter())
{
@@ -1420,9 +1415,7 @@ void Compiler::fgRemoveEHTableEntry(unsigned XTnum)
HBtab = compHndBBtab + XTnum;
- EHblkDsc* xtabEnd;
- EHblkDsc* xtab;
- for (xtab = compHndBBtab, xtabEnd = compHndBBtab + compHndBBtabCount; xtab < xtabEnd; xtab++)
+ for (EHblkDsc* const xtab : EHClauses(this))
{
if ((xtab != HBtab) && (xtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) &&
(xtab->ebdEnclosingTryIndex >= XTnum))
@@ -1457,7 +1450,7 @@ void Compiler::fgRemoveEHTableEntry(unsigned XTnum)
/* We need to update all of the blocks' bbTryIndex */
- for (BasicBlock* blk = fgFirstBB; blk; blk = blk->bbNext)
+ for (BasicBlock* const blk : Blocks())
{
if (blk->hasTryIndex())
{
@@ -1518,9 +1511,7 @@ EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum)
{
// Update all enclosing links that will get invalidated by inserting an entry at 'XTnum'
- EHblkDsc* xtabEnd;
- EHblkDsc* xtab;
- for (xtab = compHndBBtab, xtabEnd = compHndBBtab + compHndBBtabCount; xtab < xtabEnd; xtab++)
+ for (EHblkDsc* const xtab : EHClauses(this))
{
if ((xtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) && (xtab->ebdEnclosingTryIndex >= XTnum))
{
@@ -1536,7 +1527,7 @@ EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum)
// We need to update the BasicBlock bbTryIndex and bbHndIndex field for all blocks
- for (BasicBlock* blk = fgFirstBB; blk; blk = blk->bbNext)
+ for (BasicBlock* const blk : Blocks())
{
if (blk->hasTryIndex() && (blk->getTryIndex() >= XTnum))
{
@@ -1762,8 +1753,7 @@ void Compiler::fgRemoveEH()
#ifdef DEBUG
// Make sure none of the remaining blocks have any EH.
- BasicBlock* blk;
- foreach_block(this, blk)
+ for (BasicBlock* const blk : Blocks())
{
assert(!blk->hasTryIndex());
assert(!blk->hasHndIndex());
@@ -3124,9 +3114,8 @@ void Compiler::fgVerifyHandlerTab()
unsigned* blockNumMap = (unsigned*)_alloca(blockNumBytes);
memset(blockNumMap, 0, blockNumBytes);
- BasicBlock* block;
- unsigned newBBnum = 1;
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ unsigned newBBnum = 1;
+ for (BasicBlock* const block : Blocks())
{
assert((block->bbFlags & BBF_REMOVED) == 0);
assert(1 <= block->bbNum && block->bbNum <= bbNumMax);
@@ -3151,15 +3140,12 @@ void Compiler::fgVerifyHandlerTab()
#endif
// To verify that bbCatchTyp is set properly on all blocks, and that some BBF_* flags are only set on the first
- // block
- // of 'try' or handlers, create two bool arrays indexed by block number: one for the set of blocks that are the
- // beginning
- // blocks of 'try' regions, and one for blocks that are the beginning of handlers (including filters). Note that
- // since
- // this checking function runs before EH normalization, we have to handle the case where blocks can be both the
- // beginning
- // of a 'try' as well as the beginning of a handler. After we've iterated over the EH table, loop
- // over all blocks and verify that only handler begin blocks have bbCatchTyp == BBCT_NONE, and some other things.
+ // block of 'try' or handlers, create two bool arrays indexed by block number: one for the set of blocks that
+ // are the beginning blocks of 'try' regions, and one for blocks that are the beginning of handlers (including
+ // filters). Note that since this checking function runs before EH normalization, we have to handle the case
+ // where blocks can be both the beginning of a 'try' as well as the beginning of a handler. After we've iterated
+ // over the EH table, loop over all blocks and verify that only handler begin blocks have bbCatchTyp == BBCT_NONE,
+ // and some other things.
size_t blockBoolSetBytes = (bbNumMax + 1) * sizeof(bool);
bool* blockTryBegSet = (bool*)_alloca(blockBoolSetBytes);
@@ -3495,6 +3481,8 @@ void Compiler::fgVerifyHandlerTab()
// otherwise set. The duplicate clause handler is truly a duplicate of
// a previously processed handler, so we ignore it.
+ BasicBlock* block;
+
size_t blockIndexBytes = (bbNumMax + 1) * sizeof(unsigned short);
unsigned short* blockTryIndex = (unsigned short*)_alloca(blockIndexBytes);
unsigned short* blockHndIndex = (unsigned short*)_alloca(blockIndexBytes);
@@ -3556,7 +3544,7 @@ void Compiler::fgVerifyHandlerTab()
#endif // FEATURE_EH_FUNCLETS
// Make sure that all blocks have the right index, including those blocks that should have zero (no EH region).
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
assert(block->bbTryIndex == blockTryIndex[block->bbNum]);
assert(block->bbHndIndex == blockHndIndex[block->bbNum]);
@@ -4088,12 +4076,12 @@ void Compiler::fgClearFinallyTargetBit(BasicBlock* block)
assert(fgComputePredsDone);
assert((block->bbFlags & BBF_FINALLY_TARGET) != 0);
- for (flowList* pred = block->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- if (pred->getBlock()->bbJumpKind == BBJ_ALWAYS && pred->getBlock()->bbJumpDest == block)
+ if (predBlock->bbJumpKind == BBJ_ALWAYS && predBlock->bbJumpDest == block)
{
- BasicBlock* pPrev = pred->getBlock()->bbPrev;
- if (pPrev != NULL)
+ BasicBlock* pPrev = predBlock->bbPrev;
+ if (pPrev != nullptr)
{
if (pPrev->bbJumpKind == BBJ_CALLFINALLY)
{
@@ -4241,12 +4229,8 @@ bool Compiler::fgAnyIntraHandlerPreds(BasicBlock* block)
assert(block->hasHndIndex());
assert(fgFirstBlockOfHandler(block) == block); // this block is the first block of a handler
- flowList* pred;
-
- for (pred = block->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* predBlock = pred->getBlock();
-
if (fgIsIntraHandlerPred(predBlock, block))
{
// We have a predecessor that is not from our try region
@@ -4395,10 +4379,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
bPrev->bbCatchTyp = block->bbCatchTyp;
block->bbCatchTyp = BBCT_NONE;
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
-
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
/* Multiple pointers in EHblkDsc can point to same block. We can not early out after the first match. */
if (HBtab->ebdTryBeg == block)
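
The loops above all migrate from manual begin/end pointer pairs over compHndBBtab to a range-based adapter, EHClauses(this). A minimal model of such an adapter is sketched below, assuming only that the EH descriptors live in a contiguous array with a count; the real iterator in the JIT yields EHblkDsc* directly, which is why the loop variables above are declared as pointers.

    #include <cstdio>

    struct EHblkDscStub { int id; }; // stand-in for EHblkDsc

    // Range whose iterator yields element *pointers*, matching the
    // "for (EHblkDsc* const HBtab : EHClauses(this))" shape above.
    class EHClausesRange
    {
    public:
        class iterator
        {
            EHblkDscStub* m_cur;
        public:
            explicit iterator(EHblkDscStub* cur) : m_cur(cur) {}
            EHblkDscStub* operator*() const { return m_cur; }
            iterator& operator++() { ++m_cur; return *this; }
            bool operator!=(const iterator& rhs) const { return m_cur != rhs.m_cur; }
        };

        EHClausesRange(EHblkDscStub* table, unsigned count) : m_table(table), m_count(count) {}
        iterator begin() const { return iterator(m_table); }
        iterator end() const { return iterator(m_table + m_count); }

    private:
        EHblkDscStub* m_table;
        unsigned m_count;
    };

    int main()
    {
        EHblkDscStub table[3] = {{0}, {1}, {2}};
        for (EHblkDscStub* const clause : EHClausesRange(table, 3))
            printf("clause %d\n", clause->id);
    }
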
diff --git a/src/coreclr/jit/lclmorph.cpp b/src/coreclr/jit/lclmorph.cpp
index bfa47db004ab6a..349c5ef88e542f 100644
--- a/src/coreclr/jit/lclmorph.cpp
+++ b/src/coreclr/jit/lclmorph.cpp
@@ -1212,12 +1212,12 @@ void Compiler::fgMarkAddressExposedLocals()
LocalAddressVisitor visitor(this);
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Make the current basic block address available globally
compCurBB = block;
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
visitor.VisitStmt(stmt);
}
diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp
index e517df3bde5ea9..9f1c01042a4b8a 100644
--- a/src/coreclr/jit/lclvars.cpp
+++ b/src/coreclr/jit/lclvars.cpp
@@ -4246,7 +4246,7 @@ void Compiler::lvaMarkLocalVars(BasicBlock* block, bool isRecompute)
JITDUMP("\n*** %s local variables in block " FMT_BB " (weight=%s)\n", isRecompute ? "recomputing" : "marking",
block->bbNum, refCntWtd2str(block->getBBWeight(this)));
- for (Statement* stmt : StatementList(block->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
MarkLocalVarsVisitor visitor(this, block, stmt, isRecompute);
DISPSTMT(stmt);
@@ -4514,7 +4514,7 @@ void Compiler::lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers)
JITDUMP("\n*** lvaComputeRefCounts -- explicit counts ***\n");
// Second, account for all explicit local variable references
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->IsLIR())
{
@@ -6273,15 +6273,25 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
//
// Inlining done under OSR may introduce new reporting, in which case the OSR frame
// must allocate a slot.
- if (!opts.IsOSR() && lvaReportParamTypeArg())
+ if (lvaReportParamTypeArg())
{
#ifdef JIT32_GCENCODER
noway_assert(codeGen->isFramePointerUsed());
#endif
- // For CORINFO_CALLCONV_PARAMTYPE (if needed)
- lvaIncrementFrameSize(TARGET_POINTER_SIZE);
- stkOffs -= TARGET_POINTER_SIZE;
- lvaCachedGenericContextArgOffs = stkOffs;
+ if (opts.IsOSR())
+ {
+ PatchpointInfo* ppInfo = info.compPatchpointInfo;
+ assert(ppInfo->HasGenericContextArgOffset());
+ const int originalOffset = ppInfo->GenericContextArgOffset();
+ lvaCachedGenericContextArgOffs = originalFrameStkOffs + originalOffset;
+ }
+ else
+ {
+ // For CORINFO_CALLCONV_PARAMTYPE (if needed)
+ lvaIncrementFrameSize(TARGET_POINTER_SIZE);
+ stkOffs -= TARGET_POINTER_SIZE;
+ lvaCachedGenericContextArgOffs = stkOffs;
+ }
}
#ifndef JIT32_GCENCODER
else if (lvaKeepAliveAndReportThis())
@@ -6292,7 +6302,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
PatchpointInfo* ppInfo = info.compPatchpointInfo;
if (ppInfo->HasKeptAliveThis())
{
- int originalOffset = ppInfo->KeptAliveThisOffset();
+ const int originalOffset = ppInfo->KeptAliveThisOffset();
lvaCachedGenericContextArgOffs = originalFrameStkOffs + originalOffset;
canUseExistingSlot = true;
}
@@ -7753,25 +7763,24 @@ int Compiler::lvaGetCallerSPRelativeOffset(unsigned varNum)
return lvaToCallerSPRelativeOffset(varDsc->GetStackOffset(), varDsc->lvFramePointerBased);
}
-int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased) const
+//-----------------------------------------------------------------------------
+// lvaToCallerSPRelativeOffset: translate a frame offset into an offset from
+// the caller's stack pointer.
+//
+// Arguments:
+// offset - frame offset
+// isFpBased - if true, offset is from FP, otherwise offset is from SP
+// forRootFrame - if the current method is an OSR method, adjust the offset
+// to be relative to the SP for the root method, instead of being relative
+// to the SP for the OSR method.
+//
+// Returns:
+// the equivalent offset from the caller's stack pointer
+//
+int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased, bool forRootFrame) const
{
assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);
- // TODO-Cleanup
- //
- // This current should not be called for OSR as caller SP relative
- // offsets computed below do not reflect the extra stack space
- // taken up by the original method frame.
- //
- // We should make it work.
- //
- // Instead we record the needed offsets in the patchpoint info
- // when doing the original method compile(see special offsets
- // in generatePatchpointInfo) and consume those values in the OSR
- // compile. If we fix this we may be able to reduce the size
- // of the patchpoint info and have less special casing for these
- // frame slots.
-
if (isFpBased)
{
offset += codeGen->genCallerSPtoFPdelta();
@@ -7781,6 +7790,30 @@ int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased) const
offset += codeGen->genCallerSPtoInitialSPdelta();
}
+#ifdef TARGET_AMD64
+ if (forRootFrame && opts.IsOSR())
+ {
+ // The offset computed above already includes the OSR frame adjustment, plus the
+ // pop of the "pseudo return address" from the OSR frame.
+ //
+ // To get to root method caller-SP, we need to subtract off the original frame
+ // size and the pushed return address and RBP for that frame (which we know is an
+ // RBP frame).
+ //
+ // ppInfo's FpToSpDelta also accounts for the popped pseudo return address
+ // between the original method frame and the OSR frame. So the net adjustment
+ // is simply FpToSpDelta plus one register.
+ //
+
+ const PatchpointInfo* const ppInfo = info.compPatchpointInfo;
+ const int adjustment = ppInfo->FpToSpDelta() + REGSIZE_BYTES;
+ offset -= adjustment;
+ }
+#else
+ // OSR NYI for other targets.
+ assert(!opts.IsOSR());
+#endif
+
return offset;
}
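
A worked instance of the AMD64 adjustment above, with assumed sizes (the real FpToSpDelta comes from the patchpoint info recorded while compiling the root method; per the comment above, the net adjustment is FpToSpDelta plus one register slot):

    #include <cassert>

    const int REGSIZE_BYTES = 8; // AMD64

    // Mirrors "offset -= ppInfo->FpToSpDelta() + REGSIZE_BYTES" above.
    int ToRootCallerSpOffset(int osrCallerSpOffset, int fpToSpDelta)
    {
        return osrCallerSpOffset - (fpToSpDelta + REGSIZE_BYTES);
    }

    int main()
    {
        // Assume a root frame with FpToSpDelta = 88: a slot 40 bytes below the
        // OSR method's caller-SP sits 40 + 88 + 8 = 136 bytes below the root
        // method's caller-SP.
        assert(ToRootCallerSpOffset(-40, 88) == -136);
    }
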
diff --git a/src/coreclr/jit/lir.cpp b/src/coreclr/jit/lir.cpp
index b34b35f8a9c367..db941e7638dcf6 100644
--- a/src/coreclr/jit/lir.cpp
+++ b/src/coreclr/jit/lir.cpp
@@ -1583,6 +1583,11 @@ LIR::Range& LIR::AsRange(BasicBlock* block)
return *static_cast<Range*>(block);
}
+const LIR::Range& LIR::AsRange(const BasicBlock* block)
+{
+ return *static_cast<const Range*>(block);
+}
+
//------------------------------------------------------------------------
// LIR::EmptyRange: Constructs and returns an empty range.
//
diff --git a/src/coreclr/jit/lir.h b/src/coreclr/jit/lir.h
index a700bb16728558..f205527df5c716 100644
--- a/src/coreclr/jit/lir.h
+++ b/src/coreclr/jit/lir.h
@@ -298,6 +298,7 @@ class LIR final
public:
static Range& AsRange(BasicBlock* block);
+ static const Range& AsRange(const BasicBlock* block);
static Range EmptyRange();
static Range SeqTree(Compiler* compiler, GenTree* tree);
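
The new const overload exists so read-only traversals can accept a const BasicBlock* without casting constness away. A self-contained illustration of the overload resolution it relies on, using stand-in types rather than the JIT's:

    #include <cstdio>

    struct Block { int payload = 42; };

    int& AsPayload(Block* b) { return b->payload; }
    const int& AsPayload(const Block* b) { return b->payload; }

    int main()
    {
        Block b;
        const Block* cb = &b;
        printf("%d\n", AsPayload(cb)); // a const argument binds to the const overload
        AsPayload(&b) = 7;             // the non-const overload still permits mutation
        printf("%d\n", b.payload);     // prints 7
    }
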
diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp
index 20366bd8f1b5e2..745cbb9d1a9af3 100644
--- a/src/coreclr/jit/liveness.cpp
+++ b/src/coreclr/jit/liveness.cpp
@@ -508,10 +508,10 @@ void Compiler::fgPerBlockLocalVarLiveness()
}
else
{
- for (Statement* stmt : StatementList(block->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
compCurStmt = stmt;
- for (GenTree* node = stmt->GetTreeList(); node != nullptr; node = node->gtNext)
+ for (GenTree* const node : stmt->TreeList())
{
fgPerNodeLocalVarLiveness(node);
}
@@ -680,8 +680,7 @@ void Compiler::fgDispDebugScopes()
{
printf("\nDebug scopes:\n");
- BasicBlock* block;
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
printf(FMT_BB ": ", block->bbNum);
dumpConvertedVarSet(this, block->bbScope);
@@ -719,7 +718,7 @@ void Compiler::fgExtendDbgScopes()
// Mark all tracked LocalVars live over their scope - walk the blocks
// keeping track of the current life, and assign it to the blocks.
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// If we get to a funclet, reset the scope lists and start again, since the block
// offsets will be out of order compared to the previous block.
@@ -772,8 +771,7 @@ void Compiler::fgExtendDbgScopes()
// Mark all tracked LocalVars live over their scope - walk the blocks
// keeping track of the current life, and assign it to the blocks.
- BasicBlock* block;
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Find scopes becoming alive. If there is a gap in the instr
// sequence, we need to process any scopes on those missing offsets.
@@ -915,7 +913,7 @@ void Compiler::fgExtendDbgLifetimes()
VARSET_TP initVars(VarSetOps::MakeEmpty(this)); // Vars which are artificially made alive
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
VarSetOps::ClearD(this, initVars);
@@ -949,19 +947,11 @@ void Compiler::fgExtendDbgLifetimes()
break;
case BBJ_SWITCH:
- {
- BasicBlock** jmpTab;
- unsigned jmpCnt;
-
- jmpCnt = block->bbJumpSwt->bbsCount;
- jmpTab = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- VarSetOps::UnionD(this, initVars, (*jmpTab)->bbScope);
- } while (++jmpTab, --jmpCnt);
- }
- break;
+ VarSetOps::UnionD(this, initVars, bTarget->bbScope);
+ }
+ break;
case BBJ_EHFINALLYRET:
case BBJ_RETURN:
@@ -2559,12 +2549,11 @@ void Compiler::fgInterBlockLocalVarLiveness()
// Variables involved in exception-handlers and finally blocks need
// to be specially marked
//
- BasicBlock* block;
VARSET_TP exceptVars(VarSetOps::MakeEmpty(this)); // vars live on entry to a handler
VARSET_TP finallyVars(VarSetOps::MakeEmpty(this)); // vars live on exit of a 'finally' block
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->hasEHBoundaryIn())
{
@@ -2641,7 +2630,7 @@ void Compiler::fgInterBlockLocalVarLiveness()
* Now fill in liveness info within each basic block - Backward DataFlow
*/
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
/* Tell everyone what block we're working on */
@@ -2781,7 +2770,7 @@ void Compiler::fgDispBBLiveness(BasicBlock* block)
void Compiler::fgDispBBLiveness()
{
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
fgDispBBLiveness(block);
}
diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp
index d9798d98e09a2a..0fe22420f783e4 100644
--- a/src/coreclr/jit/loopcloning.cpp
+++ b/src/coreclr/jit/loopcloning.cpp
@@ -1314,9 +1314,8 @@ bool Compiler::optIsLoopClonable(unsigned loopInd)
// for the cloned loop (and its embedded EH regions).
//
// Also, count the number of return blocks within the loop for future use.
- BasicBlock* stopAt = loop.lpBottom->bbNext;
- unsigned loopRetCount = 0;
- for (BasicBlock* blk = loop.lpFirst; blk != stopAt; blk = blk->bbNext)
+ unsigned loopRetCount = 0;
+ for (BasicBlock* const blk : loop.LoopBlocks())
{
if (blk->bbJumpKind == BBJ_RETURN)
{
@@ -1570,10 +1569,8 @@ void Compiler::optEnsureUniqueHead(unsigned loopInd, BasicBlock::weight_t ambien
BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone));
blockMap->Set(e, h2);
- for (flowList* predEntry = e->bbPreds; predEntry != nullptr; predEntry = predEntry->flNext)
+ for (BasicBlock* const predBlock : e->PredBlocks())
{
- BasicBlock* predBlock = predEntry->getBlock();
-
// Skip if predBlock is in the loop.
if (t->bbNum <= predBlock->bbNum && predBlock->bbNum <= b->bbNum)
{
@@ -1749,7 +1746,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
BasicBlock* newFirst = nullptr;
BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone));
- for (BasicBlock* blk = loop.lpFirst; blk != loop.lpBottom->bbNext; blk = blk->bbNext)
+ for (BasicBlock* const blk : loop.LoopBlocks())
{
BasicBlock* newBlk = fgNewBBafter(blk->bbJumpKind, newPred, /*extendRegion*/ true);
JITDUMP("Adding " FMT_BB " (copy of " FMT_BB ") after " FMT_BB "\n", newBlk->bbNum, blk->bbNum, newPred->bbNum);
@@ -1798,7 +1795,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
// Now go through the new blocks, remapping their jump targets within the loop
// and updating the preds lists.
- for (BasicBlock* blk = loop.lpFirst; blk != loop.lpBottom->bbNext; blk = blk->bbNext)
+ for (BasicBlock* const blk : loop.LoopBlocks())
{
BasicBlock* newblk = nullptr;
bool b = blockMap->Lookup(blk, &newblk);
@@ -1830,14 +1827,11 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
break;
case BBJ_SWITCH:
- {
- for (unsigned i = 0; i < newblk->bbJumpSwt->bbsCount; i++)
+ for (BasicBlock* const switchDest : newblk->SwitchTargets())
{
- BasicBlock* switchDest = newblk->bbJumpSwt->bbsDstTab[i];
fgAddRefPred(switchDest, newblk);
}
- }
- break;
+ break;
default:
break;
@@ -1847,15 +1841,15 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
#ifdef DEBUG
// Display the preds for the new blocks, after all the new blocks have been redirected.
JITDUMP("Preds after loop copy:\n");
- for (BasicBlock* blk = loop.lpFirst; blk != loop.lpBottom->bbNext; blk = blk->bbNext)
+ for (BasicBlock* const blk : loop.LoopBlocks())
{
BasicBlock* newblk = nullptr;
bool b = blockMap->Lookup(blk, &newblk);
assert(b && newblk != nullptr);
JITDUMP(FMT_BB ":", newblk->bbNum);
- for (flowList* pred = newblk->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : newblk->PredBlocks())
{
- JITDUMP(" " FMT_BB, pred->getBlock()->bbNum);
+ JITDUMP(" " FMT_BB, predBlock->bbNum);
}
JITDUMP("\n");
}
@@ -2311,21 +2305,15 @@ Compiler::fgWalkResult Compiler::optCanOptimizeByLoopCloningVisitor(GenTree** pT
//
bool Compiler::optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context)
{
- noway_assert(loopNum < optLoopCount);
-
- LoopDsc* pLoop = &optLoopTable[loopNum];
-
- BasicBlock* head = pLoop->lpHead;
- BasicBlock* beg = head->bbNext; // should this be pLoop->lpFirst or pLoop->lpTop instead?
- BasicBlock* end = pLoop->lpBottom;
+ JITDUMP("Checking loop " FMT_LP " for optimization candidates\n", loopNum);
- JITDUMP("Checking blocks " FMT_BB ".." FMT_BB " for optimization candidates\n", beg->bbNum, end->bbNum);
+ const LoopDsc& loop = optLoopTable[loopNum];
LoopCloneVisitorInfo info(context, loopNum, nullptr);
- for (BasicBlock* block = beg; block != end->bbNext; block = block->bbNext)
+ for (BasicBlock* const block : loop.LoopBlocks())
{
compCurBB = block;
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
info.stmt = stmt;
const bool lclVarsOnly = false;
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 559daa9e268078..389c29f7643056 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -5888,7 +5888,7 @@ PhaseStatus Lowering::DoPhase()
}
#endif // !defined(TARGET_64BIT)
- for (BasicBlock* block = comp->fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : comp->Blocks())
{
/* Make the block publicly available */
comp->compCurBB = block;
@@ -6210,17 +6210,22 @@ bool Lowering::IndirsAreEquivalent(GenTree* candidate, GenTree* storeInd)
}
}
-/** Test whether the two given nodes are the same leaves.
- * Right now, only constant integers and local variables are supported
- */
+//------------------------------------------------------------------------
+// NodesAreEquivalentLeaves: Check whether the two given nodes are the same leaves.
+//
+// Arguments:
+// tree1 and tree2 - the nodes to be checked.
+//
+// Return Value:
+// Returns true if they are the same leaves, false otherwise.
+//
+// static
bool Lowering::NodesAreEquivalentLeaves(GenTree* tree1, GenTree* tree2)
{
- if (tree1 == nullptr && tree2 == nullptr)
+ if (tree1 == tree2)
{
return true;
}
- // both null, they are equivalent, otherwise if either is null not equivalent
if (tree1 == nullptr || tree2 == nullptr)
{
return false;
@@ -6247,7 +6252,7 @@ bool Lowering::NodesAreEquivalentLeaves(GenTree* tree1, GenTree* tree2)
switch (tree1->OperGet())
{
case GT_CNS_INT:
- return tree1->AsIntCon()->gtIconVal == tree2->AsIntCon()->gtIconVal &&
+ return tree1->AsIntCon()->IconValue() == tree2->AsIntCon()->IconValue() &&
tree1->IsIconHandle() == tree2->IsIconHandle();
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
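
The rewritten early-out in NodesAreEquivalentLeaves compares the pointers themselves, covering both the old both-null case and the identical-node case in a single test. The same shape in a self-contained sketch, with a stand-in payload comparison in place of the per-oper checks:

    #include <cassert>

    struct Node { int val; };

    bool EquivalentLeaves(const Node* a, const Node* b)
    {
        if (a == b)                       // same node, or both nullptr
            return true;
        if (a == nullptr || b == nullptr) // exactly one side missing
            return false;
        return a->val == b->val;          // stand-in for the per-oper comparison
    }

    int main()
    {
        Node n{5};
        assert(EquivalentLeaves(nullptr, nullptr));
        assert(EquivalentLeaves(&n, &n));
        assert(!EquivalentLeaves(&n, nullptr));
    }
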
diff --git a/src/coreclr/jit/lowerxarch.cpp b/src/coreclr/jit/lowerxarch.cpp
index 4d4253006047cd..55bfab94f6f5f8 100644
--- a/src/coreclr/jit/lowerxarch.cpp
+++ b/src/coreclr/jit/lowerxarch.cpp
@@ -216,11 +216,18 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
if (fill == 0)
{
- // If the size is multiple of XMM register size there's no need to load 0 in a GPR,
- // codegen will use xorps to generate 0 directly in the temporary XMM register.
- if ((size % XMM_REGSIZE_BYTES) == 0)
+ if (size >= XMM_REGSIZE_BYTES)
{
- src->SetContained();
+ const bool canUse16BytesSimdMov = !blkNode->IsOnHeapAndContainsReferences();
+#ifdef TARGET_AMD64
+ const bool willUseOnlySimdMov = canUse16BytesSimdMov && (size % 16 == 0);
+#else
+ const bool willUseOnlySimdMov = (size % 8 == 0);
+#endif
+ if (willUseOnlySimdMov)
+ {
+ src->SetContained();
+ }
}
}
#ifdef TARGET_AMD64
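
The reworked zero-fill case contains the source constant only when the whole block can be cleared with SIMD stores alone; the IsOnHeapAndContainsReferences guard presumably exists because 16-byte stores do not update each contained GC pointer atomically. A self-contained model of the decision (XMM_REGSIZE_BYTES is 16 in the JIT; note the x86 branch in the diff keys purely off 8-byte multiples):

    #include <cstdio>

    const unsigned XMM_REGSIZE_BYTES = 16;

    bool WillUseOnlySimdMov(unsigned size, bool onHeapWithGcRefs, bool isAmd64)
    {
        if (size < XMM_REGSIZE_BYTES)
            return false; // too small for an XMM store; the zero comes from a GPR
        if (isAmd64)
            return !onHeapWithGcRefs && (size % 16 == 0);
        return (size % 8 == 0);
    }

    int main()
    {
        printf("%d\n", WillUseOnlySimdMov(32, false, true)); // 1: pure SIMD zeroing
        printf("%d\n", WillUseOnlySimdMov(24, false, true)); // 0: 8-byte tail needs a GPR
        printf("%d\n", WillUseOnlySimdMov(32, true, true));  // 0: GC refs on the heap
    }
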
diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp
index 0006d26451f07b..e137b7f678a0d7 100644
--- a/src/coreclr/jit/lsra.cpp
+++ b/src/coreclr/jit/lsra.cpp
@@ -831,8 +831,10 @@ void LinearScan::setBlockSequence()
}
#endif // TRACK_LSRA_STATS
+ JITDUMP("Start LSRA Block Sequence: \n");
for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = nextBlock)
{
+ JITDUMP("Current block: " FMT_BB "\n", block->bbNum);
blockSequence[bbSeqCount] = block;
markBlockVisited(block);
bbSeqCount++;
@@ -866,9 +868,8 @@ void LinearScan::setBlockSequence()
}
bool hasUniquePred = (block->GetUniquePred(compiler) != nullptr);
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* predBlock = pred->getBlock();
if (!hasUniquePred)
{
if (predBlock->NumSucc(compiler) > 1)
@@ -903,13 +904,13 @@ void LinearScan::setBlockSequence()
// First, update the NORMAL successors of the current block, adding them to the worklist
// according to the desired order. We will handle the EH successors below.
- bool checkForCriticalOutEdge = (block->NumSucc(compiler) > 1);
+ const unsigned numSuccs = block->NumSucc(compiler);
+ bool checkForCriticalOutEdge = (numSuccs > 1);
if (!checkForCriticalOutEdge && block->bbJumpKind == BBJ_SWITCH)
{
assert(!"Switch with single successor");
}
- const unsigned numSuccs = block->NumSucc(compiler);
for (unsigned succIndex = 0; succIndex < numSuccs; succIndex++)
{
BasicBlock* succ = block->GetSucc(succIndex, compiler);
@@ -931,6 +932,7 @@ void LinearScan::setBlockSequence()
// (i.e. pred-first or random, since layout order is handled above).
if (!BlockSetOps::IsMember(compiler, readySet, succ->bbNum))
{
+ JITDUMP("\tSucc block: " FMT_BB, succ->bbNum);
addToBlockSequenceWorkList(readySet, succ, predSet);
BlockSetOps::AddElemD(compiler, readySet, succ->bbNum);
}
@@ -954,27 +956,18 @@ void LinearScan::setBlockSequence()
// If we don't encounter all blocks by traversing the regular successor links, do a full
// traversal of all the blocks, and add them in layout order.
// This may include:
- // - internal-only blocks (in the fgAddCodeList) which may not be in the flow graph
- // (these are not even in the bbNext links).
+ // - internal-only blocks which may not be in the flow graph
// - blocks that have become unreachable due to optimizations, but that are strongly
// connected (these are not removed)
// - EH blocks
- for (Compiler::AddCodeDsc* desc = compiler->fgAddCodeList; desc != nullptr; desc = desc->acdNext)
+ for (BasicBlock* const seqBlock : compiler->Blocks())
{
- if (!isBlockVisited(block))
+ if (!isBlockVisited(seqBlock))
{
- addToBlockSequenceWorkList(readySet, block, predSet);
- BlockSetOps::AddElemD(compiler, readySet, block->bbNum);
- }
- }
-
- for (BasicBlock* block = compiler->fgFirstBB; block; block = block->bbNext)
- {
- if (!isBlockVisited(block))
- {
- addToBlockSequenceWorkList(readySet, block, predSet);
- BlockSetOps::AddElemD(compiler, readySet, block->bbNum);
+ JITDUMP("\tUnvisited block: " FMT_BB, seqBlock->bbNum);
+ addToBlockSequenceWorkList(readySet, seqBlock, predSet);
+ BlockSetOps::AddElemD(compiler, readySet, seqBlock->bbNum);
}
}
verifiedAllBBs = true;
@@ -989,25 +982,17 @@ void LinearScan::setBlockSequence()
#ifdef DEBUG
// Make sure that we've visited all the blocks.
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
assert(isBlockVisited(block));
}
- JITDUMP("LSRA Block Sequence: ");
+ JITDUMP("Final LSRA Block Sequence: \n");
int i = 1;
for (BasicBlock *block = startBlockSequence(); block != nullptr; ++i, block = moveToNextBlock())
{
JITDUMP(FMT_BB, block->bbNum);
-
- if (block->isMaxBBWeight())
- {
- JITDUMP("(MAX) ");
- }
- else
- {
- JITDUMP("(%6s) ", refCntWtd2str(block->getBBWeight(compiler)));
- }
+ JITDUMP("(%6s) ", refCntWtd2str(block->getBBWeight(compiler)));
if (blockInfo[block->bbNum].hasEHBoundaryIn)
{
@@ -1111,18 +1096,17 @@ void LinearScan::addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlo
// Get predSet of block
BlockSetOps::ClearD(compiler, predSet);
- flowList* pred;
- for (pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BlockSetOps::AddElemD(compiler, predSet, pred->getBlock()->bbNum);
+ BlockSetOps::AddElemD(compiler, predSet, predBlock->bbNum);
}
// If either a rarely run block or all its preds are already sequenced, use block's weight to sequence
bool useBlockWeight = block->isRunRarely() || BlockSetOps::IsSubset(compiler, sequencedBlockSet, predSet);
+ JITDUMP(", Criteria: %s", useBlockWeight ? "weight" : "bbNum");
BasicBlockList* prevNode = nullptr;
BasicBlockList* nextNode = blockSequenceWorkList;
-
while (nextNode != nullptr)
{
int seqResult;
@@ -1160,6 +1144,17 @@ void LinearScan::addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlo
{
prevNode->next = newListNode;
}
+
+#ifdef DEBUG
+ nextNode = blockSequenceWorkList;
+ JITDUMP(", Worklist: [");
+ while (nextNode != nullptr)
+ {
+ JITDUMP(FMT_BB " ", nextNode->block->bbNum);
+ nextNode = nextNode->next;
+ }
+ JITDUMP("]\n");
+#endif
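+
+ // With JitDump enabled, each worklist insertion now traces a line like (illustrative):
+ //     Succ block: BB03, Criteria: weight, Worklist: [BB03 BB05 ]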
}
void LinearScan::removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode)
@@ -1397,9 +1392,7 @@ void Interval::setLocalNumber(Compiler* compiler, unsigned lclNum, LinearScan* l
//
void LinearScan::identifyCandidatesExceptionDataflow()
{
- BasicBlock* block;
-
- foreach_block(compiler, block)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->hasEHBoundaryIn())
{
@@ -2415,9 +2408,8 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block,
}
else
{
- for (flowList* pred = otherBlock->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const otherPred : otherBlock->PredBlocks())
{
- BasicBlock* otherPred = pred->getBlock();
if (otherPred->bbNum == blockInfo[otherBlock->bbNum].predBBNum)
{
predBlock = otherPred;
@@ -2435,10 +2427,8 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block,
}
else
{
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const candidatePredBlock : block->PredBlocks())
{
- BasicBlock* candidatePredBlock = pred->getBlock();
-
if (isBlockVisited(candidatePredBlock))
{
if ((predBlock == nullptr) || (predBlock->bbWeight < candidatePredBlock->bbWeight))
@@ -6945,7 +6935,7 @@ void LinearScan::resolveRegisters()
printf("Has %sCritical Edges\n\n", hasCriticalEdges ? "" : "No ");
printf("Prior to Resolution\n");
- foreach_block(compiler, block)
+ for (BasicBlock* const block : compiler->Blocks())
{
printf("\n" FMT_BB, block->bbNum);
if (block->hasEHBoundaryIn())
@@ -7857,8 +7847,6 @@ void LinearScan::resolveEdges()
return;
}
- BasicBlock *block, *prevBlock = nullptr;
-
// Handle all the critical edges first.
// We will try to avoid resolution across critical edges in cases where all the critical-edge
// targets of a block have the same home. We will then split the edges only for the
@@ -7867,7 +7855,7 @@ void LinearScan::resolveEdges()
if (hasCriticalEdges)
{
- foreach_block(compiler, block)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
@@ -7878,12 +7866,10 @@ void LinearScan::resolveEdges()
{
handleOutgoingCriticalEdges(block);
}
- prevBlock = block;
}
}
- prevBlock = nullptr;
- foreach_block(compiler, block)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
@@ -7943,7 +7929,7 @@ void LinearScan::resolveEdges()
// would only improve the debug case, and would clutter up the code somewhat.
if (compiler->fgBBNumMax > bbNumMaxBeforeResolution)
{
- foreach_block(compiler, block)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
@@ -8000,16 +7986,15 @@ void LinearScan::resolveEdges()
#ifdef DEBUG
// Make sure the varToRegMaps match up on all edges.
bool foundMismatch = false;
- foreach_block(compiler, block)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->isEmpty() && block->bbNum > bbNumMaxBeforeResolution)
{
continue;
}
VarToRegMap toVarToRegMap = getInVarToRegMap(block->bbNum);
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* predBlock = pred->getBlock();
VarToRegMap fromVarToRegMap = getOutVarToRegMap(predBlock->bbNum);
VarSetOps::Iter iter(compiler, block->bbLiveIn);
unsigned varIndex = 0;
@@ -8636,7 +8621,8 @@ void LinearScan::dumpLsraStats(FILE* file)
fprintf(file, "----------\n");
#ifdef DEBUG
- fprintf(file, "Register selection order: %S\n", JitConfig.JitLsraOrdering());
+ fprintf(file, "Register selection order: %S\n",
+ JitConfig.JitLsraOrdering() == nullptr ? W("ABCDEFGHIJKLMNOPQ") : JitConfig.JitLsraOrdering());
#endif
fprintf(file, "Total Tracked Vars: %d\n", compiler->lvaTrackedCount);
fprintf(file, "Total Reg Cand Vars: %d\n", regCandidateVarCount);
@@ -8655,6 +8641,7 @@ void LinearScan::dumpLsraStats(FILE* file)
bool addedBlockHeader = false;
bool anyNonZeroStat = false;
+ // Iterate for block 0
for (int statIndex = 0; statIndex < LsraStat::COUNT; statIndex++)
{
unsigned lsraStat = blockInfo[0].stats[statIndex];
@@ -8682,7 +8669,8 @@ void LinearScan::dumpLsraStats(FILE* file)
fprintf(file, "\n");
}
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ // Iterate for remaining blocks
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
@@ -8727,7 +8715,6 @@ void LinearScan::dumpLsraStats(FILE* file)
{
fprintf(file, "..........\n");
}
- // TODO-review: I don't see a point of displaying Stats (SpillCount, etc.) if they are zero. Thoughts?
if ((regSelectI < firstRegSelStat) || (sumStats[regSelectI] != 0))
{
// Print register selection stats
@@ -8775,7 +8762,7 @@ void LinearScan::dumpLsraStatsCsv(FILE* file)
}
// blocks
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
@@ -8795,6 +8782,48 @@ void LinearScan::dumpLsraStatsCsv(FILE* file)
}
fprintf(file, ",%.2f\n", compiler->info.compPerfScore);
}
+
+// -----------------------------------------------------------
+// dumpLsraStatsSummary - dumps the LSRA stats summary to the given file
+//
+// Arguments:
+// file - file to which stats are to be written.
+//
+void LinearScan::dumpLsraStatsSummary(FILE* file)
+{
+ unsigned sumStats[LsraStat::STAT_FREE] = {0};
+ BasicBlock::weight_t wtdStats[LsraStat::STAT_FREE] = {0.0};
+
+ // Iterate for block 0
+ for (int statIndex = 0; statIndex < LsraStat::STAT_FREE; statIndex++)
+ {
+ unsigned lsraStat = blockInfo[0].stats[statIndex];
+ sumStats[statIndex] += lsraStat;
+ wtdStats[statIndex] += (lsraStat * blockInfo[0].weight);
+ }
+
+ // Iterate for remaining blocks
+ for (BasicBlock* const block : compiler->Blocks())
+ {
+ if (block->bbNum > bbNumMaxBeforeResolution)
+ {
+ continue;
+ }
+
+ for (int statIndex = 0; statIndex < LsraStat::STAT_FREE; statIndex++)
+ {
+ unsigned lsraStat = blockInfo[block->bbNum].stats[statIndex];
+ sumStats[statIndex] += lsraStat;
+ wtdStats[statIndex] += (lsraStat * block->bbWeight);
+ }
+ }
+
+ for (int regSelectI = 0; regSelectI < LsraStat::STAT_FREE; regSelectI++)
+ {
+ fprintf(file, ", %s %u %sWt %f", getStatName(regSelectI), sumStats[regSelectI], getStatName(regSelectI),
+ wtdStats[regSelectI]);
+ }
+}
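+
+// Example (illustrative; stat names are placeholders) of the summary emitted above,
+// one ", <stat> <sum> <stat>Wt <weighted>" fragment per LsraStat:
+//     , SpillCount 4 SpillCountWt 12.000000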
#endif // TRACK_LSRA_STATS
#ifdef DEBUG
@@ -10618,7 +10647,7 @@ void LinearScan::verifyFinalAllocation()
// Now, verify the resolution blocks.
// Currently these are nearly always at the end of the method, but that may not always be the case.
// So, we'll go through all the BBs looking for blocks whose bbNum is greater than bbNumMaxBeforeResolution.
- for (BasicBlock* currentBlock = compiler->fgFirstBB; currentBlock != nullptr; currentBlock = currentBlock->bbNext)
+ for (BasicBlock* const currentBlock : compiler->Blocks())
{
if (currentBlock->bbNum > bbNumMaxBeforeResolution)
{
@@ -11229,13 +11258,36 @@ void LinearScan::RegisterSelection::try_SPILL_COST()
continue;
}
- float currentSpillWeight = linearScan->spillCost[spillCandidateRegNum];
-#ifdef TARGET_ARM
- if (currentInterval->registerType == TYP_DOUBLE)
+ float currentSpillWeight = 0;
+ RefPosition* recentRefPosition = spillCandidateRegRecord->assignedInterval != nullptr
+ ? spillCandidateRegRecord->assignedInterval->recentRefPosition
+ : nullptr;
+ if ((recentRefPosition != nullptr) && (recentRefPosition->RegOptional()) &&
+ !(currentInterval->isLocalVar && recentRefPosition->IsActualRef()))
{
- currentSpillWeight = max(currentSpillWeight, linearScan->spillCost[REG_NEXT(spillCandidateRegNum)]);
+ // We do not "spillAfter" if previous (recent) refPosition was regOptional or if it
+ // is not an actual ref. In those cases, we will reload in future (next) refPosition.
+ // For such cases, consider the spill cost of next refposition.
+ // See notes in "spillInterval()".
+ RefPosition* reloadRefPosition = spillCandidateRegRecord->assignedInterval->getNextRefPosition();
+ if (reloadRefPosition != nullptr)
+ {
+ currentSpillWeight = linearScan->getWeight(reloadRefPosition);
+ }
}
+
+ // Only consider spillCost if we were not able to calculate the weight of the reloadRefPosition.
+ if (currentSpillWeight == 0)
+ {
+ currentSpillWeight = linearScan->spillCost[spillCandidateRegNum];
+#ifdef TARGET_ARM
+ if (currentInterval->registerType == TYP_DOUBLE)
+ {
+ currentSpillWeight = max(currentSpillWeight, linearScan->spillCost[REG_NEXT(spillCandidateRegNum)]);
+ }
#endif
+ }
+
if (currentSpillWeight < bestSpillWeight)
{
bestSpillWeight = currentSpillWeight;
diff --git a/src/coreclr/jit/lsra.h b/src/coreclr/jit/lsra.h
index d944c4530739d5..eda5d014c658a7 100644
--- a/src/coreclr/jit/lsra.h
+++ b/src/coreclr/jit/lsra.h
@@ -1455,6 +1455,7 @@ class LinearScan : public LinearScanInterface
public:
virtual void dumpLsraStatsCsv(FILE* file);
+ virtual void dumpLsraStatsSummary(FILE* file);
static const char* getStatName(unsigned stat);
#define INTRACK_STATS(x) x
diff --git a/src/coreclr/jit/lsraarm64.cpp b/src/coreclr/jit/lsraarm64.cpp
index 223dc906badbf2..a7a14fdfe54f96 100644
--- a/src/coreclr/jit/lsraarm64.cpp
+++ b/src/coreclr/jit/lsraarm64.cpp
@@ -1112,54 +1112,11 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree)
// RMW intrinsic operands don't have to be delayFree when they can be assigned the same register as op1Reg
// (i.e. the register that corresponds to the read-modify-write operand) and one of them is the last use.
- bool op2DelayFree = isRMW;
- bool op3DelayFree = isRMW;
- bool op4DelayFree = isRMW;
-
assert(intrin.op1 != nullptr);
- if (isRMW && intrin.op1->OperIs(GT_LCL_VAR))
- {
- unsigned int varNum1 = intrin.op1->AsLclVar()->GetLclNum();
- bool op1LastUse = false;
-
- unsigned int varNum2 = BAD_VAR_NUM;
- unsigned int varNum3 = BAD_VAR_NUM;
- unsigned int varNum4 = BAD_VAR_NUM;
-
- if (intrin.op2->OperIs(GT_LCL_VAR))
- {
- varNum2 = intrin.op2->AsLclVar()->GetLclNum();
- op1LastUse |= ((varNum1 == varNum2) && intrin.op2->HasLastUse());
- }
-
- if (intrin.op3 != nullptr)
- {
- if (intrin.op3->OperIs(GT_LCL_VAR))
- {
- varNum3 = intrin.op3->AsLclVar()->GetLclNum();
- op1LastUse |= ((varNum1 == varNum3) && intrin.op3->HasLastUse());
- }
-
- if ((intrin.op4 != nullptr) && intrin.op4->OperIs(GT_LCL_VAR))
- {
- varNum4 = intrin.op4->AsLclVar()->GetLclNum();
- op1LastUse |= ((varNum1 == varNum4) && intrin.op4->HasLastUse());
- }
- }
-
- if (op1LastUse)
- {
- op2DelayFree = (varNum1 != varNum2);
- op3DelayFree = (varNum1 != varNum3);
- op4DelayFree = (varNum1 != varNum4);
- }
- }
-
+ bool forceOp2DelayFree = false;
if ((intrin.id == NI_Vector64_GetElement) || (intrin.id == NI_Vector128_GetElement))
{
- assert(!op2DelayFree);
-
if (!intrin.op2->IsCnsIntOrI() && (!intrin.op1->isContained() || intrin.op1->OperIsLocal()))
{
// If the index is not a constant and the object is not contained or is a local
@@ -1168,7 +1125,7 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree)
// TODO-Cleanup: An internal register will never clobber a source; this code actually
// ensures that the index (op2) doesn't interfere with the target.
buildInternalIntRegisterDefForNode(intrinsicTree);
- op2DelayFree = true;
+ forceOp2DelayFree = true;
}
if (!intrin.op2->IsCnsIntOrI() && !intrin.op1->isContained())
@@ -1179,15 +1136,22 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree)
}
}
- srcCount += op2DelayFree ? BuildDelayFreeUses(intrin.op2) : BuildOperandUses(intrin.op2);
+ if (forceOp2DelayFree)
+ {
+ srcCount += BuildDelayFreeUses(intrin.op2);
+ }
+ else
+ {
+ srcCount += isRMW ? BuildDelayFreeUses(intrin.op2, intrin.op1) : BuildOperandUses(intrin.op2);
+ }
if (intrin.op3 != nullptr)
{
- srcCount += op3DelayFree ? BuildDelayFreeUses(intrin.op3) : BuildOperandUses(intrin.op3);
+ srcCount += isRMW ? BuildDelayFreeUses(intrin.op3, intrin.op1) : BuildOperandUses(intrin.op3);
if (intrin.op4 != nullptr)
{
- srcCount += op4DelayFree ? BuildDelayFreeUses(intrin.op4) : BuildOperandUses(intrin.op4);
+ srcCount += isRMW ? BuildDelayFreeUses(intrin.op4, intrin.op1) : BuildOperandUses(intrin.op4);
}
}
}
diff --git a/src/coreclr/jit/lsrabuild.cpp b/src/coreclr/jit/lsrabuild.cpp
index db02482b6b436a..dcea8813119196 100644
--- a/src/coreclr/jit/lsrabuild.cpp
+++ b/src/coreclr/jit/lsrabuild.cpp
@@ -2064,7 +2064,7 @@ void LinearScan::buildIntervals()
printf("\n-----------------\n");
printf("LIVENESS:\n");
printf("-----------------\n");
- foreach_block(compiler, block)
+ for (BasicBlock* const block : compiler->Blocks())
{
printf(FMT_BB " use def in out\n", block->bbNum);
dumpConvertedVarSet(compiler, block->bbVarUse);
@@ -2584,7 +2584,7 @@ void LinearScan::buildIntervals()
#ifdef DEBUG
// Make sure we don't have any blocks that were not visited
- foreach_block(compiler, block)
+ for (BasicBlock* const block : compiler->Blocks())
{
assert(isBlockVisited(block));
}
@@ -3097,6 +3097,10 @@ int LinearScan::BuildDelayFreeUses(GenTree* node, GenTree* rmwNode, regMaskTP ca
}
if (use != nullptr)
{
+ // If node != rmwNode, then node should definitely be marked as "delayFree".
+ // However, if node == rmwNode, we can mark node as "delayFree" only if neither
+ // node nor rmwNode is a last use. If either of them is a last use, we can
+ // safely reuse the rmwNode's register as the destination.
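+ // E.g. (illustrative): for an RMW intrinsic computing "x = Op(x, x)", the second
+ // use of x shares the rmwNode's interval; if it is a last use, the destination
+ // may safely overwrite that register, so no delayFree is required.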
if ((use->getInterval() != rmwInterval) || (!rmwIsLastUse && !use->lastUse))
{
setDelayFree(use);
diff --git a/src/coreclr/jit/lsraxarch.cpp b/src/coreclr/jit/lsraxarch.cpp
index 1cd81124d05663..854e4521ec9001 100644
--- a/src/coreclr/jit/lsraxarch.cpp
+++ b/src/coreclr/jit/lsraxarch.cpp
@@ -1069,7 +1069,7 @@ int LinearScan::BuildCall(GenTreeCall* call)
// The return value will be on the X87 stack, and we will need to move it.
dstCandidates = allRegs(registerType);
#else // !TARGET_X86
- dstCandidates = RBM_FLOATRET;
+ dstCandidates = RBM_FLOATRET;
#endif // !TARGET_X86
}
else if (registerType == TYP_LONG)
@@ -1297,7 +1297,14 @@ int LinearScan::BuildBlockStore(GenTreeBlk* blkNode)
switch (blkNode->gtBlkOpKind)
{
case GenTreeBlk::BlkOpKindUnroll:
- if (size >= XMM_REGSIZE_BYTES)
+ {
+#ifdef TARGET_AMD64
+ const bool canUse16BytesSimdMov = !blkNode->IsOnHeapAndContainsReferences();
+ const bool willUseSimdMov = canUse16BytesSimdMov && (size >= 16);
+#else
+ const bool willUseSimdMov = (size >= 16);
+#endif
+ if (willUseSimdMov)
{
buildInternalFloatRegisterDefForNode(blkNode, internalFloatRegCandidates());
SetContainsAVXFlags();
@@ -1310,7 +1317,8 @@ int LinearScan::BuildBlockStore(GenTreeBlk* blkNode)
srcRegMask = allByteRegs();
}
#endif
- break;
+ }
+ break;
case GenTreeBlk::BlkOpKindRepInstr:
dstAddrRegMask = RBM_RDI;
@@ -2420,9 +2428,9 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree)
// Any pair of the index, mask, or destination registers should be different
srcCount += BuildOperandUses(op1);
- srcCount += BuildDelayFreeUses(op2);
- srcCount += BuildDelayFreeUses(op3);
- srcCount += BuildDelayFreeUses(op4);
+ srcCount += BuildDelayFreeUses(op2, op1);
+ srcCount += BuildDelayFreeUses(op3, op1);
+ srcCount += BuildDelayFreeUses(op4, op1);
// op5 should always be contained
assert(argList->Rest()->Current()->isContained());
@@ -2481,8 +2489,7 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree)
// When op2 is not contained or if we are producing a scalar value
// we need to mark it as delay free because the operand and target
// exist in the same register set.
-
- srcCount += BuildDelayFreeUses(op2);
+ srcCount += BuildDelayFreeUses(op2, op1);
}
else
{
@@ -2500,7 +2507,7 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree)
if (op3 != nullptr)
{
- srcCount += isRMW ? BuildDelayFreeUses(op3) : BuildOperandUses(op3);
+ srcCount += isRMW ? BuildDelayFreeUses(op3, op1) : BuildOperandUses(op3);
}
}
}
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index 546782cabe5eaf..6d7f8c7bc6fdc9 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -258,17 +258,26 @@ GenTree* Compiler::fgMorphCast(GenTree* tree)
}
#endif //! TARGET_64BIT
+#ifdef TARGET_ARMARCH
+ // AArch, unlike x86/amd64, has instructions that can cast directly from
+ // all integers (except for longs on AArch32 of course) to floats.
+ // Because there is no IL instruction conv.r4.un, uint/ulong -> float
+ // casts are always imported as CAST(float <- CAST(double <- uint/ulong)).
+ // We can eliminate the redundant intermediate cast as an optimization.
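+ // E.g. (illustrative): CAST(float <- CAST(double <- uint)) becomes CAST(float <- uint),
+ // which maps to a single scalar convert (ucvtf) on AArch64.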
+ else if ((dstType == TYP_FLOAT) && (srcType == TYP_DOUBLE) && oper->OperIs(GT_CAST)
#ifdef TARGET_ARM
- else if ((dstType == TYP_FLOAT) && (srcType == TYP_DOUBLE) && (oper->gtOper == GT_CAST) &&
- !varTypeIsLong(oper->AsCast()->CastOp()))
+ && !varTypeIsLong(oper->AsCast()->CastOp())
+#endif
+ )
{
- // optimization: conv.r4(conv.r8(?)) -> conv.r4(d)
- // except when the ultimate source is a long because there is no long-to-float helper, so it must be 2 step.
- // This happens semi-frequently because there is no IL 'conv.r4.un'
oper->gtType = TYP_FLOAT;
oper->CastToType() = TYP_FLOAT;
+
return fgMorphTree(oper);
}
+#endif // TARGET_ARMARCH
+
+#ifdef TARGET_ARM
// converts long/ulong --> float/double casts into helper calls.
else if (varTypeIsFloating(dstType) && varTypeIsLong(srcType))
{
@@ -5500,8 +5509,16 @@ GenTree* Compiler::fgMorphArrayIndex(GenTree* tree)
fgSetRngChkTarget(indexAddr);
}
- // Change `tree` into an indirection and return.
- tree->ChangeOper(GT_IND);
+ if (!tree->TypeIs(TYP_STRUCT))
+ {
+ tree->ChangeOper(GT_IND);
+ }
+ else
+ {
+ DEBUG_DESTROY_NODE(tree);
+ tree = gtNewObjNode(elemStructType, indexAddr);
+ INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
+ }
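+ // E.g. (illustrative): a TYP_STRUCT "arr[i]" is now morphed to OBJ(indexAddr), which
+ // preserves the element's struct layout, rather than to a plain IND.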
GenTreeIndir* const indir = tree->AsIndir();
indir->Addr() = indexAddr;
bool canCSE = indir->CanCSE();
@@ -5511,9 +5528,7 @@ GenTree* Compiler::fgMorphArrayIndex(GenTree* tree)
indir->SetDoNotCSE();
}
-#ifdef DEBUG
- indexAddr->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
-#endif // DEBUG
+ INDEBUG(indexAddr->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return indir;
}
@@ -8630,7 +8645,7 @@ void Compiler::fgMorphTailCallViaJitHelper(GenTreeCall* call)
GenTree* objp = call->gtCallThisArg->GetNode();
call->gtCallThisArg = nullptr;
- if ((call->IsDelegateInvoke() || call->IsVirtualVtable()) && !objp->IsLocal())
+ if ((call->IsDelegateInvoke() || call->IsVirtualVtable()) && !objp->OperIs(GT_LCL_VAR))
{
// tmp = "this"
unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
@@ -16933,7 +16948,7 @@ void Compiler::fgMorphStmts(BasicBlock* block, bool* lnot, bool* loadw)
fgCurrentlyInUseArgTemps = hashBv::Create(this);
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
if (fgRemoveRestOfBlock)
{
@@ -18061,9 +18076,9 @@ void Compiler::fgExpandQmarkNodes()
{
if (compQmarkUsed)
{
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
GenTree* expr = stmt->GetRootNode();
#ifdef DEBUG
@@ -18087,9 +18102,9 @@ void Compiler::fgExpandQmarkNodes()
*/
void Compiler::fgPostExpandQmarkChecks()
{
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
GenTree* expr = stmt->GetRootNode();
fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr);
diff --git a/src/coreclr/jit/objectalloc.cpp b/src/coreclr/jit/objectalloc.cpp
index e41a6117517968..720ec2585c4d61 100644
--- a/src/coreclr/jit/objectalloc.cpp
+++ b/src/coreclr/jit/objectalloc.cpp
@@ -213,11 +213,9 @@ void ObjectAllocator::MarkEscapingVarsAndBuildConnGraph()
}
}
- BasicBlock* block;
-
- foreach_block(comp, block)
+ for (BasicBlock* const block : comp->Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
BuildConnGraphVisitor buildConnGraphVisitor(this);
buildConnGraphVisitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
@@ -340,9 +338,7 @@ bool ObjectAllocator::MorphAllocObjNodes()
m_PossiblyStackPointingPointers = BitVecOps::MakeEmpty(&m_bitVecTraits);
m_DefinitelyStackPointingPointers = BitVecOps::MakeEmpty(&m_bitVecTraits);
- BasicBlock* block;
-
- foreach_block(comp, block)
+ for (BasicBlock* const block : comp->Blocks())
{
const bool basicBlockHasNewObj = (block->bbFlags & BBF_HAS_NEWOBJ) == BBF_HAS_NEWOBJ;
const bool basicBlockHasBackwardJump = (block->bbFlags & BBF_BACKWARD_JUMP) == BBF_BACKWARD_JUMP;
@@ -353,7 +349,7 @@ bool ObjectAllocator::MorphAllocObjNodes()
}
#endif // DEBUG
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
GenTree* stmtExpr = stmt->GetRootNode();
GenTree* op2 = nullptr;
@@ -917,11 +913,9 @@ void ObjectAllocator::RewriteUses()
}
};
- BasicBlock* block;
-
- foreach_block(comp, block)
+ for (BasicBlock* const block : comp->Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
RewriteUsesVisitor rewriteUsesVisitor(this);
rewriteUsesVisitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
diff --git a/src/coreclr/jit/optcse.cpp b/src/coreclr/jit/optcse.cpp
index c37e7207f088e4..0a202303efa8ff 100644
--- a/src/coreclr/jit/optcse.cpp
+++ b/src/coreclr/jit/optcse.cpp
@@ -779,7 +779,7 @@ unsigned Compiler::optValnumCSE_Locate()
}
#endif
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
/* Make the block publicly available */
@@ -790,13 +790,13 @@ unsigned Compiler::optValnumCSE_Locate()
noway_assert((block->bbFlags & (BBF_VISITED | BBF_MARKED)) == 0);
/* Walk the statement trees in this basic block */
- for (Statement* stmt : StatementList(block->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
const bool isReturn = stmt->GetRootNode()->OperIs(GT_RETURN);
/* We walk the tree in the forwards direction (bottom up) */
bool stmtHasArrLenCandidate = false;
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (tree->OperIsCompare() && stmtHasArrLenCandidate)
{
@@ -1009,7 +1009,7 @@ void Compiler::optValnumCSE_InitDataFlow()
BitVecOps::AddElemD(cseLivenessTraits, cseCallKillsMask, cseAvailBit);
}
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
/* Initialize the blocks's bbCseIn set */
@@ -1078,7 +1078,7 @@ void Compiler::optValnumCSE_InitDataFlow()
}
}
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// If the block doesn't contains a call then skip it...
//
@@ -1140,7 +1140,7 @@ void Compiler::optValnumCSE_InitDataFlow()
if (verbose)
{
bool headerPrinted = false;
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->bbCseGen != nullptr)
{
@@ -1328,7 +1328,7 @@ void Compiler::optValnumCSE_DataFlow()
{
printf("\nAfter performing DataFlow for ValnumCSE's\n");
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
printf(FMT_BB, block->bbNum);
printf(" cseIn = %s,", genES2str(cseLivenessTraits, block->bbCseIn));
@@ -1387,7 +1387,7 @@ void Compiler::optValnumCSE_Availablity()
#endif
EXPSET_TP available_cses = BitVecOps::MakeEmpty(cseLivenessTraits);
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Make the block publicly available
@@ -1399,11 +1399,11 @@ void Compiler::optValnumCSE_Availablity()
// Walk the statement trees in this basic block
- for (Statement* stmt : StatementList(block->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
// We walk the tree in the forwards direction (bottom up)
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
bool isUse = false;
bool isDef = false;
@@ -3832,13 +3832,13 @@ void Compiler::optOptimizeCSEs()
void Compiler::optCleanupCSEs()
{
// We must clear the BBF_VISITED and BBF_MARKED flags.
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// And clear all the "visited" bits on the block.
block->bbFlags &= ~(BBF_VISITED | BBF_MARKED);
// Walk the statement trees in this basic block.
- for (Statement* stmt : StatementList(block->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
// We must clear the gtCSEnum field.
for (GenTree* tree = stmt->GetRootNode(); tree; tree = tree->gtPrev)
@@ -3859,13 +3859,11 @@ void Compiler::optCleanupCSEs()
void Compiler::optEnsureClearCSEInfo()
{
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
assert((block->bbFlags & (BBF_VISITED | BBF_MARKED)) == 0);
- // Initialize 'stmt' to the first non-Phi statement
- // Walk the statement trees in this basic block
- for (Statement* stmt : StatementList(block->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
for (GenTree* tree = stmt->GetRootNode(); tree; tree = tree->gtPrev)
{
diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp
index 460314a327f8d8..a7df5b95905a2d 100644
--- a/src/coreclr/jit/optimizer.cpp
+++ b/src/coreclr/jit/optimizer.cpp
@@ -69,7 +69,7 @@ void Compiler::optSetBlockWeights()
bool firstBBDominatesAllReturns = true;
const bool usingProfileWeights = fgIsUsingProfileWeights();
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
/* Blocks that can't be reached via the first block are rarely executed */
if (!fgReachable(fgFirstBB, block))
@@ -168,19 +168,17 @@ void Compiler::optMarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk, bool ex
/* Build list of backedges for block begBlk */
flowList* backedgeList = nullptr;
- for (flowList* pred = begBlk->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : begBlk->PredBlocks())
{
/* Is this a backedge? */
- if (pred->getBlock()->bbNum >= begBlk->bbNum)
+ if (predBlock->bbNum >= begBlk->bbNum)
{
- flowList* flow = new (this, CMK_FlowList) flowList(pred->getBlock(), backedgeList);
+ backedgeList = new (this, CMK_FlowList) flowList(predBlock, backedgeList);
#if MEASURE_BLOCK_SIZE
genFlowNodeCnt += 1;
genFlowNodeSize += sizeof(flowList);
#endif // MEASURE_BLOCK_SIZE
-
- backedgeList = flow;
}
}
@@ -278,23 +276,20 @@ void Compiler::optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk)
noway_assert(!opts.MinOpts());
- BasicBlock* curBlk;
- unsigned backEdgeCount = 0;
+ unsigned backEdgeCount = 0;
- for (flowList* pred = begBlk->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : begBlk->PredBlocks())
{
- curBlk = pred->getBlock();
-
- /* is this a backward edge? (from curBlk to begBlk) */
+ /* is this a backward edge? (from predBlock to begBlk) */
- if (begBlk->bbNum > curBlk->bbNum)
+ if (begBlk->bbNum > predBlock->bbNum)
{
continue;
}
/* We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops */
- if ((curBlk->bbJumpKind != BBJ_COND) && (curBlk->bbJumpKind != BBJ_ALWAYS))
+ if ((predBlock->bbJumpKind != BBJ_COND) && (predBlock->bbJumpKind != BBJ_ALWAYS))
{
continue;
}
@@ -330,7 +325,7 @@ void Compiler::optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk)
}
#endif
- curBlk = begBlk;
+ BasicBlock* curBlk = begBlk;
while (true)
{
noway_assert(curBlk);
@@ -449,9 +444,6 @@ void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmar
switch (block->bbJumpKind)
{
- unsigned jumpCnt;
- BasicBlock** jumpTab;
-
case BBJ_NONE:
case BBJ_COND:
if (block->bbNext == loop.lpEntry)
@@ -475,17 +467,14 @@ void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmar
break;
case BBJ_SWITCH:
- jumpCnt = block->bbJumpSwt->bbsCount;
- jumpTab = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- noway_assert(*jumpTab);
- if ((*jumpTab) == loop.lpEntry)
+ if (bTarget == loop.lpEntry)
{
removeLoop = true;
+ break;
}
- } while (++jumpTab, --jumpCnt);
+ }
break;
default:
@@ -497,21 +486,17 @@ void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmar
/* Check if the entry has other predecessors outside the loop
* TODO: Replace this when predecessors are available */
- BasicBlock* auxBlock;
- for (auxBlock = fgFirstBB; auxBlock; auxBlock = auxBlock->bbNext)
+ for (BasicBlock* const auxBlock : Blocks())
{
/* Ignore blocks in the loop */
- if (auxBlock->bbNum > loop.lpHead->bbNum && auxBlock->bbNum <= loop.lpBottom->bbNum)
+ if (loop.lpContains(auxBlock))
{
continue;
}
switch (auxBlock->bbJumpKind)
{
- unsigned jumpCnt;
- BasicBlock** jumpTab;
-
case BBJ_NONE:
case BBJ_COND:
if (auxBlock->bbNext == loop.lpEntry)
@@ -535,17 +520,14 @@ void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmar
break;
case BBJ_SWITCH:
- jumpCnt = auxBlock->bbJumpSwt->bbsCount;
- jumpTab = auxBlock->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const bTarget : auxBlock->SwitchTargets())
{
- noway_assert(*jumpTab);
- if ((*jumpTab) == loop.lpEntry)
+ if (bTarget == loop.lpEntry)
{
removeLoop = false;
+ break;
}
- } while (++jumpTab, --jumpCnt);
+ }
break;
default:
@@ -1183,9 +1165,8 @@ bool Compiler::optRecordLoop(BasicBlock* head,
// Make sure the "iterVar" initialization is never skipped,
// i.e. every pred of ENTRY other than HEAD is in the loop.
- for (flowList* predEdge = entry->bbPreds; predEdge; predEdge = predEdge->flNext)
+ for (BasicBlock* const predBlock : entry->PredBlocks())
{
- BasicBlock* predBlock = predEdge->getBlock();
if ((predBlock != head) && !optLoopTable[loopInd].lpContains(predBlock))
{
goto DONE_LOOP;
@@ -1244,7 +1225,7 @@ bool Compiler::optRecordLoop(BasicBlock* head,
do
{
block = block->bbNext;
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
if (stmt->GetRootNode() == incr)
{
@@ -1318,38 +1299,35 @@ void Compiler::optPrintLoopRecording(unsigned loopInd) const
void Compiler::optCheckPreds()
{
- BasicBlock* block;
- BasicBlock* blockPred;
- flowList* pred;
-
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (pred = block->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
// make sure this pred is part of the BB list
- for (blockPred = fgFirstBB; blockPred; blockPred = blockPred->bbNext)
+ BasicBlock* bb;
+ for (bb = fgFirstBB; bb; bb = bb->bbNext)
{
- if (blockPred == pred->getBlock())
+ if (bb == predBlock)
{
break;
}
}
- noway_assert(blockPred);
- switch (blockPred->bbJumpKind)
+ noway_assert(bb);
+ switch (bb->bbJumpKind)
{
case BBJ_COND:
- if (blockPred->bbJumpDest == block)
+ if (bb->bbJumpDest == block)
{
break;
}
FALLTHROUGH;
case BBJ_NONE:
- noway_assert(blockPred->bbNext == block);
+ noway_assert(bb->bbNext == block);
break;
case BBJ_EHFILTERRET:
case BBJ_ALWAYS:
case BBJ_EHCATCHRET:
- noway_assert(blockPred->bbJumpDest == block);
+ noway_assert(bb->bbJumpDest == block);
break;
default:
break;
@@ -1824,18 +1802,16 @@ class LoopSearch
}
// Add preds to the worklist, checking for side-entries.
- for (flowList* predIter = block->bbPreds; predIter != nullptr; predIter = predIter->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* pred = predIter->getBlock();
-
- unsigned int testNum = PositionNum(pred);
+ unsigned int testNum = PositionNum(predBlock);
if ((testNum < top->bbNum) || (testNum > bottom->bbNum))
{
// Pred is out of loop range
if (block == entry)
{
- if (pred == head)
+ if (predBlock == head)
{
// This is the single entry we expect.
continue;
@@ -1843,9 +1819,9 @@ class LoopSearch
// ENTRY has some pred other than head outside the loop. If ENTRY does not
// dominate this pred, we'll consider this a side-entry and skip this loop;
// otherwise the loop is still valid and this may be a (flow-wise) back-edge
- // of an outer loop. For the dominance test, if `pred` is a new block, use
+ // of an outer loop. For the dominance test, if `predBlock` is a new block, use
// its unique predecessor since the dominator tree has info for that.
- BasicBlock* effectivePred = (pred->bbNum > oldBlockMaxNum ? pred->bbPrev : pred);
+ BasicBlock* effectivePred = (predBlock->bbNum > oldBlockMaxNum ? predBlock->bbPrev : predBlock);
if (comp->fgDominate(entry, effectivePred))
{
// Outer loop back-edge
@@ -1858,32 +1834,33 @@ class LoopSearch
}
bool isFirstVisit;
- if (pred == entry)
+ if (predBlock == entry)
{
// We have indeed found a cycle in the flow graph.
isFirstVisit = !foundCycle;
foundCycle = true;
- assert(loopBlocks.IsMember(pred->bbNum));
+ assert(loopBlocks.IsMember(predBlock->bbNum));
}
- else if (loopBlocks.TestAndInsert(pred->bbNum))
+ else if (loopBlocks.TestAndInsert(predBlock->bbNum))
{
// Already visited this pred
isFirstVisit = false;
}
else
{
- // Add this pred to the worklist
- worklist.push_back(pred);
+ // Add this predBlock to the worklist
+ worklist.push_back(predBlock);
isFirstVisit = true;
}
- if (isFirstVisit && (pred->bbNext != nullptr) && (PositionNum(pred->bbNext) == pred->bbNum))
+ if (isFirstVisit && (predBlock->bbNext != nullptr) &&
+ (PositionNum(predBlock->bbNext) == predBlock->bbNum))
{
- // We've created a new block immediately after `pred` to
+ // We've created a new block immediately after `predBlock` to
// reconnect what was fall-through. Mark it as in-loop also;
// it needs to stay with `predBlock` and if it exits the loop we'd
// just need to re-create it if we tried to move it out.
- loopBlocks.Insert(pred->bbNext->bbNum);
+ loopBlocks.Insert(predBlock->bbNext->bbNum);
}
}
}
@@ -2102,9 +2079,9 @@ class LoopSearch
// of an edge from the run of blocks being moved to `newMoveAfter` -- doing so would
// introduce a new lexical back-edge, which could (maybe?) confuse the loop search
// algorithm, and isn't desirable layout anyway.
- for (flowList* predIter = newMoveAfter->bbPreds; predIter != nullptr; predIter = predIter->flNext)
+ for (BasicBlock* const predBlock : newMoveAfter->PredBlocks())
{
- unsigned int predNum = predIter->getBlock()->bbNum;
+ unsigned int predNum = predBlock->bbNum;
if ((predNum >= top->bbNum) && (predNum <= bottom->bbNum) && !loopBlocks.IsMember(predNum))
{
@@ -2169,12 +2146,10 @@ class LoopSearch
//
bool CanTreatAsLoopBlocks(BasicBlock* firstNonLoopBlock, BasicBlock* lastNonLoopBlock)
{
- BasicBlock* nextLoopBlock = lastNonLoopBlock->bbNext;
- for (BasicBlock* testBlock = firstNonLoopBlock; testBlock != nextLoopBlock; testBlock = testBlock->bbNext)
+ for (BasicBlock* const testBlock : comp->Blocks(firstNonLoopBlock, lastNonLoopBlock))
{
- for (flowList* predIter = testBlock->bbPreds; predIter != nullptr; predIter = predIter->flNext)
+ for (BasicBlock* const testPred : testBlock->PredBlocks())
{
- BasicBlock* testPred = predIter->getBlock();
unsigned int predPosNum = PositionNum(testPred);
unsigned int firstNonLoopPosNum = PositionNum(firstNonLoopBlock);
unsigned int lastNonLoopPosNum = PositionNum(lastNonLoopBlock);
@@ -2329,23 +2304,14 @@ class LoopSearch
break;
case BBJ_SWITCH:
-
- unsigned jumpCnt;
- jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab;
- jumpTab = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const exitPoint : block->SwitchTargets())
{
- noway_assert(*jumpTab);
- exitPoint = *jumpTab;
-
if (!loopBlocks.IsMember(exitPoint->bbNum))
{
lastExit = block;
exitCount++;
}
- } while (++jumpTab, --jumpCnt);
+ }
break;
default:
@@ -2389,7 +2355,7 @@ void Compiler::optFindNaturalLoops()
LoopSearch search(this);
- for (BasicBlock* head = fgFirstBB; head->bbNext; head = head->bbNext)
+ for (BasicBlock* head = fgFirstBB; head->bbNext != nullptr; head = head->bbNext)
{
BasicBlock* top = head->bbNext;
@@ -2401,9 +2367,9 @@ void Compiler::optFindNaturalLoops()
continue;
}
- for (flowList* pred = top->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const predBlock : top->PredBlocks())
{
- if (search.FindLoop(head, top, pred->getBlock()))
+ if (search.FindLoop(head, top, predBlock))
{
// Found a loop; record it and see if we've hit the limit.
bool recordedLoop = search.RecordLoop();
@@ -2500,16 +2466,9 @@ void Compiler::optFindNaturalLoops()
// this -- the innermost loop labeling will be done last.
for (unsigned char loopInd = 0; loopInd < optLoopCount; loopInd++)
{
- BasicBlock* first = optLoopTable[loopInd].lpFirst;
- BasicBlock* bottom = optLoopTable[loopInd].lpBottom;
- for (BasicBlock* blk = first; blk != nullptr; blk = blk->bbNext)
+ for (BasicBlock* const blk : optLoopTable[loopInd].LoopBlocks())
{
blk->bbNatLoopNum = loopInd;
- if (blk == bottom)
- {
- break;
- }
- assert(blk->bbNext != nullptr); // We should never reach nullptr.
}
}
@@ -2678,10 +2637,8 @@ void Compiler::optCopyBlkDest(BasicBlock* from, BasicBlock* to)
break;
case BBJ_SWITCH:
- {
to->bbJumpSwt = new (this, CMK_BasicBlock) BBswtDesc(this, from->bbJumpSwt);
- }
- break;
+ break;
default:
break;
@@ -2859,10 +2816,8 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd)
// This is ok, because after the first redirection, the topPredBlock branch target will no longer match the source
// edge of the blockMap, so nothing will happen.
bool firstPred = true;
- for (flowList* topPred = t->bbPreds; topPred != nullptr; topPred = topPred->flNext)
+ for (BasicBlock* const topPredBlock : t->PredBlocks())
{
- BasicBlock* topPredBlock = topPred->getBlock();
-
// Skip if topPredBlock is in the loop.
// Note that this uses block number to detect membership in the loop. We are adding blocks during
// canonicalization, and those block numbers will be new, and larger than previous blocks. However, we work
@@ -3770,7 +3725,7 @@ PhaseStatus Compiler::optUnrollLoops()
++loopRetCount;
}
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
gtSetStmtInfo(stmt);
loopCostSz += stmt->GetCostSz();
@@ -4251,7 +4206,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
unsigned estDupCostSz = 0;
- for (Statement* stmt : bTest->Statements())
+ for (Statement* const stmt : bTest->Statements())
{
GenTree* tree = stmt->GetRootNode();
gtPrepareCost(tree);
@@ -4340,7 +4295,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
//
// If the condition has array.Length operations, also boost, as they are likely to be CSE'd.
- for (Statement* stmt : bTest->Statements())
+ for (Statement* const stmt : bTest->Statements())
{
GenTree* tree = stmt->GetRootNode();
@@ -4399,7 +4354,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true);
// Clone each statement in bTest and append to bNewCond.
- for (Statement* stmt : bTest->Statements())
+ for (Statement* const stmt : bTest->Statements())
{
GenTree* originalTree = stmt->GetRootNode();
GenTree* clonedTree = gtCloneExpr(originalTree);
@@ -4471,10 +4426,9 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block)
unsigned loopFirstNum = bNewCond->bbNext->bbNum;
unsigned loopBottomNum = bTest->bbNum;
- for (flowList* pred = bTest->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : bTest->PredBlocks())
{
- BasicBlock* predBlock = pred->getBlock();
- unsigned bNum = predBlock->bbNum;
+ unsigned bNum = predBlock->bbNum;
if ((loopFirstNum <= bNum) && (bNum <= loopBottomNum))
{
// Looks like the predecessor is from within the potential loop; skip it.
@@ -4602,7 +4556,7 @@ PhaseStatus Compiler::optInvertLoops()
}
bool madeChanges = false; // Assume no changes made
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Make sure the appropriate fields are initialized
//
@@ -4697,20 +4651,14 @@ PhaseStatus Compiler::optFindLoops()
* lastBottom - used when we have multiple back-edges to the same top
*/
- flowList* pred;
-
- BasicBlock* top;
-
- for (top = fgFirstBB; top; top = top->bbNext)
+ for (BasicBlock* const top : Blocks())
{
BasicBlock* foundBottom = nullptr;
- for (pred = top->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const bottom : top->PredBlocks())
{
/* Is this a loop candidate? - We look for "back edges" */
- BasicBlock* bottom = pred->getBlock();
-
/* is this a backward edge? (from BOTTOM to TOP) */
if (top->bbNum > bottom->bbNum)
@@ -5360,7 +5308,7 @@ bool Compiler::optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip,
{
noway_assert(beg != nullptr);
- for (Statement* stmt : beg->Statements())
+ for (Statement* const stmt : beg->Statements())
{
if (fgWalkTreePre(stmt->GetRootNodePointer(), optIsVarAssgCB, &desc) != WALK_CONTINUE)
{
@@ -5405,12 +5353,8 @@ bool Compiler::optIsVarAssgLoop(unsigned lnum, unsigned var)
/*****************************************************************************/
int Compiler::optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds)
{
- LoopDsc* loop;
-
- /* Get hold of the loop descriptor */
-
noway_assert(lnum < optLoopCount);
- loop = optLoopTable + lnum;
+ LoopDsc* loop = &optLoopTable[lnum];
/* Do we already know what variables are assigned within this loop? */
@@ -5418,9 +5362,6 @@ int Compiler::optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKi
{
isVarAssgDsc desc;
- BasicBlock* beg;
- BasicBlock* end;
-
/* Prepare the descriptor used by the tree walker call-back */
desc.ivaVar = (unsigned)-1;
@@ -5435,14 +5376,9 @@ int Compiler::optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKi
/* Now walk all the statements of the loop */
- beg = loop->lpHead->bbNext;
- end = loop->lpBottom;
-
- for (/**/; /**/; beg = beg->bbNext)
+ for (BasicBlock* const block : loop->LoopBlocks())
{
- noway_assert(beg);
-
- for (Statement* stmt : StatementList(beg->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
fgWalkTreePre(stmt->GetRootNodePointer(), optIsVarAssgCB, &desc);
@@ -5451,11 +5387,6 @@ int Compiler::optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKi
loop->lpFlags |= LPFLG_ASGVARS_INC;
}
}
-
- if (beg == end)
- {
- break;
- }
}
AllVarSetOps::Assign(this, loop->lpAsgVars, desc.ivaMaskVal);
@@ -6197,7 +6128,7 @@ void Compiler::optHoistLoopBlocks(unsigned loopNum, ArrayStack* blo
void HoistBlock(BasicBlock* block)
{
- for (Statement* stmt : StatementList(block->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
WalkTree(stmt->GetRootNodePointer(), nullptr);
assert(m_valueStack.TopRef().Node() == stmt->GetRootNode());
@@ -6805,7 +6736,7 @@ void Compiler::fgCreateLoopPreHeader(unsigned lnum)
// into the phi via the loop header block will now flow through the preheader
// block from the header block.
- for (Statement* stmt : top->Statements())
+ for (Statement* const stmt : top->Statements())
{
GenTree* tree = stmt->GetRootNode();
if (tree->OperGet() != GT_ASG)
@@ -6851,10 +6782,8 @@ void Compiler::fgCreateLoopPreHeader(unsigned lnum)
edgeToPreHeader->setEdgeWeights(preHead->bbWeight, preHead->bbWeight, preHead);
bool checkNestedLoops = false;
- for (flowList* pred = top->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const predBlock : top->PredBlocks())
{
- BasicBlock* predBlock = pred->getBlock();
-
if (fgDominate(top, predBlock))
{
// note: if 'top' dominates predBlock, 'head' dominates predBlock too
@@ -7028,9 +6957,8 @@ void Compiler::optComputeLoopSideEffects()
void Compiler::optComputeLoopNestSideEffects(unsigned lnum)
{
assert(optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP); // Requires: lnum is outermost.
- BasicBlock* botNext = optLoopTable[lnum].lpBottom->bbNext;
- JITDUMP("optComputeLoopSideEffects botNext is " FMT_BB ", lnum is %d\n", botNext->bbNum, lnum);
- for (BasicBlock* bbInLoop = optLoopTable[lnum].lpFirst; bbInLoop != botNext; bbInLoop = bbInLoop->bbNext)
+ JITDUMP("optComputeLoopSideEffects lnum is %d\n", lnum);
+ for (BasicBlock* const bbInLoop : optLoopTable[lnum].LoopBlocks())
{
if (!optComputeLoopSideEffectsOfBlock(bbInLoop))
{
@@ -7084,9 +7012,9 @@ bool Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk)
MemoryKindSet memoryHavoc = emptyMemoryKindSet;
// Now iterate over the remaining statements, and their trees.
- for (Statement* stmt : StatementList(blk->FirstNonPhiDef()))
+ for (Statement* const stmt : blk->NonPhiStatements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
genTreeOps oper = tree->OperGet();
@@ -7840,7 +7768,7 @@ void Compiler::optOptimizeBools()
{
change = false;
- for (BasicBlock* b1 = fgFirstBB; b1; b1 = b1->bbNext)
+ for (BasicBlock* const b1 : Blocks())
{
/* We're only interested in conditional jumps here */
@@ -8233,7 +8161,7 @@ void Compiler::optRemoveRedundantZeroInits()
for (Statement* stmt = block->FirstNonPhiDef(); stmt != nullptr;)
{
Statement* next = stmt->GetNextStmt();
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (((tree->gtFlags & GTF_CALL) != 0))
{
diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp
index 8ce03ea9590620..bb5b5ffe0e79fc 100644
--- a/src/coreclr/jit/patchpoint.cpp
+++ b/src/coreclr/jit/patchpoint.cpp
@@ -50,7 +50,7 @@ class PatchpointTransformer
}
int count = 0;
- for (BasicBlock* block = compiler->fgFirstBB->bbNext; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks(compiler->fgFirstBB->bbNext))
{
if (block->bbFlags & BBF_PATCHPOINT)
{
diff --git a/src/coreclr/jit/rangecheck.cpp b/src/coreclr/jit/rangecheck.cpp
index a8beca893765b1..5c95b91883c521 100644
--- a/src/coreclr/jit/rangecheck.cpp
+++ b/src/coreclr/jit/rangecheck.cpp
@@ -1445,9 +1445,9 @@ Compiler::fgWalkResult MapMethodDefsVisitor(GenTree** ptr, Compiler::fgWalkData*
void RangeCheck::MapMethodDefs()
{
// First, gather where all definitions occur in the program and store it in a map.
- for (BasicBlock* block = m_pCompiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : m_pCompiler->Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
MapMethodDefsData data(this, block, stmt);
m_pCompiler->fgWalkTreePre(stmt->GetRootNodePointer(), MapMethodDefsVisitor, &data, false, true);
@@ -1474,11 +1474,11 @@ void RangeCheck::OptimizeRangeChecks()
#endif
// Walk through trees looking for arrBndsChk node and check if it can be optimized.
- for (BasicBlock* block = m_pCompiler->fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : m_pCompiler->Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (IsOverBudget())
{
diff --git a/src/coreclr/jit/rationalize.cpp b/src/coreclr/jit/rationalize.cpp
index ea0e5fb9dc0ed3..ede632657f8cc5 100644
--- a/src/coreclr/jit/rationalize.cpp
+++ b/src/coreclr/jit/rationalize.cpp
@@ -302,14 +302,13 @@ void Rationalizer::ValidateStatement(Statement* stmt, BasicBlock* block)
void Rationalizer::SanityCheck()
{
// TODO: assert(!IsLIR());
- BasicBlock* block;
- foreach_block(comp, block)
+ for (BasicBlock* const block : comp->Blocks())
{
- for (Statement* statement : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
- ValidateStatement(statement, block);
+ ValidateStatement(stmt, block);
- for (GenTree* tree = statement->GetTreeList(); tree; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
// QMARK and PUT_ARG_TYPE nodes should have been removed before this phase.
assert(!tree->OperIs(GT_QMARK, GT_PUTARG_TYPE));
@@ -933,7 +932,7 @@ PhaseStatus Rationalizer::DoPhase()
comp->fgOrder = Compiler::FGOrderLinear;
RationalizeVisitor visitor(*this);
- for (BasicBlock* block = comp->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : comp->Blocks())
{
comp->compCurBB = block;
m_block = block;
@@ -949,7 +948,7 @@ PhaseStatus Rationalizer::DoPhase()
continue;
}
- for (Statement* statement : StatementList(firstStatement))
+ for (Statement* const statement : block->Statements())
{
assert(statement->GetTreeList() != nullptr);
assert(statement->GetTreeList()->gtPrev == nullptr);
diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp
index 63125a8c6c3371..34fe9da3503995 100644
--- a/src/coreclr/jit/redundantbranchopts.cpp
+++ b/src/coreclr/jit/redundantbranchopts.cpp
@@ -21,7 +21,7 @@ PhaseStatus Compiler::optRedundantBranches()
bool madeChanges = false;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Skip over any removed blocks.
//
@@ -40,7 +40,7 @@ PhaseStatus Compiler::optRedundantBranches()
// Reset visited flags, in case we set any.
//
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbFlags &= ~BBF_VISITED;
}
@@ -311,7 +311,7 @@ bool Compiler::optJumpThread(BasicBlock* const block, BasicBlock* const domBlock
//
Statement* const lastStmt = block->lastStmt();
- for (Statement* stmt = block->FirstNonPhiDef(); stmt != nullptr; stmt = stmt->GetNextStmt())
+ for (Statement* const stmt : block->NonPhiStatements())
{
GenTree* const tree = stmt->GetRootNode();
@@ -389,9 +389,8 @@ bool Compiler::optJumpThread(BasicBlock* const block, BasicBlock* const domBlock
BasicBlock* const trueTarget = block->bbJumpDest;
BasicBlock* const falseTarget = block->bbNext;
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* const predBlock = pred->getBlock();
numPreds++;
// Treat switch preds as ambiguous for now.
@@ -510,10 +509,8 @@ bool Compiler::optJumpThread(BasicBlock* const block, BasicBlock* const domBlock
// flow directly by changing their jump targets to the appropriate successor,
// provided it's a permissable flow in our EH model.
//
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* const predBlock = pred->getBlock();
-
if (predBlock->bbJumpKind == BBJ_SWITCH)
{
// Skip over switch preds, they will continue to flow to block.
@@ -633,7 +630,7 @@ bool Compiler::optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlo
return true;
}
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbFlags &= ~BBF_VISITED;
}
diff --git a/src/coreclr/jit/simdashwintrinsic.cpp b/src/coreclr/jit/simdashwintrinsic.cpp
index e8f742cfc78a9a..638353f5642c4b 100644
--- a/src/coreclr/jit/simdashwintrinsic.cpp
+++ b/src/coreclr/jit/simdashwintrinsic.cpp
@@ -479,6 +479,27 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
}
break;
}
+
+ case NI_VectorT128_Sum:
+ {
+ // TODO-XArch-CQ: We could support this all the way down to SSE2, which might be
+ // worthwhile so we can accelerate cases like byte/sbyte and long/ulong
+
+ if (varTypeIsFloating(simdBaseType))
+ {
+ if (!compOpportunisticallyDependsOn(InstructionSet_SSE3))
+ {
+ // Floating-point types require SSE3.HorizontalAdd
+ return nullptr;
+ }
+ }
+ else if (!compOpportunisticallyDependsOn(InstructionSet_SSSE3))
+ {
+ // Integral types require SSSE3.HorizontalAdd
+ return nullptr;
+ }
+ break;
+ }
#endif // TARGET_XARCH
default:
@@ -719,12 +740,115 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
}
break;
}
+ case NI_VectorT128_Sum:
+ {
+ GenTree* tmp;
+ unsigned vectorLength = getSIMDVectorLength(simdSize, simdBaseType);
+ int haddCount = genLog2(vectorLength);
+
+ NamedIntrinsic horizontalAdd =
+ varTypeIsFloating(simdBaseType) ? NI_SSE3_HorizontalAdd : NI_SSSE3_HorizontalAdd;
+
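+ // E.g. (illustrative) for Vector128<float> {a,b,c,d}: the first HorizontalAdd pass
+ // yields {a+b, c+d, a+b, c+d}, the second {a+b+c+d, ...}, and ToScalar then
+ // extracts element 0.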
+ for (int i = 0; i < haddCount; i++)
+ {
+ op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL,
+ nullptr DEBUGARG("Clone op1 for Vector.Sum"));
+ op1 = gtNewSimdAsHWIntrinsicNode(simdType, op1, tmp, horizontalAdd, simdBaseJitType, simdSize);
+ }
+
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize);
+ }
+ case NI_VectorT256_Sum:
+ {
+ // HorizontalAdd combines adjacent pairs, so we need log2(vectorLength) passes to sum all elements together.
+ unsigned vectorLength = getSIMDVectorLength(simdSize, simdBaseType);
+ int haddCount = genLog2(vectorLength) - 1; // Minus 1 because for the last pass we split the vector
+ // to low / high and add them together.
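+ // E.g. (illustrative) for Vector256<float> (8 elements): two in-lane HorizontalAdd
+ // passes leave the lane sums in each 128-bit half; ExtractVector128 + Add then folds
+ // the upper half into the lower, and ToScalar reads element 0.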
+ GenTree* tmp;
+ NamedIntrinsic horizontalAdd = NI_AVX2_HorizontalAdd;
+ NamedIntrinsic add = NI_SSE2_Add;
+
+ if (simdBaseType == TYP_DOUBLE)
+ {
+ horizontalAdd = NI_AVX_HorizontalAdd;
+ }
+ else if (simdBaseType == TYP_FLOAT)
+ {
+ horizontalAdd = NI_AVX_HorizontalAdd;
+ add = NI_SSE_Add;
+ }
+
+ for (int i = 0; i < haddCount; i++)
+ {
+ op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL,
+ nullptr DEBUGARG("Clone op1 for Vector.Sum"));
+ op1 = gtNewSimdAsHWIntrinsicNode(simdType, op1, tmp, horizontalAdd, simdBaseJitType, simdSize);
+ }
+
+ op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL,
+ nullptr DEBUGARG("Clone op1 for Vector.Sum"));
+ op1 = gtNewSimdAsHWIntrinsicNode(TYP_SIMD16, op1, gtNewIconNode(0x01, TYP_INT),
+ NI_AVX_ExtractVector128, simdBaseJitType, simdSize);
+
+ tmp = gtNewSimdAsHWIntrinsicNode(simdType, tmp, NI_Vector256_GetLower, simdBaseJitType, simdSize);
+ op1 = gtNewSimdAsHWIntrinsicNode(TYP_SIMD16, op1, tmp, add, simdBaseJitType, 16);
+
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, NI_Vector128_ToScalar, simdBaseJitType, 16);
+ }
#elif defined(TARGET_ARM64)
case NI_VectorT128_Abs:
{
assert(varTypeIsUnsigned(simdBaseType));
return op1;
}
+ case NI_VectorT128_Sum:
+ {
+ GenTree* tmp;
+
+ switch (simdBaseType)
+ {
+ case TYP_BYTE:
+ case TYP_UBYTE:
+ case TYP_SHORT:
+ case TYP_USHORT:
+ case TYP_INT:
+ case TYP_UINT:
+ {
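+ // (Illustrative: AddAcross reduces the whole vector in one instruction,
+ // e.g. "addv b0, v0.16b" for a Vector128<byte>.)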
+ tmp = gtNewSimdAsHWIntrinsicNode(simdType, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType,
+ simdSize);
+ return gtNewSimdAsHWIntrinsicNode(retType, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8);
+ }
+ case TYP_FLOAT:
+ {
+ unsigned vectorLength = getSIMDVectorLength(simdSize, simdBaseType);
+ int haddCount = genLog2(vectorLength);
+
+ for (int i = 0; i < haddCount; i++)
+ {
+ op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL,
+ nullptr DEBUGARG("Clone op1 for Vector.Sum"));
+ op1 = gtNewSimdAsHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_Arm64_AddPairwise,
+ simdBaseJitType, simdSize);
+ }
+
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, NI_Vector128_ToScalar, simdBaseJitType,
+ simdSize);
+ }
+ case TYP_DOUBLE:
+ case TYP_LONG:
+ case TYP_ULONG:
+ {
+ op1 = gtNewSimdAsHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar,
+ simdBaseJitType, simdSize);
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8);
+ }
+ default:
+ {
+ unreached();
+ }
+ }
+ }
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
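The Vector&lt;T&gt;.Sum expansions above lower to a chain of horizontal adds followed by a scalar extract. As a standalone illustration (not the JIT's own code), here is a minimal C++ sketch of the float reduction written directly against the SSE3 intrinsic the JIT targets:

```cpp
// Standalone sketch of the float Vector128 Sum expansion above: log2(4) = 2
// HorizontalAdd passes leave the total in every lane, then lane 0 is
// extracted (the NI_Vector128_ToScalar step). Illustration only; compile
// with SSE3 enabled (e.g. -msse3).
#include <pmmintrin.h> // SSE3: _mm_hadd_ps
#include <cstdio>

static float Vector128SumFloat(__m128 v)
{
    v = _mm_hadd_ps(v, v);   // pass 1: {a+b, c+d, a+b, c+d}
    v = _mm_hadd_ps(v, v);   // pass 2: {a+b+c+d, ...}
    return _mm_cvtss_f32(v); // ToScalar: take lane 0
}

int main()
{
    __m128 v = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
    std::printf("%f\n", Vector128SumFloat(v)); // prints 10.000000
    return 0;
}
```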
diff --git a/src/coreclr/jit/simdashwintrinsiclistarm64.h b/src/coreclr/jit/simdashwintrinsiclistarm64.h
index 4eba54135a54b7..229222882f7202 100644
--- a/src/coreclr/jit/simdashwintrinsiclistarm64.h
+++ b/src/coreclr/jit/simdashwintrinsiclistarm64.h
@@ -132,6 +132,7 @@ SIMD_AS_HWINTRINSIC_ID(VectorT128, op_Inequality,
SIMD_AS_HWINTRINSIC_ID(VectorT128, op_Multiply, 2, {NI_VectorT128_op_Multiply, NI_VectorT128_op_Multiply, NI_VectorT128_op_Multiply, NI_VectorT128_op_Multiply, NI_VectorT128_op_Multiply, NI_VectorT128_op_Multiply, NI_Illegal, NI_Illegal, NI_VectorT128_op_Multiply, NI_VectorT128_op_Multiply}, SimdAsHWIntrinsicFlag::None)
SIMD_AS_HWINTRINSIC_ID(VectorT128, op_Subtraction, 2, {NI_AdvSimd_Subtract, NI_AdvSimd_Subtract, NI_AdvSimd_Subtract, NI_AdvSimd_Subtract, NI_AdvSimd_Subtract, NI_AdvSimd_Subtract, NI_AdvSimd_Subtract, NI_AdvSimd_Subtract, NI_AdvSimd_Subtract, NI_AdvSimd_Arm64_Subtract}, SimdAsHWIntrinsicFlag::None)
SIMD_AS_HWINTRINSIC_ID(VectorT128, SquareRoot, 1, {NI_Illegal, NI_Illegal, NI_Illegal, NI_Illegal, NI_Illegal, NI_Illegal, NI_Illegal, NI_Illegal, NI_AdvSimd_Arm64_Sqrt, NI_AdvSimd_Arm64_Sqrt}, SimdAsHWIntrinsicFlag::None)
+SIMD_AS_HWINTRINSIC_ID(VectorT128, Sum, 1, {NI_VectorT128_Sum, NI_VectorT128_Sum, NI_VectorT128_Sum, NI_VectorT128_Sum, NI_VectorT128_Sum, NI_VectorT128_Sum, NI_VectorT128_Sum, NI_VectorT128_Sum, NI_VectorT128_Sum, NI_VectorT128_Sum}, SimdAsHWIntrinsicFlag::None)
#undef SIMD_AS_HWINTRINSIC_NM
#undef SIMD_AS_HWINTRINSIC_ID
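On ARM64 the integral path above needs no loop at all: NI_AdvSimd_Arm64_AddAcross reduces every lane in a single instruction. A hedged standalone sketch using the corresponding NEON intrinsic (vaddvq_s32 is the C-level counterpart; this is an illustration, not the JIT expansion itself):

```cpp
// Hedged sketch of the ARM64 integer path: AddAcross sums every lane in
// one ADDV instruction; this mirrors, but does not reproduce, the JIT's
// NI_AdvSimd_Arm64_AddAcross + ToScalar expansion. Compile for AArch64.
#include <arm_neon.h>
#include <cstdint>
#include <cstdio>

static int32_t SumInt32x4(int32x4_t v)
{
    return vaddvq_s32(v); // ADDV: reduce all four lanes to a scalar
}

int main()
{
    const int32_t data[4] = {1, 2, 3, 4};
    std::printf("%d\n", SumInt32x4(vld1q_s32(data))); // prints 10
    return 0;
}
```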
diff --git a/src/coreclr/jit/simdashwintrinsiclistxarch.h b/src/coreclr/jit/simdashwintrinsiclistxarch.h
index af75fb75fae6d9..92d665c2de8a71 100644
--- a/src/coreclr/jit/simdashwintrinsiclistxarch.h
+++ b/src/coreclr/jit/simdashwintrinsiclistxarch.h
@@ -132,6 +132,7 @@ SIMD_AS_HWINTRINSIC_ID(VectorT128, op_Inequality,
SIMD_AS_HWINTRINSIC_ID(VectorT128, op_Multiply, 2, {NI_Illegal, NI_Illegal, NI_VectorT128_op_Multiply, NI_VectorT128_op_Multiply, NI_VectorT128_op_Multiply, NI_VectorT128_op_Multiply, NI_Illegal, NI_Illegal, NI_VectorT128_op_Multiply, NI_VectorT128_op_Multiply}, SimdAsHWIntrinsicFlag::None)
SIMD_AS_HWINTRINSIC_ID(VectorT128, op_Subtraction, 2, {NI_SSE2_Subtract, NI_SSE2_Subtract, NI_SSE2_Subtract, NI_SSE2_Subtract, NI_SSE2_Subtract, NI_SSE2_Subtract, NI_SSE2_Subtract, NI_SSE2_Subtract, NI_SSE_Subtract, NI_SSE2_Subtract}, SimdAsHWIntrinsicFlag::None)
SIMD_AS_HWINTRINSIC_ID(VectorT128, SquareRoot, 1, {NI_Illegal, NI_Illegal, NI_Illegal, NI_Illegal, NI_Illegal, NI_Illegal, NI_Illegal, NI_Illegal, NI_SSE_Sqrt, NI_SSE2_Sqrt}, SimdAsHWIntrinsicFlag::None)
+SIMD_AS_HWINTRINSIC_ID(VectorT128, Sum, 1, {NI_Illegal, NI_Illegal, NI_VectorT128_Sum, NI_VectorT128_Sum, NI_VectorT128_Sum, NI_VectorT128_Sum, NI_Illegal, NI_Illegal, NI_VectorT128_Sum, NI_VectorT128_Sum}, SimdAsHWIntrinsicFlag::None)
// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************
// ISA ID Name NumArg Instructions Flags
@@ -170,6 +171,7 @@ SIMD_AS_HWINTRINSIC_ID(VectorT256, op_Inequality,
SIMD_AS_HWINTRINSIC_ID(VectorT256, op_Multiply, 2, {NI_Illegal, NI_Illegal, NI_VectorT256_op_Multiply, NI_VectorT256_op_Multiply, NI_VectorT256_op_Multiply, NI_VectorT256_op_Multiply, NI_Illegal, NI_Illegal, NI_VectorT256_op_Multiply, NI_VectorT256_op_Multiply}, SimdAsHWIntrinsicFlag::None)
SIMD_AS_HWINTRINSIC_ID(VectorT256, op_Subtraction, 2, {NI_AVX2_Subtract, NI_AVX2_Subtract, NI_AVX2_Subtract, NI_AVX2_Subtract, NI_AVX2_Subtract, NI_AVX2_Subtract, NI_AVX2_Subtract, NI_AVX2_Subtract, NI_AVX_Subtract, NI_AVX_Subtract}, SimdAsHWIntrinsicFlag::None)
SIMD_AS_HWINTRINSIC_ID(VectorT256, SquareRoot, 1, {NI_Illegal, NI_Illegal, NI_Illegal, NI_Illegal, NI_Illegal, NI_Illegal, NI_Illegal, NI_Illegal, NI_AVX_Sqrt, NI_AVX_Sqrt}, SimdAsHWIntrinsicFlag::None)
+SIMD_AS_HWINTRINSIC_ID(VectorT256, Sum, 1, {NI_Illegal, NI_Illegal, NI_VectorT256_Sum, NI_VectorT256_Sum, NI_VectorT256_Sum, NI_VectorT256_Sum, NI_Illegal, NI_Illegal, NI_VectorT256_Sum, NI_VectorT256_Sum}, SimdAsHWIntrinsicFlag::None)
#undef SIMD_AS_HWINTRINSIC_NM
#undef SIMD_AS_HWINTRINSIC_ID
diff --git a/src/coreclr/jit/ssabuilder.cpp b/src/coreclr/jit/ssabuilder.cpp
index 46671f8444bcc5..ea5d708b208c54 100644
--- a/src/coreclr/jit/ssabuilder.cpp
+++ b/src/coreclr/jit/ssabuilder.cpp
@@ -89,7 +89,7 @@ void Compiler::fgResetForSsa()
m_memorySsaMap[memoryKind] = nullptr;
}
- for (BasicBlock* blk = fgFirstBB; blk != nullptr; blk = blk->bbNext)
+ for (BasicBlock* const blk : Blocks())
{
// Eliminate phis.
for (MemoryKind memoryKind : allMemoryKinds())
@@ -106,14 +106,13 @@ void Compiler::fgResetForSsa()
}
}
- for (Statement* stmt : blk->Statements())
+ for (Statement* const stmt : blk->Statements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (tree->IsLocal())
{
tree->AsLclVarCommon()->SetSsaNum(SsaConfig::RESERVED_SSA_NUM);
- continue;
}
}
}
@@ -486,7 +485,7 @@ void SsaBuilder::ComputeIteratedDominanceFrontier(BasicBlock* b, const BlkToBlkV
static GenTree* GetPhiNode(BasicBlock* block, unsigned lclNum)
{
// Walk the statements for phi nodes.
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
// A prefix of the statements of the block are phi definition nodes. If we complete processing
// that prefix, exit.
@@ -540,7 +539,7 @@ void SsaBuilder::InsertPhi(BasicBlock* block, unsigned lclNum)
#ifdef DEBUG
unsigned seqNum = 1;
- for (GenTree* node = stmt->GetTreeList(); node != nullptr; node = node->gtNext)
+ for (GenTree* const node : stmt->TreeList())
{
node->gtSeqNum = seqNum++;
}
@@ -590,7 +589,7 @@ void SsaBuilder::AddPhiArg(
#ifdef DEBUG
unsigned seqNum = 1;
- for (GenTree* node = stmt->GetTreeList(); node != nullptr; node = node->gtNext)
+ for (GenTree* const node : stmt->TreeList())
{
node->gtSeqNum = seqNum++;
}
@@ -904,7 +903,7 @@ void SsaBuilder::AddDefToHandlerPhis(BasicBlock* block, unsigned lclNum, unsigne
bool phiFound = false;
#endif
// A prefix of blocks statements will be SSA definitions. Search those for "lclNum".
- for (Statement* stmt : handler->Statements())
+ for (Statement* const stmt : handler->Statements())
{
// If the tree is not an SSA def, break out of the loop: we're done.
if (!stmt->IsPhiDefnStmt())
@@ -1055,9 +1054,9 @@ void SsaBuilder::BlockRenameVariables(BasicBlock* block)
}
// Walk the statements of the block and rename definitions and uses.
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (tree->OperIs(GT_ASG))
{
@@ -1120,7 +1119,7 @@ void SsaBuilder::AddPhiArgsToSuccessors(BasicBlock* block)
for (BasicBlock* succ : block->GetAllSuccs(m_pCompiler))
{
// Walk the statements for phi nodes.
- for (Statement* stmt : succ->Statements())
+ for (Statement* const stmt : succ->Statements())
{
// A prefix of the statements of the block are phi definition nodes. If we complete processing
// that prefix, exit.
@@ -1251,7 +1250,7 @@ void SsaBuilder::AddPhiArgsToSuccessors(BasicBlock* block)
// For a filter, we consider the filter to be the "real" handler.
BasicBlock* handlerStart = succTry->ExFlowBlock();
- for (Statement* stmt : handlerStart->Statements())
+ for (Statement* const stmt : handlerStart->Statements())
{
GenTree* tree = stmt->GetRootNode();
@@ -1393,7 +1392,7 @@ void SsaBuilder::RenameVariables()
// Initialize the memory ssa numbers for unreachable blocks. ValueNum expects
     // memory ssa numbers to have some initial value.
- for (BasicBlock* block = m_pCompiler->fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : m_pCompiler->Blocks())
{
if (block->bbIDom == nullptr)
{
@@ -1528,7 +1527,7 @@ void SsaBuilder::Build()
// tree is built. The pre/post order numbers that were generated previously and used for loop
// recognition are still being used by optPerformHoistExpr via fgCreateLoopPreHeader. That's rather
// odd, considering that SetupBBRoot may have added a new block.
- for (BasicBlock* block = m_pCompiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : m_pCompiler->Blocks())
{
block->bbIDom = nullptr;
block->bbPostOrderNum = 0;
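The loops rewritten above all follow one pattern: a manual bbNext/gtNext pointer walk is replaced by a range-based helper (Blocks(), Statements(), TreeList()). A minimal sketch of that adapter pattern over an intrusive singly linked list; the names and types below are illustrative stand-ins, not the JIT's:

```cpp
// Minimal sketch of a range adapter over an intrusive singly linked list,
// the pattern behind helpers like Compiler::Blocks() and Statement::TreeList().
// Node, NodeList, and the names below are illustrative, not the JIT's real types.
#include <cstdio>

struct Node
{
    int   value;
    Node* next;
};

class NodeIterator
{
    Node* m_node;
public:
    explicit NodeIterator(Node* node) : m_node(node) {}
    Node* operator*() const { return m_node; }
    NodeIterator& operator++() { m_node = m_node->next; return *this; }
    bool operator!=(const NodeIterator& other) const { return m_node != other.m_node; }
};

class NodeList
{
    Node* m_head;
public:
    explicit NodeList(Node* head) : m_head(head) {}
    NodeIterator begin() const { return NodeIterator(m_head); }
    NodeIterator end() const { return NodeIterator(nullptr); }
};

int main()
{
    Node c{3, nullptr}, b{2, &c}, a{1, &b};
    // Equivalent to: for (Node* n = &a; n != nullptr; n = n->next)
    for (Node* const n : NodeList(&a))
    {
        std::printf("%d\n", n->value);
    }
    return 0;
}
```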
diff --git a/src/coreclr/jit/stacklevelsetter.cpp b/src/coreclr/jit/stacklevelsetter.cpp
index 361a4faadf5370..da20ff6fe22574 100644
--- a/src/coreclr/jit/stacklevelsetter.cpp
+++ b/src/coreclr/jit/stacklevelsetter.cpp
@@ -42,12 +42,12 @@ StackLevelSetter::StackLevelSetter(Compiler* compiler)
//
PhaseStatus StackLevelSetter::DoPhase()
{
- for (BasicBlock* block = comp->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : comp->Blocks())
{
ProcessBlock(block);
}
-#if !FEATURE_FIXED_OUT_ARGS
+#if !FEATURE_FIXED_OUT_ARGS
if (framePointerRequired)
{
comp->codeGen->setFramePointerRequired(true);
diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp
index cff036bba092d9..e49ba55eca816a 100644
--- a/src/coreclr/jit/valuenum.cpp
+++ b/src/coreclr/jit/valuenum.cpp
@@ -3787,27 +3787,43 @@ ValueNum ValueNumStore::VNApplySelectorsTypeCheck(ValueNum elem, var_types indTy
return elem;
}
-ValueNum ValueNumStore::VNApplySelectorsAssignTypeCoerce(ValueNum elem, var_types indType, BasicBlock* block)
+//------------------------------------------------------------------------
+// VNApplySelectorsAssignTypeCoerce: Compute the value number corresponding to `srcVN`
+// being written using an indirection of 'dstIndType'.
+//
+// Arguments:
+// srcVN - value number for the value being stored;
+// dstIndType - type of the indirection storing the value to the memory;
+// block - block where the assignment occurs
+//
+// Return Value:
+// The value number corresponding to memory after the assignment.
+//
+// Notes: It may insert a cast to dstIndType or return a unique value number for an incompatible dstIndType.
+//
+ValueNum ValueNumStore::VNApplySelectorsAssignTypeCoerce(ValueNum srcVN, var_types dstIndType, BasicBlock* block)
{
- var_types elemTyp = TypeOfVN(elem);
+ var_types srcType = TypeOfVN(srcVN);
- // Check if the elemTyp is matching/compatible
+ ValueNum dstVN;
- if (indType != elemTyp)
+    // Check whether the source type matches/is compatible with the destination indirection type.
+ if (dstIndType != srcType)
{
- bool isConstant = IsVNConstant(elem);
- if (isConstant && (elemTyp == genActualType(indType)))
+ bool isConstant = IsVNConstant(srcVN);
+ if (isConstant && (srcType == genActualType(dstIndType)))
{
// (i.e. We recorded a constant of TYP_INT for a TYP_BYTE field)
+ dstVN = srcVN;
}
else
{
// We are trying to write an 'elem' of type 'elemType' using 'indType' store
- if (varTypeIsStruct(indType))
+ if (varTypeIsStruct(dstIndType))
{
// return a new unique value number
- elem = VNMakeNormalUnique(elem);
+ dstVN = VNMakeNormalUnique(srcVN);
JITDUMP(" *** Mismatched types in VNApplySelectorsAssignTypeCoerce (indType is TYP_STRUCT)\n");
}
@@ -3816,14 +3832,18 @@ ValueNum ValueNumStore::VNApplySelectorsAssignTypeCoerce(ValueNum elem, var_type
// We are trying to write an 'elem' of type 'elemType' using 'indType' store
// insert a cast of elem to 'indType'
- elem = VNForCast(elem, indType, elemTyp);
+ dstVN = VNForCast(srcVN, dstIndType, srcType);
JITDUMP(" Cast to %s inserted in VNApplySelectorsAssignTypeCoerce (elemTyp is %s)\n",
- varTypeName(indType), varTypeName(elemTyp));
+ varTypeName(dstIndType), varTypeName(srcType));
}
}
}
- return elem;
+ else
+ {
+ dstVN = srcVN;
+ }
+ return dstVN;
}
//------------------------------------------------------------------------
@@ -6191,12 +6211,11 @@ void Compiler::fgValueNumber()
{
lvMemoryPerSsaData.GetSsaDefByIndex(i)->m_vnPair = noVnp;
}
- for (BasicBlock* blk = fgFirstBB; blk != nullptr; blk = blk->bbNext)
+ for (BasicBlock* const blk : Blocks())
{
- // Now iterate over the block's statements, and their trees.
- for (Statement* stmt : StatementList(blk->FirstNonPhiDef()))
+ for (Statement* const stmt : blk->NonPhiStatements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
tree->gtVNPair.SetBoth(ValueNumStore::NoVN);
}
@@ -6542,7 +6561,7 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk)
}
#endif
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
fgValueNumberTree(tree);
}
@@ -7410,18 +7429,26 @@ void Compiler::fgValueNumberTree(GenTree* tree)
//
if (lcl->gtVNPair.GetLiberal() == ValueNumStore::NoVN)
{
- // So far, we know about two of these cases:
+#ifdef DEBUG
+
+ // So far, we know about three of these cases:
// Case 1) We have a local var who has never been defined but it's seen as a use.
// This is the case of storeIndir(addr(lclvar)) = expr. In this case since we only
// take the address of the variable, this doesn't mean it's a use nor we have to
- // initialize it, so in this very rare case, we fabricate a value number.
- // Case 2) Local variables that represent structs which are assigned using CpBlk.
+ // initialize it, so in this very rare case, we fabricate a value number;
+ // Case 2) Local variables that represent structs which are assigned using CpBlk;
+            // Case 3) The local variable was written using a partial write;
+            //         for example, BLK<1>(ADDR(LCL_VAR int)) = 1 changes only the first byte.
+            //         We check that an ld-addr-op was performed on the local.
//
- // Make sure we have either case 1 or case 2
+ // Make sure we have one of these cases.
//
- GenTree* nextNode = lcl->gtNext;
+ const GenTree* nextNode = lcl->gtNext;
+ const LclVarDsc* varDsc = lvaGetDesc(lcl);
+
assert((nextNode->gtOper == GT_ADDR && nextNode->AsOp()->gtOp1 == lcl) ||
- varTypeIsStruct(lcl->TypeGet()));
+ varTypeIsStruct(lcl->TypeGet()) || varDsc->lvHasLdAddrOp);
+#endif // DEBUG
// We will assign a unique value number for these
//
@@ -7888,11 +7915,19 @@ void Compiler::fgValueNumberTree(GenTree* tree)
if (fieldSeq == FieldSeqStore::NotAField())
{
+ assert(!isEntire && "did not expect an entire NotAField write.");
// We don't know where we're storing, so give the local a new, unique VN.
// Do this by considering it an "entire" assignment, with an unknown RHS.
isEntire = true;
rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet()));
}
+ else if ((fieldSeq == nullptr) && !isEntire)
+ {
+ // It is a partial store of a LCL_VAR without using LCL_FLD.
+ // Generate a unique VN.
+ isEntire = true;
+ rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet()));
+ }
if (isEntire)
{
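The new header comment on VNApplySelectorsAssignTypeCoerce describes a three-way decision: keep the VN (a constant already at the widened actual type), make it opaque (a mismatched struct store), or insert a cast. A simplified, self-contained sketch of that decision tree follows; the types and helpers are stand-ins, not the real ValueNumStore API:

```cpp
// Simplified decision tree mirroring the VNApplySelectorsAssignTypeCoerce
// header comment above. Ty, VN, MakeUnique, and Cast are illustrative
// stand-ins for var_types, ValueNum, VNMakeNormalUnique, and VNForCast.
#include <cstdio>

enum class Ty { Int, Byte, Struct };

struct VN { int id; Ty type; bool isConst; };

static Ty ActualType(Ty t) { return (t == Ty::Byte) ? Ty::Int : t; } // small ints widen to int

static VN MakeUnique(VN v) { return {v.id + 1000, v.type, false}; }  // stand-in for VNMakeNormalUnique
static VN Cast(VN v, Ty to) { return {v.id + 2000, to, v.isConst}; } // stand-in for VNForCast

static VN CoerceForStore(VN srcVN, Ty dstIndType)
{
    if (dstIndType == srcVN.type)
        return srcVN;                                 // types already match
    if (srcVN.isConst && (srcVN.type == ActualType(dstIndType)))
        return srcVN;                                 // e.g. a TYP_INT constant stored to a TYP_BYTE field
    if (dstIndType == Ty::Struct)
        return MakeUnique(srcVN);                     // mismatched struct store: opaque unique value
    return Cast(srcVN, dstIndType);                   // otherwise insert a cast to the store type
}

int main()
{
    VN c{1, Ty::Int, true};
    std::printf("%d\n", CoerceForStore(c, Ty::Byte).id); // constant case: id unchanged, prints 1
    return 0;
}
```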
diff --git a/src/coreclr/jit/valuenum.h b/src/coreclr/jit/valuenum.h
index 3d81e37a3c8d33..c1cf8af6c2a274 100644
--- a/src/coreclr/jit/valuenum.h
+++ b/src/coreclr/jit/valuenum.h
@@ -543,10 +543,7 @@ class ValueNumStore
ValueNum VNApplySelectorsAssign(
ValueNumKind vnk, ValueNum map, FieldSeqNode* fieldSeq, ValueNum rhs, var_types indType, BasicBlock* block);
- // Used after VNApplySelectorsAssign has determined that "elem" is to be writen into a Map using VNForMapStore
- // It determines whether the 'elem' is of an appropriate type to be writen using using an indirection of 'indType'
- // It may insert a cast to indType or return a unique value number for an incompatible indType.
- ValueNum VNApplySelectorsAssignTypeCoerce(ValueNum elem, var_types indType, BasicBlock* block);
+ ValueNum VNApplySelectorsAssignTypeCoerce(ValueNum srcElem, var_types dstIndType, BasicBlock* block);
ValueNumPair VNPairApplySelectors(ValueNumPair map, FieldSeqNode* fieldSeq, var_types indType);
diff --git a/src/coreclr/md/enc/CMakeLists.txt b/src/coreclr/md/enc/CMakeLists.txt
index 6bd2518d868c90..d4abb371ffbc46 100644
--- a/src/coreclr/md/enc/CMakeLists.txt
+++ b/src/coreclr/md/enc/CMakeLists.txt
@@ -62,10 +62,6 @@ add_library_clr(mdruntimerw-dbi ${MDRUNTIMERW_SOURCES})
set_target_properties(mdruntimerw-dbi PROPERTIES DBI_COMPONENT TRUE)
target_precompile_headers(mdruntimerw-dbi PRIVATE stdafx.h)
-add_library_clr(mdruntimerw_crossgen ${MDRUNTIMERW_SOURCES})
-set_target_properties(mdruntimerw_crossgen PROPERTIES CROSSGEN_COMPONENT TRUE)
-target_precompile_headers(mdruntimerw_crossgen PRIVATE stdafx.h)
-
add_library_clr(mdruntimerw_ppdb ${MDRUNTIMERW_SOURCES})
target_compile_definitions(mdruntimerw_ppdb PRIVATE FEATURE_METADATA_EMIT_ALL FEATURE_METADATA_EMIT_PORTABLE_PDB)
target_precompile_headers(mdruntimerw_ppdb PRIVATE stdafx.h)
diff --git a/src/coreclr/md/inc/VerifyLayouts.inc b/src/coreclr/md/inc/VerifyLayouts.inc
index 14e068ecefa3e0..2ca6384b868417 100644
--- a/src/coreclr/md/inc/VerifyLayouts.inc
+++ b/src/coreclr/md/inc/VerifyLayouts.inc
@@ -156,6 +156,9 @@ FIELD(CLiteWeightStgdbRW, m_wszFileName, sizeof(void*))
FIELD(CLiteWeightStgdbRW, m_dwDatabaseLFT, 4)
FIELD(CLiteWeightStgdbRW, m_dwDatabaseLFS, 4)
FIELD(CLiteWeightStgdbRW, m_pStgIO, sizeof(void*))
+#ifdef FEATURE_METADATA_EMIT_PORTABLE_PDB
+FIELD(CLiteWeightStgdbRW, m_pPdbHeap, sizeof(void*))
+#endif
END_TYPE(CLiteWeightStgdbRW, 8)
USING_ALIAS(CLiteWeightStgdb__CMiniMdRW__, CLiteWeightStgdb)
diff --git a/src/coreclr/md/inc/metamodel.h b/src/coreclr/md/inc/metamodel.h
index e3206a589ad99e..a1cb17724601eb 100644
--- a/src/coreclr/md/inc/metamodel.h
+++ b/src/coreclr/md/inc/metamodel.h
@@ -594,6 +594,7 @@ class CMiniMdBase : public IMetaModelCommonRO
protected:
+ DAC_ALIGNAS(8)
CMiniMdSchema m_Schema; // data header.
ULONG m_TblCount; // Tables in this database.
BOOL m_fVerifiedByTrustedSource; // whether the data was verified by a trusted source
diff --git a/src/coreclr/md/runtime/CMakeLists.txt b/src/coreclr/md/runtime/CMakeLists.txt
index 6ff49d3e803e21..3e1fc8eda75c1a 100644
--- a/src/coreclr/md/runtime/CMakeLists.txt
+++ b/src/coreclr/md/runtime/CMakeLists.txt
@@ -59,10 +59,6 @@ add_library_clr(mdruntime-dbi ${MDRUNTIME_SOURCES})
set_target_properties(mdruntime-dbi PROPERTIES DBI_COMPONENT TRUE)
target_precompile_headers(mdruntime-dbi PRIVATE stdafx.h)
-add_library_clr(mdruntime_crossgen ${MDRUNTIME_SOURCES})
-set_target_properties(mdruntime_crossgen PROPERTIES CROSSGEN_COMPONENT TRUE)
-target_precompile_headers(mdruntime_crossgen PRIVATE stdafx.h)
-
add_library_clr(mdruntime_ppdb ${MDRUNTIME_SOURCES})
target_compile_definitions(mdruntime_ppdb PRIVATE FEATURE_METADATA_EMIT_ALL FEATURE_METADATA_EMIT_PORTABLE_PDB)
target_precompile_headers(mdruntime_ppdb PRIVATE stdafx.h)
diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h
index 25f16e4a42eec0..8e0553e02e8889 100644
--- a/src/coreclr/pal/inc/pal.h
+++ b/src/coreclr/pal/inc/pal.h
@@ -94,6 +94,8 @@ typedef PVOID NATIVE_LIBRARY_HANDLE;
#define _M_ARM 7
#elif defined(__aarch64__) && !defined(_M_ARM64)
#define _M_ARM64 1
+#elif defined(__s390x__) && !defined(_M_S390X)
+#define _M_S390X 1
#endif
#if defined(_M_IX86) && !defined(HOST_X86)
@@ -104,6 +106,8 @@ typedef PVOID NATIVE_LIBRARY_HANDLE;
#define HOST_ARM
#elif defined(_M_ARM64) && !defined(HOST_ARM64)
#define HOST_ARM64
+#elif defined(_M_S390X) && !defined(HOST_S390X)
+#define HOST_S390X
#endif
#endif // !_MSC_VER
@@ -2189,6 +2193,112 @@ typedef struct _KNONVOLATILE_CONTEXT_POINTERS {
} KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS;
+#elif defined(HOST_S390X)
+
+// There is no context for s390x defined in winnt.h,
+// so we re-use the amd64 values.
+#define CONTEXT_S390X 0x100000
+
+#define CONTEXT_CONTROL (CONTEXT_S390X | 0x1L)
+#define CONTEXT_INTEGER (CONTEXT_S390X | 0x2L)
+#define CONTEXT_FLOATING_POINT (CONTEXT_S390X | 0x4L)
+
+#define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT)
+
+#define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT)
+
+#define CONTEXT_EXCEPTION_ACTIVE 0x8000000
+#define CONTEXT_SERVICE_ACTIVE 0x10000000
+#define CONTEXT_EXCEPTION_REQUEST 0x40000000
+#define CONTEXT_EXCEPTION_REPORTING 0x80000000
+
+typedef struct DECLSPEC_ALIGN(8) _CONTEXT {
+
+ //
+ // Control flags.
+ //
+
+ DWORD ContextFlags;
+
+ //
+ // Integer registers.
+ //
+
+ union {
+ DWORD64 Gpr[16];
+ struct {
+ DWORD64 R0;
+ DWORD64 R1;
+ DWORD64 R2;
+ DWORD64 R3;
+ DWORD64 R4;
+ DWORD64 R5;
+ DWORD64 R6;
+ DWORD64 R7;
+ DWORD64 R8;
+ DWORD64 R9;
+ DWORD64 R10;
+ DWORD64 R11;
+ DWORD64 R12;
+ DWORD64 R13;
+ DWORD64 R14;
+ DWORD64 R15;
+ };
+ };
+
+ //
+ // Floating-point registers.
+ //
+
+ union {
+ DWORD64 Fpr[16];
+ struct {
+ DWORD64 F0;
+ DWORD64 F1;
+ DWORD64 F2;
+ DWORD64 F3;
+ DWORD64 F4;
+ DWORD64 F5;
+ DWORD64 F6;
+ DWORD64 F7;
+ DWORD64 F8;
+ DWORD64 F9;
+ DWORD64 F10;
+ DWORD64 F11;
+ DWORD64 F12;
+ DWORD64 F13;
+ DWORD64 F14;
+ DWORD64 F15;
+ };
+ };
+
+ //
+ // Control registers.
+ //
+
+ DWORD64 PSWMask;
+ DWORD64 PSWAddr;
+
+} CONTEXT, *PCONTEXT, *LPCONTEXT;
+
+//
+// Nonvolatile context pointer record.
+//
+
+typedef struct _KNONVOLATILE_CONTEXT_POINTERS {
+ PDWORD64 R6;
+ PDWORD64 R7;
+ PDWORD64 R8;
+ PDWORD64 R9;
+ PDWORD64 R10;
+ PDWORD64 R11;
+ PDWORD64 R12;
+ PDWORD64 R13;
+ PDWORD64 R14;
+ PDWORD64 R15;
+
+} KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS;
+
#else
#error Unknown architecture for defining CONTEXT.
#endif
@@ -2324,6 +2434,8 @@ PALIMPORT BOOL PALAPI PAL_VirtualUnwindOutOfProc(CONTEXT *context, KNONVOLATILE_
#define PAL_CS_NATIVE_DATA_SIZE 76
#elif defined(__linux__) && defined(__x86_64__)
#define PAL_CS_NATIVE_DATA_SIZE 96
+#elif defined(__linux__) && defined(HOST_S390X)
+#define PAL_CS_NATIVE_DATA_SIZE 96
#elif defined(__NetBSD__) && defined(__amd64__)
#define PAL_CS_NATIVE_DATA_SIZE 96
#elif defined(__NetBSD__) && defined(__earm__)
@@ -2648,33 +2760,17 @@ VirtualFree(
IN SIZE_T dwSize,
IN DWORD dwFreeType);
+
#if defined(HOST_OSX) && defined(HOST_ARM64)
-#ifdef __cplusplus
-extern "C++" {
-struct PAL_JITWriteEnableHolder
-{
-public:
- PAL_JITWriteEnableHolder(bool jitWriteEnable)
- {
- m_jitWriteEnableRestore = JITWriteEnable(jitWriteEnable);
- };
- ~PAL_JITWriteEnableHolder()
- {
- JITWriteEnable(m_jitWriteEnableRestore);
- }
-private:
- bool JITWriteEnable(bool enable);
- bool m_jitWriteEnableRestore;
-};
+PALIMPORT
+VOID
+PALAPI
+PAL_JitWriteProtect(bool writeEnable);
-inline
-PAL_JITWriteEnableHolder
-PAL_JITWriteEnable(IN bool enable) { return PAL_JITWriteEnableHolder(enable); }
-}
-#endif // __cplusplus
#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
PALIMPORT
BOOL
PALAPI
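The pal.h change replaces the C++-only PAL_JITWriteEnableHolder with a flat, C-callable PAL_JitWriteProtect(bool). A hedged sketch of how a caller might use it on macOS/arm64, where JIT pages are toggled between writable and executable per thread; the RAII wrapper is illustrative, not part of the PAL:

```cpp
// Hedged sketch: toggling W^X JIT memory around a code write on Apple
// Silicon via the new flat API. JitWriteScope and EmitCode are illustrative;
// only the PAL_JitWriteProtect declaration comes from pal.h (PALIMPORT/PALAPI
// decorations omitted here for brevity). Compile-only sketch: linking
// requires the PAL itself.
#include <cstring>

extern "C" void PAL_JitWriteProtect(bool writeEnable); // declared in pal.h

struct JitWriteScope // illustrative RAII wrapper, not part of the PAL
{
    JitWriteScope() { PAL_JitWriteProtect(true); }   // make JIT pages writable
    ~JitWriteScope() { PAL_JitWriteProtect(false); } // restore execute protection
};

void EmitCode(unsigned char* codePage, const unsigned char* bytes, size_t size)
{
    JitWriteScope scope; // write-enable for the duration of the copy
    std::memcpy(codePage, bytes, size);
} // destructor flips the pages back to executable
```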
diff --git a/src/coreclr/pal/inc/rt/intsafe.h b/src/coreclr/pal/inc/rt/intsafe.h
index 9159c1bb1676f5..2b607e3a312e12 100644
--- a/src/coreclr/pal/inc/rt/intsafe.h
+++ b/src/coreclr/pal/inc/rt/intsafe.h
@@ -31,7 +31,7 @@
#define LODWORD(_qw) ((ULONG)(_qw))
#if defined(MIDL_PASS) || defined(RC_INVOKED) || defined(_M_CEE_PURE) \
- || defined(_M_AMD64) || defined(__ARM_ARCH)
+ || defined(_M_AMD64) || defined(__ARM_ARCH) || defined(_M_S390X)
#ifndef UInt32x32To64
#define UInt32x32To64(a, b) ((unsigned __int64)((ULONG)(a)) * (unsigned __int64)((ULONG)(b)))
diff --git a/src/coreclr/pal/inc/rt/palrt.h b/src/coreclr/pal/inc/rt/palrt.h
index db50a288af0bf6..0855f5991cd8c8 100644
--- a/src/coreclr/pal/inc/rt/palrt.h
+++ b/src/coreclr/pal/inc/rt/palrt.h
@@ -1215,6 +1215,14 @@ typedef struct _DISPATCHER_CONTEXT {
BOOLEAN ControlPcIsUnwound;
} DISPATCHER_CONTEXT, *PDISPATCHER_CONTEXT;
+#elif defined(HOST_S390X)
+
+typedef struct _DISPATCHER_CONTEXT {
+ // S390X does not build the VM or JIT at this point,
+ // so we only provide a dummy definition.
+ DWORD Reserved;
+} DISPATCHER_CONTEXT, *PDISPATCHER_CONTEXT;
+
#else
#error Unknown architecture for defining DISPATCHER_CONTEXT.
diff --git a/src/coreclr/pal/inc/unixasmmacros.inc b/src/coreclr/pal/inc/unixasmmacros.inc
index 3740ab1b365824..1fe285eef162d2 100644
--- a/src/coreclr/pal/inc/unixasmmacros.inc
+++ b/src/coreclr/pal/inc/unixasmmacros.inc
@@ -9,7 +9,7 @@
#define LOCAL_LABEL(name) L##name
#else
#define C_FUNC(name) name
-#if defined(HOST_AMD64) || defined(HOST_X86)
+#if defined(HOST_AMD64) || defined(HOST_X86) || defined(HOST_S390X)
#define EXTERNAL_C_FUNC(name) C_FUNC(name)@plt
#else
#define EXTERNAL_C_FUNC(name) C_FUNC(name)
@@ -41,4 +41,6 @@
#include "unixasmmacrosarm.inc"
#elif defined(HOST_ARM64)
#include "unixasmmacrosarm64.inc"
+#elif defined(HOST_S390X)
+#include "unixasmmacross390x.inc"
#endif
diff --git a/src/coreclr/pal/inc/unixasmmacross390x.inc b/src/coreclr/pal/inc/unixasmmacross390x.inc
new file mode 100644
index 00000000000000..ae6a894ad7cd5b
--- /dev/null
+++ b/src/coreclr/pal/inc/unixasmmacross390x.inc
@@ -0,0 +1,37 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+.macro NESTED_ENTRY Name, Section, Handler
+ LEAF_ENTRY \Name, \Section
+ .ifnc \Handler, NoHandler
+ .cfi_personality 0x1b, C_FUNC(\Handler) // 0x1b == DW_EH_PE_pcrel | DW_EH_PE_sdata4
+ .endif
+.endm
+
+.macro NESTED_END Name, Section
+ LEAF_END \Name, \Section
+.endm
+
+.macro PATCH_LABEL Name
+ .global C_FUNC(\Name)
+C_FUNC(\Name):
+.endm
+
+.macro LEAF_ENTRY Name, Section
+ .global C_FUNC(\Name)
+ .type \Name, %function
+C_FUNC(\Name):
+ .cfi_startproc
+.endm
+
+.macro LEAF_END Name, Section
+ .size \Name, .-\Name
+ .cfi_endproc
+.endm
+
+.macro LEAF_END_MARKED Name, Section
+C_FUNC(\Name\()_End):
+ .global C_FUNC(\Name\()_End)
+ LEAF_END \Name, \Section
+.endm
+
diff --git a/src/coreclr/pal/prebuilt/idl/corprof_i.cpp b/src/coreclr/pal/prebuilt/idl/corprof_i.cpp
index 498c52b3914c3f..07b78a4fb33952 100644
--- a/src/coreclr/pal/prebuilt/idl/corprof_i.cpp
+++ b/src/coreclr/pal/prebuilt/idl/corprof_i.cpp
@@ -90,6 +90,9 @@ MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback9,0x27583EC3,0xC8F5,0x482F,0x80,0x
MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback10,0xCEC5B60E,0xC69C,0x495F,0x87,0xF6,0x84,0xD2,0x8E,0xE1,0x6F,0xFB);
+MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback11,0x42350846,0xAAED,0x47F7,0xB1,0x28,0xFD,0x0C,0x98,0x88,0x1C,0xDE);
+
+
MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo,0x28B5557D,0x3F3F,0x48b4,0x90,0xB2,0x5F,0x9E,0xEA,0x2F,0x6C,0x48);
@@ -141,6 +144,9 @@ MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo11,0x06398876,0x8987,0x4154,0xB6,0x21,
MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo12,0x27b24ccd,0x1cb1,0x47c5,0x96,0xee,0x98,0x19,0x0d,0xc3,0x09,0x59);
+MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo13,0x19C4179D,0xF92C,0x4D25,0x9F,0x20,0x5F,0xEB,0xFB,0xBD,0x29,0x78);
+
+
MIDL_DEFINE_GUID(IID, IID_ICorProfilerMethodEnum,0xFCCEE788,0x0088,0x454B,0xA8,0x11,0xC9,0x9F,0x29,0x8D,0x19,0x42);
diff --git a/src/coreclr/pal/prebuilt/inc/corprof.h b/src/coreclr/pal/prebuilt/inc/corprof.h
index 2311aa50686262..85ce86870bf8c0 100644
--- a/src/coreclr/pal/prebuilt/inc/corprof.h
+++ b/src/coreclr/pal/prebuilt/inc/corprof.h
@@ -4,10 +4,8 @@
/* File created by MIDL compiler version 8.01.0622 */
-/* at Mon Jan 18 19:14:07 2038
- */
-/* Compiler settings for C:/git/runtime/src/coreclr/inc/corprof.idl:
- Oicf, W1, Zp8, env=Win32 (32b run), target_arch=X86 8.01.0622
+/* Compiler settings for corprof.idl:
+ Oicf, W1, Zp8, env=Win64 (32b run), target_arch=AMD64 8.01.0622
protocol : dce , ms_ext, c_ext, robust
error checks: allocation ref bounds_check enum stub_data
VC __declspec() decoration level:
@@ -115,6 +113,13 @@ typedef interface ICorProfilerCallback10 ICorProfilerCallback10;
#endif /* __ICorProfilerCallback10_FWD_DEFINED__ */
+#ifndef __ICorProfilerCallback11_FWD_DEFINED__
+#define __ICorProfilerCallback11_FWD_DEFINED__
+typedef interface ICorProfilerCallback11 ICorProfilerCallback11;
+
+#endif /* __ICorProfilerCallback11_FWD_DEFINED__ */
+
+
#ifndef __ICorProfilerInfo_FWD_DEFINED__
#define __ICorProfilerInfo_FWD_DEFINED__
typedef interface ICorProfilerInfo ICorProfilerInfo;
@@ -552,6 +557,7 @@ enum __MIDL___MIDL_itf_corprof_0000_0000_0005
COR_PRF_ALL = 0x8fffffff,
COR_PRF_REQUIRE_PROFILE_IMAGE = ( ( COR_PRF_USE_PROFILE_IMAGES | COR_PRF_MONITOR_CODE_TRANSITIONS ) | COR_PRF_MONITOR_ENTERLEAVE ) ,
COR_PRF_ALLOWABLE_AFTER_ATTACH = ( ( ( ( ( ( ( ( ( ( COR_PRF_MONITOR_THREADS | COR_PRF_MONITOR_MODULE_LOADS ) | COR_PRF_MONITOR_ASSEMBLY_LOADS ) | COR_PRF_MONITOR_APPDOMAIN_LOADS ) | COR_PRF_ENABLE_STACK_SNAPSHOT ) | COR_PRF_MONITOR_GC ) | COR_PRF_MONITOR_SUSPENDS ) | COR_PRF_MONITOR_CLASS_LOADS ) | COR_PRF_MONITOR_EXCEPTIONS ) | COR_PRF_MONITOR_JIT_COMPILATION ) | COR_PRF_ENABLE_REJIT ) ,
+ COR_PRF_ALLOWABLE_NOTIFICATION_PROFILER = ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( COR_PRF_MONITOR_FUNCTION_UNLOADS | COR_PRF_MONITOR_CLASS_LOADS ) | COR_PRF_MONITOR_MODULE_LOADS ) | COR_PRF_MONITOR_ASSEMBLY_LOADS ) | COR_PRF_MONITOR_APPDOMAIN_LOADS ) | COR_PRF_MONITOR_JIT_COMPILATION ) | COR_PRF_MONITOR_EXCEPTIONS ) | COR_PRF_MONITOR_OBJECT_ALLOCATED ) | COR_PRF_MONITOR_THREADS ) | COR_PRF_MONITOR_CODE_TRANSITIONS ) | COR_PRF_MONITOR_CCW ) | COR_PRF_MONITOR_SUSPENDS ) | COR_PRF_MONITOR_CACHE_SEARCHES ) | COR_PRF_DISABLE_INLINING ) | COR_PRF_DISABLE_OPTIMIZATIONS ) | COR_PRF_ENABLE_OBJECT_ALLOCATED ) | COR_PRF_MONITOR_CLR_EXCEPTIONS ) | COR_PRF_ENABLE_STACK_SNAPSHOT ) | COR_PRF_USE_PROFILE_IMAGES ) | COR_PRF_DISABLE_ALL_NGEN_IMAGES ) ,
COR_PRF_MONITOR_IMMUTABLE = ( ( ( ( ( ( ( ( ( ( ( ( ( ( COR_PRF_MONITOR_CODE_TRANSITIONS | COR_PRF_MONITOR_REMOTING ) | COR_PRF_MONITOR_REMOTING_COOKIE ) | COR_PRF_MONITOR_REMOTING_ASYNC ) | COR_PRF_ENABLE_INPROC_DEBUGGING ) | COR_PRF_ENABLE_JIT_MAPS ) | COR_PRF_DISABLE_OPTIMIZATIONS ) | COR_PRF_DISABLE_INLINING ) | COR_PRF_ENABLE_OBJECT_ALLOCATED ) | COR_PRF_ENABLE_FUNCTION_ARGS ) | COR_PRF_ENABLE_FUNCTION_RETVAL ) | COR_PRF_ENABLE_FRAME_INFO ) | COR_PRF_USE_PROFILE_IMAGES ) | COR_PRF_DISABLE_TRANSPARENCY_CHECKS_UNDER_FULL_TRUST ) | COR_PRF_DISABLE_ALL_NGEN_IMAGES )
} COR_PRF_MONITOR;
@@ -569,6 +575,7 @@ enum __MIDL___MIDL_itf_corprof_0000_0000_0006
COR_PRF_HIGH_MONITOR_LARGEOBJECT_ALLOCATED = 0x40,
COR_PRF_HIGH_MONITOR_EVENT_PIPE = 0x80,
COR_PRF_HIGH_ALLOWABLE_AFTER_ATTACH = ( ( ( ( ( COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED | COR_PRF_HIGH_MONITOR_DYNAMIC_FUNCTION_UNLOADS ) | COR_PRF_HIGH_BASIC_GC ) | COR_PRF_HIGH_MONITOR_GC_MOVED_OBJECTS ) | COR_PRF_HIGH_MONITOR_LARGEOBJECT_ALLOCATED ) | COR_PRF_HIGH_MONITOR_EVENT_PIPE ) ,
+ COR_PRF_HIGH_ALLOWABLE_NOTIFICATION_PROFILER = ( ( ( ( ( ( COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED | COR_PRF_HIGH_MONITOR_DYNAMIC_FUNCTION_UNLOADS ) | COR_PRF_HIGH_DISABLE_TIERED_COMPILATION ) | COR_PRF_HIGH_BASIC_GC ) | COR_PRF_HIGH_MONITOR_GC_MOVED_OBJECTS ) | COR_PRF_HIGH_MONITOR_LARGEOBJECT_ALLOCATED ) | COR_PRF_HIGH_MONITOR_EVENT_PIPE ) ,
COR_PRF_HIGH_MONITOR_IMMUTABLE = COR_PRF_HIGH_DISABLE_TIERED_COMPILATION
} COR_PRF_HIGH_MONITOR;
@@ -8351,11 +8358,829 @@ EXTERN_C const IID IID_ICorProfilerCallback10;
#endif /* __ICorProfilerCallback10_INTERFACE_DEFINED__ */
-/* interface __MIDL_itf_corprof_0000_0010 */
+#ifndef __ICorProfilerCallback11_INTERFACE_DEFINED__
+#define __ICorProfilerCallback11_INTERFACE_DEFINED__
+
+/* interface ICorProfilerCallback11 */
+/* [local][unique][uuid][object] */
+
+
+EXTERN_C const IID IID_ICorProfilerCallback11;
+
+#if defined(__cplusplus) && !defined(CINTERFACE)
+
+ MIDL_INTERFACE("42350846-AAED-47F7-B128-FD0C98881CDE")
+ ICorProfilerCallback11 : public ICorProfilerCallback10
+ {
+ public:
+ virtual HRESULT STDMETHODCALLTYPE LoadAsNotficationOnly(
+ BOOL *pbNotificationOnly) = 0;
+
+ };
+
+
+#else /* C style interface */
+
+ typedef struct ICorProfilerCallback11Vtbl
+ {
+ BEGIN_INTERFACE
+
+ HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ REFIID riid,
+ /* [annotation][iid_is][out] */
+ _COM_Outptr_ void **ppvObject);
+
+ ULONG ( STDMETHODCALLTYPE *AddRef )(
+ ICorProfilerCallback11 * This);
+
+ ULONG ( STDMETHODCALLTYPE *Release )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *Initialize )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ IUnknown *pICorProfilerInfoUnk);
+
+ HRESULT ( STDMETHODCALLTYPE *Shutdown )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ AppDomainID appDomainId);
+
+ HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ AppDomainID appDomainId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ AppDomainID appDomainId);
+
+ HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ AppDomainID appDomainId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ AssemblyID assemblyId);
+
+ HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ AssemblyID assemblyId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ AssemblyID assemblyId);
+
+ HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ AssemblyID assemblyId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ModuleID moduleId);
+
+ HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ModuleID moduleId);
+
+ HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ AssemblyID AssemblyId);
+
+ HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ClassID classId);
+
+ HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ClassID classId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ClassID classId);
+
+ HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ClassID classId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId);
+
+ HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ BOOL fIsSafeToBlock);
+
+ HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ HRESULT hrStatus,
+ /* [in] */ BOOL fIsSafeToBlock);
+
+ HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId,
+ /* [out] */ BOOL *pbUseCachedFunction);
+
+ HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ COR_PRF_JIT_CACHE result);
+
+ HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId);
+
+ HRESULT ( STDMETHODCALLTYPE *JITInlining )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID callerId,
+ /* [in] */ FunctionID calleeId,
+ /* [out] */ BOOL *pfShouldInline);
+
+ HRESULT ( STDMETHODCALLTYPE *ThreadCreated )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ThreadID threadId);
+
+ HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ThreadID threadId);
+
+ HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ThreadID managedThreadId,
+ /* [in] */ DWORD osThreadId);
+
+ HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ GUID *pCookie,
+ /* [in] */ BOOL fIsAsync);
+
+ HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ GUID *pCookie,
+ /* [in] */ BOOL fIsAsync);
+
+ HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ GUID *pCookie,
+ /* [in] */ BOOL fIsAsync);
+
+ HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ GUID *pCookie,
+ /* [in] */ BOOL fIsAsync);
+
+ HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ COR_PRF_TRANSITION_REASON reason);
+
+ HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ COR_PRF_TRANSITION_REASON reason);
+
+ HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ COR_PRF_SUSPEND_REASON suspendReason);
+
+ HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ThreadID threadId);
+
+ HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ThreadID threadId);
+
+ HRESULT ( STDMETHODCALLTYPE *MovedReferences )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ULONG cMovedObjectIDRanges,
+ /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ],
+ /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ],
+ /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ObjectID objectId,
+ /* [in] */ ClassID classId);
+
+ HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ULONG cClassCount,
+ /* [size_is][in] */ ClassID classIds[ ],
+ /* [size_is][in] */ ULONG cObjects[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *ObjectReferences )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ObjectID objectId,
+ /* [in] */ ClassID classId,
+ /* [in] */ ULONG cObjectRefs,
+ /* [size_is][in] */ ObjectID objectRefIds[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *RootReferences )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ULONG cRootRefs,
+ /* [size_is][in] */ ObjectID rootRefIds[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ObjectID thrownObjectId);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ UINT_PTR __unused);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ UINT_PTR __unused);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ ObjectID objectId);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ClassID wrappedClassId,
+ /* [in] */ REFGUID implementedIID,
+ /* [in] */ void *pVTable,
+ /* [in] */ ULONG cSlots);
+
+ HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ClassID wrappedClassId,
+ /* [in] */ REFGUID implementedIID,
+ /* [in] */ void *pVTable);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ThreadID threadId,
+ /* [in] */ ULONG cchName,
+ /* [annotation][in] */
+ _In_reads_opt_(cchName) WCHAR name[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ int cGenerations,
+ /* [size_is][in] */ BOOL generationCollected[ ],
+ /* [in] */ COR_PRF_GC_REASON reason);
+
+ HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ULONG cSurvivingObjectIDRanges,
+ /* [size_is][in] */ ObjectID objectIDRangeStart[ ],
+ /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ DWORD finalizerFlags,
+ /* [in] */ ObjectID objectID);
+
+ HRESULT ( STDMETHODCALLTYPE *RootReferences2 )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ULONG cRootRefs,
+ /* [size_is][in] */ ObjectID rootRefIds[ ],
+ /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ],
+ /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ],
+ /* [size_is][in] */ UINT_PTR rootIds[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *HandleCreated )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ GCHandleID handleId,
+ /* [in] */ ObjectID initialObjectId);
+
+ HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ GCHandleID handleId);
+
+ HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ IUnknown *pCorProfilerInfoUnk,
+ /* [in] */ void *pvClientData,
+ /* [in] */ UINT cbClientData);
+
+ HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )(
+ ICorProfilerCallback11 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ ReJITID rejitId,
+ /* [in] */ BOOL fIsSafeToBlock);
+
+ HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ mdMethodDef methodId,
+ /* [in] */ ICorProfilerFunctionControl *pFunctionControl);
+
+ HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ ReJITID rejitId,
+ /* [in] */ HRESULT hrStatus,
+ /* [in] */ BOOL fIsSafeToBlock);
+
+ HRESULT ( STDMETHODCALLTYPE *ReJITError )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ mdMethodDef methodId,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ULONG cMovedObjectIDRanges,
+ /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ],
+ /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ],
+ /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ULONG cSurvivingObjectIDRanges,
+ /* [size_is][in] */ ObjectID objectIDRangeStart[ ],
+ /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *ConditionalWeakTableElementReferences )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ ULONG cRootRefs,
+ /* [size_is][in] */ ObjectID keyRefIds[ ],
+ /* [size_is][in] */ ObjectID valueRefIds[ ],
+ /* [size_is][in] */ GCHandleID rootIds[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *GetAssemblyReferences )(
+ ICorProfilerCallback11 * This,
+ /* [string][in] */ const WCHAR *wszAssemblyPath,
+ /* [in] */ ICorProfilerAssemblyReferenceProvider *pAsmRefProvider);
+
+ HRESULT ( STDMETHODCALLTYPE *ModuleInMemorySymbolsUpdated )(
+ ICorProfilerCallback11 * This,
+ ModuleID moduleId);
+
+ HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationStarted )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ BOOL fIsSafeToBlock,
+ /* [in] */ LPCBYTE pILHeader,
+ /* [in] */ ULONG cbILHeader);
+
+ HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationFinished )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ HRESULT hrStatus,
+ /* [in] */ BOOL fIsSafeToBlock);
+
+ HRESULT ( STDMETHODCALLTYPE *DynamicMethodUnloaded )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ FunctionID functionId);
+
+ HRESULT ( STDMETHODCALLTYPE *EventPipeEventDelivered )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ EVENTPIPE_PROVIDER provider,
+ /* [in] */ DWORD eventId,
+ /* [in] */ DWORD eventVersion,
+ /* [in] */ ULONG cbMetadataBlob,
+ /* [size_is][in] */ LPCBYTE metadataBlob,
+ /* [in] */ ULONG cbEventData,
+ /* [size_is][in] */ LPCBYTE eventData,
+ /* [in] */ LPCGUID pActivityId,
+ /* [in] */ LPCGUID pRelatedActivityId,
+ /* [in] */ ThreadID eventThread,
+ /* [in] */ ULONG numStackFrames,
+ /* [length_is][in] */ UINT_PTR stackFrames[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *EventPipeProviderCreated )(
+ ICorProfilerCallback11 * This,
+ /* [in] */ EVENTPIPE_PROVIDER provider);
+
+ HRESULT ( STDMETHODCALLTYPE *LoadAsNotficationOnly )(
+ ICorProfilerCallback11 * This,
+ BOOL *pbNotificationOnly);
+
+ END_INTERFACE
+ } ICorProfilerCallback11Vtbl;
+
+ interface ICorProfilerCallback11
+ {
+ CONST_VTBL struct ICorProfilerCallback11Vtbl *lpVtbl;
+ };
+
+
+
+#ifdef COBJMACROS
+
+
+#define ICorProfilerCallback11_QueryInterface(This,riid,ppvObject) \
+ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
+
+#define ICorProfilerCallback11_AddRef(This) \
+ ( (This)->lpVtbl -> AddRef(This) )
+
+#define ICorProfilerCallback11_Release(This) \
+ ( (This)->lpVtbl -> Release(This) )
+
+
+#define ICorProfilerCallback11_Initialize(This,pICorProfilerInfoUnk) \
+ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) )
+
+#define ICorProfilerCallback11_Shutdown(This) \
+ ( (This)->lpVtbl -> Shutdown(This) )
+
+#define ICorProfilerCallback11_AppDomainCreationStarted(This,appDomainId) \
+ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) )
+
+#define ICorProfilerCallback11_AppDomainCreationFinished(This,appDomainId,hrStatus) \
+ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) )
+
+#define ICorProfilerCallback11_AppDomainShutdownStarted(This,appDomainId) \
+ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) )
+
+#define ICorProfilerCallback11_AppDomainShutdownFinished(This,appDomainId,hrStatus) \
+ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) )
+
+#define ICorProfilerCallback11_AssemblyLoadStarted(This,assemblyId) \
+ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) )
+
+#define ICorProfilerCallback11_AssemblyLoadFinished(This,assemblyId,hrStatus) \
+ ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) )
+
+#define ICorProfilerCallback11_AssemblyUnloadStarted(This,assemblyId) \
+ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) )
+
+#define ICorProfilerCallback11_AssemblyUnloadFinished(This,assemblyId,hrStatus) \
+ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) )
+
+#define ICorProfilerCallback11_ModuleLoadStarted(This,moduleId) \
+ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) )
+
+#define ICorProfilerCallback11_ModuleLoadFinished(This,moduleId,hrStatus) \
+ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) )
+
+#define ICorProfilerCallback11_ModuleUnloadStarted(This,moduleId) \
+ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) )
+
+#define ICorProfilerCallback11_ModuleUnloadFinished(This,moduleId,hrStatus) \
+ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) )
+
+#define ICorProfilerCallback11_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \
+ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) )
+
+#define ICorProfilerCallback11_ClassLoadStarted(This,classId) \
+ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) )
+
+#define ICorProfilerCallback11_ClassLoadFinished(This,classId,hrStatus) \
+ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) )
+
+#define ICorProfilerCallback11_ClassUnloadStarted(This,classId) \
+ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) )
+
+#define ICorProfilerCallback11_ClassUnloadFinished(This,classId,hrStatus) \
+ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) )
+
+#define ICorProfilerCallback11_FunctionUnloadStarted(This,functionId) \
+ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) )
+
+#define ICorProfilerCallback11_JITCompilationStarted(This,functionId,fIsSafeToBlock) \
+ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) )
+
+#define ICorProfilerCallback11_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \
+ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) )
+
+#define ICorProfilerCallback11_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \
+ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) )
+
+#define ICorProfilerCallback11_JITCachedFunctionSearchFinished(This,functionId,result) \
+ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) )
+
+#define ICorProfilerCallback11_JITFunctionPitched(This,functionId) \
+ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) )
+
+#define ICorProfilerCallback11_JITInlining(This,callerId,calleeId,pfShouldInline) \
+ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) )
+
+#define ICorProfilerCallback11_ThreadCreated(This,threadId) \
+ ( (This)->lpVtbl -> ThreadCreated(This,threadId) )
+
+#define ICorProfilerCallback11_ThreadDestroyed(This,threadId) \
+ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) )
+
+#define ICorProfilerCallback11_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \
+ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) )
+
+#define ICorProfilerCallback11_RemotingClientInvocationStarted(This) \
+ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) )
+
+#define ICorProfilerCallback11_RemotingClientSendingMessage(This,pCookie,fIsAsync) \
+ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) )
+
+#define ICorProfilerCallback11_RemotingClientReceivingReply(This,pCookie,fIsAsync) \
+ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) )
+
+#define ICorProfilerCallback11_RemotingClientInvocationFinished(This) \
+ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) )
+
+#define ICorProfilerCallback11_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \
+ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) )
+
+#define ICorProfilerCallback11_RemotingServerInvocationStarted(This) \
+ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) )
+
+#define ICorProfilerCallback11_RemotingServerInvocationReturned(This) \
+ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) )
+
+#define ICorProfilerCallback11_RemotingServerSendingReply(This,pCookie,fIsAsync) \
+ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) )
+
+#define ICorProfilerCallback11_UnmanagedToManagedTransition(This,functionId,reason) \
+ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) )
+
+#define ICorProfilerCallback11_ManagedToUnmanagedTransition(This,functionId,reason) \
+ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) )
+
+#define ICorProfilerCallback11_RuntimeSuspendStarted(This,suspendReason) \
+ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) )
+
+#define ICorProfilerCallback11_RuntimeSuspendFinished(This) \
+ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) )
+
+#define ICorProfilerCallback11_RuntimeSuspendAborted(This) \
+ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) )
+
+#define ICorProfilerCallback11_RuntimeResumeStarted(This) \
+ ( (This)->lpVtbl -> RuntimeResumeStarted(This) )
+
+#define ICorProfilerCallback11_RuntimeResumeFinished(This) \
+ ( (This)->lpVtbl -> RuntimeResumeFinished(This) )
+
+#define ICorProfilerCallback11_RuntimeThreadSuspended(This,threadId) \
+ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) )
+
+#define ICorProfilerCallback11_RuntimeThreadResumed(This,threadId) \
+ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) )
+
+#define ICorProfilerCallback11_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
+ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) )
+
+#define ICorProfilerCallback11_ObjectAllocated(This,objectId,classId) \
+ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) )
+
+#define ICorProfilerCallback11_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \
+ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) )
+
+#define ICorProfilerCallback11_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \
+ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) )
+
+#define ICorProfilerCallback11_RootReferences(This,cRootRefs,rootRefIds) \
+ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) )
+
+#define ICorProfilerCallback11_ExceptionThrown(This,thrownObjectId) \
+ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) )
+
+#define ICorProfilerCallback11_ExceptionSearchFunctionEnter(This,functionId) \
+ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) )
+
+#define ICorProfilerCallback11_ExceptionSearchFunctionLeave(This) \
+ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) )
+
+#define ICorProfilerCallback11_ExceptionSearchFilterEnter(This,functionId) \
+ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) )
+
+#define ICorProfilerCallback11_ExceptionSearchFilterLeave(This) \
+ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) )
+
+#define ICorProfilerCallback11_ExceptionSearchCatcherFound(This,functionId) \
+ ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) )
+
+#define ICorProfilerCallback11_ExceptionOSHandlerEnter(This,__unused) \
+ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) )
+
+#define ICorProfilerCallback11_ExceptionOSHandlerLeave(This,__unused) \
+ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) )
+
+#define ICorProfilerCallback11_ExceptionUnwindFunctionEnter(This,functionId) \
+ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) )
+
+#define ICorProfilerCallback11_ExceptionUnwindFunctionLeave(This) \
+ ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) )
+
+#define ICorProfilerCallback11_ExceptionUnwindFinallyEnter(This,functionId) \
+ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) )
+
+#define ICorProfilerCallback11_ExceptionUnwindFinallyLeave(This) \
+ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) )
+
+#define ICorProfilerCallback11_ExceptionCatcherEnter(This,functionId,objectId) \
+ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) )
+
+#define ICorProfilerCallback11_ExceptionCatcherLeave(This) \
+ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) )
+
+#define ICorProfilerCallback11_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \
+ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) )
+
+#define ICorProfilerCallback11_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \
+ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) )
+
+#define ICorProfilerCallback11_ExceptionCLRCatcherFound(This) \
+ ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) )
+
+#define ICorProfilerCallback11_ExceptionCLRCatcherExecute(This) \
+ ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) )
+
+
+#define ICorProfilerCallback11_ThreadNameChanged(This,threadId,cchName,name) \
+ ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) )
+
+#define ICorProfilerCallback11_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \
+ ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) )
+
+#define ICorProfilerCallback11_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
+ ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) )
+
+#define ICorProfilerCallback11_GarbageCollectionFinished(This) \
+ ( (This)->lpVtbl -> GarbageCollectionFinished(This) )
+
+#define ICorProfilerCallback11_FinalizeableObjectQueued(This,finalizerFlags,objectID) \
+ ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) )
+
+#define ICorProfilerCallback11_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \
+ ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) )
+
+#define ICorProfilerCallback11_HandleCreated(This,handleId,initialObjectId) \
+ ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) )
+
+#define ICorProfilerCallback11_HandleDestroyed(This,handleId) \
+ ( (This)->lpVtbl -> HandleDestroyed(This,handleId) )
+
+
+#define ICorProfilerCallback11_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \
+ ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) )
+
+#define ICorProfilerCallback11_ProfilerAttachComplete(This) \
+ ( (This)->lpVtbl -> ProfilerAttachComplete(This) )
+
+#define ICorProfilerCallback11_ProfilerDetachSucceeded(This) \
+ ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) )
+
+
+#define ICorProfilerCallback11_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \
+ ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) )
+
+#define ICorProfilerCallback11_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \
+ ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) )
+
+#define ICorProfilerCallback11_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \
+ ( (This)->lpVtbl -> ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) )
+
+#define ICorProfilerCallback11_ReJITError(This,moduleId,methodId,functionId,hrStatus) \
+ ( (This)->lpVtbl -> ReJITError(This,moduleId,methodId,functionId,hrStatus) )
+
+#define ICorProfilerCallback11_MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
+ ( (This)->lpVtbl -> MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) )
+
+#define ICorProfilerCallback11_SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
+ ( (This)->lpVtbl -> SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) )
+
+
+#define ICorProfilerCallback11_ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) \
+ ( (This)->lpVtbl -> ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) )
+
+
+#define ICorProfilerCallback11_GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) \
+ ( (This)->lpVtbl -> GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) )
+
+
+#define ICorProfilerCallback11_ModuleInMemorySymbolsUpdated(This,moduleId) \
+ ( (This)->lpVtbl -> ModuleInMemorySymbolsUpdated(This,moduleId) )
+
+
+#define ICorProfilerCallback11_DynamicMethodJITCompilationStarted(This,functionId,fIsSafeToBlock,pILHeader,cbILHeader) \
+ ( (This)->lpVtbl -> DynamicMethodJITCompilationStarted(This,functionId,fIsSafeToBlock,pILHeader,cbILHeader) )
+
+#define ICorProfilerCallback11_DynamicMethodJITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \
+ ( (This)->lpVtbl -> DynamicMethodJITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) )
+
+
+#define ICorProfilerCallback11_DynamicMethodUnloaded(This,functionId) \
+ ( (This)->lpVtbl -> DynamicMethodUnloaded(This,functionId) )
+
+
+#define ICorProfilerCallback11_EventPipeEventDelivered(This,provider,eventId,eventVersion,cbMetadataBlob,metadataBlob,cbEventData,eventData,pActivityId,pRelatedActivityId,eventThread,numStackFrames,stackFrames) \
+ ( (This)->lpVtbl -> EventPipeEventDelivered(This,provider,eventId,eventVersion,cbMetadataBlob,metadataBlob,cbEventData,eventData,pActivityId,pRelatedActivityId,eventThread,numStackFrames,stackFrames) )
+
+#define ICorProfilerCallback11_EventPipeProviderCreated(This,provider) \
+ ( (This)->lpVtbl -> EventPipeProviderCreated(This,provider) )
+
+
+#define ICorProfilerCallback11_LoadAsNotficationOnly(This,pbNotificationOnly) \
+ ( (This)->lpVtbl -> LoadAsNotficationOnly(This,pbNotificationOnly) )
+
+#endif /* COBJMACROS */
+
+
+#endif /* C style interface */
+
+
+
+
+#endif /* __ICorProfilerCallback11_INTERFACE_DEFINED__ */
+
+
+/* interface __MIDL_itf_corprof_0000_0011 */
/* [local] */
typedef /* [public] */
-enum __MIDL___MIDL_itf_corprof_0000_0010_0001
+enum __MIDL___MIDL_itf_corprof_0000_0011_0001
{
COR_PRF_CODEGEN_DISABLE_INLINING = 0x1,
COR_PRF_CODEGEN_DISABLE_ALL_OPTIMIZATIONS = 0x2
@@ -8363,8 +9188,8 @@ enum __MIDL___MIDL_itf_corprof_0000_0010_0001
-extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0010_v0_0_c_ifspec;
-extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0010_v0_0_s_ifspec;
+extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0011_v0_0_c_ifspec;
+extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0011_v0_0_s_ifspec;
#ifndef __ICorProfilerInfo_INTERFACE_DEFINED__
#define __ICorProfilerInfo_INTERFACE_DEFINED__
diff --git a/src/coreclr/pal/src/CMakeLists.txt b/src/coreclr/pal/src/CMakeLists.txt
index 0c2021dab02d92..ba13ab392f0c53 100644
--- a/src/coreclr/pal/src/CMakeLists.txt
+++ b/src/coreclr/pal/src/CMakeLists.txt
@@ -49,6 +49,8 @@ elseif(CLR_CMAKE_HOST_ARCH_ARM64)
set(PAL_ARCH_SOURCES_DIR arm64)
elseif(CLR_CMAKE_HOST_ARCH_I386)
set(PAL_ARCH_SOURCES_DIR i386)
+elseif(CLR_CMAKE_HOST_ARCH_S390X)
+ set(PAL_ARCH_SOURCES_DIR s390x)
endif()
if(CLR_CMAKE_USE_SYSTEM_LIBUNWIND)
@@ -112,7 +114,7 @@ if(CLR_CMAKE_HOST_ARCH_ARM)
endif()
endif(CLR_CMAKE_HOST_ARCH_ARM)
-if (CMAKE_CXX_COMPILER_ID MATCHES "GNU")
+if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND (CLR_CMAKE_HOST_ARCH_AMD64 OR CLR_CMAKE_HOST_ARCH_I386))
add_compile_options(-Wa,--divide)
endif()
diff --git a/src/coreclr/pal/src/arch/s390x/asmconstants.h b/src/coreclr/pal/src/arch/s390x/asmconstants.h
new file mode 100644
index 00000000000000..e0079dc6ef38fd
--- /dev/null
+++ b/src/coreclr/pal/src/arch/s390x/asmconstants.h
@@ -0,0 +1,52 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#ifndef __PAL_S390X_ASMCONSTANTS_H__
+#define __PAL_S390X_ASMCONSTANTS_H__
+
+#define CONTEXT_S390X 0x100000
+
+#define CONTEXT_CONTROL 1 // PSW and R15
+#define CONTEXT_INTEGER 2 // R0-R14
+#define CONTEXT_FLOATING_POINT 4 // F0-F15
+
+#define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT)
+
+#define CONTEXT_ContextFlags 0
+#define CONTEXT_R0 CONTEXT_ContextFlags+8
+#define CONTEXT_R1 CONTEXT_R0+8
+#define CONTEXT_R2 CONTEXT_R1+8
+#define CONTEXT_R3 CONTEXT_R2+8
+#define CONTEXT_R4 CONTEXT_R3+8
+#define CONTEXT_R5 CONTEXT_R4+8
+#define CONTEXT_R6 CONTEXT_R5+8
+#define CONTEXT_R7 CONTEXT_R6+8
+#define CONTEXT_R8 CONTEXT_R7+8
+#define CONTEXT_R9 CONTEXT_R8+8
+#define CONTEXT_R10 CONTEXT_R9+8
+#define CONTEXT_R11 CONTEXT_R10+8
+#define CONTEXT_R12 CONTEXT_R11+8
+#define CONTEXT_R13 CONTEXT_R12+8
+#define CONTEXT_R14 CONTEXT_R13+8
+#define CONTEXT_R15 CONTEXT_R14+8
+#define CONTEXT_F0 CONTEXT_R15+8
+#define CONTEXT_F1 CONTEXT_F0+8
+#define CONTEXT_F2 CONTEXT_F1+8
+#define CONTEXT_F3 CONTEXT_F2+8
+#define CONTEXT_F4 CONTEXT_F3+8
+#define CONTEXT_F5 CONTEXT_F4+8
+#define CONTEXT_F6 CONTEXT_F5+8
+#define CONTEXT_F7 CONTEXT_F6+8
+#define CONTEXT_F8 CONTEXT_F7+8
+#define CONTEXT_F9 CONTEXT_F8+8
+#define CONTEXT_F10 CONTEXT_F9+8
+#define CONTEXT_F11 CONTEXT_F10+8
+#define CONTEXT_F12 CONTEXT_F11+8
+#define CONTEXT_F13 CONTEXT_F12+8
+#define CONTEXT_F14 CONTEXT_F13+8
+#define CONTEXT_F15 CONTEXT_F14+8
+#define CONTEXT_PSWMask CONTEXT_F15+8
+#define CONTEXT_PSWAddr CONTEXT_PSWMask+8
+#define CONTEXT_Size CONTEXT_PSWAddr+8
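+
+// Each field above occupies one 8-byte slot, so the context record is 280 bytes:
+// ContextFlags, R0-R15, F0-F15, then the PSW mask and address.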
+
+#endif
diff --git a/src/coreclr/pal/src/arch/s390x/callsignalhandlerwrapper.S b/src/coreclr/pal/src/arch/s390x/callsignalhandlerwrapper.S
new file mode 100644
index 00000000000000..a290f5a580c78d
--- /dev/null
+++ b/src/coreclr/pal/src/arch/s390x/callsignalhandlerwrapper.S
@@ -0,0 +1,34 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+.macro CALL_SIGNAL_HANDLER_WRAPPER Alignment
+
+.globl C_FUNC(SignalHandlerWorkerReturnOffset\Alignment)
+C_FUNC(SignalHandlerWorkerReturnOffset\Alignment):
+ .int LOCAL_LABEL(SignalHandlerWorkerReturn\Alignment)-C_FUNC(CallSignalHandlerWrapper\Alignment)
+
+// This function is never called; only a fake stack frame with its return address
+// set to SignalHandlerWorkerReturn is set up during SIGSEGV handling.
+// It enables the unwinder to unwind the stack from the handling code to the actual failure site.
+NESTED_ENTRY CallSignalHandlerWrapper\Alignment, _TEXT, NoHandler
+ .cfi_def_cfa 15, 160
+ stmg %r14, %r15, 112(%r15)
+ .cfi_offset 14, -48
+ .cfi_offset 15, -40
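+    // Allocate the ABI-minimum 160-byte s390x stack frame before the call.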
+ aghi %r15, -160
+ .cfi_def_cfa_offset 320
+ brasl %r14,EXTERNAL_C_FUNC(signal_handler_worker)
+LOCAL_LABEL(SignalHandlerWorkerReturn\Alignment):
+    lmg %r14, %r15, 272(%r15)
+ .cfi_restore 15
+ .cfi_restore 14
+ .cfi_def_cfa 15, 160
+ br %r14
+NESTED_END CallSignalHandlerWrapper\Alignment, _TEXT
+
+.endm
+
+CALL_SIGNAL_HANDLER_WRAPPER 0
diff --git a/src/coreclr/pal/src/arch/s390x/context2.S b/src/coreclr/pal/src/arch/s390x/context2.S
new file mode 100644
index 00000000000000..a44d81c19a2ebf
--- /dev/null
+++ b/src/coreclr/pal/src/arch/s390x/context2.S
@@ -0,0 +1,101 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+//
+// Implementation of _CONTEXT_CaptureContext for the IBM s390x platform.
+// This function is processor dependent. It is used by exception handling,
+// and always applies to the current thread.
+//
+
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+// Incoming:
+// R2: Context*
+//
+LEAF_ENTRY CONTEXT_CaptureContext, _TEXT
+
+ tm CONTEXT_ContextFlags+3(%r2), CONTEXT_INTEGER
+ je LOCAL_LABEL(Done_CONTEXT_INTEGER)
+ stmg %r0, %r14, CONTEXT_R0(%r2)
+LOCAL_LABEL(Done_CONTEXT_INTEGER):
+
+ tm CONTEXT_ContextFlags+3(%r2), CONTEXT_CONTROL
+ je LOCAL_LABEL(Done_CONTEXT_CONTROL)
+ // Set PSW address to return address from %r14
+ stg %r14, CONTEXT_PSWAddr(%r2)
+    // Extract the PSW mask; the condition code has already been clobbered by the tests above, so we ignore it here
+ epsw %r0, %r1
+ stm %r0, %r1, CONTEXT_PSWMask(%r2)
+ // Stack pointer is still unchanged
+ stg %r15, CONTEXT_R15(%r2)
+LOCAL_LABEL(Done_CONTEXT_CONTROL):
+
+ tm CONTEXT_ContextFlags+3(%r2), CONTEXT_FLOATING_POINT
+ je LOCAL_LABEL(Done_CONTEXT_FLOATING_POINT)
+ std %f0, CONTEXT_F0(%r2)
+ std %f1, CONTEXT_F1(%r2)
+ std %f2, CONTEXT_F2(%r2)
+ std %f3, CONTEXT_F3(%r2)
+ std %f4, CONTEXT_F4(%r2)
+ std %f5, CONTEXT_F5(%r2)
+ std %f6, CONTEXT_F6(%r2)
+ std %f7, CONTEXT_F7(%r2)
+ std %f8, CONTEXT_F8(%r2)
+ std %f9, CONTEXT_F9(%r2)
+ std %f10, CONTEXT_F10(%r2)
+ std %f11, CONTEXT_F11(%r2)
+ std %f12, CONTEXT_F12(%r2)
+ std %f13, CONTEXT_F13(%r2)
+ std %f14, CONTEXT_F14(%r2)
+ std %f15, CONTEXT_F15(%r2)
+LOCAL_LABEL(Done_CONTEXT_FLOATING_POINT):
+
+ br %r14
+LEAF_END CONTEXT_CaptureContext, _TEXT
+
+LEAF_ENTRY RtlCaptureContext, _TEXT
+ mvhhi CONTEXT_ContextFlags+2(%r2), ((CONTEXT_S390X | CONTEXT_FULL) & 0xffff)
+ mvhhi CONTEXT_ContextFlags(%r2), ((CONTEXT_S390X | CONTEXT_FULL) >> 16)
+ jg C_FUNC(CONTEXT_CaptureContext)
+LEAF_END RtlCaptureContext, _TEXT
+
+LEAF_ENTRY RtlRestoreContext, _TEXT
+
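+    // Keep our return address from %r14 in %r1 as the default branch target;
+    // if CONTEXT_CONTROL is restored below, %r1 is overwritten with the saved PSW address.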
+ lgr %r1, %r14
+
+ tm CONTEXT_ContextFlags+3(%r2), CONTEXT_FLOATING_POINT
+ je LOCAL_LABEL(Done_Restore_CONTEXT_FLOATING_POINT)
+ ld %f0, CONTEXT_F0(%r2)
+ ld %f1, CONTEXT_F1(%r2)
+ ld %f2, CONTEXT_F2(%r2)
+ ld %f3, CONTEXT_F3(%r2)
+ ld %f4, CONTEXT_F4(%r2)
+ ld %f5, CONTEXT_F5(%r2)
+ ld %f6, CONTEXT_F6(%r2)
+ ld %f7, CONTEXT_F7(%r2)
+ ld %f8, CONTEXT_F8(%r2)
+ ld %f9, CONTEXT_F9(%r2)
+ ld %f10, CONTEXT_F10(%r2)
+ ld %f11, CONTEXT_F11(%r2)
+ ld %f12, CONTEXT_F12(%r2)
+ ld %f13, CONTEXT_F13(%r2)
+ ld %f14, CONTEXT_F14(%r2)
+ ld %f15, CONTEXT_F15(%r2)
+LOCAL_LABEL(Done_Restore_CONTEXT_FLOATING_POINT):
+
+ tm CONTEXT_ContextFlags+3(%r2), CONTEXT_CONTROL
+ je LOCAL_LABEL(Done_Restore_CONTEXT_CONTROL)
+ // We do *not* attempt to restore the PSW mask here!
+ lg %r1, CONTEXT_PSWAddr(%r2)
+ lg %r15, CONTEXT_R15(%r2)
+LOCAL_LABEL(Done_Restore_CONTEXT_CONTROL):
+
+ tm CONTEXT_ContextFlags+3(%r2), CONTEXT_INTEGER
+ je LOCAL_LABEL(Done_Restore_CONTEXT_INTEGER)
+ // We do *not* restore %r0 and %r1 here!
+ lmg %r2, %r14, CONTEXT_R2(%r2)
+LOCAL_LABEL(Done_Restore_CONTEXT_INTEGER):
+
+ br %r1
+LEAF_END RtlRestoreContext, _TEXT
+
diff --git a/src/coreclr/pal/src/arch/s390x/debugbreak.S b/src/coreclr/pal/src/arch/s390x/debugbreak.S
new file mode 100644
index 00000000000000..3c38a09c257bf8
--- /dev/null
+++ b/src/coreclr/pal/src/arch/s390x/debugbreak.S
@@ -0,0 +1,10 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include "unixasmmacros.inc"
+
+LEAF_ENTRY DBG_DebugBreak, _TEXT
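+    // 0x0001 is (to our knowledge) an unassigned s390x opcode, so executing it
+    // raises an operation exception that serves as the break trap here.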
+ .word 0x0001
+ br %r14
+LEAF_END_MARKED DBG_DebugBreak, _TEXT
+
diff --git a/src/coreclr/pal/src/arch/s390x/exceptionhelper.S b/src/coreclr/pal/src/arch/s390x/exceptionhelper.S
new file mode 100644
index 00000000000000..321612746ce755
--- /dev/null
+++ b/src/coreclr/pal/src/arch/s390x/exceptionhelper.S
@@ -0,0 +1,55 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+//////////////////////////////////////////////////////////////////////////
+//
+// This function creates a stack frame right below the target frame, restores all
+// callee-saved registers from the passed-in context, sets R15 to that frame, and
+// sets the return address to the target frame's PSW address.
+// It then uses ThrowExceptionHelper to throw the passed-in exception from that context.
+// EXTERN_C void ThrowExceptionFromContextInternal(CONTEXT* context, PAL_SEHException* ex);
+LEAF_ENTRY ThrowExceptionFromContextInternal, _TEXT
+ // Save callee-saved registers to the stack so that unwinding can work at
+ // any intermediate step while loading up target registers.
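+    // Per the s390x ELF ABI, bytes 48-127 of the frame addressed by %r15 are the
+    // GPR save slots: %r6 lands at 48(%r15) and %r15 at 120(%r15).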
+ stmg %r6, %r15, 48(%r15)
+ .cfi_offset 6, -112
+ .cfi_offset 7, -104
+ .cfi_offset 8, -96
+ .cfi_offset 9, -88
+ .cfi_offset 10, -80
+ .cfi_offset 11, -72
+ .cfi_offset 12, -64
+ .cfi_offset 13, -56
+ .cfi_offset 14, -48
+ .cfi_offset 15, -40
+
+ lg %r6, CONTEXT_R6(%r2)
+ lg %r7, CONTEXT_R7(%r2)
+ lg %r8, CONTEXT_R8(%r2)
+ lg %r9, CONTEXT_R9(%r2)
+ lg %r10, CONTEXT_R10(%r2)
+ lg %r11, CONTEXT_R11(%r2)
+ lg %r12, CONTEXT_R12(%r2)
+ lg %r13, CONTEXT_R13(%r2)
+ lg %r14, CONTEXT_PSWAddr(%r2)
+ lg %r15, CONTEXT_R15(%r2)
+ // After changing %r15, unwinding no longer finds the registers
+ // saved above. Switch to the target frame at this point.
+ .cfi_restore 6
+ .cfi_restore 7
+ .cfi_restore 8
+ .cfi_restore 9
+ .cfi_restore 10
+ .cfi_restore 11
+ .cfi_restore 12
+ .cfi_restore 13
+ .cfi_restore 14
+ .cfi_restore 15
+
+ // The PAL_SEHException pointer
+ lgr %r2, %r3
+ jg EXTERNAL_C_FUNC(ThrowExceptionHelper)
+LEAF_END ThrowExceptionFromContextInternal, _TEXT
diff --git a/src/coreclr/pal/src/arch/s390x/processor.cpp b/src/coreclr/pal/src/arch/s390x/processor.cpp
new file mode 100644
index 00000000000000..d2d0fdd5e8f083
--- /dev/null
+++ b/src/coreclr/pal/src/arch/s390x/processor.cpp
@@ -0,0 +1,22 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+/*++
+
+
+
+Module Name:
+
+ processor.cpp
+
+Abstract:
+
+ Implementation of processor related functions for the IBM s390x
+ platforms. These functions are processor dependent.
+
+
+
+--*/
+
+#include "pal/palinternal.h"
+
diff --git a/src/coreclr/pal/src/arch/s390x/signalhandlerhelper.cpp b/src/coreclr/pal/src/arch/s390x/signalhandlerhelper.cpp
new file mode 100644
index 00000000000000..5e2e7219a84ba8
--- /dev/null
+++ b/src/coreclr/pal/src/arch/s390x/signalhandlerhelper.cpp
@@ -0,0 +1,69 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include "pal/dbgmsg.h"
+SET_DEFAULT_DEBUG_CHANNEL(EXCEPT); // some headers have code with asserts, so do this first
+
+#include "pal/palinternal.h"
+#include "pal/context.h"
+#include "pal/signal.hpp"
+#include "pal/utils.h"
+#include <sys/ucontext.h>
+
+/*++
+Function :
+ ExecuteHandlerOnCustomStack
+
+    Execute the signal handler on a custom stack; the stack pointer to use is given by customSp.
+    If customSp is 0, the handler is executed on the original stack where the signal was raised.
+    A fake stack frame is installed to enable stack unwinding back to the signal source location.
+
+Parameters :
+ POSIX signal handler parameter list ("man sigaction" for details)
+ returnPoint - context to which the function returns if the common_signal_handler returns
+
+ (no return value)
+--*/
+void ExecuteHandlerOnCustomStack(int code, siginfo_t *siginfo, void *context, size_t customSp, SignalHandlerWorkerReturnPoint* returnPoint)
+{
+ ucontext_t *ucontext = (ucontext_t *)context;
+ size_t faultSp = (size_t)MCREG_R15(ucontext->uc_mcontext);
+
+ _ASSERTE(IS_ALIGNED(faultSp, 8));
+
+ if (customSp == 0)
+ {
+ customSp = faultSp;
+ }
+
+ size_t fakeFrameReturnAddress;
+ fakeFrameReturnAddress = (size_t)SignalHandlerWorkerReturnOffset0 + (size_t)CallSignalHandlerWrapper0;
+
+ // Build fake stack frame to enable the stack unwinder to unwind from signal_handler_worker to the faulting instruction
+ size_t* saveArea = (size_t*)(customSp - 160);
+ saveArea[14] = (size_t)MCREG_PSWAddr(ucontext->uc_mcontext);
+ saveArea[15] = faultSp;
+ size_t sp = (size_t)saveArea - 160;
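+    // Leave another ABI-minimum 160-byte frame below the fake save area so that
+    // signal_handler_worker starts with a well-formed stack of its own.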
+
+ // Switch the current context to the signal_handler_worker and the custom stack
+ CONTEXT context2;
+ RtlCaptureContext(&context2);
+
+ context2.PSWAddr = (size_t)signal_handler_worker;
+ context2.R2 = code;
+ context2.R3 = (size_t)siginfo;
+ context2.R4 = (size_t)context;
+ context2.R5 = (size_t)returnPoint;
+ context2.R6 = (size_t)MCREG_R6(ucontext->uc_mcontext);
+ context2.R7 = (size_t)MCREG_R7(ucontext->uc_mcontext);
+ context2.R8 = (size_t)MCREG_R8(ucontext->uc_mcontext);
+ context2.R9 = (size_t)MCREG_R9(ucontext->uc_mcontext);
+ context2.R10 = (size_t)MCREG_R10(ucontext->uc_mcontext);
+ context2.R11 = (size_t)MCREG_R11(ucontext->uc_mcontext);
+ context2.R12 = (size_t)MCREG_R12(ucontext->uc_mcontext);
+ context2.R13 = (size_t)MCREG_R13(ucontext->uc_mcontext);
+ context2.R14 = fakeFrameReturnAddress;
+ context2.R15 = sp;
+
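+    // RtlRestoreContext does not return: control continues in signal_handler_worker
+    // on the custom stack, with the fake frame keeping the fault site unwindable.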
+ RtlRestoreContext(&context2, NULL);
+}
diff --git a/src/coreclr/pal/src/exception/remote-unwind.cpp b/src/coreclr/pal/src/exception/remote-unwind.cpp
index 2d00f09377a7a2..b82960cc723721 100644
--- a/src/coreclr/pal/src/exception/remote-unwind.cpp
+++ b/src/coreclr/pal/src/exception/remote-unwind.cpp
@@ -123,7 +123,7 @@ typedef BOOL(*UnwindReadMemoryCallback)(PVOID address, PVOID buffer, SIZE_T size
#define PRId PRId32
#define PRIA "08"
#define PRIxA PRIA PRIx
-#elif defined(TARGET_AMD64) || defined(TARGET_ARM64)
+#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X)
#define PRIx PRIx64
#define PRIu PRIu64
#define PRId PRId64
@@ -1606,6 +1606,17 @@ static void GetContextPointers(unw_cursor_t *cursor, unw_context_t *unwContext,
GetContextPointer(cursor, unwContext, UNW_AARCH64_X27, &contextPointers->X27);
GetContextPointer(cursor, unwContext, UNW_AARCH64_X28, &contextPointers->X28);
GetContextPointer(cursor, unwContext, UNW_AARCH64_X29, &contextPointers->Fp);
+#elif defined(TARGET_S390X)
+ GetContextPointer(cursor, unwContext, UNW_S390X_R6, &contextPointers->R6);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R7, &contextPointers->R7);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R8, &contextPointers->R8);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R9, &contextPointers->R9);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R10, &contextPointers->R10);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R11, &contextPointers->R11);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R12, &contextPointers->R12);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R13, &contextPointers->R13);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R14, &contextPointers->R14);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R15, &contextPointers->R15);
#else
#error unsupported architecture
#endif
@@ -1658,6 +1669,19 @@ static void UnwindContextToContext(unw_cursor_t *cursor, CONTEXT *winContext)
unw_get_reg(cursor, UNW_AARCH64_X29, (unw_word_t *) &winContext->Fp);
unw_get_reg(cursor, UNW_AARCH64_X30, (unw_word_t *) &winContext->Lr);
TRACE("sp %p pc %p lr %p fp %p\n", winContext->Sp, winContext->Pc, winContext->Lr, winContext->Fp);
+#elif defined(TARGET_S390X)
+ unw_get_reg(cursor, UNW_REG_IP, (unw_word_t *) &winContext->PSWAddr);
+ unw_get_reg(cursor, UNW_REG_SP, (unw_word_t *) &winContext->R15);
+ unw_get_reg(cursor, UNW_S390X_R6, (unw_word_t *) &winContext->R6);
+ unw_get_reg(cursor, UNW_S390X_R7, (unw_word_t *) &winContext->R7);
+ unw_get_reg(cursor, UNW_S390X_R8, (unw_word_t *) &winContext->R8);
+ unw_get_reg(cursor, UNW_S390X_R9, (unw_word_t *) &winContext->R9);
+ unw_get_reg(cursor, UNW_S390X_R10, (unw_word_t *) &winContext->R10);
+ unw_get_reg(cursor, UNW_S390X_R11, (unw_word_t *) &winContext->R11);
+ unw_get_reg(cursor, UNW_S390X_R12, (unw_word_t *) &winContext->R12);
+ unw_get_reg(cursor, UNW_S390X_R13, (unw_word_t *) &winContext->R13);
+ unw_get_reg(cursor, UNW_S390X_R14, (unw_word_t *) &winContext->R14);
+ TRACE("sp %p pc %p lr %p\n", winContext->R15, winContext->PSWAddr, winContext->R14);
#else
#error unsupported architecture
#endif
@@ -1746,6 +1770,18 @@ access_reg(unw_addr_space_t as, unw_regnum_t regnum, unw_word_t *valp, int write
case UNW_AARCH64_X30: *valp = (unw_word_t)winContext->Lr; break;
case UNW_AARCH64_SP: *valp = (unw_word_t)winContext->Sp; break;
case UNW_AARCH64_PC: *valp = (unw_word_t)winContext->Pc; break;
+#elif defined(TARGET_S390X)
+ case UNW_S390X_R6: *valp = (unw_word_t)winContext->R6; break;
+ case UNW_S390X_R7: *valp = (unw_word_t)winContext->R7; break;
+ case UNW_S390X_R8: *valp = (unw_word_t)winContext->R8; break;
+ case UNW_S390X_R9: *valp = (unw_word_t)winContext->R9; break;
+ case UNW_S390X_R10: *valp = (unw_word_t)winContext->R10; break;
+ case UNW_S390X_R11: *valp = (unw_word_t)winContext->R11; break;
+ case UNW_S390X_R12: *valp = (unw_word_t)winContext->R12; break;
+ case UNW_S390X_R13: *valp = (unw_word_t)winContext->R13; break;
+ case UNW_S390X_R14: *valp = (unw_word_t)winContext->R14; break;
+ case UNW_S390X_R15: *valp = (unw_word_t)winContext->R15; break;
+ case UNW_S390X_IP: *valp = (unw_word_t)winContext->PSWAddr; break;
#else
#error unsupported architecture
#endif
diff --git a/src/coreclr/pal/src/exception/seh-unwind.cpp b/src/coreclr/pal/src/exception/seh-unwind.cpp
index 5f4df9ae4dc781..b4427294e438da 100644
--- a/src/coreclr/pal/src/exception/seh-unwind.cpp
+++ b/src/coreclr/pal/src/exception/seh-unwind.cpp
@@ -115,6 +115,19 @@ enum
ASSIGN_REG(Ebx) \
ASSIGN_REG(Esi) \
ASSIGN_REG(Edi)
+#elif (defined(HOST_UNIX) && defined(HOST_S390X))
+#define ASSIGN_UNWIND_REGS \
+ ASSIGN_REG(PSWAddr) \
+ ASSIGN_REG(R6) \
+ ASSIGN_REG(R7) \
+ ASSIGN_REG(R8) \
+ ASSIGN_REG(R9) \
+ ASSIGN_REG(R10) \
+ ASSIGN_REG(R11) \
+ ASSIGN_REG(R12) \
+ ASSIGN_REG(R13) \
+ ASSIGN_REG(R14) \
+ ASSIGN_REG(R15)
#else
#error unsupported architecture
#endif
@@ -261,6 +274,18 @@ void UnwindContextToWinContext(unw_cursor_t *cursor, CONTEXT *winContext)
// errors with "this target does not support pointer authentication"
winContext->Pc = winContext->Pc & 0x7fffffffffffull;
#endif // defined(TARGET_OSX) && defined(TARGET_ARM64)
+#elif (defined(HOST_UNIX) && defined(HOST_S390X))
+ unw_get_reg(cursor, UNW_REG_SP, (unw_word_t *) &winContext->R15);
+ unw_get_reg(cursor, UNW_REG_IP, (unw_word_t *) &winContext->PSWAddr);
+ unw_get_reg(cursor, UNW_S390X_R6, (unw_word_t *) &winContext->R6);
+ unw_get_reg(cursor, UNW_S390X_R7, (unw_word_t *) &winContext->R7);
+ unw_get_reg(cursor, UNW_S390X_R8, (unw_word_t *) &winContext->R8);
+ unw_get_reg(cursor, UNW_S390X_R9, (unw_word_t *) &winContext->R9);
+ unw_get_reg(cursor, UNW_S390X_R10, (unw_word_t *) &winContext->R10);
+ unw_get_reg(cursor, UNW_S390X_R11, (unw_word_t *) &winContext->R11);
+ unw_get_reg(cursor, UNW_S390X_R12, (unw_word_t *) &winContext->R12);
+ unw_get_reg(cursor, UNW_S390X_R13, (unw_word_t *) &winContext->R13);
+ unw_get_reg(cursor, UNW_S390X_R14, (unw_word_t *) &winContext->R14);
#else
#error unsupported architecture
#endif
@@ -319,6 +344,17 @@ void GetContextPointers(unw_cursor_t *cursor, unw_context_t *unwContext, KNONVOL
GetContextPointer(cursor, unwContext, UNW_AARCH64_X27, &contextPointers->X27);
GetContextPointer(cursor, unwContext, UNW_AARCH64_X28, &contextPointers->X28);
GetContextPointer(cursor, unwContext, UNW_AARCH64_X29, &contextPointers->Fp);
+#elif (defined(HOST_UNIX) && defined(HOST_S390X))
+ GetContextPointer(cursor, unwContext, UNW_S390X_R6, &contextPointers->R6);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R7, &contextPointers->R7);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R8, &contextPointers->R8);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R9, &contextPointers->R9);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R10, &contextPointers->R10);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R11, &contextPointers->R11);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R12, &contextPointers->R12);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R13, &contextPointers->R13);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R14, &contextPointers->R14);
+ GetContextPointer(cursor, unwContext, UNW_S390X_R15, &contextPointers->R15);
#else
#error unsupported architecture
#endif
diff --git a/src/coreclr/pal/src/include/pal/context.h b/src/coreclr/pal/src/include/pal/context.h
index afbafe3fd9eb25..281c418aef4d2a 100644
--- a/src/coreclr/pal/src/include/pal/context.h
+++ b/src/coreclr/pal/src/include/pal/context.h
@@ -54,7 +54,28 @@ using asm_sigcontext::_xstate;
#include <ucontext.h>
#endif // !HAVE_MACH_EXCEPTIONS else
-#if HAVE___GREGSET_T
+#ifdef HOST_S390X
+
+#define MCREG_PSWMask(mc) ((mc).psw.mask)
+#define MCREG_PSWAddr(mc) ((mc).psw.addr)
+#define MCREG_R0(mc) ((mc).gregs[0])
+#define MCREG_R1(mc) ((mc).gregs[1])
+#define MCREG_R2(mc) ((mc).gregs[2])
+#define MCREG_R3(mc) ((mc).gregs[3])
+#define MCREG_R4(mc) ((mc).gregs[4])
+#define MCREG_R5(mc) ((mc).gregs[5])
+#define MCREG_R6(mc) ((mc).gregs[6])
+#define MCREG_R7(mc) ((mc).gregs[7])
+#define MCREG_R8(mc) ((mc).gregs[8])
+#define MCREG_R9(mc) ((mc).gregs[9])
+#define MCREG_R10(mc) ((mc).gregs[10])
+#define MCREG_R11(mc) ((mc).gregs[11])
+#define MCREG_R12(mc) ((mc).gregs[12])
+#define MCREG_R13(mc) ((mc).gregs[13])
+#define MCREG_R14(mc) ((mc).gregs[14])
+#define MCREG_R15(mc) ((mc).gregs[15])
+
+#elif HAVE___GREGSET_T
#ifdef HOST_64BIT
#define MCREG_Rbx(mc) ((mc).__gregs[_REG_RBX])
@@ -710,6 +731,8 @@ inline static DWORD64 CONTEXTGetPC(LPCONTEXT pContext)
return pContext->Eip;
#elif defined(HOST_ARM64) || defined(HOST_ARM)
return pContext->Pc;
+#elif defined(HOST_S390X)
+ return pContext->PSWAddr;
#else
#error "don't know how to get the program counter for this architecture"
#endif
@@ -723,6 +746,8 @@ inline static void CONTEXTSetPC(LPCONTEXT pContext, DWORD64 pc)
pContext->Eip = pc;
#elif defined(HOST_ARM64) || defined(HOST_ARM)
pContext->Pc = pc;
+#elif defined(HOST_S390X)
+ pContext->PSWAddr = pc;
#else
#error "don't know how to set the program counter for this architecture"
#endif
@@ -738,6 +763,8 @@ inline static DWORD64 CONTEXTGetFP(LPCONTEXT pContext)
return pContext->R7;
#elif defined(HOST_ARM64)
return pContext->Fp;
+#elif defined(HOST_S390X)
+ return pContext->R11;
#else
#error "don't know how to get the frame pointer for this architecture"
#endif
diff --git a/src/coreclr/pal/src/libunwind/CMakeLists.txt b/src/coreclr/pal/src/libunwind/CMakeLists.txt
index 44e9bc6c35eed3..6e31eae9f716ef 100644
--- a/src/coreclr/pal/src/libunwind/CMakeLists.txt
+++ b/src/coreclr/pal/src/libunwind/CMakeLists.txt
@@ -28,6 +28,8 @@ if(CLR_CMAKE_HOST_UNIX)
set(arch arm)
elseif(CLR_CMAKE_HOST_ARCH_I386)
set(arch x86)
+ elseif(CLR_CMAKE_HOST_ARCH_S390X)
+ set(arch s390x)
endif ()
# Disable warning due to incorrect format specifier in debugging printf via the Debug macro
@@ -105,6 +107,10 @@ if(CLR_CMAKE_HOST_WIN32)
set(TARGET_ARM 1)
set(arch arm)
add_definitions(-D__arm__)
+ elseif(CLR_CMAKE_TARGET_ARCH_S390X)
+ set(TARGET_S390X 1)
+ set(arch s390x)
+ add_definitions(-D__s390x__)
else ()
message(FATAL_ERROR "Unrecognized TARGET")
endif ()
diff --git a/src/coreclr/pal/src/libunwind/include/tdep-s390x/libunwind_i.h b/src/coreclr/pal/src/libunwind/include/tdep-s390x/libunwind_i.h
index 8c34d6a9584670..ba75c0742f62cd 100644
--- a/src/coreclr/pal/src/libunwind/include/tdep-s390x/libunwind_i.h
+++ b/src/coreclr/pal/src/libunwind/include/tdep-s390x/libunwind_i.h
@@ -164,16 +164,16 @@ dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val)
if (DWARF_IS_NULL_LOC (loc))
return -UNW_EBADREG;
+ /* GPRs may be saved in FPRs */
+ if (DWARF_IS_FP_LOC (loc))
+ return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), (unw_fpreg_t*)val,
+ 0, c->as_arg);
if (DWARF_IS_REG_LOC (loc))
return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val,
0, c->as_arg);
if (DWARF_IS_MEM_LOC (loc))
return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val,
0, c->as_arg);
- /* GPRs may be saved in FPRs */
- if (DWARF_IS_FP_LOC (loc))
- return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), (unw_fpreg_t*)val,
- 0, c->as_arg);
assert(DWARF_IS_VAL_LOC (loc));
*val = DWARF_GET_LOC (loc);
return 0;
@@ -188,13 +188,13 @@ dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val)
if (DWARF_IS_NULL_LOC (loc))
return -UNW_EBADREG;
- if (DWARF_IS_REG_LOC (loc))
- return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val,
- 1, c->as_arg);
/* GPRs may be saved in FPRs */
if (DWARF_IS_FP_LOC (loc))
return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), (unw_fpreg_t*) &val,
1, c->as_arg);
+ if (DWARF_IS_REG_LOC (loc))
+ return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val,
+ 1, c->as_arg);
assert(DWARF_IS_MEM_LOC (loc));
return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val,
diff --git a/src/coreclr/pal/src/libunwind/include/tdep/dwarf-config.h b/src/coreclr/pal/src/libunwind/include/tdep/dwarf-config.h
index c759a46c63b9da..e27e2a2369033d 100644
--- a/src/coreclr/pal/src/libunwind/include/tdep/dwarf-config.h
+++ b/src/coreclr/pal/src/libunwind/include/tdep/dwarf-config.h
@@ -15,6 +15,8 @@
# include "tdep-ppc32/dwarf-config.h"
#elif defined __powerpc64__
# include "tdep-ppc64/dwarf-config.h"
+#elif defined __s390x__
+# include "tdep-s390x/dwarf-config.h"
#elif defined __sh__
# include "tdep-sh/dwarf-config.h"
#elif defined __i386__
diff --git a/src/coreclr/pal/src/libunwind/libunwind-version.txt b/src/coreclr/pal/src/libunwind/libunwind-version.txt
index 9ebc86f5b15a7d..566086b14aee91 100644
--- a/src/coreclr/pal/src/libunwind/libunwind-version.txt
+++ b/src/coreclr/pal/src/libunwind/libunwind-version.txt
@@ -4,3 +4,4 @@ https://github.com/libunwind/libunwind/commit/9165d2a150d707d3037c2045f2cdc0fabd
Remove upstream CMakelist.txt & src/CMakelist.txt, keep .NET Core custom version
Keep .NET Core oop directory
Apply https://github.com/libunwind/libunwind/pull/186
+Apply https://github.com/libunwind/libunwind/pull/245
diff --git a/src/coreclr/pal/src/libunwind/src/CMakeLists.txt b/src/coreclr/pal/src/libunwind/src/CMakeLists.txt
index 56420c1558f6c8..b6dee4f413a809 100644
--- a/src/coreclr/pal/src/libunwind/src/CMakeLists.txt
+++ b/src/coreclr/pal/src/libunwind/src/CMakeLists.txt
@@ -276,6 +276,35 @@ SET(libunwind_x86_64_la_SOURCES_x86_64
x86_64/Gstash_frame.c x86_64/Gstep.c x86_64/Gtrace.c
)
+# The list of files that go both into libunwind and libunwind-s390x:
+SET(libunwind_la_SOURCES_s390x_common
+ ${libunwind_la_SOURCES_common}
+ s390x/is_fpreg.c s390x/regname.c
+)
+
+# The list of files that go into libunwind:
+SET(libunwind_la_SOURCES_s390x
+ ${libunwind_la_SOURCES_s390x_common}
+ ${libunwind_la_SOURCES_local}
+ s390x/setcontext.S s390x/getcontext.S
+ s390x/Lapply_reg_state.c s390x/Lreg_states_iterate.c
+ s390x/Lcreate_addr_space.c s390x/Lget_save_loc.c s390x/Lglobal.c
+ s390x/Linit.c s390x/Linit_local.c s390x/Linit_remote.c
+ s390x/Lget_proc_info.c s390x/Lregs.c s390x/Lresume.c
+ s390x/Lis_signal_frame.c s390x/Lstep.c
+)
+
+# The list of files that go into libunwind-s390x:
+SET(libunwind_s390x_la_SOURCES_s390x
+ ${libunwind_la_SOURCES_s390x_common}
+ ${libunwind_la_SOURCES_generic}
+ s390x/Gapply_reg_state.c s390x/Greg_states_iterate.c
+ s390x/Gcreate_addr_space.c s390x/Gget_save_loc.c s390x/Gglobal.c
+ s390x/Ginit.c s390x/Ginit_local.c s390x/Ginit_remote.c
+ s390x/Gget_proc_info.c s390x/Gregs.c s390x/Gresume.c
+ s390x/Gis_signal_frame.c s390x/Gstep.c
+)
+
if(CLR_CMAKE_HOST_UNIX)
if(CLR_CMAKE_HOST_ARCH_ARM64)
SET(libunwind_la_SOURCES ${libunwind_la_SOURCES_aarch64})
@@ -297,6 +326,10 @@ if(CLR_CMAKE_HOST_UNIX)
SET(libunwind_remote_la_SOURCES ${libunwind_x86_64_la_SOURCES_x86_64})
SET(libunwind_elf_la_SOURCES ${libunwind_elf64_la_SOURCES})
list(APPEND libunwind_setjmp_la_SOURCES x86_64/longjmp.S x86_64/siglongjmp.S)
+ elseif(CLR_CMAKE_HOST_ARCH_S390X)
+ SET(libunwind_la_SOURCES ${libunwind_la_SOURCES_s390x})
+ SET(libunwind_remote_la_SOURCES ${libunwind_s390x_la_SOURCES_s390x})
+ SET(libunwind_elf_la_SOURCES ${libunwind_elf64_la_SOURCES})
endif()
if(CLR_CMAKE_HOST_OSX)
@@ -340,6 +373,10 @@ else(CLR_CMAKE_HOST_UNIX)
SET(libunwind_remote_la_SOURCES ${libunwind_x86_64_la_SOURCES_x86_64})
SET(libunwind_elf_la_SOURCES ${libunwind_elf64_la_SOURCES})
list(APPEND libunwind_setjmp_la_SOURCES x86_64/longjmp.S x86_64/siglongjmp.S)
+ elseif(CLR_CMAKE_TARGET_ARCH_S390X)
+ SET(libunwind_la_SOURCES ${libunwind_la_SOURCES_s390x})
+ SET(libunwind_remote_la_SOURCES ${libunwind_s390x_la_SOURCES_s390x})
+ SET(libunwind_elf_la_SOURCES ${libunwind_elf64_la_SOURCES})
endif()
set_source_files_properties(${CLR_DIR}/pal/src/exception/remote-unwind.cpp PROPERTIES COMPILE_FLAGS /TP INCLUDE_DIRECTORIES ${CLR_DIR}/inc)
diff --git a/src/coreclr/pal/src/libunwind/src/s390x/Gresume.c b/src/coreclr/pal/src/libunwind/src/s390x/Gresume.c
index fd9d13027e2a85..bfc4d5cab4e465 100644
--- a/src/coreclr/pal/src/libunwind/src/s390x/Gresume.c
+++ b/src/coreclr/pal/src/libunwind/src/s390x/Gresume.c
@@ -68,7 +68,7 @@ s390x_local_resume (unw_addr_space_t as, unw_cursor_t *cursor, void *arg)
sp = c->sigcontext_sp;
ip = c->sigcontext_pc;
__asm__ __volatile__ (
- "lgr 15, %[sp]\n"
+ "lgr %%r15, %[sp]\n"
"br %[ip]\n"
: : [sp] "r" (sp), [ip] "r" (ip)
);
@@ -86,7 +86,7 @@ s390x_local_resume (unw_addr_space_t as, unw_cursor_t *cursor, void *arg)
sp = c->sigcontext_sp;
ip = c->sigcontext_pc;
__asm__ __volatile__ (
- "lgr 15, %[sp]\n"
+ "lgr %%r15, %[sp]\n"
"br %[ip]\n"
: : [sp] "r" (sp), [ip] "r" (ip)
);
diff --git a/src/coreclr/pal/src/map/virtual.cpp b/src/coreclr/pal/src/map/virtual.cpp
index cea55e86e2538f..645cf10aec8d6a 100644
--- a/src/coreclr/pal/src/map/virtual.cpp
+++ b/src/coreclr/pal/src/map/virtual.cpp
@@ -1761,21 +1761,26 @@ VirtualProtect(
}
#if defined(HOST_OSX) && defined(HOST_ARM64)
-bool
-PAL_JITWriteEnableHolder::JITWriteEnable(bool writeEnable)
+PALAPI VOID PAL_JitWriteProtect(bool writeEnable)
{
- // Use a thread local to track per thread JIT Write enable state
- // Per Apple, new threads start with MAP_JIT pages readable and executable (R-X) by default.
- thread_local bool enabled = false;
- bool result = enabled;
- if (enabled != writeEnable)
+ thread_local int enabledCount = 0;
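+    // Per-thread recursion count: only the outermost enable/disable transition
+    // actually toggles the MAP_JIT write protection, so nested pairs are safe.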
+ if (writeEnable)
{
- pthread_jit_write_protect_np(writeEnable ? 0 : 1);
- enabled = writeEnable;
+ if (enabledCount++ == 0)
+ {
+ pthread_jit_write_protect_np(0);
+ }
+ }
+ else
+ {
+ if (--enabledCount == 0)
+ {
+ pthread_jit_write_protect_np(1);
+ }
+ _ASSERTE(enabledCount >= 0);
}
- return result;
}
-#endif
+#endif // HOST_OSX && HOST_ARM64
#if HAVE_VM_ALLOCATE
//---------------------------------------------------------------------------------------
diff --git a/src/coreclr/pal/src/misc/jitsupport.cpp b/src/coreclr/pal/src/misc/jitsupport.cpp
index 8678a479f10dc3..973de4033e3d4b 100644
--- a/src/coreclr/pal/src/misc/jitsupport.cpp
+++ b/src/coreclr/pal/src/misc/jitsupport.cpp
@@ -82,7 +82,7 @@ static const CpuCapability CpuCapabilities[] = {
// If the capability name is not recognized or unused at present, zero is returned.
static unsigned long LookupCpuCapabilityFlag(const char* start, size_t length)
{
- for (int i = 0; i < _countof(CpuCapabilities); i++)
+ for (size_t i = 0; i < _countof(CpuCapabilities); i++)
{
const char* capabilityName = CpuCapabilities[i].name;
if ((length == strlen(capabilityName)) && (memcmp(start, capabilityName, length) == 0))
diff --git a/src/coreclr/pal/src/misc/perfjitdump.cpp b/src/coreclr/pal/src/misc/perfjitdump.cpp
index a1fe107421dcbf..d80bd58038cbad 100644
--- a/src/coreclr/pal/src/misc/perfjitdump.cpp
+++ b/src/coreclr/pal/src/misc/perfjitdump.cpp
@@ -46,6 +46,8 @@ namespace
ELF_MACHINE = EM_X86_64,
#elif defined(HOST_ARM64)
ELF_MACHINE = EM_AARCH64,
+#elif defined(HOST_S390X)
+ ELF_MACHINE = EM_S390,
#else
#error ELF_MACHINE unsupported for target
#endif
diff --git a/src/coreclr/pal/src/misc/sysinfo.cpp b/src/coreclr/pal/src/misc/sysinfo.cpp
index 1a9ca8fbfba72e..8f935b3e3ea1a8 100644
--- a/src/coreclr/pal/src/misc/sysinfo.cpp
+++ b/src/coreclr/pal/src/misc/sysinfo.cpp
@@ -558,7 +558,7 @@ PAL_GetLogicalProcessorCacheSizeFromOS()
cacheSize = std::max(cacheSize, (size_t)sysconf(_SC_LEVEL4_CACHE_SIZE));
#endif
-#if defined(TARGET_LINUX) && !defined(HOST_ARM)
+#if defined(TARGET_LINUX) && !defined(HOST_ARM) && !defined(HOST_X86)
if (cacheSize == 0)
{
//
diff --git a/src/coreclr/pal/src/thread/context.cpp b/src/coreclr/pal/src/thread/context.cpp
index 5bb3a6079aaa11..8d630fa170fb43 100644
--- a/src/coreclr/pal/src/thread/context.cpp
+++ b/src/coreclr/pal/src/thread/context.cpp
@@ -45,6 +45,8 @@ extern PGET_GCMARKER_EXCEPTION_CODE g_getGcMarkerExceptionCode;
#define CONTEXT_ALL_FLOATING CONTEXT_FLOATING_POINT
#elif defined(HOST_ARM64)
#define CONTEXT_ALL_FLOATING CONTEXT_FLOATING_POINT
+#elif defined(HOST_S390X)
+#define CONTEXT_ALL_FLOATING CONTEXT_FLOATING_POINT
#else
#error Unexpected architecture.
#endif
@@ -169,6 +171,30 @@ typedef int __ptrace_request;
ASSIGN_REG(X27) \
ASSIGN_REG(X28)
+#elif defined(HOST_S390X)
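+// On s390x, R15 is the stack pointer, so it is grouped with the control state
+// (PSW mask and address) rather than with the general-purpose integer registers.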
+#define ASSIGN_CONTROL_REGS \
+ ASSIGN_REG(PSWMask) \
+ ASSIGN_REG(PSWAddr) \
+    ASSIGN_REG(R15)
+
+#define ASSIGN_INTEGER_REGS \
+ ASSIGN_REG(R0) \
+ ASSIGN_REG(R1) \
+ ASSIGN_REG(R2) \
+ ASSIGN_REG(R3) \
+ ASSIGN_REG(R4) \
+    ASSIGN_REG(R5) \
+ ASSIGN_REG(R6) \
+ ASSIGN_REG(R7) \
+ ASSIGN_REG(R8) \
+ ASSIGN_REG(R9) \
+ ASSIGN_REG(R10) \
+ ASSIGN_REG(R11) \
+ ASSIGN_REG(R12) \
+ ASSIGN_REG(R13) \
+ ASSIGN_REG(R14)
+
#else
#error "Don't know how to assign registers on this architecture"
#endif
@@ -436,7 +462,7 @@ void CONTEXTToNativeContext(CONST CONTEXT *lpContext, native_context_t *native)
#undef ASSIGN_REG
#if !HAVE_FPREGS_WITH_CW
-#if HAVE_GREGSET_T || HAVE_GREGSET_T
+#if (HAVE_GREGSET_T || HAVE___GREGSET_T) && !defined(HOST_S390X)
#if HAVE_GREGSET_T
if (native->uc_mcontext.fpregs == nullptr)
#elif HAVE___GREGSET_T
@@ -448,7 +474,7 @@ void CONTEXTToNativeContext(CONST CONTEXT *lpContext, native_context_t *native)
// whether CONTEXT_FLOATING_POINT is set in the CONTEXT's flags.
return;
}
-#endif // HAVE_GREGSET_T || HAVE_GREGSET_T
+#endif // (HAVE_GREGSET_T || HAVE___GREGSET_T) && !HOST_S390X
#endif // !HAVE_FPREGS_WITH_CW
if ((lpContext->ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT)
@@ -509,6 +535,10 @@ void CONTEXTToNativeContext(CONST CONTEXT *lpContext, native_context_t *native)
fp->D[i] = lpContext->D[i];
}
}
+#elif defined(HOST_S390X)
+ fpregset_t *fp = &native->uc_mcontext.fpregs;
+ static_assert_no_msg(sizeof(fp->fprs) == sizeof(lpContext->Fpr));
+ memcpy(fp->fprs, lpContext->Fpr, sizeof(lpContext->Fpr));
#endif
}
@@ -562,7 +592,7 @@ void CONTEXTFromNativeContext(const native_context_t *native, LPCONTEXT lpContex
#undef ASSIGN_REG
#if !HAVE_FPREGS_WITH_CW
-#if HAVE_GREGSET_T || HAVE___GREGSET_T
+#if (HAVE_GREGSET_T || HAVE___GREGSET_T) && !defined(HOST_S390X)
#if HAVE_GREGSET_T
if (native->uc_mcontext.fpregs == nullptr)
#elif HAVE___GREGSET_T
@@ -584,7 +614,7 @@ void CONTEXTFromNativeContext(const native_context_t *native, LPCONTEXT lpContex
// Bail out regardless of whether the caller wanted CONTEXT_FLOATING_POINT or CONTEXT_XSTATE
return;
}
-#endif // HAVE_GREGSET_T || HAVE___GREGSET_T
+#endif // (HAVE_GREGSET_T || HAVE___GREGSET_T) && !HOST_S390X
#endif // !HAVE_FPREGS_WITH_CW
if ((contextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT)
@@ -650,6 +680,10 @@ void CONTEXTFromNativeContext(const native_context_t *native, LPCONTEXT lpContex
// Mark the context correctly
lpContext->ContextFlags &= ~(ULONG)CONTEXT_FLOATING_POINT;
}
+#elif defined(HOST_S390X)
+ const fpregset_t *fp = &native->uc_mcontext.fpregs;
+ static_assert_no_msg(sizeof(fp->fprs) == sizeof(lpContext->Fpr));
+ memcpy(lpContext->Fpr, fp->fprs, sizeof(lpContext->Fpr));
#endif
}
@@ -699,6 +733,8 @@ LPVOID GetNativeContextPC(const native_context_t *context)
return (LPVOID) MCREG_Pc(context->uc_mcontext);
#elif defined(HOST_ARM64)
return (LPVOID) MCREG_Pc(context->uc_mcontext);
+#elif defined(HOST_S390X)
+ return (LPVOID) MCREG_PSWAddr(context->uc_mcontext);
#else
# error implement me for this architecture
#endif
@@ -727,6 +763,8 @@ LPVOID GetNativeContextSP(const native_context_t *context)
return (LPVOID) MCREG_Sp(context->uc_mcontext);
#elif defined(HOST_ARM64)
return (LPVOID) MCREG_Sp(context->uc_mcontext);
+#elif defined(HOST_S390X)
+ return (LPVOID) MCREG_R15(context->uc_mcontext);
#else
# error implement me for this architecture
#endif
diff --git a/src/coreclr/scripts/jitrollingbuild.py b/src/coreclr/scripts/jitrollingbuild.py
index 83434438dac10a..c5bf6d065b590e 100644
--- a/src/coreclr/scripts/jitrollingbuild.py
+++ b/src/coreclr/scripts/jitrollingbuild.py
@@ -172,11 +172,12 @@ def determine_jit_name(coreclr_args):
raise RuntimeError("Unknown OS.")
-def list_az_jits(filter_func=lambda unused: True):
+def list_az_jits(filter_func=lambda unused: True, prefix_string = None):
""" List the JITs in Azure Storage using REST api
Args:
filter_func (lambda: string -> bool): filter to apply to the list. The filter takes a URL and returns True if this URL is acceptable.
+ prefix_string: Optional. Specifies a string prefix for the Azure Storage query.
Returns:
urls (list): set of URLs in Azure Storage that match the filter.
@@ -187,46 +188,72 @@ def list_az_jits(filter_func=lambda unused: True):
# This URI will return *all* the blobs, for all git_hash/OS/architecture/build_type combinations.
# pass "prefix=foo/bar/..." to only show a subset. Or, we can filter later using string search.
- list_az_container_uri = az_blob_storage_container_uri + "?restype=container&comp=list&prefix=" + az_builds_root_folder + "/"
-
- try:
- contents = urllib.request.urlopen(list_az_container_uri).read().decode('utf-8')
- except Exception as exception:
- print("Didn't find any collections using {}".format(list_az_container_uri))
- print(" Error: {}".format(exception))
- return None
-
- # Contents is an XML file with contents like:
- #
- # <EnumerationResults ...>
- #   <Prefix>builds/</Prefix>
- #   <Blobs>
- #     <Blob>
- #       <Name>builds/755f01659f03196487ec41225de8956911f8049b/Linux/x64/Checked/libclrjit.so</Name>
- #       <Url>https://clrjit2.blob.core.windows.net/jitrollingbuild/builds/755f01659f03196487ec41225de8956911f8049b/Linux/x64/Checked/libclrjit.so</Url>
- #       ...
- #     </Blob>
- #     <Blob>
- #       <Name>builds/755f01659f03196487ec41225de8956911f8049b/OSX/x64/Checked/libclrjit.dylib</Name>
- #       <Url>https://clrjit2.blob.core.windows.net/jitrollingbuild/builds/755f01659f03196487ec41225de8956911f8049b/OSX/x64/Checked/libclrjit.dylib</Url>
- #       ...
- #     </Blob>
- #     ... etc. ...
- #   </Blobs>
- # </EnumerationResults>
- #
#
- # We just want to extract the <Url> entries. We could probably use an XML parsing package, but we just
+ # Note that there is a maximum number of results returned in one query of 5000. So we might need to
+ # iterate. In that case, the XML result contains a `<NextMarker>` element like:
+ #
+ # <NextMarker>2!184!MDAwMDkyIWJ1aWxkcy8wMTZlYzI5OTAzMzkwMmY2ZTY4Yzg0YWMwYTNlYzkxN2Y5MzA0OTQ2L0xpbnV4L3g2NC9DaGVja2VkL2xpYmNscmppdF93aW5fYXJtNjRfeDY0LnNvITAwMDAyOCE5OTk5LTEyLTMxVDIzOjU5OjU5Ljk5OTk5OTlaIQ--</NextMarker>
+ #
+ # which we need to pass to the REST API with `marker=...`.
- urls_split = contents.split("<Url>")[1:]
urls = []
- for item in urls_split:
- url = item.split("</Url>")[0].strip()
- if filter_func(url):
- urls.append(url)
+
+ list_az_container_uri_root = az_blob_storage_container_uri + "?restype=container&comp=list&prefix=" + az_builds_root_folder + "/"
+ if prefix_string:
+ list_az_container_uri_root += prefix_string
+
+ iter = 1
+ marker = ""
+
+ while True:
+ list_az_container_uri = list_az_container_uri_root + marker
+
+ try:
+ contents = urllib.request.urlopen(list_az_container_uri).read().decode('utf-8')
+ except Exception as exception:
+ print("Didn't find any collections using {}".format(list_az_container_uri))
+ print(" Error: {}".format(exception))
+ return None
+
+ # Contents is an XML file with contents like:
+ #
+ # <EnumerationResults ...>
+ #   <Prefix>builds/</Prefix>
+ #   <Blobs>
+ #     <Blob>
+ #       <Name>builds/755f01659f03196487ec41225de8956911f8049b/Linux/x64/Checked/libclrjit.so</Name>
+ #       <Url>https://clrjit2.blob.core.windows.net/jitrollingbuild/builds/755f01659f03196487ec41225de8956911f8049b/Linux/x64/Checked/libclrjit.so</Url>
+ #       ...
+ #     </Blob>
+ #     <Blob>
+ #       <Name>builds/755f01659f03196487ec41225de8956911f8049b/OSX/x64/Checked/libclrjit.dylib</Name>
+ #       <Url>https://clrjit2.blob.core.windows.net/jitrollingbuild/builds/755f01659f03196487ec41225de8956911f8049b/OSX/x64/Checked/libclrjit.dylib</Url>
+ #       ...
+ #     </Blob>
+ #     ... etc. ...
+ #   </Blobs>
+ # </EnumerationResults>
+ #
+ # We just want to extract the <Url> entries. We could probably use an XML parsing package, but we just
+ # use regular expressions.
+
+ urls_split = contents.split("<Url>")[1:]
+ for item in urls_split:
+ url = item.split("</Url>")[0].strip()
+ if filter_func(url):
+ urls.append(url)
+
+ # Look for a continuation marker.
+ re_match = re.match(r'.*<NextMarker>(.*)</NextMarker>.*', contents)
+ if re_match:
+ marker_text = re_match.group(1)
+ marker = "&marker=" + marker_text
+ iter += 1
+ else:
+ break
return urls
@@ -449,7 +476,7 @@ def filter_jits(url):
url = url.lower()
return find_all or url.startswith(blob_prefix_filter)
- return list_az_jits(filter_jits)
+ return list_az_jits(filter_jits, None if find_all else blob_filter_string)
def download_command(coreclr_args):
diff --git a/src/coreclr/scripts/superpmi.proj b/src/coreclr/scripts/superpmi.proj
index 05ad302eba2de3..963e117688f572 100644
--- a/src/coreclr/scripts/superpmi.proj
+++ b/src/coreclr/scripts/superpmi.proj
@@ -85,6 +85,10 @@
5:00
+
+
+
+
<HelixPreCommands>@(HelixPreCommand)</HelixPreCommands>
+ <HelixPostCommands>@(HelixPostCommand)</HelixPostCommands>
- -->
diff --git a/src/coreclr/scripts/superpmi.py b/src/coreclr/scripts/superpmi.py
index 37f069d959cf8f..ce4332c314547e 100755
--- a/src/coreclr/scripts/superpmi.py
+++ b/src/coreclr/scripts/superpmi.py
@@ -262,9 +262,8 @@
collect_parser.add_argument("collection_args", nargs='?', help="Arguments to pass to the SuperPMI collect command. This is a single string; quote it if necessary if the arguments contain spaces.")
collect_parser.add_argument("--pmi", action="store_true", help="Run PMI on a set of directories or assemblies.")
-collect_parser.add_argument("--crossgen", action="store_true", help="Run crossgen on a set of directories or assemblies.")
collect_parser.add_argument("--crossgen2", action="store_true", help="Run crossgen2 on a set of directories or assemblies.")
-collect_parser.add_argument("-assemblies", dest="assemblies", nargs="+", default=[], help="A list of managed dlls or directories to recursively use while collecting with PMI, crossgen, or crossgen2. Required if --pmi, --crossgen, or --crossgen2 is specified.")
+collect_parser.add_argument("-assemblies", dest="assemblies", nargs="+", default=[], help="A list of managed dlls or directories to recursively use while collecting with PMI or crossgen2. Required if --pmi or --crossgen2 is specified.")
collect_parser.add_argument("-exclude", dest="exclude", nargs="+", default=[], help="A list of files or directories to exclude from the files and directories specified by `-assemblies`.")
collect_parser.add_argument("-pmi_location", help="Path to pmi.dll to use during PMI run. Optional; pmi.dll will be downloaded from Azure Storage if necessary.")
collect_parser.add_argument("-output_mch_path", help="Location to place the final MCH file.")
@@ -631,6 +630,39 @@ def create_unique_directory_name(root_directory, base_name):
return full_path
+def create_unique_file_name(directory, base_name, extension):
+ """ Create a unique file name in the specified directory by joining `base_name` and `extension`.
+ If this name already exists, append ".1", ".2", ".3", etc., to the `base_name`
+ name component until the full file name is not found.
+
+ Args:
+ directory (str) : directory in which a new file will be created
+ base_name (str) : the base name of the new filename to be added
+ extension (str) : the filename extension of the new filename to be added
+
+ Returns:
+ (str) The full absolute path of the new filename.
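+
+        For example, with base_name "diff" and extension "md" the result is "diff.md";
+        if that file already exists, "diff.1.md", "diff.2.md", etc. are tried in turn.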
+ """
+
+ directory = os.path.abspath(directory)
+ if not os.path.isdir(directory):
+ try:
+ os.makedirs(directory)
+ except Exception as exception:
+ logging.critical(exception)
+ raise exception
+
+ full_path = os.path.join(directory, base_name + "." + extension)
+
+ count = 1
+ while os.path.isfile(full_path):
+ new_full_path = os.path.join(directory, base_name + "." + str(count) + "." + extension)
+ count += 1
+ full_path = new_full_path
+
+ return full_path
+
+
def get_files_from_path(path, match_func=lambda path: True):
""" Return all files in a directory tree matching a criteria.
@@ -906,15 +938,12 @@ def __init__(self, coreclr_args):
if coreclr_args.host_os == "OSX":
self.collection_shim_name = "libsuperpmi-shim-collector.dylib"
self.corerun_tool_name = "corerun"
- self.crossgen_tool_name = "crossgen"
elif coreclr_args.host_os == "Linux":
self.collection_shim_name = "libsuperpmi-shim-collector.so"
self.corerun_tool_name = "corerun"
- self.crossgen_tool_name = "crossgen"
elif coreclr_args.host_os == "windows":
self.collection_shim_name = "superpmi-shim-collector.dll"
self.corerun_tool_name = "corerun.exe"
- self.crossgen_tool_name = "crossgen.exe"
else:
raise RuntimeError("Unsupported OS.")
@@ -931,9 +960,6 @@ def __init__(self, coreclr_args):
self.pmi_location = determine_pmi_location(coreclr_args)
self.corerun = os.path.join(self.core_root, self.corerun_tool_name)
- if coreclr_args.crossgen:
- self.crossgen_tool = os.path.join(self.core_root, self.crossgen_tool_name)
-
if coreclr_args.crossgen2:
self.corerun = os.path.join(self.core_root, self.corerun_tool_name)
if coreclr_args.dotnet_tool_path is None:
@@ -942,7 +968,7 @@ def __init__(self, coreclr_args):
self.crossgen2_driver_tool = coreclr_args.dotnet_tool_path
logging.debug("Using crossgen2 driver tool %s", self.crossgen2_driver_tool)
- if coreclr_args.pmi or coreclr_args.crossgen or coreclr_args.crossgen2:
+ if coreclr_args.pmi or coreclr_args.crossgen2:
self.assemblies = coreclr_args.assemblies
self.exclude = coreclr_args.exclude
@@ -1081,7 +1107,7 @@ def set_and_report_env(env, root_env, complus_env = None):
# If we need them, collect all the assemblies we're going to use for the collection(s).
# Remove the files matching the `-exclude` arguments (case-insensitive) from the list.
- if self.coreclr_args.pmi or self.coreclr_args.crossgen or self.coreclr_args.crossgen2:
+ if self.coreclr_args.pmi or self.coreclr_args.crossgen2:
assemblies = []
for item in self.assemblies:
assemblies += get_files_from_path(item, match_func=lambda file: any(file.endswith(extension) for extension in [".dll", ".exe"]) and (self.exclude is None or not any(e.lower() in file.lower() for e in self.exclude)))
@@ -1180,84 +1206,6 @@ async def run_pmi(print_prefix, assembly, self):
os.environ.update(old_env)
################################################################################################ end of "self.coreclr_args.pmi is True"
- ################################################################################################ Do collection using crossgen
- if self.coreclr_args.crossgen is True:
- logging.debug("Starting collection using crossgen")
-
- async def run_crossgen(print_prefix, assembly, self):
- """ Run crossgen over all dlls
- """
-
- root_crossgen_output_filename = make_safe_filename("crossgen_" + assembly) + ".out.dll"
- crossgen_output_assembly_filename = os.path.join(self.temp_location, root_crossgen_output_filename)
- try:
- if os.path.exists(crossgen_output_assembly_filename):
- os.remove(crossgen_output_assembly_filename)
- except OSError as ose:
- if "[WinError 32] The process cannot access the file because it is being used by another " \
- "process:" in format(ose):
- logging.warning("Skipping file %s. Got error: %s", crossgen_output_assembly_filename, ose)
- return
- else:
- raise ose
-
- command = [self.crossgen_tool, "/Platform_Assemblies_Paths", self.core_root, "/in", assembly, "/out", crossgen_output_assembly_filename]
- command_string = " ".join(command)
- logging.debug("%s%s", print_prefix, command_string)
-
- # Save the stdout and stderr to files, so we can see if crossgen wrote any interesting messages.
- # Use the name of the assembly as the basename of the file. mkstemp() will ensure the file
- # is unique.
- root_output_filename = make_safe_filename("crossgen_" + assembly + "_")
- try:
- stdout_file_handle, stdout_filepath = tempfile.mkstemp(suffix=".stdout", prefix=root_output_filename, dir=self.temp_location)
- stderr_file_handle, stderr_filepath = tempfile.mkstemp(suffix=".stderr", prefix=root_output_filename, dir=self.temp_location)
-
- proc = await asyncio.create_subprocess_shell(
- command_string,
- stdout=stdout_file_handle,
- stderr=stderr_file_handle)
-
- await proc.communicate()
-
- os.close(stdout_file_handle)
- os.close(stderr_file_handle)
-
- # No need to keep zero-length files
- if is_zero_length_file(stdout_filepath):
- os.remove(stdout_filepath)
- if is_zero_length_file(stderr_filepath):
- os.remove(stderr_filepath)
-
- return_code = proc.returncode
- if return_code != 0:
- logging.debug("'%s': Error return code: %s", command_string, return_code)
- write_file_to_log(stdout_filepath, log_level=logging.DEBUG)
-
- write_file_to_log(stderr_filepath, log_level=logging.DEBUG)
- except OSError as ose:
- if "[WinError 32] The process cannot access the file because it is being used by another " \
- "process:" in format(ose):
- logging.warning("Skipping file %s. Got error: %s", root_output_filename, ose)
- else:
- raise ose
-
- # Set environment variables.
- crossgen_command_env = env_copy.copy()
- crossgen_complus_env = complus_env.copy()
- crossgen_complus_env["JitName"] = self.collection_shim_name
- set_and_report_env(crossgen_command_env, root_env, crossgen_complus_env)
-
- old_env = os.environ.copy()
- os.environ.update(crossgen_command_env)
-
- helper = AsyncSubprocessHelper(assemblies, verbose=True)
- helper.run_to_completion(run_crossgen, self)
-
- os.environ.clear()
- os.environ.update(old_env)
- ################################################################################################ end of "self.coreclr_args.crossgen is True"
-
################################################################################################ Do collection using crossgen2
if self.coreclr_args.crossgen2 is True:
logging.debug("Starting collection using crossgen2")
@@ -1602,14 +1550,6 @@ def replay(self):
result = True # Assume success
- # Possible return codes from SuperPMI
- #
- # 0 : success
- # -1 : general fatal error (e.g., failed to initialize, failed to read files)
- # -2 : JIT failed to initialize
- # 1 : there were compilation failures
- # 2 : there were assembly diffs
-
with TempDir() as temp_location:
logging.debug("")
logging.debug("Temp Location: %s", temp_location)
@@ -1681,8 +1621,12 @@ def replay(self):
if return_code == 0:
logging.info("Clean SuperPMI replay")
else:
- files_with_replay_failures.append(mch_file)
result = False
+ # Don't report missing data (return code 3) as a replay failure.
+ # Anything else, such as a compilation failure (return code 1, typically a JIT assert), will be
+ # reported as a replay failure.
+ if return_code != 3:
+ files_with_replay_failures.append(mch_file)
if is_nonzero_length_file(fail_mcl_file):
# Unclean replay. Examine the contents of the fail.mcl file to dig into failures.
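
For reference, the SuperPMI return codes documented in the comment deleted above are: 0 (success), -1 (general fatal error), -2 (JIT failed to initialize), 1 (compilation failures), and 2 (assembly diffs); this change adds special treatment for 3 (missing data). A minimal sketch of the resulting reporting policy for plain replay (the helper name is hypothetical, not part of the patch):

    def should_report_replay_failure(return_code):
        """Decide whether an MCH file is listed as a replay failure."""
        if return_code == 0:
            return False  # clean replay
        if return_code == 3:
            return False  # missing data: incomplete collection, not a JIT failure
        return True       # e.g. 1 (JIT assert) or -1/-2 (fatal errors)

The asm-diffs variant below applies the same policy but additionally excludes return code 2, since an assembly diff is reported separately rather than as a replay failure.
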
@@ -1754,14 +1698,6 @@ def replay_with_asm_diffs(self):
result = True # Assume success
- # Possible return codes from SuperPMI
- #
- # 0 : success
- # -1 : general fatal error (e.g., failed to initialize, failed to read files)
- # -2 : JIT failed to initialize
- # 1 : there were compilation failures
- # 2 : there were assembly diffs
-
# Set up some settings we'll use below.
asm_complus_vars = {
@@ -1829,6 +1765,9 @@ def replay_with_asm_diffs(self):
files_with_asm_diffs = []
files_with_replay_failures = []
+ # List of all Markdown summary files
+ all_md_summary_files = []
+
with TempDir(self.coreclr_args.temp_dir, self.coreclr_args.skip_cleanup) as temp_location:
logging.debug("")
logging.debug("Temp Location: %s", temp_location)
@@ -1889,8 +1828,12 @@ def replay_with_asm_diffs(self):
if return_code == 0:
logging.info("Clean SuperPMI replay")
else:
- files_with_replay_failures.append(mch_file)
result = False
+ # Don't report asm diffs (return code 2) or missing data (return code 3) as replay failures.
+ # Anything else, such as a compilation failure (return code 1, typically a JIT assert), will be
+ # reported as a replay failure.
+ if return_code != 2 and return_code != 3:
+ files_with_replay_failures.append(mch_file)
artifacts_base_name = create_artifacts_base_name(self.coreclr_args, mch_file)
@@ -2023,7 +1966,10 @@ async def create_one_artifact(jit_path: str, location: str, flags) -> str:
jit_analyze_path = find_file(jit_analyze_file, path_var.split(os.pathsep))
if jit_analyze_path is not None:
# It appears we have a built jit-analyze on the path, so try to run it.
- command = [ jit_analyze_path, "-r", "--base", base_asm_location, "--diff", diff_asm_location ]
+ md_summary_file = os.path.join(asm_root_dir, "summary.md")
+ summary_file_info = ( mch_file, md_summary_file )
+ all_md_summary_files.append(summary_file_info)
+ command = [ jit_analyze_path, "--md", md_summary_file, "-r", "--base", base_asm_location, "--diff", diff_asm_location ]
run_and_log(command, logging.INFO)
ran_jit_analyze = True
@@ -2056,8 +2002,32 @@ async def create_one_artifact(jit_path: str, location: str, flags) -> str:
################################################################################################ end of for mch_file in self.mch_files
+ # Report the overall results summary of the asmdiffs run
+
logging.info("Asm diffs summary:")
+ # Construct an overall Markdown summary file.
+
+ if len(all_md_summary_files) > 0:
+ overall_md_summary_file = create_unique_file_name(self.coreclr_args.spmi_location, "diff_summary", "md")
+ if not os.path.isdir(self.coreclr_args.spmi_location):
+ os.makedirs(self.coreclr_args.spmi_location)
+ if os.path.isfile(overall_md_summary_file):
+ os.remove(overall_md_summary_file)
+
+ with open(overall_md_summary_file, "w") as write_fh:
+ for summary_file_info in all_md_summary_files:
+ summary_mch = summary_file_info[0]
+ summary_mch_filename = os.path.basename(summary_mch) # Display just the MCH filename, not the full path
+ summary_file = summary_file_info[1]
+ with open(summary_file, "r") as read_fh:
+ write_fh.write("## " + summary_mch_filename + ":\n\n")
+ shutil.copyfileobj(read_fh, write_fh)
+
+ logging.info(" Summary Markdown file: %s", overall_md_summary_file)
+
+ # Report the set of MCH files with asm diffs and replay failures.
+
if len(files_with_replay_failures) != 0:
logging.info(" Replay failures in %s MCH files:", len(files_with_replay_failures))
for file in files_with_replay_failures:
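
The aggregation step above can be read in isolation as follows (a condensed sketch of the same logic; the function name is illustrative and not part of the patch):

    import os, shutil

    def write_overall_summary(overall_md_path, md_summary_files):
        """Concatenate per-MCH jit-analyze summaries into one Markdown file.

        `md_summary_files` holds (mch_file, summary_md_path) tuples, as
        collected in `all_md_summary_files` above; each per-file summary is
        emitted under a "## <mch filename>:" heading.
        """
        with open(overall_md_path, "w") as write_fh:
            for mch_file, summary_file in md_summary_files:
                write_fh.write("## " + os.path.basename(mch_file) + ":\n\n")
                with open(summary_file, "r") as read_fh:
                    shutil.copyfileobj(read_fh, write_fh)
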
@@ -2407,45 +2377,69 @@ def list_superpmi_collections_container_via_rest_api(path_filter=lambda unused:
# This URI will return *all* the blobs, for all jit-ee-version/OS/architecture combinations.
# pass "prefix=foo/bar/..." to only show a subset. Or, we can filter later using string search.
- list_superpmi_container_uri = az_blob_storage_superpmi_container_uri + "?restype=container&comp=list&prefix=" + az_collections_root_folder + "/"
-
- try:
- contents = urllib.request.urlopen(list_superpmi_container_uri).read().decode('utf-8')
- except Exception as exception:
- logging.error("Didn't find any collections using %s", list_superpmi_container_uri)
- logging.error(" Error: %s", exception)
- return None
-
- # Contents is an XML file with contents like:
#
- # <EnumerationResults ContainerName="https://clrjit.blob.core.windows.net/superpmi">
- #   <Blobs>
- #     <Blob>
- #       <Name>jit-ee-guid/Linux/x64/Linux.x64.Checked.frameworks.mch.zip</Name>
- #       <Url>https://clrjit.blob.core.windows.net/superpmi/collections/jit-ee-guid/Linux/x64/Linux.x64.Checked.frameworks.mch.zip</Url>
- #       <Properties>
- #         ...
- #       </Properties>
- #     </Blob>
- #     <Blob>
- #       <Name>jit-ee-guid/Linux/x64/Linux.x64.Checked.mch.zip</Name>
- #       <Url>https://clrjit.blob.core.windows.net/superpmi/collections/jit-ee-guid/Linux/x64/Linux.x64.Checked.mch.zip</Url>
- #       ... etc. ...
- #   </Blobs>
- # </EnumerationResults>
+ # Note that a single query returns a maximum of 5000 results, so we might need to
+ # iterate. In that case, the XML result contains a `<NextMarker>` element like:
#
- # We just want to extract the <Url> entries. We could probably use an XML parsing package, but we just
- # use regular expressions.
-
- url_prefix = az_blob_storage_superpmi_container_uri + "/" + az_collections_root_folder + "/"
+ # <NextMarker>2!184!MDAwMDkyIWJ1aWxkcy8wMTZlYzI5OTAzMzkwMmY2ZTY4Yzg0YWMwYTNlYzkxN2Y5MzA0OTQ2L0xpbnV4L3g2NC9DaGVja2VkL2xpYmNscmppdF93aW5fYXJtNjRfeDY0LnNvITAwMDAyOCE5OTk5LTEyLTMxVDIzOjU5OjU5Ljk5OTk5OTlaIQ--</NextMarker>
+ #
+ # which we need to pass to the REST API with `marker=...`.
- urls_split = contents.split("<Url>")[1:]
paths = []
- for item in urls_split:
- url = item.split("</Url>")[0].strip()
- path = remove_prefix(url, url_prefix)
- if path_filter(path):
- paths.append(path)
+
+ list_superpmi_container_uri_base = az_blob_storage_superpmi_container_uri + "?restype=container&comp=list&prefix=" + az_collections_root_folder + "/"
+
+ iter = 1
+ marker = ""
+
+ while True:
+ list_superpmi_container_uri = list_superpmi_container_uri_base + marker
+
+ try:
+ contents = urllib.request.urlopen(list_superpmi_container_uri).read().decode('utf-8')
+ except Exception as exception:
+ logging.error("Didn't find any collections using %s", list_superpmi_container_uri)
+ logging.error(" Error: %s", exception)
+ return None
+
+ # Contents is an XML file with contents like:
+ #
+ # <EnumerationResults ContainerName="https://clrjit.blob.core.windows.net/superpmi">
+ #   <Blobs>
+ #     <Blob>
+ #       <Name>jit-ee-guid/Linux/x64/Linux.x64.Checked.frameworks.mch.zip</Name>
+ #       <Url>https://clrjit.blob.core.windows.net/superpmi/collections/jit-ee-guid/Linux/x64/Linux.x64.Checked.frameworks.mch.zip</Url>
+ #       <Properties>
+ #         ...
+ #       </Properties>
+ #     </Blob>
+ #     <Blob>
+ #       <Name>jit-ee-guid/Linux/x64/Linux.x64.Checked.mch.zip</Name>
+ #       <Url>https://clrjit.blob.core.windows.net/superpmi/collections/jit-ee-guid/Linux/x64/Linux.x64.Checked.mch.zip</Url>
+ #       ... etc. ...
+ #   </Blobs>
+ # </EnumerationResults>
+ #
+ # We just want to extract the <Url> entries. We could probably use an XML parsing package, but we just
+ # use regular expressions.
+
+ url_prefix = az_blob_storage_superpmi_container_uri + "/" + az_collections_root_folder + "/"
+
+ urls_split = contents.split("<Url>")[1:]
+ for item in urls_split:
+ url = item.split("</Url>")[0].strip()
+ path = remove_prefix(url, url_prefix)
+ if path_filter(path):
+ paths.append(path)
+
+ # Look for a continuation marker.
+ re_match = re.match(r'.*<NextMarker>(.*)</NextMarker>.*', contents)
+ if re_match:
+ marker_text = re_match.group(1)
+ marker = "&marker=" + marker_text
+ iter += 1
+ else:
+ break
return paths
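
The paging pattern introduced here is the standard one for the Azure Blob "List Blobs" REST API: each response carries at most 5000 entries, and a non-empty <NextMarker> must be echoed back as the `marker` query parameter to fetch the next page. A stripped-down sketch of just that loop (hypothetical helper, extracting <Name> instead of <Url> for brevity):

    import re
    import urllib.request

    def list_blob_names(container_uri, prefix):
        """Follow <NextMarker> continuations until the listing is exhausted."""
        base_uri = container_uri + "?restype=container&comp=list&prefix=" + prefix
        names, marker = [], ""
        while True:
            contents = urllib.request.urlopen(base_uri + marker).read().decode('utf-8')
            names += [item.split("</Name>")[0] for item in contents.split("<Name>")[1:]]
            match = re.match(r'.*<NextMarker>(.*)</NextMarker>.*', contents)
            if not match:
                return names
            marker = "&marker=" + match.group(1)
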
@@ -3234,7 +3228,7 @@ def setup_spmi_location_arg(spmi_location):
log_file = None
if coreclr_args.log_file is None:
if hasattr(coreclr_args, "spmi_location"):
- log_file = os.path.join(coreclr_args.spmi_location, "superpmi.log")
+ log_file = create_unique_file_name(coreclr_args.spmi_location, "superpmi", "log")
if not os.path.isdir(coreclr_args.spmi_location):
os.makedirs(coreclr_args.spmi_location)
else:
@@ -3411,11 +3405,6 @@ def verify_replay_common_args():
lambda unused: True,
"Unable to set pmi")
- coreclr_args.verify(args,
- "crossgen",
- lambda unused: True,
- "Unable to set crossgen")
-
coreclr_args.verify(args,
"crossgen2",
lambda unused: True,
@@ -3488,8 +3477,8 @@ def verify_replay_common_args():
lambda unused: True,
"Unable to set tiered_compilation")
- if (args.collection_command is None) and (args.pmi is False) and (args.crossgen is False) and (args.crossgen2 is False):
- print("Either a collection command or `--pmi` or `--crossgen` or `--crossgen2` must be specified")
+ if (args.collection_command is None) and (args.pmi is False) and (args.crossgen2 is False):
+ print("Either a collection command or `--pmi` or `--crossgen2` must be specified")
sys.exit(1)
if (args.collection_command is not None) and (len(args.assemblies) > 0):
@@ -3500,13 +3489,13 @@ def verify_replay_common_args():
print("Don't specify `-exclude` if a collection command is given")
sys.exit(1)
- if ((args.pmi is True) or (args.crossgen is True) or (args.crossgen2 is True)) and (len(args.assemblies) == 0):
- print("Specify `-assemblies` if `--pmi` or `--crossgen` or `--crossgen2` is given")
+ if ((args.pmi is True) or (args.crossgen2 is True)) and (len(args.assemblies) == 0):
+ print("Specify `-assemblies` if `--pmi` or `--crossgen2` is given")
sys.exit(1)
if args.collection_command is None and args.merge_mch_files is not True:
assert args.collection_args is None
- assert (args.pmi is True) or (args.crossgen is True) or (args.crossgen2 is True)
+ assert (args.pmi is True) or (args.crossgen2 is True)
assert len(args.assemblies) > 0
if coreclr_args.merge_mch_files:
diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py
index e4b9b638d8c3f8..7fd7d62b256009 100644
--- a/src/coreclr/scripts/superpmi_setup.py
+++ b/src/coreclr/scripts/superpmi_setup.py
@@ -28,7 +28,7 @@
# | x86 | Windows.10.Amd64.X86 | |
# | x64 | Windows.10.Amd64.X86 | Ubuntu.1804.Amd64 |
# | arm | - | (Ubuntu.1804.Arm32)Ubuntu.1804.Armarch@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm32v7-bfcd90a-20200121150440 |
-# | arm64 | Windows.10.Arm64 | (Ubuntu.1804.Arm64)Ubuntu.1804.ArmArch@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8-a45aeeb-20190620155855 |
+# | arm64 | Windows.10.Arm64 | (Ubuntu.1804.Arm64)Ubuntu.1804.ArmArch@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8-20210531091519-97d8652 |
################################################################################
################################################################################
@@ -59,6 +59,46 @@
is_windows = platform.system() == "Windows"
native_binaries_to_ignore = [
+ "api-ms-win-core-console-l1-1-0.dll",
+ "api-ms-win-core-datetime-l1-1-0.dll",
+ "api-ms-win-core-debug-l1-1-0.dll",
+ "api-ms-win-core-errorhandling-l1-1-0.dll",
+ "api-ms-win-core-file-l1-1-0.dll",
+ "api-ms-win-core-file-l1-2-0.dll",
+ "api-ms-win-core-file-l2-1-0.dll",
+ "api-ms-win-core-handle-l1-1-0.dll",
+ "api-ms-win-core-heap-l1-1-0.dll",
+ "api-ms-win-core-interlocked-l1-1-0.dll",
+ "api-ms-win-core-libraryloader-l1-1-0.dll",
+ "api-ms-win-core-localization-l1-2-0.dll",
+ "api-ms-win-core-memory-l1-1-0.dll",
+ "api-ms-win-core-namedpipe-l1-1-0.dll",
+ "api-ms-win-core-processenvironment-l1-1-0.dll",
+ "api-ms-win-core-processthreads-l1-1-0.dll",
+ "api-ms-win-core-processthreads-l1-1-1.dll",
+ "api-ms-win-core-profile-l1-1-0.dll",
+ "api-ms-win-core-rtlsupport-l1-1-0.dll",
+ "api-ms-win-core-string-l1-1-0.dll",
+ "api-ms-win-core-synch-l1-1-0.dll",
+ "api-ms-win-core-synch-l1-2-0.dll",
+ "api-ms-win-core-sysinfo-l1-1-0.dll",
+ "api-ms-win-core-timezone-l1-1-0.dll",
+ "api-ms-win-core-util-l1-1-0.dll",
+ "api-ms-win-crt-conio-l1-1-0.dll",
+ "api-ms-win-crt-convert-l1-1-0.dll",
+ "api-ms-win-crt-environment-l1-1-0.dll",
+ "api-ms-win-crt-filesystem-l1-1-0.dll",
+ "api-ms-win-crt-heap-l1-1-0.dll",
+ "api-ms-win-crt-locale-l1-1-0.dll",
+ "api-ms-win-crt-math-l1-1-0.dll",
+ "api-ms-win-crt-multibyte-l1-1-0.dll",
+ "api-ms-win-crt-private-l1-1-0.dll",
+ "api-ms-win-crt-process-l1-1-0.dll",
+ "api-ms-win-crt-runtime-l1-1-0.dll",
+ "api-ms-win-crt-stdio-l1-1-0.dll",
+ "api-ms-win-crt-string-l1-1-0.dll",
+ "api-ms-win-crt-time-l1-1-0.dll",
+ "api-ms-win-crt-utility-l1-1-0.dll",
"clretwrc.dll",
"clrgc.dll",
"clrjit.dll",
@@ -68,6 +108,9 @@
"clrjit_unix_arm_x86.dll",
"clrjit_unix_arm64_arm64.dll",
"clrjit_unix_arm64_x64.dll",
+ "clrjit_unix_armel_arm.dll",
+ "clrjit_unix_armel_arm64.dll",
+ "clrjit_unix_armel_x64.dll",
"clrjit_unix_armel_x86.dll",
"clrjit_unix_osx_arm64_arm64.dll",
"clrjit_unix_osx_arm64_x64.dll",
@@ -92,6 +135,7 @@
"CoreShim.dll",
"createdump.exe",
"crossgen.exe",
+ "crossgen2.exe",
"dbgshim.dll",
"ilasm.exe",
"ildasm.exe",
@@ -108,12 +152,15 @@
"mscordbi.dll",
"mscorrc.dll",
"msdia140.dll",
+ "R2RDump.exe",
+ "R2RTest.exe",
"superpmi.exe",
"superpmi-shim-collector.dll",
"superpmi-shim-counter.dll",
"superpmi-shim-simple.dll",
"System.IO.Compression.Native.dll",
"ucrtbase.dll",
+ "xunit.console.exe",
]
MAX_FILES_COUNT = 1500
@@ -202,7 +249,9 @@ def sorter_by_size(pair):
# Credit: https://stackoverflow.com/a/19859907
dirs[:] = [d for d in dirs if d not in exclude_directories]
for name in files:
- if name in exclude_files:
+ # Make the exclude check case-insensitive
+ exclude_files_lower = [filename.lower() for filename in exclude_files]
+ if name.lower() in exclude_files_lower:
continue
curr_file_path = path.join(file_path, name)
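
A behaviorally equivalent sketch of this case-insensitive exclude test, with the lowercasing hoisted out of the per-file loop since the exclude list does not change while walking the tree (the helper name is illustrative, not part of the patch):

    def filter_excluded(file_names, exclude_files):
        """Drop names that match the exclude list, ignoring case."""
        exclude_lower = {f.lower() for f in exclude_files}
        return [name for name in file_names if name.lower() not in exclude_lower]
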
@@ -443,7 +492,7 @@ def main(main_args):
if arch == "arm":
helix_queue = "(Ubuntu.1804.Arm32)Ubuntu.1804.Armarch@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm32v7-bfcd90a-20200121150440"
elif arch == "arm64":
- helix_queue = "(Ubuntu.1804.Arm64)Ubuntu.1804.ArmArch@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8-a45aeeb-20190620155855"
+ helix_queue = "(Ubuntu.1804.Arm64)Ubuntu.1804.ArmArch@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8-20210531091519-97d8652"
else:
helix_queue = "Ubuntu.1804.Amd64"
diff --git a/src/coreclr/tools/CMakeLists.txt b/src/coreclr/tools/CMakeLists.txt
index e2e3083136be4f..8bc7696b332ddc 100644
--- a/src/coreclr/tools/CMakeLists.txt
+++ b/src/coreclr/tools/CMakeLists.txt
@@ -1,4 +1,3 @@
-add_subdirectory(crossgen)
if (CLR_CMAKE_TARGET_WIN32 AND NOT CLR_CMAKE_CROSS_ARCH)
add_subdirectory(GenClrDebugResource)
add_subdirectory(InjectResource)
diff --git a/src/coreclr/tools/Common/Compiler/DevirtualizationManager.cs b/src/coreclr/tools/Common/Compiler/DevirtualizationManager.cs
index 6b2ff1b95cfaae..aba41341a27095 100644
--- a/src/coreclr/tools/Common/Compiler/DevirtualizationManager.cs
+++ b/src/coreclr/tools/Common/Compiler/DevirtualizationManager.cs
@@ -1,8 +1,9 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
+using System;
using Internal.TypeSystem;
-
+using CORINFO_DEVIRTUALIZATION_DETAIL = Internal.JitInterface.CORINFO_DEVIRTUALIZATION_DETAIL;
using Debug = System.Diagnostics.Debug;
namespace ILCompiler
@@ -53,17 +54,19 @@ public virtual bool IsEffectivelySealed(MethodDesc method)
/// Note that if is a value type, the result of the resolution
/// might have to be treated as an unboxing thunk by the caller.
///
- public MethodDesc ResolveVirtualMethod(MethodDesc declMethod, TypeDesc implType)
+ public MethodDesc ResolveVirtualMethod(MethodDesc declMethod, TypeDesc implType, out CORINFO_DEVIRTUALIZATION_DETAIL devirtualizationDetail)
{
Debug.Assert(declMethod.IsVirtual);
// We're operating on virtual methods. This means that if implType is an array, we need
// to get the type that has all the virtual methods provided by the class library.
- return ResolveVirtualMethod(declMethod, implType.GetClosestDefType());
+ return ResolveVirtualMethod(declMethod, implType.GetClosestDefType(), out devirtualizationDetail);
}
- protected virtual MethodDesc ResolveVirtualMethod(MethodDesc declMethod, DefType implType)
+ protected virtual MethodDesc ResolveVirtualMethod(MethodDesc declMethod, DefType implType, out CORINFO_DEVIRTUALIZATION_DETAIL devirtualizationDetail)
{
+ devirtualizationDetail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_UNKNOWN;
+
MethodDesc impl;
if (declMethod.OwningType.IsInterface)
@@ -84,20 +87,97 @@ protected virtual MethodDesc ResolveVirtualMethod(MethodDesc declMethod, DefType
{
// We cannot resolve the interface as we don't know with exact enough detail which interface
// of multiple possible interfaces is being called.
+ devirtualizationDetail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_MULTIPLE_IMPL;
return null;
}
}
}
}
- impl = implType.ResolveInterfaceMethodTarget(declMethod);
+ if (!implType.CanCastTo(declMethod.OwningType))
+ {
+ devirtualizationDetail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_FAILED_CAST;
+ return null;
+ }
+
+ impl = implType.ResolveInterfaceMethodTargetWithVariance(declMethod);
if (impl != null)
{
impl = implType.FindVirtualFunctionTargetMethodOnObjectType(impl);
}
+ else
+ {
+ MethodDesc dimMethod = null;
+ // This isn't the correct lookup algorithm for variant default interface methods
+ // but as we will drop any results we find in any case, it doesn't matter much.
+ // Non-variant dispatch can simply use ResolveInterfaceMethodToDefaultImplementationOnType
+ // but that implementation currently cannot handle variance.
+
+ MethodDesc defaultInterfaceDispatchDeclMethod = null;
+ foreach (TypeDesc iface in implType.RuntimeInterfaces)
+ {
+ if (iface == declMethod.OwningType)
+ {
+ defaultInterfaceDispatchDeclMethod = declMethod;
+ break;
+ }
+ if (iface.HasSameTypeDefinition(declMethod.OwningType) && iface.CanCastTo(declMethod.OwningType))
+ {
+ defaultInterfaceDispatchDeclMethod = iface.FindMethodOnTypeWithMatchingTypicalMethod(declMethod);
+ // Prefer to find the exact match, so don't break immediately
+ }
+ }
+
+ if (defaultInterfaceDispatchDeclMethod != null)
+ {
+ switch (implType.ResolveInterfaceMethodToDefaultImplementationOnType(defaultInterfaceDispatchDeclMethod, out dimMethod))
+ {
+ case DefaultInterfaceMethodResolution.Diamond:
+ case DefaultInterfaceMethodResolution.Reabstraction:
+ devirtualizationDetail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_FAILED_DIM;
+ return null;
+
+ case DefaultInterfaceMethodResolution.DefaultImplementation:
+ if (dimMethod.OwningType.HasInstantiation || (declMethod != defaultInterfaceDispatchDeclMethod))
+ {
+ // If we devirtualized into a default interface method on a generic type, we should actually return an
+ // instantiating stub but this is not happening.
+ // Making this work is tracked by https://github.com/dotnet/runtime/issues/9588
+
+ // In addition, we fail here for variant default interface dispatch
+ devirtualizationDetail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_FAILED_DIM;
+ return null;
+ }
+ else
+ {
+ impl = dimMethod;
+ }
+ break;
+ }
+ }
+ }
}
else
{
+ // The derived class should be a subclass of the base class.
+ // This check is performed via typedef checking instead of casting, as we accept canon methods calling exact types.
+ TypeDesc checkType;
+ for (checkType = implType; checkType != null && !checkType.HasSameTypeDefinition(declMethod.OwningType); checkType = checkType.BaseType)
+ { }
+
+ if ((checkType == null) || (checkType.ConvertToCanonForm(CanonicalFormKind.Specific) != declMethod.OwningType.ConvertToCanonForm(CanonicalFormKind.Specific)))
+ {
+ // The derived class should be a subclass of the base class.
+ devirtualizationDetail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_FAILED_SUBCLASS;
+ return null;
+ }
+ else
+ {
+ // At this point, the decl method may be only canonically compatible, but not an exact match to a method in the type hierarchy
+ // Convert it to an exact match. (Or if it is an exact match, the FindMethodOnTypeWithMatchingTypicalMethod will be a no-op)
+ declMethod = checkType.FindMethodOnTypeWithMatchingTypicalMethod(declMethod);
+ }
+
impl = implType.FindVirtualFunctionTargetMethodOnObjectType(declMethod);
if (impl != null && (impl != declMethod))
{
@@ -106,7 +186,14 @@ protected virtual MethodDesc ResolveVirtualMethod(MethodDesc declMethod, DefType
if (slotDefiningMethodImpl != slotDefiningMethodDecl)
{
- // We cannot resolve virtual method in case the impl is a different slot from the declMethod
+ // If the derived method's slot does not match the vtable slot,
+ // bail on devirtualization, as the method was installed into
+ // the vtable slot via an explicit override and even if the
+ // method is final, the slot may not be.
+ //
+ // Note the jit could still safely devirtualize if it had an exact
+ // class, but such cases are likely rare.
+ devirtualizationDetail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_FAILED_SLOT;
impl = null;
}
}
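
The class-method path added above reduces to two checks: the object's type must actually derive from a type sharing the type definition of the decl method's owning type, and the resolved override must live in the same vtable slot as the decl. A rough Python model of the subclass walk (omitting the canonical-form comparison the real code also performs; objects and their attributes are hypothetical stand-ins for the C# type system):

    def subclass_check_detail(impl_type, decl_owning_type):
        """Walk impl_type's base chain looking for a type that shares
        decl_owning_type's type definition; report FAILED_SUBCLASS when the
        chain runs out without a match."""
        check = impl_type
        while check is not None and check.type_definition is not decl_owning_type.type_definition:
            check = check.base_type
        if check is None:
            return "CORINFO_DEVIRTUALIZATION_FAILED_SUBCLASS"
        return "CORINFO_DEVIRTUALIZATION_SUCCESS"
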
diff --git a/src/coreclr/tools/Common/Internal/Runtime/ModuleHeaders.cs b/src/coreclr/tools/Common/Internal/Runtime/ModuleHeaders.cs
index feacb0ba06d5d9..eee208cfb2de0f 100644
--- a/src/coreclr/tools/Common/Internal/Runtime/ModuleHeaders.cs
+++ b/src/coreclr/tools/Common/Internal/Runtime/ModuleHeaders.cs
@@ -15,7 +15,7 @@ internal struct ReadyToRunHeaderConstants
public const uint Signature = 0x00525452; // 'RTR'
public const ushort CurrentMajorVersion = 5;
- public const ushort CurrentMinorVersion = 3;
+ public const ushort CurrentMinorVersion = 4;
}
#pragma warning disable 0169
diff --git a/src/coreclr/tools/Common/Internal/Runtime/ReadyToRunConstants.cs b/src/coreclr/tools/Common/Internal/Runtime/ReadyToRunConstants.cs
index fe2c5a9bd4a1d7..6f718756b205fb 100644
--- a/src/coreclr/tools/Common/Internal/Runtime/ReadyToRunConstants.cs
+++ b/src/coreclr/tools/Common/Internal/Runtime/ReadyToRunConstants.cs
@@ -4,6 +4,9 @@
using System;
using Internal.TypeSystem;
+// If any of these constants change, update src/coreclr/inc/readytorun.h and
+// src/coreclr/tools/Common/Internal/Runtime/ModuleHeaders.cs with the new R2R minor version
+
namespace Internal.ReadyToRunConstants
{
[Flags]
@@ -31,6 +34,7 @@ public enum ReadyToRunMethodSigFlags : byte
READYTORUN_METHOD_SIG_MemberRefToken = 0x10,
READYTORUN_METHOD_SIG_Constrained = 0x20,
READYTORUN_METHOD_SIG_OwnerType = 0x40,
+ READYTORUN_METHOD_SIG_UpdateContext = 0x80,
}
[Flags]
@@ -51,6 +55,13 @@ public enum ReadyToRunTypeLayoutFlags : byte
READYTORUN_LAYOUT_GCLayout_Empty = 0x10,
}
+ [Flags]
+ public enum ReadyToRunVirtualFunctionOverrideFlags : uint
+ {
+ None = 0x00,
+ VirtualFunctionOverriden = 0x01,
+ }
+
public enum DictionaryEntryKind
{
EmptySlot = 0,
@@ -121,6 +132,9 @@ public enum ReadyToRunFixupKind
Verify_FieldOffset = 0x31, // Generate a runtime check to ensure that the field offset matches between compile and runtime. Unlike CheckFieldOffset, this will generate a runtime exception on failure instead of silently dropping the method
Verify_TypeLayout = 0x32, // Generate a runtime check to ensure that the type layout (size, alignment, HFA, reference map) matches between compile and runtime. Unlike Check_TypeLayout, this will generate a runtime failure instead of silently dropping the method
+ Check_VirtualFunctionOverride = 0x33, // Generate a runtime check to ensure that virtual function resolution has equivalent behavior at runtime as at compile time. If not equivalent, code will not be used
+ Verify_VirtualFunctionOverride = 0x34, // Generate a runtime check to ensure that virtual function resolution has equivalent behavior at runtime as at compile time. If not equivalent, generate runtime failure.
+
ModuleOverride = 0x80,
// followed by sig-encoded UInt with assemblyref index into either the assemblyref
// table of the MSIL metadata of the master context module for the signature or
diff --git a/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs b/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs
index 43991db1ef644f..f102d06976af10 100644
--- a/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs
+++ b/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs
@@ -561,8 +561,8 @@ private Object HandleToObject(IntPtr handle)
private CORINFO_CLASS_STRUCT_* ObjectToHandle(TypeDesc type) => (CORINFO_CLASS_STRUCT_*)ObjectToHandle((Object)type);
private FieldDesc HandleToObject(CORINFO_FIELD_STRUCT_* field) => (FieldDesc)HandleToObject((IntPtr)field);
private CORINFO_FIELD_STRUCT_* ObjectToHandle(FieldDesc field) => (CORINFO_FIELD_STRUCT_*)ObjectToHandle((object)field);
- private MethodIL HandleToObject(CORINFO_MODULE_STRUCT_* module) => (MethodIL)HandleToObject((IntPtr)module);
- private CORINFO_MODULE_STRUCT_* ObjectToHandle(MethodIL methodIL) => (CORINFO_MODULE_STRUCT_*)ObjectToHandle((object)methodIL);
+ private MethodILScope HandleToObject(CORINFO_MODULE_STRUCT_* module) => (MethodIL)HandleToObject((IntPtr)module);
+ private CORINFO_MODULE_STRUCT_* ObjectToHandle(MethodILScope methodIL) => (CORINFO_MODULE_STRUCT_*)ObjectToHandle((object)methodIL);
private MethodSignature HandleToObject(MethodSignatureInfo* method) => (MethodSignature)HandleToObject((IntPtr)method);
private MethodSignatureInfo* ObjectToHandle(MethodSignature method) => (MethodSignatureInfo*)ObjectToHandle((object)method);
@@ -1138,12 +1138,14 @@ private bool resolveVirtualMethod(CORINFO_DEVIRTUALIZATION_INFO* info)
info->devirtualizedMethod = null;
info->requiresInstMethodTableArg = false;
info->exactContext = null;
+ info->detail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_UNKNOWN;
TypeDesc objType = HandleToObject(info->objClass);
// __Canon cannot be devirtualized
if (objType.IsCanonicalDefinitionType(CanonicalFormKind.Any))
{
+ info->detail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_FAILED_CANON;
return false;
}
@@ -1160,23 +1162,141 @@ private bool resolveVirtualMethod(CORINFO_DEVIRTUALIZATION_INFO* info)
}
}
- MethodDesc impl = _compilation.ResolveVirtualMethod(decl, objType);
+ MethodDesc originalImpl = _compilation.ResolveVirtualMethod(decl, objType, out info->detail);
- if (impl == null)
+ if (originalImpl == null)
{
+ // If this assert fires, we failed to devirtualize, probably due to a failure to resolve the
+ // virtual to an exact target. This should never happen in practice if the input IL is valid,
+ // and the algorithm for virtual function resolution is correct; however, if it does, this is
+ // a safe condition, and we could delete this assert. This assert exists in order to help identify
+ // cases where the virtual function resolution algorithm either does not function, or is not used
+ // correctly.
+#if DEBUG
+ if (info->detail == CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_UNKNOWN)
+ {
+ Console.Error.WriteLine($"Failed devirtualization with unexpected unknown failure while compiling {MethodBeingCompiled} with decl {decl} targeting type {objType}");
+ Debug.Assert(info->detail != CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_UNKNOWN);
+ }
+#endif
return false;
}
- if (impl.OwningType.IsValueType)
+ TypeDesc owningType = originalImpl.OwningType;
+
+ // RyuJIT expects to get the canonical form back
+ MethodDesc impl = originalImpl.GetCanonMethodTarget(CanonicalFormKind.Specific);
+
+ bool unboxingStub = impl.OwningType.IsValueType;
+
+ MethodDesc nonUnboxingImpl = impl;
+ if (unboxingStub)
{
impl = getUnboxingThunk(impl);
}
+#if READYTORUN
+ // As there are a variety of situations where the resolved virtual method may be different at compile and runtime (primarily due to subtle differences
+ // in the virtual resolution algorithm between the runtime and the compiler, although details such as whether or not type equivalence is enabled
+ // can also have an effect), record any decisions made, and if there are differences, simply skip use of the compiled method.
+ var resolver = _compilation.NodeFactory.Resolver;
+
+ MethodWithToken methodWithTokenDecl;
+
+ if (info->pResolvedTokenVirtualMethod != null)
+ {
+ methodWithTokenDecl = ComputeMethodWithToken(decl, ref *info->pResolvedTokenVirtualMethod, null, false);
+ }
+ else
+ {
+ ModuleToken declToken = resolver.GetModuleTokenForMethod(decl.GetTypicalMethodDefinition(), throwIfNotFound: false);
+ if (declToken.IsNull)
+ {
+ info->detail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_FAILED_DECL_NOT_REPRESENTABLE;
+ return false;
+ }
+ if (!_compilation.CompilationModuleGroup.VersionsWithTypeReference(decl.OwningType))
+ {
+ info->detail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_FAILED_DECL_NOT_REPRESENTABLE;
+ return false;
+ }
+ methodWithTokenDecl = new MethodWithToken(decl, declToken, null, false, null, devirtualizedMethodOwner: decl.OwningType);
+ }
+ MethodWithToken methodWithTokenImpl;
+
+ if (decl == originalImpl)
+ {
+ methodWithTokenImpl = methodWithTokenDecl;
+ if (info->pResolvedTokenVirtualMethod != null)
+ {
+ info->resolvedTokenDevirtualizedMethod = *info->pResolvedTokenVirtualMethod;
+ }
+ else
+ {
+ info->resolvedTokenDevirtualizedMethod = CreateResolvedTokenFromMethod(this, decl, methodWithTokenDecl);
+ }
+ info->resolvedTokenDevirtualizedUnboxedMethod = default(CORINFO_RESOLVED_TOKEN);
+ }
+ else
+ {
+ methodWithTokenImpl = new MethodWithToken(nonUnboxingImpl, resolver.GetModuleTokenForMethod(nonUnboxingImpl.GetTypicalMethodDefinition()), null, unboxingStub, null, devirtualizedMethodOwner: impl.OwningType);
+
+ info->resolvedTokenDevirtualizedMethod = CreateResolvedTokenFromMethod(this, impl, methodWithTokenImpl);
+
+ if (unboxingStub)
+ {
+ info->resolvedTokenDevirtualizedUnboxedMethod = info->resolvedTokenDevirtualizedMethod;
+ info->resolvedTokenDevirtualizedUnboxedMethod.tokenContext = contextFromMethod(nonUnboxingImpl);
+ info->resolvedTokenDevirtualizedUnboxedMethod.hMethod = ObjectToHandle(nonUnboxingImpl);
+ }
+ else
+ {
+ info->resolvedTokenDevirtualizedUnboxedMethod = default(CORINFO_RESOLVED_TOKEN);
+ }
+ }
+
+ // Testing has not shown that concerns about virtual matching are significant
+ // Only generate verification for builds with the stress mode enabled
+ if (_compilation.SymbolNodeFactory.VerifyTypeAndFieldLayout)
+ {
+ ISymbolNode virtualResolutionNode = _compilation.SymbolNodeFactory.CheckVirtualFunctionOverride(methodWithTokenDecl, objType, methodWithTokenImpl);
+ _methodCodeNode.Fixups.Add(virtualResolutionNode);
+ }
+#else
+ info->resolvedTokenDevirtualizedMethod = default(CORINFO_RESOLVED_TOKEN);
+ info->resolvedTokenDevirtualizedUnboxedMethod = default(CORINFO_RESOLVED_TOKEN);
+#endif
+ info->detail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_SUCCESS;
info->devirtualizedMethod = ObjectToHandle(impl);
info->requiresInstMethodTableArg = false;
- info->exactContext = contextFromType(impl.OwningType);
+ info->exactContext = contextFromType(owningType);
return true;
+
+#if READYTORUN
+ static CORINFO_RESOLVED_TOKEN CreateResolvedTokenFromMethod(CorInfoImpl jitInterface, MethodDesc method, MethodWithToken methodWithToken)
+ {
+ CORINFO_RESOLVED_TOKEN result = default(CORINFO_RESOLVED_TOKEN);
+ MethodILScope scope = jitInterface._compilation.GetMethodIL(methodWithToken.Method);
+ if (scope == null)
+ {
+ scope = Internal.IL.EcmaMethodILScope.Create((EcmaMethod)methodWithToken.Method.GetTypicalMethodDefinition());
+ }
+ result.tokenScope = jitInterface.ObjectToHandle(scope);
+ result.tokenContext = jitInterface.contextFromMethod(method);
+ result.token = methodWithToken.Token.Token;
+ if (methodWithToken.Token.TokenType != CorTokenType.mdtMethodDef)
+ {
+ Debug.Assert(false); // This should never happen, but we protect against total failure with the throw below.
+ throw new RequiresRuntimeJitException("Attempt to devirtualize and unable to create token for devirtualized method");
+ }
+ result.tokenType = CorInfoTokenKind.CORINFO_TOKENKIND_DevirtualizedMethod;
+ result.hClass = jitInterface.ObjectToHandle(methodWithToken.OwningType);
+ result.hMethod = jitInterface.ObjectToHandle(method);
+
+ return result;
+ }
+#endif
}
private CORINFO_METHOD_STRUCT_* getUnboxedEntry(CORINFO_METHOD_STRUCT_* ftn, ref bool requiresInstMethodTableArg)
@@ -1331,11 +1451,11 @@ private void methodMustBeLoadedBeforeCodeIsRun(CORINFO_METHOD_STRUCT_* method)
private CORINFO_METHOD_STRUCT_* mapMethodDeclToMethodImpl(CORINFO_METHOD_STRUCT_* method)
{ throw new NotImplementedException("mapMethodDeclToMethodImpl"); }
- private static object ResolveTokenWithSubstitution(MethodIL methodIL, mdToken token, Instantiation typeInst, Instantiation methodInst)
+ private static object ResolveTokenWithSubstitution(MethodILScope methodIL, mdToken token, Instantiation typeInst, Instantiation methodInst)
{
// Grab the generic definition of the method IL, resolve the token within the definition,
// and instantiate it with the given context.
- object result = methodIL.GetMethodILDefinition().GetObject((int)token);
+ object result = methodIL.GetMethodILScopeDefinition().GetObject((int)token);
if (result is MethodDesc methodResult)
{
@@ -1353,7 +1473,7 @@ private static object ResolveTokenWithSubstitution(MethodIL methodIL, mdToken to
return result;
}
- private static object ResolveTokenInScope(MethodIL methodIL, object typeOrMethodContext, mdToken token)
+ private static object ResolveTokenInScope(MethodILScope methodIL, object typeOrMethodContext, mdToken token)
{
MethodDesc owningMethod = methodIL.OwningMethod;
@@ -1445,7 +1565,7 @@ private object GetRuntimeDeterminedObjectForToken(ref CORINFO_RESOLVED_TOKEN pRe
/* If the resolved type is not runtime determined there's a chance we went down this path
because there was a literal typeof(__Canon) in the compiled IL - check for that
by resolving the token in the definition. */
- ((TypeDesc)methodIL.GetMethodILDefinition().GetObject((int)pResolvedToken.token)).IsCanonicalDefinitionType(CanonicalFormKind.Any));
+ ((TypeDesc)methodIL.GetMethodILScopeDefinition().GetObject((int)pResolvedToken.token)).IsCanonicalDefinitionType(CanonicalFormKind.Any));
}
if (pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_Newarr)
@@ -1473,9 +1593,8 @@ private void resolveToken(ref CORINFO_RESOLVED_TOKEN pResolvedToken)
bool recordToken = _compilation.CompilationModuleGroup.VersionsWithType(owningType) && owningType is EcmaType;
#endif
- if (result is MethodDesc)
+ if (result is MethodDesc method)
{
- MethodDesc method = result as MethodDesc;
pResolvedToken.hMethod = ObjectToHandle(method);
TypeDesc owningClass = method.OwningType;
@@ -1488,7 +1607,9 @@ private void resolveToken(ref CORINFO_RESOLVED_TOKEN pResolvedToken)
#if READYTORUN
if (recordToken)
{
- _compilation.NodeFactory.Resolver.AddModuleTokenForMethod(method, HandleToModuleToken(ref pResolvedToken));
+ ModuleToken methodModuleToken = HandleToModuleToken(ref pResolvedToken);
+ var resolver = _compilation.NodeFactory.Resolver;
+ resolver.AddModuleTokenForMethod(method, methodModuleToken);
}
#else
_compilation.NodeFactory.MetadataManager.GetDependenciesDueToAccess(ref _additionalDependencies, _compilation.NodeFactory, methodIL, method);
@@ -1612,7 +1733,7 @@ private bool isValidStringRef(CORINFO_MODULE_STRUCT_* module, uint metaTOK)
private char* getStringLiteral(CORINFO_MODULE_STRUCT_* module, uint metaTOK, ref int length)
{
- MethodIL methodIL = HandleToObject(module);
+ MethodILScope methodIL = HandleToObject(module);
string s = (string)methodIL.GetObject((int)metaTOK);
length = (int)s.Length;
return (char*)GetPin(s);
diff --git a/src/coreclr/tools/Common/JitInterface/CorInfoTypes.cs b/src/coreclr/tools/Common/JitInterface/CorInfoTypes.cs
index 91114435396e75..73f7a5f4eef7c1 100644
--- a/src/coreclr/tools/Common/JitInterface/CorInfoTypes.cs
+++ b/src/coreclr/tools/Common/JitInterface/CorInfoTypes.cs
@@ -1072,17 +1072,24 @@ public unsafe struct CORINFO_CALL_INFO
public enum CORINFO_DEVIRTUALIZATION_DETAIL
{
- CORINFO_DEVIRTUALIZATION_UNKNOWN, // no details available
- CORINFO_DEVIRTUALIZATION_SUCCESS, // devirtualization was successful
- CORINFO_DEVIRTUALIZATION_FAILED_CANON, // object class was canonical
- CORINFO_DEVIRTUALIZATION_FAILED_COM, // object class was com
- CORINFO_DEVIRTUALIZATION_FAILED_CAST, // object class could not be cast to interface class
- CORINFO_DEVIRTUALIZATION_FAILED_LOOKUP, // interface method could not be found
- CORINFO_DEVIRTUALIZATION_FAILED_DIM, // interface method was default interface method
- CORINFO_DEVIRTUALIZATION_FAILED_SUBCLASS, // object not subclass of base class
- CORINFO_DEVIRTUALIZATION_FAILED_SLOT, // virtual method installed via explicit override
- CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE, // devirtualization crossed version bubble
- CORINFO_DEVIRTUALIZATION_COUNT, // sentinel for maximum value
+ CORINFO_DEVIRTUALIZATION_UNKNOWN, // no details available
+ CORINFO_DEVIRTUALIZATION_SUCCESS, // devirtualization was successful
+ CORINFO_DEVIRTUALIZATION_FAILED_CANON, // object class was canonical
+ CORINFO_DEVIRTUALIZATION_FAILED_COM, // object class was com
+ CORINFO_DEVIRTUALIZATION_FAILED_CAST, // object class could not be cast to interface class
+ CORINFO_DEVIRTUALIZATION_FAILED_LOOKUP, // interface method could not be found
+ CORINFO_DEVIRTUALIZATION_FAILED_DIM, // interface method was default interface method
+ CORINFO_DEVIRTUALIZATION_FAILED_SUBCLASS, // object not subclass of base class
+ CORINFO_DEVIRTUALIZATION_FAILED_SLOT, // virtual method installed via explicit override
+ CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE, // devirtualization crossed version bubble
+ CORINFO_DEVIRTUALIZATION_MULTIPLE_IMPL, // object has multiple implementations of interface class
+ CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_CLASS_DECL, // decl method is defined on class and decl method not in version bubble, and decl method not in closest to version bubble
+ CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_INTERFACE_DECL, // decl method is defined on interface and not in version bubble, and implementation type not entirely defined in bubble
+ CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL, // object class not defined within version bubble
+ CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL_NOT_REFERENCEABLE, // object class cannot be referenced from R2R code due to missing tokens
+ CORINFO_DEVIRTUALIZATION_FAILED_DUPLICATE_INTERFACE, // crossgen2 virtual method algorithm and runtime algorithm differ in the presence of duplicate interface implementations
+ CORINFO_DEVIRTUALIZATION_FAILED_DECL_NOT_REPRESENTABLE, // Decl method cannot be represented in R2R image
+ CORINFO_DEVIRTUALIZATION_COUNT, // sentinel for maximum value
}
public unsafe struct CORINFO_DEVIRTUALIZATION_INFO
@@ -1093,6 +1100,7 @@ public unsafe struct CORINFO_DEVIRTUALIZATION_INFO
public CORINFO_METHOD_STRUCT_* virtualMethod;
public CORINFO_CLASS_STRUCT_* objClass;
public CORINFO_CONTEXT_STRUCT* context;
+ public CORINFO_RESOLVED_TOKEN* pResolvedTokenVirtualMethod;
//
// [Out] results of resolveVirtualMethod.
@@ -1107,6 +1115,8 @@ public unsafe struct CORINFO_DEVIRTUALIZATION_INFO
public bool requiresInstMethodTableArg { get { return _requiresInstMethodTableArg != 0; } set { _requiresInstMethodTableArg = value ? (byte)1 : (byte)0; } }
public CORINFO_CONTEXT_STRUCT* exactContext;
public CORINFO_DEVIRTUALIZATION_DETAIL detail;
+ public CORINFO_RESOLVED_TOKEN resolvedTokenDevirtualizedMethod;
+ public CORINFO_RESOLVED_TOKEN resolvedTokenDevirtualizedUnboxedMethod;
}
//----------------------------------------------------------------------------
diff --git a/src/coreclr/tools/Common/Pgo/PgoFormat.cs b/src/coreclr/tools/Common/Pgo/PgoFormat.cs
index 99164557fee8a4..afc05ae3d8b758 100644
--- a/src/coreclr/tools/Common/Pgo/PgoFormat.cs
+++ b/src/coreclr/tools/Common/Pgo/PgoFormat.cs
@@ -79,6 +79,14 @@ public struct PgoSchemaElem
((InstrumentationKind & PgoInstrumentationKind.MarshalMask) == PgoInstrumentationKind.EightByte)));
}
+ // Flags stored in 'Other' field of TypeHandleHistogram*Count entries.
+ [Flags]
+ public enum ClassProfileFlags : uint
+ {
+ IsInterface = 0x40000000,
+ IsClass = 0x80000000,
+ }
+
public class PgoProcessor
{
private enum InstrumentationDataProcessingState
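
Because the two flags occupy the top bits of the 'Other' field of TypeHandleHistogram*Count entries, a reader of PGO data can test and strip them with simple masks. A small sketch (hypothetical helper; assumes the remaining bits may carry other data):

    IS_INTERFACE = 0x40000000
    IS_CLASS     = 0x80000000

    def split_class_profile_other(other):
        """Split an 'Other' field into its two flag bits and the rest."""
        flags = other & (IS_INTERFACE | IS_CLASS)
        return bool(flags & IS_INTERFACE), bool(flags & IS_CLASS), other & ~flags
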
diff --git a/src/coreclr/tools/Common/TypeSystem/Common/MetadataFieldLayoutAlgorithm.cs b/src/coreclr/tools/Common/TypeSystem/Common/MetadataFieldLayoutAlgorithm.cs
index 2bf40935a38b60..4109bb701aa9ac 100644
--- a/src/coreclr/tools/Common/TypeSystem/Common/MetadataFieldLayoutAlgorithm.cs
+++ b/src/coreclr/tools/Common/TypeSystem/Common/MetadataFieldLayoutAlgorithm.cs
@@ -782,10 +782,9 @@ public LayoutInt CalculateFieldBaseOffset(MetadataType type, bool requiresAlign8
if (!type.IsValueType && type.HasBaseType)
{
cumulativeInstanceFieldPos = type.BaseType.InstanceByteCountUnaligned;
- if (!type.BaseType.InstanceByteCountUnaligned.IsIndeterminate)
+ if (!cumulativeInstanceFieldPos.IsIndeterminate)
{
- cumulativeInstanceFieldPos = type.BaseType.InstanceByteCountUnaligned;
- if (type.BaseType.IsZeroSizedReferenceType && ((MetadataType)type.BaseType).HasLayout())
+ if (requiresAlignedBase && type.BaseType.IsZeroSizedReferenceType && ((MetadataType)type.BaseType).HasLayout())
{
cumulativeInstanceFieldPos += LayoutInt.One;
}
diff --git a/src/coreclr/tools/Common/TypeSystem/Common/MetadataVirtualMethodAlgorithm.cs b/src/coreclr/tools/Common/TypeSystem/Common/MetadataVirtualMethodAlgorithm.cs
index 5a33ab437a5f87..8b878afcc0c061 100644
--- a/src/coreclr/tools/Common/TypeSystem/Common/MetadataVirtualMethodAlgorithm.cs
+++ b/src/coreclr/tools/Common/TypeSystem/Common/MetadataVirtualMethodAlgorithm.cs
@@ -459,6 +459,11 @@ private static void FindBaseUnificationGroup(MetadataType currentType, Unificati
foreach (MethodDesc memberMethod in unificationGroup.Members)
{
+ // If a method is both overridden via MethodImpl and matched by name/sig, we don't remove it from the unification list,
+ // as the local MethodImpl takes priority over the name/sig match and prevents the slot disunification.
+ if (FindSlotDefiningMethodForVirtualMethod(memberMethod) == FindSlotDefiningMethodForVirtualMethod(originalDefiningMethod))
+ continue;
+
MethodDesc nameSigMatchMemberMethod = FindMatchingVirtualMethodOnTypeByNameAndSigWithSlotCheck(memberMethod, currentType, reverseMethodSearch: true);
if (nameSigMatchMemberMethod != null && nameSigMatchMemberMethod != memberMethod)
{
@@ -665,7 +670,7 @@ public static MethodDesc ResolveVariantInterfaceMethodToVirtualMethodOnType(Meth
foreach (TypeDesc iface in currentType.RuntimeInterfaces)
{
- if (iface.CanCastTo(interfaceType))
+ if (iface.HasSameTypeDefinition(interfaceType) && iface.CanCastTo(interfaceType))
{
implMethod = iface.FindMethodOnTypeWithMatchingTypicalMethod(interfaceMethod);
Debug.Assert(implMethod != null);
diff --git a/src/coreclr/tools/Common/TypeSystem/Common/TypeSystemHelpers.cs b/src/coreclr/tools/Common/TypeSystem/Common/TypeSystemHelpers.cs
index 2442f04fee8963..3732586a45e75a 100644
--- a/src/coreclr/tools/Common/TypeSystem/Common/TypeSystemHelpers.cs
+++ b/src/coreclr/tools/Common/TypeSystem/Common/TypeSystemHelpers.cs
@@ -382,6 +382,26 @@ public static MethodDesc ResolveInterfaceMethodTarget(this TypeDesc thisType, Me
return result;
}
+ ///
+ /// Scan the type and its base types for an implementation of an interface method. Returns null if no
+ /// implementation is found.
+ ///
+ public static MethodDesc ResolveInterfaceMethodTargetWithVariance(this TypeDesc thisType, MethodDesc interfaceMethodToResolve)
+ {
+ Debug.Assert(interfaceMethodToResolve.OwningType.IsInterface);
+
+ MethodDesc result = null;
+ TypeDesc currentType = thisType;
+ do
+ {
+ result = currentType.ResolveVariantInterfaceMethodToVirtualMethodOnType(interfaceMethodToResolve);
+ currentType = currentType.BaseType;
+ }
+ while (result == null && currentType != null);
+
+ return result;
+ }
+
public static bool ContainsSignatureVariables(this TypeDesc thisType, bool treatGenericParameterLikeSignatureVariable = false)
{
switch (thisType.Category)
diff --git a/src/coreclr/tools/Common/TypeSystem/IL/EcmaMethodIL.cs b/src/coreclr/tools/Common/TypeSystem/IL/EcmaMethodIL.cs
index 02e5207c90a8b7..ce2ced7e34ded5 100644
--- a/src/coreclr/tools/Common/TypeSystem/IL/EcmaMethodIL.cs
+++ b/src/coreclr/tools/Common/TypeSystem/IL/EcmaMethodIL.cs
@@ -11,7 +11,10 @@
namespace Internal.IL
{
- public sealed partial class EcmaMethodIL : MethodIL
+ // Marker interface implemented by EcmaMethodIL and EcmaMethodILScope
+ public interface IEcmaMethodIL { }
+
+ public sealed partial class EcmaMethodIL : MethodIL, IEcmaMethodIL
{
private readonly EcmaModule _module;
private readonly EcmaMethod _method;
@@ -140,4 +143,46 @@ public override object GetObject(int token, NotFoundBehavior notFoundBehavior =
return _module.GetObject(MetadataTokens.EntityHandle(token), notFoundBehavior);
}
}
+
+ public sealed partial class EcmaMethodILScope : MethodILScope, IEcmaMethodIL
+ {
+ private readonly EcmaModule _module;
+ private readonly EcmaMethod _method;
+
+ public static EcmaMethodILScope Create(EcmaMethod method)
+ {
+ return new EcmaMethodILScope(method);
+ }
+
+ private EcmaMethodILScope(EcmaMethod method)
+ {
+ _method = method;
+ _module = method.Module;
+ }
+
+ public EcmaModule Module
+ {
+ get
+ {
+ return _module;
+ }
+ }
+
+ public override MethodDesc OwningMethod
+ {
+ get
+ {
+ return _method;
+ }
+ }
+
+ public override object GetObject(int token, NotFoundBehavior notFoundBehavior = NotFoundBehavior.Throw)
+ {
+ // UserStrings cannot be wrapped in EntityHandle
+ if ((token & 0xFF000000) == 0x70000000)
+ return _module.GetUserString(MetadataTokens.UserStringHandle(token));
+
+ return _module.GetObject(MetadataTokens.EntityHandle(token), notFoundBehavior);
+ }
+ }
}
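
The user-string special case in GetObject above follows from ECMA-335 token encoding: the top byte of a metadata token selects its table, and 0x70 (mdtString) denotes the user-string heap, which has no entity-handle representation. The mask used in the C# code is equivalent to:

    def is_user_string_token(token):
        """True for ECMA-335 user-string (mdtString, 0x70xxxxxx) tokens."""
        return (token & 0xFF000000) == 0x70000000
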
diff --git a/src/coreclr/tools/Common/TypeSystem/IL/MethodIL.cs b/src/coreclr/tools/Common/TypeSystem/IL/MethodIL.cs
index cfe1f93998b08b..2f8b54249197e2 100644
--- a/src/coreclr/tools/Common/TypeSystem/IL/MethodIL.cs
+++ b/src/coreclr/tools/Common/TypeSystem/IL/MethodIL.cs
@@ -53,13 +53,40 @@ public ILExceptionRegion(
/// Represents a method body.
///
[System.Diagnostics.DebuggerTypeProxy(typeof(MethodILDebugView))]
- public abstract partial class MethodIL
+ public abstract partial class MethodILScope
{
///
- /// Gets the method whose body this represents.
+ /// Gets the method whose body this represents.
///
public abstract MethodDesc OwningMethod { get; }
+ ///
+ /// Gets the open (uninstantiated) version of the .
+ ///
+ public virtual MethodILScope GetMethodILScopeDefinition()
+ {
+ return this;
+ }
+
+ ///
+ /// Resolves a token from within the method body into a type system object
+ /// (typically a , , ,
+ /// or ).
+ ///
+ public abstract Object GetObject(int token, NotFoundBehavior notFoundBehavior = NotFoundBehavior.Throw);
+
+ public override string ToString()
+ {
+ return OwningMethod.ToString();
+ }
+ }
+
+ ///
+ /// Represents a method body.
+ ///
+ [System.Diagnostics.DebuggerTypeProxy(typeof(MethodILDebugView))]
+ public abstract partial class MethodIL : MethodILScope
+ {
///
/// Gets the maximum possible stack depth this method declares.
///
@@ -81,18 +108,16 @@ public abstract partial class MethodIL
///
public abstract LocalVariableDefinition[] GetLocals();
- ///
- /// Resolves a token from within the method body into a type system object
- /// (typically a , , ,
- /// or ).
- ///
- public abstract Object GetObject(int token, NotFoundBehavior notFoundBehavior = NotFoundBehavior.Throw);
-
///
/// Gets a list of exception regions this method body defines.
///
public abstract ILExceptionRegion[] GetExceptionRegions();
+ public override sealed MethodILScope GetMethodILScopeDefinition()
+ {
+ return GetMethodILDefinition();
+ }
+
///
/// Gets the open (uninstantiated) version of the .
///
@@ -100,10 +125,5 @@ public virtual MethodIL GetMethodILDefinition()
{
return this;
}
-
- public override string ToString()
- {
- return OwningMethod.ToString();
- }
}
}
diff --git a/src/coreclr/tools/Common/TypeSystem/Interop/IL/MarshalHelpers.cs b/src/coreclr/tools/Common/TypeSystem/Interop/IL/MarshalHelpers.cs
index 64ba412f6afbcf..458b4c6a6b676e 100644
--- a/src/coreclr/tools/Common/TypeSystem/Interop/IL/MarshalHelpers.cs
+++ b/src/coreclr/tools/Common/TypeSystem/Interop/IL/MarshalHelpers.cs
@@ -450,6 +450,9 @@ internal static MarshallerKind GetMarshallerKind(
{
case NativeTypeKind.Array:
{
+ if (isField)
+ return MarshallerKind.Invalid;
+
var arrayType = (ArrayType)type;
elementMarshallerKind = GetArrayElementMarshallerKind(
diff --git a/src/coreclr/tools/Common/TypeSystem/Interop/IL/MarshalUtils.cs b/src/coreclr/tools/Common/TypeSystem/Interop/IL/MarshalUtils.cs
index 84dcc80be1323e..33c609ee23b807 100644
--- a/src/coreclr/tools/Common/TypeSystem/Interop/IL/MarshalUtils.cs
+++ b/src/coreclr/tools/Common/TypeSystem/Interop/IL/MarshalUtils.cs
@@ -23,8 +23,8 @@ public static bool IsBlittableType(TypeDesc type)
&& !baseType.IsWellKnownType(WellKnownType.Object)
&& !baseType.IsWellKnownType(WellKnownType.ValueType);
- // Type is blittable only if parent is also blittable and is not empty.
- if (hasNonTrivialParent && (!IsBlittableType(baseType) || baseType.IsZeroSizedReferenceType))
+ // Type is blittable only if parent is also blittable.
+ if (hasNonTrivialParent && !IsBlittableType(baseType))
{
return false;
}
diff --git a/src/coreclr/tools/ILVerify/ILVerify.csproj b/src/coreclr/tools/ILVerify/ILVerify.csproj
index 606b4ea4c8fa7e..28f18b58cb004a 100644
--- a/src/coreclr/tools/ILVerify/ILVerify.csproj
+++ b/src/coreclr/tools/ILVerify/ILVerify.csproj
@@ -1,7 +1,7 @@
Exe
- netcoreapp3.1
+ $(NetCoreAppToolCurrent)
true
false
Major
diff --git a/src/coreclr/tools/StressLogAnalyzer/StressLogPlugin/StressLogPlugin.cpp b/src/coreclr/tools/StressLogAnalyzer/StressLogPlugin/StressLogPlugin.cpp
index 8b4efa8aa38a56..42e778312dcbc9 100644
--- a/src/coreclr/tools/StressLogAnalyzer/StressLogPlugin/StressLogPlugin.cpp
+++ b/src/coreclr/tools/StressLogAnalyzer/StressLogPlugin/StressLogPlugin.cpp
@@ -139,6 +139,7 @@ d(IS_PLAN_PINNED_PLUG, ThreadStressLog::gcPlanPinnedPlugMsg())
d(IS_DESIRED_NEW_ALLOCATION, ThreadStressLog::gcDesiredNewAllocationMsg()) \
d(IS_MAKE_UNUSED_ARRAY, ThreadStressLog::gcMakeUnusedArrayMsg()) \
d(IS_START_BGC_THREAD, ThreadStressLog::gcStartBgcThread()) \
+d(IS_RELOCATE_REFERENCE, ThreadStressLog::gcRelocateReferenceMsg()) \
d(IS_UNINTERESTING, "")
enum InterestingStringId : unsigned char
@@ -469,6 +470,26 @@ bool FilterMessage(StressLog::StressLogHeader* hdr, ThreadStressLog* tsl, uint32
case IS_START_BGC_THREAD:
RememberThreadForHeap(tsl->threadId, (int64_t)args[0], GC_THREAD_BG);
break;
+ case IS_RELOCATE_REFERENCE:
+ if (s_valueFilterCount > 0)
+ {
+ size_t src = (size_t)args[0];
+ size_t dst_from = (size_t)args[1];
+ size_t dst_to = (size_t)args[2];
+ // Print this message if the source or destination contains (part of) the range we're looking for.
+ for (int i = 0; i < s_valueFilterCount; i++)
+ {
+ if ((s_valueFilter[i].end < src || src < s_valueFilter[i].start) &&
+ (s_valueFilter[i].end < dst_from || dst_from < s_valueFilter[i].start) &&
+ (s_valueFilter[i].end < dst_to || dst_to < s_valueFilter[i].start))
+ {
+ // empty intersection with both the source and the destination
+ continue;
+ }
+ return true;
+ }
+ }
+ break;
}
return fLevelFilter || s_interestingStringFilter[isd];
}
@@ -521,7 +542,7 @@ static ThreadStressLogDesc s_threadStressLogDesc[MAX_THREADSTRESSLOGS];
static int s_threadStressLogCount;
static LONG s_wrappedWriteThreadCount;
-static const LONG MAX_MESSAGE_COUNT = 1024 * 1024 * 1024;
+static const LONG MAX_MESSAGE_COUNT = 64 * 1024 * 1024;
static StressThreadAndMsg* s_threadMsgBuf;
static volatile LONG s_msgCount = 0;
static volatile LONG s_totalMsgCount = 0;
@@ -956,6 +977,10 @@ bool ParseOptions(int argc, wchar_t* argv[])
return false;
}
}
+ else
+ {
+ return false;
+ }
i++;
}
return true;
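
The new IS_RELOCATE_REFERENCE filter keeps a message when any of the three reported addresses falls inside a requested value-filter range. The negated comparison in the C++ is the usual closed-interval membership test; in sketch form (hypothetical filter objects with `start`/`end` attributes):

    def overlaps(start, end, value):
        return not (end < value or value < start)  # i.e. start <= value <= end

    def keep_relocate_message(filters, src, dst_from, dst_to):
        """Keep the message if any filter range touches src, dst_from, or dst_to."""
        return any(overlaps(f.start, f.end, v)
                   for f in filters
                   for v in (src, dst_from, dst_to))
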
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/CodeGen/ReadyToRunObjectWriter.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/CodeGen/ReadyToRunObjectWriter.cs
index e54207ff00fb17..74da51246b620c 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/CodeGen/ReadyToRunObjectWriter.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/CodeGen/ReadyToRunObjectWriter.cs
@@ -198,11 +198,7 @@ public void EmitPortableExecutable()
if (_nodeFactory.CompilationModuleGroup.IsCompositeBuildMode && _componentModule == null)
{
- headerBuilder = PEHeaderProvider.Create(
- imageCharacteristics: Characteristics.ExecutableImage | Characteristics.Dll,
- dllCharacteristics: default(DllCharacteristics),
- Subsystem.Unknown,
- _nodeFactory.Target);
+ headerBuilder = PEHeaderProvider.Create(Subsystem.Unknown, _nodeFactory.Target);
peIdProvider = new Func, BlobContentId>(content => BlobContentId.FromHash(CryptographicHashProvider.ComputeSourceHash(content)));
timeDateStamp = null;
r2rHeaderExportSymbol = _nodeFactory.Header;
@@ -210,7 +206,7 @@ public void EmitPortableExecutable()
else
{
PEReader inputPeReader = (_componentModule != null ? _componentModule.PEReader : _nodeFactory.CompilationModuleGroup.CompilationModuleSet.First().PEReader);
- headerBuilder = PEHeaderProvider.Copy(inputPeReader.PEHeaders, _nodeFactory.Target);
+ headerBuilder = PEHeaderProvider.Create(inputPeReader.PEHeaders.PEHeader.Subsystem, _nodeFactory.Target);
timeDateStamp = inputPeReader.PEHeaders.CoffHeader.TimeDateStamp;
r2rHeaderExportSymbol = null;
}
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/CompilationModuleGroup.ReadyToRun.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/CompilationModuleGroup.ReadyToRun.cs
index e2ec6059d5dc77..6ef8c57322d6f9 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/CompilationModuleGroup.ReadyToRun.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/CompilationModuleGroup.ReadyToRun.cs
@@ -25,6 +25,14 @@ partial class CompilationModuleGroup
/// True if the given type versions with the current compilation module group
public virtual bool VersionsWithType(TypeDesc typeDesc) => ContainsType(typeDesc);
+ ///
+ /// Returns true when all of the tokens necessary to refer to a given type belong to the same version
+ /// bubble as the compilation module group. By default return the same outcome as VersionsWithType.
+ ///
+ /// Type to check
+ /// True if the given type can safely be referred to within the current compilation module group
+ public virtual bool VersionsWithTypeReference(TypeDesc typeDesc) => VersionsWithType(typeDesc);
+
///
/// Returns true when a given method belongs to the same version bubble as the compilation module group.
/// By default return the same outcome as ContainsMethodBody.
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/DevirtualizationManager.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/DevirtualizationManager.cs
index 189dd6a0adc563..ab85bb84936f71 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/DevirtualizationManager.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/DevirtualizationManager.cs
@@ -2,6 +2,7 @@
// The .NET Foundation licenses this file to you under the MIT license.
using Internal.TypeSystem;
+using CORINFO_DEVIRTUALIZATION_DETAIL = Internal.JitInterface.CORINFO_DEVIRTUALIZATION_DETAIL;
namespace ILCompiler.DependencyAnalysis.ReadyToRun
{
@@ -24,44 +25,191 @@ public override bool IsEffectivelySealed(MethodDesc method)
return _compilationModuleGroup.VersionsWithMethodBody(method) && base.IsEffectivelySealed(method);
}
- protected override MethodDesc ResolveVirtualMethod(MethodDesc declMethod, DefType implType)
+ protected override MethodDesc ResolveVirtualMethod(MethodDesc declMethod, DefType implType, out CORINFO_DEVIRTUALIZATION_DETAIL devirtualizationDetail)
{
- if (_compilationModuleGroup.VersionsWithMethodBody(declMethod) &&
- _compilationModuleGroup.VersionsWithType(implType))
+ devirtualizationDetail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_UNKNOWN;
+
+ // Versioning resiliency rules here are complex.
+ // Decl method checking
+ // 1. If the declMethod is a class method, we do not need a VersionsWithCode check on the method itself,
+ // but the metadata for the open definition must be within the bubble, or the decl method must live on
+ // the direct parent of a type which is in the version bubble relative to the implType.
+ // 2. If the declMethod is an interface method, we can allow it if the interface type is defined within
+ // the version bubble, or if the implementation type hierarchy is entirely within the version bubble
+ // (excluding System.Object and System.ValueType).
+ // 3. At all times the declMethod must be representable as a token. That check is handled internally in
+ // the jit interface logic after the logic that executes here.
+ //
+ // ImplType checking
+ // 1. At all times the metadata definition of the implementation type must version with the application.
+ // 2. Additionally, the exact implementation type must be representable within the R2R image
+ // (this is checked via VersionsWithTypeReference).
+ //
+ // Result method checking
+ // 1. Ensure that the resolved result versions with the code, or is the decl method.
+ // 2. Devirtualizing to a default interface method is not currently considered useful, and how to check
+ // it for version resilience has not yet been analyzed.
+ // 3. When checking that the resolved result versions with the code, validate that all of the types
+ // from implType to the owning type of the resolved result method also version with the code.
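+ //
+ // Illustrative example (not from the source): given Derived : Middle : Base : Object, where only
+ // Derived and Middle version with the bubble, FindVersionBubbleEdge(implType: Derived) returns Base
+ // and reports Middle as the last type still in the bubble. A resolved override declared on Base or
+ // Object is then rejected, unless the resolution is exactly the decl method and that decl method's
+ // owning type (Base here) sits directly above the bubble edge.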
+
+ bool declMethodCheckFailed;
+ var firstTypeInImplTypeHierarchyNotInVersionBubble = FindVersionBubbleEdge(_compilationModuleGroup, implType, out TypeDesc lastTypeInHierarchyInVersionBubble);
+ if (!declMethod.OwningType.IsInterface)
+ {
+ if (_compilationModuleGroup.VersionsWithType(declMethod.OwningType.GetTypeDefinition()))
+ {
+ declMethodCheckFailed = false;
+ }
+ else
+ {
+ if (firstTypeInImplTypeHierarchyNotInVersionBubble != declMethod.OwningType)
+ {
+ devirtualizationDetail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_CLASS_DECL;
+ declMethodCheckFailed = true;
+ }
+ else
+ {
+ declMethodCheckFailed = false;
+ }
+ }
+ }
+ else
+ {
+ if (_compilationModuleGroup.VersionsWithType(declMethod.OwningType.GetTypeDefinition()))
+ {
+ declMethodCheckFailed = false;
+ }
+ else
+ {
+ if (firstTypeInImplTypeHierarchyNotInVersionBubble == null || implType.IsValueType || firstTypeInImplTypeHierarchyNotInVersionBubble.IsObject)
+ declMethodCheckFailed = false;
+ else
+ {
+ devirtualizationDetail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_INTERFACE_DECL;
+ declMethodCheckFailed = true;
+ }
+ }
+ }
+
+ if (declMethodCheckFailed)
+ return null;
+
+ // Impl type check
+ if (!_compilationModuleGroup.VersionsWithType(implType.GetTypeDefinition()))
{
- /**
- * It is possible for us to hit a scenario where a type implements
- * the same interface more than once due to generic instantiations.
- *
- * In some instances of those cases, the VirtualMethodAlgorithm
- * does not produce identical output as CoreCLR would, leading to
- * behavioral differences in compiled outputs.
- *
- * Instead of fixing the algorithm (in which the work to fix it is
- * tracked in https://github.com/dotnet/corert/issues/208), the
- * following duplication detection algorithm will detect the case and
- * refuse to devirtualize for those scenarios.
- */
- if (declMethod.OwningType.IsInterface)
+ devirtualizationDetail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL;
+ return null;
+ }
+
+ if (!_compilationModuleGroup.VersionsWithTypeReference(implType))
+ {
+ devirtualizationDetail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL_NOT_REFERENCEABLE;
+ return null;
+ }
+
+ /**
+ * It is possible for us to hit a scenario where a type implements
+ * the same interface more than once due to generic instantiations.
+ *
+ * In some instances of those cases, the VirtualMethodAlgorithm
+ * does not produce identical output as CoreCLR would, leading to
+ * behavioral differences in compiled outputs.
+ *
+ * Instead of fixing the algorithm (the work to fix it is
+ * tracked in https://github.com/dotnet/corert/issues/208), the
+ * following duplication detection algorithm will detect the case and
+ * refuse to devirtualize for those scenarios.
+ */
+ if (declMethod.OwningType.IsInterface)
+ {
+ DefType[] implTypeRuntimeInterfaces = implType.RuntimeInterfaces;
+ for (int i = 0; i < implTypeRuntimeInterfaces.Length; i++)
{
- DefType[] implTypeRuntimeInterfaces = implType.RuntimeInterfaces;
- for (int i = 0; i < implTypeRuntimeInterfaces.Length; i++)
+ for (int j = i + 1; j < implTypeRuntimeInterfaces.Length; j++)
{
- for (int j = i + 1; j < implTypeRuntimeInterfaces.Length; j++)
+ if (implTypeRuntimeInterfaces[i] == implTypeRuntimeInterfaces[j])
{
- if (implTypeRuntimeInterfaces[i] == implTypeRuntimeInterfaces[j])
- {
- return null;
- }
+ devirtualizationDetail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_FAILED_DUPLICATE_INTERFACE;
+ return null;
}
}
}
+ }
+
- return base.ResolveVirtualMethod(declMethod, implType);
+ if (declMethod.OwningType.IsInterface)
+ {
+ // Check for a ComImport class, as we don't support devirtualization of ComImport classes.
+ // Run this check on all platforms, to avoid possible future versioning problems if we
+ // implement COM on other platforms.
+ if (!implType.IsObject)
+ {
+ TypeDesc typeThatDerivesFromObject = implType;
+ while (!typeThatDerivesFromObject.BaseType.IsObject)
+ {
+ typeThatDerivesFromObject = typeThatDerivesFromObject.BaseType;
+ }
+
+ if (typeThatDerivesFromObject is Internal.TypeSystem.Ecma.EcmaType ecmaType)
+ {
+ if ((ecmaType.Attributes & System.Reflection.TypeAttributes.Import) != 0)
+ {
+ devirtualizationDetail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_FAILED_COM;
+ return null;
+ }
+ }
+ }
}
- // Cannot devirtualize across version bubble boundary
+ MethodDesc resolvedVirtualMethod = base.ResolveVirtualMethod(declMethod, implType, out devirtualizationDetail);
+
+ if (resolvedVirtualMethod != null)
+ {
+ // Validate that the inheritance chain for resolution is within version bubble
+ // The rule is somewhat tricky here.
+ // If the resolved method is the declMethod, then only types which derive from the
+ // OwningType of the decl method need to be within the version bubble.
+ //
+ // If not, then all the types from the implType to the owning type of the resolved
+ // virtual method must be within the version bubble.
+ if (firstTypeInImplTypeHierarchyNotInVersionBubble == null)
+ {
+ // The entire type hierarchy of the implType is within the version bubble, and there is no more to check
+ return resolvedVirtualMethod;
+ }
+
+ if (declMethod == resolvedVirtualMethod && firstTypeInImplTypeHierarchyNotInVersionBubble == declMethod.OwningType)
+ {
+ // Exact match for use of decl method check
+ return resolvedVirtualMethod;
+ }
+
+ // Ensure that declMethod is implemented on a type within the type hierarchy that is within the version bubble
+ for (TypeDesc typeExamine = resolvedVirtualMethod.OwningType; typeExamine != null; typeExamine = typeExamine.BaseType)
+ {
+ if (typeExamine == lastTypeInHierarchyInVersionBubble)
+ {
+ return resolvedVirtualMethod;
+ }
+ }
+ devirtualizationDetail = CORINFO_DEVIRTUALIZATION_DETAIL.CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE;
+ }
+
+ // Cannot devirtualize, as we can't resolve to a target.
return null;
+
+ // This function walks the base type chain and returns the first type whose metadata is not within the
+ // version bubble of the application; the out parameter receives the last type examined before that edge,
+ // i.e. the base-most type that is still within the bubble.
+ static TypeDesc FindVersionBubbleEdge(CompilationModuleGroup compilationModuleGroup, TypeDesc type, out TypeDesc lastTypeInVersionBubble)
+ {
+ lastTypeInVersionBubble = null;
+ while (compilationModuleGroup.VersionsWithType(type.GetTypeDefinition()))
+ {
+ lastTypeInVersionBubble = type;
+ type = type.BaseType;
+ if (type == null)
+ return null;
+ }
+ return type;
+ }
}
}
}
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/InstrumentationDataTableNode.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/InstrumentationDataTableNode.cs
index 5b4b2f5c7826da..90883bf0b073d4 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/InstrumentationDataTableNode.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/InstrumentationDataTableNode.cs
@@ -93,7 +93,7 @@ private int TypeToInt(TypeSystemEntityOrUnknown type)
{
return computedInt;
}
- if (type.AsType != null && _compilationGroup.VersionsWithType(type.AsType))
+ if (type.AsType != null && _compilationGroup.VersionsWithTypeReference(type.AsType))
{
Import typeHandleImport = (Import)_symbolFactory.CreateReadyToRunHelper(ReadyToRunHelperId.TypeHandle, type.AsType);
_imports.Add(typeHandleImport);
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/ModuleTokenResolver.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/ModuleTokenResolver.cs
index 8ee9b426d5626c..86d507ab57af8d 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/ModuleTokenResolver.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/ModuleTokenResolver.cs
@@ -139,7 +139,8 @@ public void AddModuleTokenForMethod(MethodDesc method, ModuleToken token)
{
MemberReference memberRef = token.MetadataReader.GetMemberReference((MemberReferenceHandle)token.Handle);
EntityHandle owningTypeHandle = memberRef.Parent;
- AddModuleTokenForType(method.OwningType, new ModuleToken(token.Module, owningTypeHandle));
+ TypeDesc owningType = (TypeDesc)token.Module.GetObject(owningTypeHandle, NotFoundBehavior.Throw);
+ AddModuleTokenForType(owningType, new ModuleToken(token.Module, owningTypeHandle));
memberRef.DecodeMethodSignature(new TokenResolverProvider(this, token.Module), this);
}
}
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/RuntimeFunctionsTableNode.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/RuntimeFunctionsTableNode.cs
index c2ab705ef5c2f6..f2d72fd9d4d2f1 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/RuntimeFunctionsTableNode.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/RuntimeFunctionsTableNode.cs
@@ -67,6 +67,7 @@ public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false)
LayoutRuntimeFunctions();
ObjectDataBuilder runtimeFunctionsBuilder = new ObjectDataBuilder(factory, relocsOnly);
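+ // Each runtime function entry is a set of 32-bit RVAs, so the table must start at 4-byte alignment.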
+ runtimeFunctionsBuilder.RequireInitialAlignment(4);
// Add the symbol representing this object node
runtimeFunctionsBuilder.AddSymbol(this);
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/SignatureBuilder.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/SignatureBuilder.cs
index a91aecaa25e417..4f0fc3ddb21c4f 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/SignatureBuilder.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/SignatureBuilder.cs
@@ -417,7 +417,7 @@ public void EmitMethodSignature(
flags |= (uint)ReadyToRunMethodSigFlags.READYTORUN_METHOD_SIG_OwnerType;
}
- EmitMethodSpecificationSignature(method, flags, enforceDefEncoding, context);
+ EmitMethodSpecificationSignature(method, flags, enforceDefEncoding, enforceOwningType, context);
if (method.ConstrainedType != null)
{
@@ -438,9 +438,10 @@ public void EmitMethodRefToken(ModuleToken memberRefToken)
}
private void EmitMethodSpecificationSignature(MethodWithToken method,
- uint flags, bool enforceDefEncoding, SignatureContext context)
+ uint flags, bool enforceDefEncoding, bool enforceOwningType, SignatureContext context)
{
ModuleToken methodToken = method.Token;
+
if (method.Method.HasInstantiation && !method.Method.IsGenericMethodDefinition)
{
flags |= (uint)ReadyToRunMethodSigFlags.READYTORUN_METHOD_SIG_MethodInstantiation;
@@ -469,7 +470,23 @@ private void EmitMethodSpecificationSignature(MethodWithToken method,
throw new NotImplementedException();
}
+ if ((method.Token.Module != context.LocalContext) && !enforceOwningType)
+ {
+ // If enforceOwningType is set, this is an entry for the InstanceEntryPoint or InstrumentationDataTable nodes
+ // which are not used in quite the same way, and for which the MethodDef is always matched to the module
+ // which defines the type
+ flags |= (uint)ReadyToRunMethodSigFlags.READYTORUN_METHOD_SIG_UpdateContext;
+ }
+
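+ // Encoding order from this point: flags (uint); when UpdateContext is set, the module index (uint)
+ // followed by a switch of the signature context to that module; the owner type signature when
+ // OwnerType is set; and finally the method token or spec.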
EmitUInt(flags);
+
+ if ((flags & (uint)ReadyToRunMethodSigFlags.READYTORUN_METHOD_SIG_UpdateContext) != 0)
+ {
+ uint moduleIndex = (uint)context.Resolver.GetModuleIndex(method.Token.Module);
+ EmitUInt(moduleIndex);
+ context = context.InnerContext(method.Token.Module);
+ }
+
if ((flags & (uint)ReadyToRunMethodSigFlags.READYTORUN_METHOD_SIG_OwnerType) != 0)
{
// The type here should be the type referred to by the memberref (if this is one, not the type where the method was eventually found!
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/VirtualResolutionFixupSignature.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/VirtualResolutionFixupSignature.cs
new file mode 100644
index 00000000000000..20a428e3931100
--- /dev/null
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/VirtualResolutionFixupSignature.cs
@@ -0,0 +1,104 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Diagnostics;
+
+using Internal.Text;
+using Internal.TypeSystem;
+using Internal.TypeSystem.Ecma;
+using Internal.TypeSystem.Interop;
+using Internal.ReadyToRunConstants;
+using Internal.CorConstants;
+using Internal.JitInterface;
+
+namespace ILCompiler.DependencyAnalysis.ReadyToRun
+{
+ public class VirtualResolutionFixupSignature : Signature, IEquatable<VirtualResolutionFixupSignature>
+ {
+ private readonly ReadyToRunFixupKind _fixupKind;
+
+ private readonly MethodWithToken _declMethod;
+ private readonly TypeDesc _implType;
+ private readonly MethodWithToken _implMethod;
+
+ public VirtualResolutionFixupSignature(ReadyToRunFixupKind fixupKind, MethodWithToken declMethod, TypeDesc implType, MethodWithToken implMethod)
+ {
+ _fixupKind = fixupKind;
+ _declMethod = declMethod;
+ _implType = implType;
+ _implMethod = implMethod;
+
+ // Ensure types in signature are loadable and resolvable, otherwise we'll fail later while emitting the signature
+ CompilerTypeSystemContext compilerContext = (CompilerTypeSystemContext)declMethod.Method.Context;
+ compilerContext.EnsureLoadableMethod(declMethod.Method);
+ compilerContext.EnsureLoadableType(implType);
+ if (implMethod != null)
+ compilerContext.EnsureLoadableMethod(implMethod.Method);
+ }
+
+ public override int ClassCode => 1092747257;
+
+ public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false)
+ {
+ ObjectDataSignatureBuilder dataBuilder = new ObjectDataSignatureBuilder();
+
+ if (!relocsOnly)
+ {
+ dataBuilder.AddSymbol(this);
+
+ SignatureContext innerContext = dataBuilder.EmitFixup(factory, _fixupKind, _declMethod.Token.Module, factory.SignatureContext);
+ dataBuilder.EmitUInt((uint)(_implMethod != null ? ReadyToRunVirtualFunctionOverrideFlags.VirtualFunctionOverriden : ReadyToRunVirtualFunctionOverrideFlags.None));
+ dataBuilder.EmitMethodSignature(_declMethod, enforceDefEncoding: false, enforceOwningType: false, innerContext, isInstantiatingStub: false);
+ dataBuilder.EmitTypeSignature(_implType, innerContext);
+ if (_implMethod != null)
+ dataBuilder.EmitMethodSignature(_implMethod, enforceDefEncoding: false, enforceOwningType: false, innerContext, isInstantiatingStub: false);
+ }
+
+ return dataBuilder.ToObjectData();
+ }
+
+ public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb)
+ {
+ sb.Append(nameMangler.CompilationUnitPrefix);
+ sb.Append($@"VirtualResolutionFixupSignature({_fixupKind.ToString()}): ");
+ _declMethod.AppendMangledName(nameMangler, sb);
+ sb.Append(":");
+ sb.Append(nameMangler.GetMangledTypeName(_implType));
+ sb.Append(":");
+ if (_implMethod == null)
+ sb.Append("(null)");
+ else
+ _implMethod.AppendMangledName(nameMangler, sb);
+ }
+
+ public override int CompareToImpl(ISortableNode other, CompilerComparer comparer)
+ {
+ VirtualResolutionFixupSignature otherNode = (VirtualResolutionFixupSignature)other;
+ int result = ((int)_fixupKind).CompareTo((int)otherNode._fixupKind);
+ if (result != 0)
+ return result;
+
+ result = comparer.Compare(_implType, otherNode._implType);
+ if (result != 0)
+ return result;
+
+ result = _declMethod.CompareTo(otherNode._declMethod, comparer);
+ if (result != 0)
+ return result;
+
+ // Handle null _implMethod scenarios (reference equality covers the both-null case)
+ if (_implMethod == otherNode._implMethod)
+ return 0;
+ if (_implMethod == null)
+ return -1;
+ if (otherNode._implMethod == null)
+ return 1;
+
+ return _implMethod.CompareTo(otherNode._implMethod, comparer);
+ }
+
+ public override string ToString()
+ {
+ return $"VirtualResolutionFixupSignature {_fixupKind} {_declMethod} {_implType} {_implMethod}";
+ }
+
+ public bool Equals(VirtualResolutionFixupSignature other) => object.ReferenceEquals(other, this);
+ }
+}
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRunCodegenNodeFactory.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRunCodegenNodeFactory.cs
index 1a577b32b86246..da9c4592b549a6 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRunCodegenNodeFactory.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRunCodegenNodeFactory.cs
@@ -53,7 +53,7 @@ public sealed class NodeFactory
public TargetDetails Target { get; }
- public CompilationModuleGroup CompilationModuleGroup { get; }
+ public ReadyToRunCompilationModuleGroupBase CompilationModuleGroup { get; }
public ProfileDataManager ProfileDataManager { get; }
@@ -149,7 +149,7 @@ public override int GetHashCode()
public NodeFactory(
CompilerTypeSystemContext context,
- CompilationModuleGroup compilationModuleGroup,
+ ReadyToRunCompilationModuleGroupBase compilationModuleGroup,
ProfileDataManager profileDataManager,
NameMangler nameMangler,
CopiedCorHeaderNode corHeaderNode,
@@ -165,7 +165,7 @@ public NodeFactory(
MetadataManager = new ReadyToRunTableManager(context);
CopiedCorHeaderNode = corHeaderNode;
DebugDirectoryNode = debugDirectoryNode;
- Resolver = new ModuleTokenResolver(compilationModuleGroup, TypeSystemContext);
+ Resolver = compilationModuleGroup.Resolver;
Header = new GlobalHeaderNode(Target, flags);
if (!win32Resources.IsEmpty)
Win32ResourcesNode = new Win32ResourcesNode(win32Resources);
@@ -243,11 +243,16 @@ private void CreateNodeCaches()
return new TypeFixupSignature(key.FixupKind, key.TypeDesc);
});
+ _virtualResolutionSignatures = new NodeCache<VirtualResolutionFixupSignatureFixupKey, VirtualResolutionFixupSignature>(key =>
+ {
+ return new ReadyToRun.VirtualResolutionFixupSignature(key.FixupKind, key.DeclMethod, key.ImplType, key.ImplMethod);
+ });
+
_dynamicHelperCellCache = new NodeCache(key =>
{
return new DelayLoadHelperMethodImport(
this,
- DispatchImports,
+ DispatchImports,
ReadyToRunHelper.DelayLoad_Helper_Obj,
key.Method,
useVirtualCall: false,
@@ -504,6 +509,50 @@ public TypeFixupSignature TypeSignature(ReadyToRunFixupKind fixupKind, TypeDesc
return _typeSignatures.GetOrAdd(fixupKey);
}
+ private struct VirtualResolutionFixupSignatureFixupKey : IEquatable<VirtualResolutionFixupSignatureFixupKey>
+ {
+ public readonly ReadyToRunFixupKind FixupKind;
+ public readonly MethodWithToken DeclMethod;
+ public readonly TypeDesc ImplType;
+ public readonly MethodWithToken ImplMethod;
+
+ public VirtualResolutionFixupSignatureFixupKey(ReadyToRunFixupKind fixupKind, MethodWithToken declMethod, TypeDesc implType, MethodWithToken implMethod)
+ {
+ FixupKind = fixupKind;
+ DeclMethod = declMethod;
+ ImplType = implType;
+ ImplMethod = implMethod;
+ }
+
+ public bool Equals(VirtualResolutionFixupSignatureFixupKey other)
+ {
+ return FixupKind == other.FixupKind && DeclMethod.Equals(other.DeclMethod) && ImplType == other.ImplType &&
+ ((ImplMethod == null && other.ImplMethod == null) || (ImplMethod != null && ImplMethod.Equals(other.ImplMethod)));
+ }
+
+ public override bool Equals(object obj)
+ {
+ return obj is VirtualResolutionFixupSignatureFixupKey other && Equals(other);
+ }
+
+ public override int GetHashCode()
+ {
+ if (ImplMethod != null)
+ return HashCode.Combine(FixupKind, DeclMethod, ImplType, ImplMethod);
+ else
+ return HashCode.Combine(FixupKind, DeclMethod, ImplType);
+ }
+
+ public override string ToString() => $"'{FixupKind}' '{DeclMethod}' on '{ImplType}' results in '{(ImplMethod != null ? ImplMethod.ToString() : "null")}'";
+ }
+
+ private NodeCache<VirtualResolutionFixupSignatureFixupKey, VirtualResolutionFixupSignature> _virtualResolutionSignatures;
+
+ public VirtualResolutionFixupSignature VirtualResolutionFixupSignature(ReadyToRunFixupKind fixupKind, MethodWithToken declMethod, TypeDesc implType, MethodWithToken implMethod)
+ {
+ return _virtualResolutionSignatures.GetOrAdd(new VirtualResolutionFixupSignatureFixupKey(fixupKind, declMethod, implType, implMethod));
+ }
+
private struct ImportThunkKey : IEquatable
{
public readonly ReadyToRunHelper Helper;
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRunSymbolNodeFactory.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRunSymbolNodeFactory.cs
index a5f6bbeb7c9e53..38b1893f531628 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRunSymbolNodeFactory.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRunSymbolNodeFactory.cs
@@ -135,6 +135,11 @@ private void CreateNodeCaches()
);
});
+ _virtualFunctionOverrideCache = new NodeCache<VirtualResolutionFixupSignature, ISymbolNode>(key =>
+ {
+ return new PrecodeHelperImport(_codegenNodeFactory, key);
+ });
+
_genericLookupHelpers = new NodeCache(key =>
{
return new DelayLoadHelperImport(
@@ -441,6 +446,15 @@ public ISymbolNode CheckTypeLayout(TypeDesc type)
return _checkTypeLayoutCache.GetOrAdd(type);
}
+ private NodeCache<VirtualResolutionFixupSignature, ISymbolNode> _virtualFunctionOverrideCache;
+
+ public ISymbolNode CheckVirtualFunctionOverride(MethodWithToken declMethod, TypeDesc implType, MethodWithToken implMethod)
+ {
+ return _virtualFunctionOverrideCache.GetOrAdd(_codegenNodeFactory.VirtualResolutionFixupSignature(
+ _verifyTypeAndFieldLayout ? ReadyToRunFixupKind.Verify_VirtualFunctionOverride : ReadyToRunFixupKind.Check_VirtualFunctionOverride,
+ declMethod, implType, implMethod));
+ }
+
struct MethodAndCallSite : IEquatable
{
public readonly MethodWithToken Method;
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/NoMethodsCompilationModuleGroup.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/NoMethodsCompilationModuleGroup.cs
index a98dd12ef5b27e..beaf854a71adc2 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/NoMethodsCompilationModuleGroup.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/NoMethodsCompilationModuleGroup.cs
@@ -16,7 +16,7 @@ namespace ILCompiler
public class NoMethodsCompilationModuleGroup : ReadyToRunCompilationModuleGroupBase
{
public NoMethodsCompilationModuleGroup(
- TypeSystemContext context,
+ CompilerTypeSystemContext context,
bool isCompositeBuildMode,
bool isInputBubble,
IEnumerable compilationModuleSet,
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunCodegenCompilation.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunCodegenCompilation.cs
index 0573222b6791be..1dce9f8f67f538 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunCodegenCompilation.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunCodegenCompilation.cs
@@ -123,9 +123,9 @@ public bool IsEffectivelySealed(MethodDesc method)
return _devirtualizationManager.IsEffectivelySealed(method);
}
- public MethodDesc ResolveVirtualMethod(MethodDesc declMethod, TypeDesc implType)
+ public MethodDesc ResolveVirtualMethod(MethodDesc declMethod, TypeDesc implType, out CORINFO_DEVIRTUALIZATION_DETAIL devirtualizationDetail)
{
- return _devirtualizationManager.ResolveVirtualMethod(declMethod, implType);
+ return _devirtualizationManager.ResolveVirtualMethod(declMethod, implType, out devirtualizationDetail);
}
public bool IsModuleInstrumented(ModuleDesc module)
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunCodegenCompilationBuilder.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunCodegenCompilationBuilder.cs
index de5ca06d1d1e8d..fa4d3182e9afb6 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunCodegenCompilationBuilder.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunCodegenCompilationBuilder.cs
@@ -248,7 +248,7 @@ public override ICompilation ToCompilation()
NodeFactory factory = new NodeFactory(
_context,
- _compilationGroup,
+ (ReadyToRunCompilationModuleGroupBase)_compilationGroup,
_profileData,
_nameMangler,
corHeaderNode,
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunCompilationModuleGroupBase.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunCompilationModuleGroupBase.cs
index 92ef5346c770fd..08cdc64d09819d 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunCompilationModuleGroupBase.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunCompilationModuleGroupBase.cs
@@ -23,12 +23,14 @@ public abstract class ReadyToRunCompilationModuleGroupBase : CompilationModuleGr
private readonly bool _isInputBubble;
private readonly ConcurrentDictionary _layoutCompilationUnits = new ConcurrentDictionary();
private readonly ConcurrentDictionary _versionsWithTypeCache = new ConcurrentDictionary();
+ private readonly ConcurrentDictionary<TypeDesc, bool> _versionsWithTypeReferenceCache = new ConcurrentDictionary<TypeDesc, bool>();
private readonly ConcurrentDictionary _versionsWithMethodCache = new ConcurrentDictionary();
private readonly Dictionary _moduleCompilationUnits = new Dictionary();
private CompilationUnitIndex _nextCompilationUnit = CompilationUnitIndex.FirstDynamicallyAssigned;
+ private ModuleTokenResolver _tokenResolver = null;
public ReadyToRunCompilationModuleGroupBase(
- TypeSystemContext context,
+ CompilerTypeSystemContext context,
bool isCompositeBuildMode,
bool isInputBubble,
IEnumerable compilationModuleSet,
@@ -45,6 +47,16 @@ public ReadyToRunCompilationModuleGroupBase(
_versionBubbleModuleSet.UnionWith(_compilationModuleSet);
_compileGenericDependenciesFromVersionBubbleModuleSet = compileGenericDependenciesFromVersionBubbleModuleSet;
+
+ _tokenResolver = new ModuleTokenResolver(this, context);
+ }
+
+ public ModuleTokenResolver Resolver => _tokenResolver;
+
+ public void AssociateTokenResolver(ModuleTokenResolver tokenResolver)
+ {
+ Debug.Assert(_tokenResolver == null);
+ _tokenResolver = tokenResolver;
}
public sealed override bool ContainsType(TypeDesc type)
@@ -292,6 +304,11 @@ public sealed override bool VersionsWithType(TypeDesc typeDesc)
_versionsWithTypeCache.GetOrAdd(typeDesc, ComputeTypeVersionsWithCode);
}
+ public sealed override bool VersionsWithTypeReference(TypeDesc typeDesc)
+ {
+ return _versionsWithTypeReferenceCache.GetOrAdd(typeDesc, ComputeTypeReferenceVersionsWithCode);
+ }
+
public sealed override bool VersionsWithMethodBody(MethodDesc method)
{
@@ -399,6 +416,74 @@ private bool ComputeTypeVersionsWithCode(TypeDesc type)
return ComputeInstantiationVersionsWithCode(type.Instantiation, type);
}
+ private bool ComputeTypeReferenceVersionsWithCode(TypeDesc type)
+ {
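+ // A type is "referenceable" when every token needed to encode its signature is available:
+ // trivially-encoded element types always pass; a definition must either version with the bubble
+ // or resolve to a module token; and composite shapes (parameterized types, function pointers,
+ // generic instantiations) require each component type to be referenceable in turn.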
+ // Type represented by simple element type
+ if (type.IsPrimitive || type.IsVoid || type.IsObject || type.IsString)
+ return true;
+
+ if (VersionsWithType(type))
+ return true;
+
+ if (type.IsParameterizedType)
+ {
+ return VersionsWithTypeReference(type.GetParameterType());
+ }
+
+ if (type.IsFunctionPointer)
+ {
+ MethodSignature ptrSignature = ((FunctionPointerType)type).Signature;
+
+ if (!VersionsWithTypeReference(ptrSignature.ReturnType))
+ return false;
+
+ for (int i = 0; i < ptrSignature.Length; i++)
+ {
+ if (!VersionsWithTypeReference(ptrSignature[i]))
+ return false;
+ }
+ if (ptrSignature.HasEmbeddedSignatureData)
+ {
+ foreach (var embeddedSigData in ptrSignature.GetEmbeddedSignatureData())
+ {
+ if (embeddedSigData.type != null)
+ {
+ if (!VersionsWithTypeReference(embeddedSigData.type))
+ return false;
+ }
+ }
+ }
+ }
+
+ if (type is EcmaType ecmaType)
+ {
+ return !_tokenResolver.GetModuleTokenForType(ecmaType, false).IsNull;
+ }
+
+ if (type.GetTypeDefinition() == type)
+ {
+ // Not an ECMA type (that case was handled above), so this type definition cannot be referred to by token
+ return false;
+ }
+
+ if (type.HasInstantiation)
+ {
+ if (!VersionsWithTypeReference(type.GetTypeDefinition()))
+ return false;
+
+ foreach (TypeDesc instParam in type.Instantiation)
+ {
+ if (!VersionsWithTypeReference(instParam))
+ return false;
+ }
+
+ return true;
+ }
+
+ Debug.Assert(false, "Unhandled form of type in VersionsWithTypeReference");
+ return false;
+ }
+
private bool ComputeInstantiationVersionsWithCode(Instantiation inst, TypeSystemEntity entityWithInstantiation)
{
for (int iInstantiation = 0; iInstantiation < inst.Length; iInstantiation++)
@@ -407,7 +492,7 @@ private bool ComputeInstantiationVersionsWithCode(Instantiation inst, TypeSystem
if (!ComputeInstantiationTypeVersionsWithCode(this, instType))
{
- if (instType.IsPrimitive)
+ if (instType.IsPrimitive || instType.IsObject || instType.IsString)
{
// Primitive type instantiations are only instantiated in the module of the generic defining type
// if the generic does not apply interface constraints to that type parameter, or if System.Private.CoreLib is part of the version bubble
@@ -423,8 +508,20 @@ private bool ComputeInstantiationVersionsWithCode(Instantiation inst, TypeSystem
}
GenericParameterDesc genericParam = (GenericParameterDesc)entityDefinitionInstantiation[iInstantiation];
- if (genericParam.HasReferenceTypeConstraint)
- return false;
+ if (instType.IsPrimitive)
+ {
+ if (genericParam.HasReferenceTypeConstraint)
+ return false;
+ }
+ else
+ {
+ Debug.Assert(instType.IsString || instType.IsObject);
+ if (genericParam.HasNotNullableValueTypeConstraint)
+ return false;
+
+ if (instType.IsString && genericParam.HasDefaultConstructorConstraint)
+ return false;
+ }
// This checks to see if the type constraints list is empty
if (genericParam.TypeConstraints.GetEnumerator().MoveNext())
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunSingleAssemblyCompilationModuleGroup.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunSingleAssemblyCompilationModuleGroup.cs
index 44c3da4633796d..f3c2bd46bc4563 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunSingleAssemblyCompilationModuleGroup.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunSingleAssemblyCompilationModuleGroup.cs
@@ -17,7 +17,7 @@ public class ReadyToRunSingleAssemblyCompilationModuleGroup : ReadyToRunCompilat
private bool _profileGuidedCompileRestrictionSet;
public ReadyToRunSingleAssemblyCompilationModuleGroup(
- TypeSystemContext context,
+ CompilerTypeSystemContext context,
bool isCompositeBuildMode,
bool isInputBubble,
IEnumerable compilationModuleSet,
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/SingleMethodCompilationModuleGroup.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/SingleMethodCompilationModuleGroup.cs
index ee98f72e438717..d091b8f21af34b 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/SingleMethodCompilationModuleGroup.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/SingleMethodCompilationModuleGroup.cs
@@ -17,7 +17,7 @@ public class SingleMethodCompilationModuleGroup : ReadyToRunCompilationModuleGro
private MethodDesc _method;
public SingleMethodCompilationModuleGroup(
- TypeSystemContext context,
+ CompilerTypeSystemContext context,
bool isCompositeBuildMode,
bool isInputBubble,
IEnumerable compilationModuleSet,
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/IBC/MIbcProfileParser.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/IBC/MIbcProfileParser.cs
index 22e54a8922adb8..07d8b0c53d0f7f 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/IBC/MIbcProfileParser.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/IBC/MIbcProfileParser.cs
@@ -35,7 +35,7 @@ TypeSystemEntityOrUnknown IPgoSchemaDataLoader.TypeFr
try
{
if (token == 0)
- return new TypeSystemEntityOrUnknown(0);
+ return new TypeSystemEntityOrUnknown(null);
if ((token & 0xFF000000) == 0)
{
// token type is 0, therefore it can't be a type
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/ILCompiler.ReadyToRun.csproj b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/ILCompiler.ReadyToRun.csproj
index bedb1ce62e3a8f..c1a8ea5d772129 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/ILCompiler.ReadyToRun.csproj
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/ILCompiler.ReadyToRun.csproj
@@ -181,6 +181,7 @@
+ <Compile Include="Compiler\DependencyAnalysis\ReadyToRun\VirtualResolutionFixupSignature.cs" />
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs
index 0119522f0af4e4..e3da2dc734fef5 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs
@@ -23,7 +23,7 @@
using ILCompiler;
using ILCompiler.DependencyAnalysis;
using ILCompiler.DependencyAnalysis.ReadyToRun;
-
+using System.Text;
namespace Internal.JitInterface
{
@@ -221,6 +221,9 @@ public override int GetHashCode()
public bool Equals(MethodWithToken methodWithToken)
{
+ if (methodWithToken == null)
+ return false;
+
bool equals = Method == methodWithToken.Method && Token.Equals(methodWithToken.Token)
&& OwningType == methodWithToken.OwningType && ConstrainedType == methodWithToken.ConstrainedType
&& Unboxing == methodWithToken.Unboxing;
@@ -247,6 +250,24 @@ public void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb)
sb.Append("; UNBOXING");
}
+ public override string ToString()
+ {
+ StringBuilder debuggingName = new StringBuilder();
+ debuggingName.Append(Method.ToString());
+ if (ConstrainedType != null)
+ {
+ debuggingName.Append(" @ ");
+ debuggingName.Append(ConstrainedType.ToString());
+ }
+
+ debuggingName.Append("; ");
+ debuggingName.Append(Token.ToString());
+ if (Unboxing)
+ debuggingName.Append("; UNBOXING");
+
+ return debuggingName.ToString();
+ }
+
public int CompareTo(MethodWithToken other, TypeSystemComparer comparer)
{
int result;
@@ -918,7 +939,9 @@ private MethodWithToken ComputeMethodWithToken(MethodDesc method, ref CORINFO_RE
private ModuleToken HandleToModuleToken(ref CORINFO_RESOLVED_TOKEN pResolvedToken, MethodDesc methodDesc, out object context, ref TypeDesc constrainedType)
{
- if (methodDesc != null && (_compilation.NodeFactory.CompilationModuleGroup.VersionsWithMethodBody(methodDesc) || methodDesc.IsPInvoke))
+ if (methodDesc != null && (_compilation.NodeFactory.CompilationModuleGroup.VersionsWithMethodBody(methodDesc)
+ || (pResolvedToken.tokenType == CorInfoTokenKind.CORINFO_TOKENKIND_DevirtualizedMethod)
+ || methodDesc.IsPInvoke))
{
if ((CorTokenType)(unchecked((uint)pResolvedToken.token) & 0xFF000000u) == CorTokenType.mdtMethodDef &&
methodDesc?.GetTypicalMethodDefinition() is EcmaMethod ecmaMethod)
@@ -954,8 +977,8 @@ private ModuleToken HandleToModuleToken(ref CORINFO_RESOLVED_TOKEN pResolvedToke
// within the current version bubble**, but this happens to be good enough because
// we only do this replacement within CoreLib to replace method bodies in places
// that we cannot express in C# right now and for p/invokes in large version bubbles).
- MethodIL methodILDef = methodIL.GetMethodILDefinition();
- bool isFauxMethodIL = !(methodILDef is EcmaMethodIL);
+ MethodILScope methodILDef = methodIL.GetMethodILScopeDefinition();
+ bool isFauxMethodIL = !(methodILDef is IEcmaMethodIL);
if (isFauxMethodIL)
{
object resultDef = methodILDef.GetObject((int)pResolvedToken.token);
@@ -1015,10 +1038,10 @@ private ModuleToken HandleToModuleToken(ref CORINFO_RESOLVED_TOKEN pResolvedToke
private InfoAccessType constructStringLiteral(CORINFO_MODULE_STRUCT_* module, mdToken metaTok, ref void* ppValue)
{
- MethodIL methodIL = HandleToObject(module);
+ MethodILScope methodIL = HandleToObject(module);
// If this is not a MethodIL backed by a physical method body, we need to remap the token.
- Debug.Assert(methodIL.GetMethodILDefinition() is EcmaMethodIL);
+ Debug.Assert(methodIL.GetMethodILScopeDefinition() is EcmaMethodIL);
EcmaMethod method = (EcmaMethod)methodIL.OwningMethod.GetTypicalMethodDefinition();
ISymbolNode stringObject = _compilation.SymbolNodeFactory.StringLiteral(
@@ -1523,7 +1546,7 @@ private void ceeInfoGetCallInfo(
exactType == MethodBeingCompiled.OwningType)
{
var methodIL = HandleToObject(pResolvedToken.tokenScope);
- var rawMethod = (MethodDesc)methodIL.GetMethodILDefinition().GetObject((int)pResolvedToken.token);
+ var rawMethod = (MethodDesc)methodIL.GetMethodILScopeDefinition().GetObject((int)pResolvedToken.token);
if (IsTypeSpecForTypicalInstantiation(rawMethod.OwningType))
{
pResult->contextHandle = contextFromMethodBeingCompiled();
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/ObjectWriter/R2RPEBuilder.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/ObjectWriter/R2RPEBuilder.cs
index 3f6764b0e804ba..c7212368ace07a 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/ObjectWriter/R2RPEBuilder.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/ObjectWriter/R2RPEBuilder.cs
@@ -489,7 +489,7 @@ private void SetPEHeaderTimeStamp(Stream outputStream, int timeDateStamp)
DosHeaderSize +
PESignatureSize +
sizeof(short) + // Machine
- sizeof(short); //NumberOfSections
+ sizeof(short); // NumberOfSections
outputStream.Seek(seekSize, SeekOrigin.Begin);
outputStream.Write(patchedTimestamp, 0, patchedTimestamp.Length);
@@ -664,52 +664,28 @@ protected override BlobBuilder SerializeSection(string name, SectionLocation loc
}
///
- /// Simple helper for filling in PE header information by either copying over
- /// data from a pre-existing input PE header (used for single-assembly R2R files)
- /// or by explicitly specifying the image characteristics (for composite R2R).
+ /// Simple helper for filling in PE header information.
///
static class PEHeaderProvider
{
- ///
- /// Copy PE headers into a PEHeaderBuilder used by PEBuilder.
- ///
- /// Headers to copy
- /// Target architecture to set in the header
- public static PEHeaderBuilder Copy(PEHeaders peHeaders, TargetDetails target)
- {
- return Create(
- peHeaders.CoffHeader.Characteristics,
- peHeaders.PEHeader.DllCharacteristics,
- peHeaders.PEHeader.Subsystem,
- target);
- }
-
///
/// Fill in PE header information into a PEHeaderBuilder used by PEBuilder.
///
- /// Relocs are not present in the PE executable
- /// Extra DLL characteristics to apply
/// Targeting subsystem
/// Target architecture to set in the header
- public static PEHeaderBuilder Create(Characteristics imageCharacteristics, DllCharacteristics dllCharacteristics, Subsystem subsystem, TargetDetails target)
+ public static PEHeaderBuilder Create(Subsystem subsystem, TargetDetails target)
{
bool is64BitTarget = target.PointerSize == sizeof(long);
- imageCharacteristics &= ~(Characteristics.Bit32Machine | Characteristics.LargeAddressAware);
- imageCharacteristics |= (is64BitTarget ? Characteristics.LargeAddressAware : Characteristics.Bit32Machine);
+ Characteristics imageCharacteristics = Characteristics.ExecutableImage | Characteristics.Dll;
+ imageCharacteristics |= is64BitTarget ? Characteristics.LargeAddressAware : Characteristics.Bit32Machine;
- ulong imageBase = PE32HeaderConstants.ImageBase;
- if (target.IsWindows && is64BitTarget && (imageBase <= uint.MaxValue))
- {
- // Base addresses below 4 GiB are reserved for WoW on x64 and disallowed on ARM64.
- // If the input assembly was compiled for anycpu, its base address is 32-bit and we need to fix it.
- imageBase = (imageCharacteristics & Characteristics.Dll) != 0 ? PE64HeaderConstants.DllImageBase : PE64HeaderConstants.ExeImageBase;
- }
+ ulong imageBase = is64BitTarget ? PE64HeaderConstants.DllImageBase : PE32HeaderConstants.ImageBase;
int fileAlignment = 0x200;
if (!target.IsWindows && !is64BitTarget)
{
- // To minimize wasted VA space on 32 bit systems align file to page bounaries (presumed to be 4K).
+ // To minimize wasted VA space on 32-bit systems, align file to page boundaries (presumed to be 4K)
fileAlignment = 0x1000;
}
@@ -721,13 +697,11 @@ public static PEHeaderBuilder Create(Characteristics imageCharacteristics, DllCh
sectionAlignment = fileAlignment;
}
- dllCharacteristics &= DllCharacteristics.AppContainer;
-
- // In Crossgen1, this is under a debug-specific condition 'if (0 == CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NoASLRForNgen))'
- dllCharacteristics |= DllCharacteristics.DynamicBase;
-
// Without NxCompatible the PE executable cannot execute on Windows ARM64
- dllCharacteristics |= DllCharacteristics.NxCompatible | DllCharacteristics.TerminalServerAware;
+ DllCharacteristics dllCharacteristics =
+ DllCharacteristics.DynamicBase |
+ DllCharacteristics.NxCompatible |
+ DllCharacteristics.TerminalServerAware;
if (is64BitTarget)
{
diff --git a/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ILCompiler.Reflection.ReadyToRun.csproj b/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ILCompiler.Reflection.ReadyToRun.csproj
index 6259c0639e36db..1424375efc4e33 100644
--- a/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ILCompiler.Reflection.ReadyToRun.csproj
+++ b/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ILCompiler.Reflection.ReadyToRun.csproj
@@ -14,7 +14,6 @@
$(RuntimeBinDir)
AnyCPU;x64
AnyCPU
- <LangVersion>7.3</LangVersion>
diff --git a/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ReadyToRunMethod.cs b/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ReadyToRunMethod.cs
index 18ca5eb3b0816f..9032ef27421797 100644
--- a/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ReadyToRunMethod.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ReadyToRunMethod.cs
@@ -305,7 +305,7 @@ private void EnsureRuntimeFunctions()
if (this._runtimeFunctions == null)
{
this._runtimeFunctions = new List<RuntimeFunction>();
- this.ParseRuntimeFunctions();
+ this.ParseRuntimeFunctions(false);
}
}
@@ -474,17 +474,21 @@ public ReadyToRunMethod(
private void EnsureInitialized()
{
- if (_gcInfo == null && GcInfoRva != 0)
+ if (_gcInfo == null)
{
- int gcInfoOffset = _readyToRunReader.CompositeReader.GetOffset(GcInfoRva);
- if (_readyToRunReader.Machine == Machine.I386)
- {
- _gcInfo = new x86.GcInfo(_readyToRunReader.Image, gcInfoOffset, _readyToRunReader.Machine, _readyToRunReader.ReadyToRunHeader.MajorVersion);
- }
- else
+ ParseRuntimeFunctions(true);
+ if (GcInfoRva != 0)
{
- // Arm and Arm64 use the same GcInfo format as Amd64
- _gcInfo = new Amd64.GcInfo(_readyToRunReader.Image, gcInfoOffset, _readyToRunReader.Machine, _readyToRunReader.ReadyToRunHeader.MajorVersion);
+ int gcInfoOffset = _readyToRunReader.CompositeReader.GetOffset(GcInfoRva);
+ if (_readyToRunReader.Machine == Machine.I386)
+ {
+ _gcInfo = new x86.GcInfo(_readyToRunReader.Image, gcInfoOffset, _readyToRunReader.Machine, _readyToRunReader.ReadyToRunHeader.MajorVersion);
+ }
+ else
+ {
+ // Arm and Arm64 use the same GcInfo format as Amd64
+ _gcInfo = new Amd64.GcInfo(_readyToRunReader.Image, gcInfoOffset, _readyToRunReader.Machine, _readyToRunReader.ReadyToRunHeader.MajorVersion);
+ }
}
}
if (_pgoInfo == null)
@@ -547,7 +551,7 @@ private void EnsureFixupCells()
/// Get the RVAs of the runtime functions for each method
/// based on ZapUnwindInfo::Save
///
- private void ParseRuntimeFunctions()
+ private void ParseRuntimeFunctions(bool partial)
{
int runtimeFunctionId = EntryPointRuntimeFunctionId;
int runtimeFunctionSize = _readyToRunReader.CalculateRuntimeFunctionSize();
@@ -602,6 +606,11 @@ private void ParseRuntimeFunctions()
}
}
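+ // A partial parse stops here: the walk above is enough to populate fields such as GcInfoRva,
+ // without materializing the RuntimeFunction objects needed by a full parse.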
+ if (partial)
+ {
+ return;
+ }
+
RuntimeFunction rtf = new RuntimeFunction(
_readyToRunReader,
runtimeFunctionId,
diff --git a/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ReadyToRunSignature.cs b/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ReadyToRunSignature.cs
index 91a1da0cdb06f7..a71ad97519f5c3 100644
--- a/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ReadyToRunSignature.cs
+++ b/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ReadyToRunSignature.cs
@@ -457,14 +457,16 @@ protected void UpdateOffset(int offset)
///
/// R2RReader object representing the PE file containing the ECMA metadata
/// Signature offset within the PE file byte array
- public R2RSignatureDecoder(IR2RSignatureTypeProvider<TType, TMethod, TGenericContext> provider, TGenericContext context, MetadataReader metadataReader, ReadyToRunReader r2rReader, int offset)
+ public R2RSignatureDecoder(IR2RSignatureTypeProvider<TType, TMethod, TGenericContext> provider, TGenericContext context, MetadataReader metadataReader, ReadyToRunReader r2rReader, int offset, bool skipOverrideMetadataReader = false)
{
Context = context;
_provider = provider;
_image = r2rReader.Image;
_originalOffset = _offset = offset;
_contextReader = r2rReader;
- MetadataReader moduleOverrideMetadataReader = TryGetModuleOverrideMetadataReader();
+ MetadataReader moduleOverrideMetadataReader = null;
+ if (!skipOverrideMetadataReader)
+ moduleOverrideMetadataReader = TryGetModuleOverrideMetadataReader();
_metadataReader = moduleOverrideMetadataReader ?? metadataReader;
_outerReader = moduleOverrideMetadataReader ?? metadataReader;
Reset();
@@ -479,14 +481,16 @@ public R2RSignatureDecoder(IR2RSignatureTypeProvider
/// Signature offset within the signature byte array
/// Metadata reader representing the outer signature context
/// Top-level signature context reader
- public R2RSignatureDecoder(IR2RSignatureTypeProvider<TType, TMethod, TGenericContext> provider, TGenericContext context, MetadataReader metadataReader, byte[] signature, int offset, MetadataReader outerReader, ReadyToRunReader contextReader)
+ public R2RSignatureDecoder(IR2RSignatureTypeProvider<TType, TMethod, TGenericContext> provider, TGenericContext context, MetadataReader metadataReader, byte[] signature, int offset, MetadataReader outerReader, ReadyToRunReader contextReader, bool skipOverrideMetadataReader = false)
{
Context = context;
_provider = provider;
_image = signature;
_originalOffset = _offset = offset;
_contextReader = contextReader;
- MetadataReader moduleOverrideMetadataReader = TryGetModuleOverrideMetadataReader();
+ MetadataReader moduleOverrideMetadataReader = null;
+ if (!skipOverrideMetadataReader)
+ moduleOverrideMetadataReader = TryGetModuleOverrideMetadataReader();
_metadataReader = moduleOverrideMetadataReader ?? metadataReader;
_outerReader = moduleOverrideMetadataReader ?? outerReader;
Reset();
@@ -805,6 +809,24 @@ public TMethod ParseMethod()
{
uint methodFlags = ReadUInt();
+ if ((methodFlags & (uint)ReadyToRunMethodSigFlags.READYTORUN_METHOD_SIG_UpdateContext) != 0)
+ {
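+ // An UpdateContext signature re-targets decoding at another assembly: read the module index,
+ // open that reference assembly's metadata, and decode the rest of the method signature there.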
+ int moduleIndex = (int)ReadUInt();
+ IAssemblyMetadata refAsmReader = _contextReader.OpenReferenceAssembly(moduleIndex);
+ var refAsmDecoder = new R2RSignatureDecoder<TType, TMethod, TGenericContext>(_provider, Context, refAsmReader.MetadataReader, _image, _offset, _outerReader, _contextReader, skipOverrideMetadataReader: true);
+ var result = refAsmDecoder.ParseMethodWithMethodFlags(methodFlags);
+ _offset = refAsmDecoder.Offset;
+ return result;
+ }
+ else
+ {
+ return ParseMethodWithMethodFlags(methodFlags);
+ }
+ }
+
+ private TMethod ParseMethodWithMethodFlags(uint methodFlags)
+ {
TType owningTypeOverride = default(TType);
if ((methodFlags & (uint)ReadyToRunMethodSigFlags.READYTORUN_METHOD_SIG_OwnerType) != 0)
{
@@ -1348,6 +1370,28 @@ private ReadyToRunSignature ParseSignature(ReadyToRunFixupKind fixupType, String
builder.Append(" (VERIFY_TYPE_LAYOUT)");
break;
+ case ReadyToRunFixupKind.Check_VirtualFunctionOverride:
+ case ReadyToRunFixupKind.Verify_VirtualFunctionOverride:
+ ReadyToRunVirtualFunctionOverrideFlags flags = (ReadyToRunVirtualFunctionOverrideFlags)ReadUInt();
+ ParseMethod(builder);
+ builder.Append(" ImplType: ");
+ ParseType(builder);
+ if (flags.HasFlag(ReadyToRunVirtualFunctionOverrideFlags.VirtualFunctionOverriden))
+ {
+ builder.Append(" ImplMethod: ");
+ ParseMethod(builder);
+ }
+ else
+ {
+ builder.Append(" Not Overridden");
+ }
+
+ if (fixupType == ReadyToRunFixupKind.Check_VirtualFunctionOverride)
+ builder.Append(" (CHECK_VIRTUAL_FUNCTION_OVERRIDE)");
+ else
+ builder.Append(" (VERIFY_VIRTUAL_FUNCTION_OVERRIDE)");
+ break;
+
case ReadyToRunFixupKind.Check_FieldOffset:
builder.Append($"{ReadUInt()} ");
ParseField(builder);
diff --git a/src/coreclr/tools/aot/ILCompiler.TypeSystem.ReadyToRun.Tests/InstanceFieldLayoutTests.cs b/src/coreclr/tools/aot/ILCompiler.TypeSystem.ReadyToRun.Tests/InstanceFieldLayoutTests.cs
index 03b5b160fdaf87..e92749b5ab272d 100644
--- a/src/coreclr/tools/aot/ILCompiler.TypeSystem.ReadyToRun.Tests/InstanceFieldLayoutTests.cs
+++ b/src/coreclr/tools/aot/ILCompiler.TypeSystem.ReadyToRun.Tests/InstanceFieldLayoutTests.cs
@@ -96,12 +96,12 @@ public void TestExplicitTypeLayoutWithInheritance()
if (f.Name == "Lol")
{
// First field after base class, with offset 0 so it should lie on the byte count of
- // the base class = 24
+ // the base class = 20
Assert.Equal(20, f.Offset.AsInt);
}
else if (f.Name == "Omg")
{
- // Offset 20 from base class byte count = 44
+ // Offset 20 from base class byte count = 40
Assert.Equal(40, f.Offset.AsInt);
}
else
diff --git a/src/coreclr/tools/aot/ILCompiler.TypeSystem.ReadyToRun.Tests/MarshalUtilsTests.cs b/src/coreclr/tools/aot/ILCompiler.TypeSystem.ReadyToRun.Tests/MarshalUtilsTests.cs
index c3befa87af72d4..3523f5d62f8e14 100644
--- a/src/coreclr/tools/aot/ILCompiler.TypeSystem.ReadyToRun.Tests/MarshalUtilsTests.cs
+++ b/src/coreclr/tools/aot/ILCompiler.TypeSystem.ReadyToRun.Tests/MarshalUtilsTests.cs
@@ -74,10 +74,10 @@ public void IsBlittableType_TypeWithBlittableBase_ReturnsTrue(string className)
[InlineData("ClassWithExplicitEmptyBase")]
[InlineData("ClassWithExplicitEmptySizeZeroBase")]
[InlineData("ClassWithSequentialEmptyBase")]
- public void IsBlittableType_TypeWithEmptyBase_ReturnsFalse(string className)
+ public void IsBlittableType_TypeWithEmptyBase_ReturnsTrue(string className)
{
TypeDesc classWithEmptyBase = _testModule.GetType("Marshalling", className);
- Assert.False(MarshalUtils.IsBlittableType(classWithEmptyBase));
+ Assert.True(MarshalUtils.IsBlittableType(classWithEmptyBase));
}
}
}
diff --git a/src/coreclr/tools/dotnet-pgo/Program.cs b/src/coreclr/tools/dotnet-pgo/Program.cs
index b8828d7df6fb6c..00ee6f87121366 100644
--- a/src/coreclr/tools/dotnet-pgo/Program.cs
+++ b/src/coreclr/tools/dotnet-pgo/Program.cs
@@ -396,28 +396,31 @@ static int InnerCompareMibcMain(CommandLineOptions options)
FileInfo file1 = options.CompareMibc[0];
FileInfo file2 = options.CompareMibc[1];
+ // Look for the shortest unique names for the input files.
+ string name1 = file1.Name;
+ string name2 = file2.Name;
+ string path1 = Path.GetDirectoryName(file1.FullName);
+ string path2 = Path.GetDirectoryName(file2.FullName);
+ while (name1 == name2)
+ {
+ name1 = Path.Combine(Path.GetFileName(path1), name1);
+ name2 = Path.Combine(Path.GetFileName(path2), name2);
+ path1 = Path.GetDirectoryName(path1);
+ path2 = Path.GetDirectoryName(path2);
+ }
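+ // E.g. (illustrative): comparing runs/base/app.mibc with runs/diff/app.mibc yields the labels
+ // "base/app.mibc" and "diff/app.mibc" (using the platform separator) rather than two
+ // indistinguishable "app.mibc" labels.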
+
PEReader mibc1 = MIbcProfileParser.OpenMibcAsPEReader(file1.FullName);
PEReader mibc2 = MIbcProfileParser.OpenMibcAsPEReader(file2.FullName);
var tsc = new TypeRefTypeSystem.TypeRefTypeSystemContext(new PEReader[] { mibc1, mibc2 });
ProfileData profile1 = MIbcProfileParser.ParseMIbcFile(tsc, mibc1, null, onlyDefinedInAssembly: null);
ProfileData profile2 = MIbcProfileParser.ParseMIbcFile(tsc, mibc2, null, onlyDefinedInAssembly: null);
- PrintOutput($"Comparing {file1.Name} to {file2.Name}");
- PrintOutput($"Statistics for {file1.Name}");
- PrintStats(profile1, file1.Name, profile2, file2.Name);
+ PrintOutput($"Comparing {name1} to {name2}");
+ PrintOutput($"Statistics for {name1}");
+ PrintMibcStats(profile1);
PrintOutput("");
- PrintOutput($"Statistics for {file2.Name}");
- PrintStats(profile2, file2.Name, profile1, file1.Name);
-
- static void PrintStats(ProfileData self, string selfName, ProfileData other, string otherName)
- {
- var methods = self.GetAllMethodProfileData().ToList();
- var profiledMethods = methods.Where(spd => spd.SchemaData != null).ToList();
- var otherMethods = other.GetAllMethodProfileData().ToList();
- var otherProfiledMethods = otherMethods.Where(spd => spd.SchemaData != null).ToList();
- PrintMibcStats(self);
- PrintOutput($"# Profiled methods in {selfName} not in {otherName}: {profiledMethods.Select(m => m.Method).Except(otherProfiledMethods.Select(m => m.Method)).Count()}");
- }
+ PrintOutput($"Statistics for {name2}");
+ PrintMibcStats(profile2);
PrintOutput("");
PrintOutput("Comparison");
@@ -426,6 +429,8 @@ static void PrintStats(ProfileData self, string selfName, ProfileData other, str
var profiledMethods1 = methods1.Where(m => m.SchemaData != null).ToList();
var profiledMethods2 = methods2.Where(m => m.SchemaData != null).ToList();
+ PrintOutput($"# Profiled methods in {name1} not in {name2}: {profiledMethods1.Select(m => m.Method).Except(profiledMethods2.Select(m => m.Method)).Count()}");
+ PrintOutput($"# Profiled methods in {name2} not in {name1}: {profiledMethods2.Select(m => m.Method).Except(profiledMethods1.Select(m => m.Method)).Count()}");
PrintOutput($"# Methods with profile data in both .mibc files: {profiledMethods1.Select(m => m.Method).Intersect(profiledMethods2.Select(m => m.Method)).Count()}");
var fgMatches = new List<(MethodProfileData prof1, MethodProfileData prof2)>();
var fgMismatches = new List<(MethodProfileData prof1, MethodProfileData prof2, List mismatches)>();
@@ -436,16 +441,6 @@ static void PrintStats(ProfileData self, string selfName, ProfileData other, str
if (prof2?.SchemaData == null)
continue;
- Dictionary<int, PgoSchemaElem> GroupBlocks(MethodProfileData data)
- => data.SchemaData
- .Where(e => e.InstrumentationKind == PgoInstrumentationKind.BasicBlockIntCount || e.InstrumentationKind == PgoInstrumentationKind.BasicBlockLongCount)
- .ToDictionary(e => e.ILOffset);
-
- Dictionary<(int, int), PgoSchemaElem> GroupEdges(MethodProfileData data)
- => data.SchemaData
- .Where(e => e.InstrumentationKind == PgoInstrumentationKind.EdgeIntCount || e.InstrumentationKind == PgoInstrumentationKind.EdgeLongCount)
- .ToDictionary(e => (e.ILOffset, e.Other));
-
var (blocks1, blocks2) = (GroupBlocks(prof1), GroupBlocks(prof2));
var (edges1, edges2) = (GroupEdges(prof1), GroupEdges(prof2));
@@ -456,9 +451,9 @@ Dictionary GroupBlocks(MethodProfileData data)
var in2 = blocks2.Keys.Where(k => !blocks1.ContainsKey(k)).ToList();
foreach (var m1 in in1)
- mismatches.Add($"{file1.Name} has a block at {m1:x} not present in {file2.Name}");
+ mismatches.Add($"{name1} has a block at {m1:x} not present in {name2}");
foreach (var m2 in in2)
- mismatches.Add($"{file2.Name} has a block at {m2:x} not present in {file1.Name}");
+ mismatches.Add($"{name2} has a block at {m2:x} not present in {name1}");
}
if (edges1.Count > 0 && edges2.Count > 0)
@@ -467,9 +462,9 @@ Dictionary GroupBlocks(MethodProfileData data)
var in2 = edges2.Keys.Where(k => !edges1.ContainsKey(k)).ToList();
foreach (var (from, to) in in1)
- mismatches.Add($"{file1.Name} has an edge {from:x}->{to:x} not present in {file2.Name}");
+ mismatches.Add($"{name1} has an edge {from:x}->{to:x} not present in {name2}");
foreach (var (from, to) in in2)
- mismatches.Add($"{file2.Name} has an edge {from:x}->{to:x} not present in {file1.Name}");
+ mismatches.Add($"{name2} has an edge {from:x}->{to:x} not present in {name1}");
}
if (mismatches.Count > 0)
@@ -495,10 +490,10 @@ Dictionary<int, PgoSchemaElem> GroupBlocks(MethodProfileData data)
if (fgMatches.Count > 0)
{
PrintOutput("");
- PrintOutput($"When comparing the flow-graphs of the matching methods, their overlaps break down as follows:");
+ PrintOutput($"Comparing methods with matching flow-graphs");
- List<double> blockOverlaps = new List<double>();
- List<double> edgeOverlaps = new List<double>();
+ var blockOverlaps = new List<(MethodDesc Method, double Overlap)>();
+ var edgeOverlaps = new List<(MethodDesc Method, double Overlap)>();
foreach ((MethodProfileData prof1, MethodProfileData prof2) in fgMatches)
{
@@ -512,6 +507,12 @@ double Overlap<TKey>(Dictionary<TKey, PgoSchemaElem> left, Dictionary<TKey, PgoSchemaElem> right)
Debug.Assert(left.Keys.All(k => right.ContainsKey(k)));
Debug.Assert(right.Keys.All(k => left.ContainsKey(k)));
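+ // Degenerate totals: two all-zero count sets overlap fully; a zero set against a non-zero set not at all.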
+ if (leftTotal == 0 && rightTotal == 0)
+ return 1;
+
+ if (leftTotal == 0 || rightTotal == 0)
+ return 0;
+
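+ // Normalize each counter to its share of the method's total so profiles with different scales compare fairly.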
var leftPW = left.ToDictionary(k => k.Key, k => k.Value.DataLong / (double)leftTotal);
var rightPW = right.ToDictionary(k => k.Key, k => k.Value.DataLong / (double)rightTotal);
@@ -519,26 +520,27 @@ double Overlap<TKey>(Dictionary<TKey, PgoSchemaElem> left, Dictionary<TKey, PgoSchemaElem> right)
if (blocks1.Count > 0 && blocks2.Count > 0)
- blockOverlaps.Add(Overlap(blocks1, blocks2));
+ blockOverlaps.Add((prof1.Method, Overlap(blocks1, blocks2)));
if (edges1.Count > 0 && edges2.Count > 0)
- edgeOverlaps.Add(Overlap(edges1, edges2));
+ edgeOverlaps.Add((prof1.Method, Overlap(edges1, edges2)));
}
- void PrintHistogram(List<double> overlaps)
+ void PrintHistogram(List<(MethodDesc Method, double Overlap)> overlaps)
{
int maxWidth = Console.WindowWidth - 10;
const int maxLabelWidth = 4; // to print "100%".
int barMaxWidth = maxWidth - (maxLabelWidth + 10); // Leave 10 chars for writing other things on the line
const int bucketSize = 5;
int width = Console.WindowWidth - 10;
- var sorted = overlaps.OrderByDescending(d => d).ToList();
+ var sorted = overlaps.OrderByDescending(t => t.Overlap).ToList();
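+ // Descending order lets each histogram bucket below be filled with a single forward scan over the list.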
void PrintBar(string label, ref int curIndex, Func<double, bool> include, bool forcePrint)
{
int count = 0;
- while (curIndex < sorted.Count && include(sorted[curIndex]))
+ while (curIndex < sorted.Count && include(sorted[curIndex].Overlap))
{
count++;
curIndex++;
@@ -574,23 +576,85 @@ void PrintBar(string label, ref int curIndex, Func<double, bool> include, bool forcePrint)
for (int proportion = 100 - bucketSize; proportion >= 0; proportion -= bucketSize)
PrintBar($">{(int)proportion,2}%", ref curIndex, d => d * 100 > proportion, true);
PrintBar("0%", ref curIndex, d => true, false);
+
+ PrintOutput(FormattableString.Invariant($"The average overlap is {sorted.Average(t => t.Overlap)*100:F2}% for the {sorted.Count} methods with matching flow graphs and profile data"));
+ double mse = sorted.Sum(t => (100 - t.Overlap*100) * (100 - t.Overlap*100)) / sorted.Count;
+ PrintOutput(FormattableString.Invariant($"The mean squared error is {mse:F2}"));
+ PrintOutput(FormattableString.Invariant($"There are {sorted.Count(t => t.Overlap < 0.5)}/{sorted.Count} methods with overlaps < 50%:"));
+ foreach (var badMethod in sorted.Where(t => t.Overlap < 0.5).OrderBy(t => t.Overlap))
+ {
+ PrintOutput(FormattableString.Invariant($" {badMethod.Method} ({badMethod.Overlap * 100:F2}%)"));
+ }
}
// Need UTF8 for the block chars.
Console.OutputEncoding = Encoding.UTF8;
if (blockOverlaps.Count > 0)
{
+ PrintOutput("The overlap of the block counts break down as follows:");
PrintHistogram(blockOverlaps);
- PrintOutput("(using block counts)");
+ PrintOutput("");
}
if (edgeOverlaps.Count > 0)
{
- if (blockOverlaps.Count > 0)
- PrintOutput("");
-
+ PrintOutput("The overlap of the edge counts break down as follows:");
PrintHistogram(edgeOverlaps);
- PrintOutput("(using edge counts)");
+ PrintOutput("");
+ }
+
+ var changes = new List<(MethodDesc method, int ilOffset, GetLikelyClassResult result1, GetLikelyClassResult result2)>();
+ int devirtToSame = 0;
+ int devirtToSameLikelihood100 = 0;
+ int devirtToSameLikelihood70 = 0;
+ foreach ((MethodProfileData prof1, MethodProfileData prof2) in fgMatches)
+ {
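+ // Probe both profiles at every IL offset that carries class-profile data in either input.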
+ List<int> typeHandleHistogramCallSites =
+ prof1.SchemaData.Concat(prof2.SchemaData)
+ .Where(e => e.InstrumentationKind == PgoInstrumentationKind.GetLikelyClass || e.InstrumentationKind == PgoInstrumentationKind.TypeHandleHistogramTypeHandle)
+ .Select(e => e.ILOffset)
+ .Distinct()
+ .ToList();
+
+ foreach (int callsite in typeHandleHistogramCallSites)
+ {
+ GetLikelyClassResult result1 = GetLikelyClass(prof1.SchemaData, callsite);
+ GetLikelyClassResult result2 = GetLikelyClass(prof2.SchemaData, callsite);
+ if (result1.Devirtualizes != result2.Devirtualizes || (result1.Devirtualizes && result2.Devirtualizes && result1.Type != result2.Type))
+ changes.Add((prof1.Method, callsite, result1, result2));
+
+ if (result1.Devirtualizes && result2.Devirtualizes && result1.Type == result2.Type)
+ {
+ devirtToSame++;
+ devirtToSameLikelihood100 += result1.Likelihood == 100 && result2.Likelihood == 100 ? 1 : 0;
+ devirtToSameLikelihood70 += result1.Likelihood >= 70 && result2.Likelihood >= 70 ? 1 : 0;
+ }
+ }
+ }
+
+ PrintOutput($"There are {changes.Count(t => t.result1.Devirtualizes && !t.result2.Devirtualizes)} sites that devirtualize with {name1} but not with {name2}");
+ PrintOutput($"There are {changes.Count(t => !t.result1.Devirtualizes && t.result2.Devirtualizes)} sites that do not devirtualize with {name1} but do with {name2}");
+ PrintOutput($"There are {changes.Count(t => t.result1.Devirtualizes && t.result2.Devirtualizes && t.result1.Type != t.result2.Type)} sites that change devirtualized type");
+ PrintOutput($"There are {devirtToSame} sites that devirtualize to the same type before and after");
+ PrintOutput($" Of these, {devirtToSameLikelihood100} have a likelihood of 100 in both .mibc files");
+ PrintOutput($" and {devirtToSameLikelihood70} have a likelihood >= 70 in both .mibc files");
+
+ foreach (var group in changes.GroupBy(g => g.method))
+ {
+ PrintOutput($" In {group.Key}");
+ foreach (var change in group)
+ {
+ string FormatDevirt(GetLikelyClassResult result)
+ {
+ if (result.Type != null)
+ return $"{result.Type}, likelihood {result.Likelihood}{(result.Devirtualizes ? "" : " (does not devirt)")}";
+
+ return $"(null)";
+ }
+
+ PrintOutput($" At +{change.ilOffset:x}: {FormatDevirt(change.result1)} vs {FormatDevirt(change.result2)}");
+ }
}
}
@@ -607,8 +671,115 @@ static void PrintMibcStats(ProfileData data)
PrintOutput($"# Methods with 64-bit block counts: {profiledMethods.Count(spd => spd.SchemaData.Any(elem => elem.InstrumentationKind == PgoInstrumentationKind.BasicBlockLongCount))}");
PrintOutput($"# Methods with 32-bit edge counts: {profiledMethods.Count(spd => spd.SchemaData.Any(elem => elem.InstrumentationKind == PgoInstrumentationKind.EdgeIntCount))}");
PrintOutput($"# Methods with 64-bit edge counts: {profiledMethods.Count(spd => spd.SchemaData.Any(elem => elem.InstrumentationKind == PgoInstrumentationKind.EdgeLongCount))}");
- PrintOutput($"# Methods with type handle histograms: {profiledMethods.Count(spd => spd.SchemaData.Any(elem => elem.InstrumentationKind == PgoInstrumentationKind.TypeHandleHistogramTypeHandle))}");
- PrintOutput($"# Methods with GetLikelyClass data: {profiledMethods.Count(spd => spd.SchemaData.Any(elem => elem.InstrumentationKind == PgoInstrumentationKind.GetLikelyClass))}");
+ int numTypeHandleHistograms = profiledMethods.Sum(spd => spd.SchemaData.Count(elem => elem.InstrumentationKind == PgoInstrumentationKind.TypeHandleHistogramTypeHandle));
+ int methodsWithTypeHandleHistograms = profiledMethods.Count(spd => spd.SchemaData.Any(elem => elem.InstrumentationKind == PgoInstrumentationKind.TypeHandleHistogramTypeHandle));
+ PrintOutput($"# Type handle histograms: {numTypeHandleHistograms} in {methodsWithTypeHandleHistograms} methods");
+ int numGetLikelyClass = profiledMethods.Sum(spd => spd.SchemaData.Count(elem => elem.InstrumentationKind == PgoInstrumentationKind.GetLikelyClass));
+ int methodsWithGetLikelyClass = profiledMethods.Count(spd => spd.SchemaData.Any(elem => elem.InstrumentationKind == PgoInstrumentationKind.GetLikelyClass));
+ PrintOutput($"# GetLikelyClass data: {numGetLikelyClass} in {methodsWithGetLikelyClass} methods");
+
+ var histogramCallSites = new List<(MethodProfileData mpd, int ilOffset)>();
+ foreach (var mpd in profiledMethods)
+ {
+ var sites =
+ mpd.SchemaData
+ .Where(e => e.InstrumentationKind == PgoInstrumentationKind.TypeHandleHistogramTypeHandle || e.InstrumentationKind == PgoInstrumentationKind.GetLikelyClass)
+ .Select(e => e.ILOffset)
+ .Distinct();
+
+ histogramCallSites.AddRange(sites.Select(ilOffset => (mpd, ilOffset)));
+ }
+
+ int CountGetLikelyClass(Func<GetLikelyClassResult, bool> predicate)
+ => histogramCallSites.Count(t => predicate(GetLikelyClass(t.mpd.SchemaData, t.ilOffset)));
+
+ PrintOutput($"# Call sites where getLikelyClass is null: {CountGetLikelyClass(r => r.IsNull)}");
+ PrintOutput($"# Call sites where getLikelyClass is unknown: {CountGetLikelyClass(r => r.IsUnknown)}");
+ PrintOutput($"# Call sites where getLikelyClass returns data that devirtualizes: {CountGetLikelyClass(r => r.Devirtualizes)}");
+
+ static bool PresentAndZero(MethodProfileData mpd, PgoInstrumentationKind kind)
+ => mpd.SchemaData.Any(e => e.InstrumentationKind == kind) && mpd.SchemaData.Sum(e => e.InstrumentationKind == kind ? e.DataLong : 0) == 0;
+
+ static bool CountersSumToZero(MethodProfileData data)
+ => PresentAndZero(data, PgoInstrumentationKind.BasicBlockIntCount) ||
+ PresentAndZero(data, PgoInstrumentationKind.BasicBlockLongCount) ||
+ PresentAndZero(data, PgoInstrumentationKind.EdgeIntCount) ||
+ PresentAndZero(data, PgoInstrumentationKind.EdgeLongCount);
+
+ List<MethodProfileData> methodsWithZeroCounters = profiledMethods.Where(CountersSumToZero).ToList();
+ if (methodsWithZeroCounters.Count > 0)
+ {
+ PrintOutput($"There are {methodsWithZeroCounters.Count} methods whose counters sum to 0:");
+ foreach (MethodProfileData mpd in methodsWithZeroCounters)
+ PrintOutput($" {mpd.Method}");
+ }
+ }
+
+ private struct GetLikelyClassResult
+ {
+ public bool IsNull;
+ public bool IsUnknown;
+ public TypeDesc Type;
+ public int Likelihood;
+ public bool Devirtualizes;
+ }
+
+ private static GetLikelyClassResult GetLikelyClass(PgoSchemaElem[] schema, int ilOffset)
+ {
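+ // PGO encodes "unknown" type handles as small sentinel integers; see the min/max constants below.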
+ const int UNKNOWN_TYPEHANDLE_MIN = 1;
+ const int UNKNOWN_TYPEHANDLE_MAX = 33;
+
+ static bool IsUnknownTypeHandle(int handle)
+ => handle >= UNKNOWN_TYPEHANDLE_MIN && handle <= UNKNOWN_TYPEHANDLE_MAX;
+
+ for (int i = 0; i < schema.Length; i++)
+ {
+ var elem = schema[i];
+ if (elem.InstrumentationKind == PgoInstrumentationKind.GetLikelyClass)
+ {
+ Trace.Assert(elem.Count == 1);
+ return new GetLikelyClassResult
+ {
+ IsUnknown = IsUnknownTypeHandle(((TypeSystemEntityOrUnknown[])elem.DataObject)[0].AsUnknown),
+ Likelihood = (byte)elem.Other,
+ };
+ }
+
+ bool isHistogramCount =
+ elem.InstrumentationKind == PgoInstrumentationKind.TypeHandleHistogramIntCount ||
+ elem.InstrumentationKind == PgoInstrumentationKind.TypeHandleHistogramLongCount;
+
+ if (isHistogramCount && elem.Count == 1 && i + 1 < schema.Length && schema[i + 1].InstrumentationKind == PgoInstrumentationKind.TypeHandleHistogramTypeHandle)
+ {
+ var handles = (TypeSystemEntityOrUnknown[])schema[i + 1].DataObject;
+ var histogram = handles.Where(e => !e.IsNull).GroupBy(e => e).ToList();
+ if (histogram.Count == 0)
+ return new GetLikelyClassResult { IsNull = true };
+
+ int totalCount = histogram.Sum(g => g.Count());
+ // The number of unknown type handles matters for the likelihood, but not for the most likely class that we pick, so we can remove them now.
+ histogram.RemoveAll(e => IsUnknownTypeHandle(e.Key.AsUnknown));
+ if (histogram.Count == 0)
+ return new GetLikelyClassResult { IsUnknown = true };
+
+ // Now return the most likely one
+ var best = histogram.OrderByDescending(h => h.Count()).First();
+ Trace.Assert(best.Key.AsType != null);
+ int likelihood = best.Count() * 100 / totalCount;
+ // The threshold is different for interfaces and classes.
+ // A flag in the Other field of the TypeHandleHistogram*Count entry indicates which kind of call site this is.
+ bool isInterface = (elem.Other & (uint)ClassProfileFlags.IsInterface) != 0;
+ int threshold = isInterface ? 25 : 30;
+ return new GetLikelyClassResult
+ {
+ Type = best.Key.AsType,
+ Likelihood = likelihood,
+ Devirtualizes = likelihood >= threshold,
+ };
+ }
+ }
+
+ return new GetLikelyClassResult { IsNull = true };
}
private static Dictionary<int, PgoSchemaElem> GroupBlocks(MethodProfileData data)
@@ -1139,6 +1310,8 @@ FlowGraph GetFlowGraph(MethodDesc desc)
SampleProfile sp = SampleProfile.Create(il, GetFlowGraph(g.Key), g.Select(t => t.Offset));
sampleProfiles.Add(g.Key, sp);
}
+
+ PrintOutput($"Profile is based on {samples.Count} samples");
}
else
{
@@ -1149,6 +1322,7 @@ FlowGraph GetFlowGraph(MethodDesc desc)
Dictionary<(ulong startRun, ulong endRun), long> runs = new Dictionary<(ulong startRun, ulong endRun), long>();
List<(ulong start, ulong end)> lbrRuns = new List<(ulong start, ulong end)>();
LbrEntry64[] lbr64Arr = null;
+ long numLbrRecords = 0;
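+ // Counts LBR (last branch record) events in the trace; reported after the scan completes.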
foreach (var e in traceLog.Events)
{
if (e.TaskGuid != lbrGuid)
@@ -1158,6 +1332,8 @@ FlowGraph GetFlowGraph(MethodDesc desc)
if (e.Opcode != (TraceEventOpcode)32)
continue;
+ numLbrRecords++;
+
unsafe
{
Span<LbrEntry64> lbr;
@@ -1291,6 +1467,8 @@ FlowGraph GetFlowGraph(MethodDesc desc)
sampleProfiles.Add(g.Key.Method, ep);
}
+
+ PrintOutput($"Profile is based on {numLbrRecords} LBR records");
}
}
diff --git a/src/coreclr/unwinder/s390x/unwinder_s390x.cpp b/src/coreclr/unwinder/s390x/unwinder_s390x.cpp
new file mode 100644
index 00000000000000..66cb24b46e6e75
--- /dev/null
+++ b/src/coreclr/unwinder/s390x/unwinder_s390x.cpp
@@ -0,0 +1,11 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+//
+
+#include "stdafx.h"
+#include "utilcode.h"
+#include "crosscomp.h"
+
+#error Unsupported platform
+
diff --git a/src/coreclr/utilcode/CMakeLists.txt b/src/coreclr/utilcode/CMakeLists.txt
index fec22cf9dce38a..1ae433adbfd893 100644
--- a/src/coreclr/utilcode/CMakeLists.txt
+++ b/src/coreclr/utilcode/CMakeLists.txt
@@ -76,10 +76,6 @@ set(UTILCODE_DAC_SOURCES
hostimpl.cpp
)
-set(UTILCODE_CROSSGEN_SOURCES
- ${UTILCODE_COMMON_SOURCES}
- hostimpl.cpp
-)
set(UTILCODE_STATICNOHOST_SOURCES
${UTILCODE_COMMON_SOURCES}
@@ -90,7 +86,6 @@ set (UTILCODE_DEPENDENCIES eventing_headers)
convert_to_absolute_path(UTILCODE_SOURCES ${UTILCODE_SOURCES})
convert_to_absolute_path(UTILCODE_DAC_SOURCES ${UTILCODE_DAC_SOURCES})
-convert_to_absolute_path(UTILCODE_CROSSGEN_SOURCES ${UTILCODE_CROSSGEN_SOURCES})
convert_to_absolute_path(UTILCODE_STATICNOHOST_SOURCES ${UTILCODE_STATICNOHOST_SOURCES})
add_library_clr(utilcode_dac STATIC ${UTILCODE_DAC_SOURCES})
@@ -98,11 +93,9 @@ add_library_clr(utilcode_obj OBJECT ${UTILCODE_SOURCES})
add_library(utilcode INTERFACE)
target_sources(utilcode INTERFACE $<TARGET_OBJECTS:utilcode_obj>)
add_library_clr(utilcodestaticnohost STATIC ${UTILCODE_STATICNOHOST_SOURCES})
-add_library_clr(utilcode_crossgen STATIC ${UTILCODE_CROSSGEN_SOURCES})
if(CLR_CMAKE_HOST_UNIX)
target_link_libraries(utilcodestaticnohost nativeresourcestring)
- target_link_libraries(utilcode_crossgen nativeresourcestring)
target_link_libraries(utilcode_dac nativeresourcestring)
target_link_libraries(utilcode INTERFACE nativeresourcestring)
add_dependencies(utilcode_dac coreclrpal)
@@ -114,20 +107,16 @@ if(CLR_CMAKE_HOST_WIN32)
target_compile_definitions(utilcodestaticnohost PRIVATE _CRTIMP=) # use static version of crt
link_natvis_sources_for_target(utilcodestaticnohost INTERFACE utilcode.natvis)
- link_natvis_sources_for_target(utilcode_crossgen INTERFACE utilcode.natvis)
link_natvis_sources_for_target(utilcode_dac INTERFACE utilcode.natvis)
link_natvis_sources_for_target(utilcode INTERFACE utilcode.natvis)
endif(CLR_CMAKE_HOST_WIN32)
set_target_properties(utilcode_dac PROPERTIES DAC_COMPONENT TRUE)
-set_target_properties(utilcode_crossgen PROPERTIES CROSSGEN_COMPONENT TRUE)
target_compile_definitions(utilcode_dac PRIVATE SELF_NO_HOST)
target_compile_definitions(utilcodestaticnohost PRIVATE SELF_NO_HOST)
add_dependencies(utilcode_dac ${UTILCODE_DEPENDENCIES})
add_dependencies(utilcode_obj ${UTILCODE_DEPENDENCIES})
-add_dependencies(utilcode_crossgen ${UTILCODE_DEPENDENCIES})
add_dependencies(utilcodestaticnohost ${UTILCODE_DEPENDENCIES})
target_precompile_headers(utilcode_dac PRIVATE [["stdafx.h"]])
target_precompile_headers(utilcode_obj PRIVATE [["stdafx.h"]])
-target_precompile_headers(utilcode_crossgen PRIVATE [["stdafx.h"]])
target_precompile_headers(utilcodestaticnohost PRIVATE [["stdafx.h"]])
diff --git a/src/coreclr/utilcode/loaderheap.cpp b/src/coreclr/utilcode/loaderheap.cpp
index 33974b9e290764..adaf07d8f5825f 100644
--- a/src/coreclr/utilcode/loaderheap.cpp
+++ b/src/coreclr/utilcode/loaderheap.cpp
@@ -1330,8 +1330,14 @@ void *UnlockedLoaderHeap::UnlockedAllocMem_NoThrow(size_t dwSize
if (pData)
{
#ifdef _DEBUG
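+ // For an executable heap (LHF_EXECUTABLE), the debug fill and tag writes must go through a writable (RW) mapping rather than the RX address.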
+ BYTE *pAllocatedBytes = (BYTE*)pData;
+ ExecutableWriterHolder dataWriterHolder;
+ if (m_Options & LHF_EXECUTABLE)
+ {
+ dataWriterHolder = ExecutableWriterHolder(pData, dwSize);
+ pAllocatedBytes = (BYTE *)dataWriterHolder.GetRW();
+ }
- BYTE *pAllocatedBytes = (BYTE *)pData;
#if LOADER_HEAP_DEBUG_BOUNDARY > 0
// Don't fill the memory we allocated - it is assumed to be zeroed - fill the memory after it
memset(pAllocatedBytes + dwRequestedSize, 0xEE, LOADER_HEAP_DEBUG_BOUNDARY);
@@ -1344,7 +1350,7 @@ void *UnlockedLoaderHeap::UnlockedAllocMem_NoThrow(size_t dwSize
if (!m_fExplicitControl)
{
- LoaderHeapValidationTag *pTag = AllocMem_GetTag(pData, dwRequestedSize);
+ LoaderHeapValidationTag *pTag = AllocMem_GetTag(pAllocatedBytes, dwRequestedSize);
pTag->m_allocationType = kAllocMem;
pTag->m_dwRequestedSize = dwRequestedSize;
pTag->m_szFile = szFile;
@@ -1514,7 +1520,14 @@ void UnlockedLoaderHeap::UnlockedBackoutMem(void *pMem,
{
// Cool. This was the last block allocated. We can just undo the allocation instead
// of going to the freelist.
- memset(pMem, 0x00, dwSize); // Fill freed region with 0
+ void *pMemRW = pMem;
+ ExecutableWriterHolder memWriterHolder;
+ if (m_Options & LHF_EXECUTABLE)
+ {
+ memWriterHolder = ExecutableWriterHolder(pMem, dwSize);
+ pMemRW = memWriterHolder.GetRW();
+ }
+ memset(pMemRW, 0x00, dwSize); // Fill freed region with 0
m_pAllocPtr = (BYTE*)pMem;
}
else
@@ -1626,7 +1639,14 @@ void *UnlockedLoaderHeap::UnlockedAllocAlignedMem_NoThrow(size_t dwRequestedSiz
((BYTE*&)pResult) += extra;
#ifdef _DEBUG
- BYTE *pAllocatedBytes = (BYTE *)pResult;
+ BYTE *pAllocatedBytes = (BYTE *)pResult;
+ ExecutableWriterHolder resultWriterHolder;
+ if (m_Options & LHF_EXECUTABLE)
+ {
+ resultWriterHolder = ExecutableWriterHolder(pResult, dwSize - extra);
+ pAllocatedBytes = (BYTE *)resultWriterHolder.GetRW();
+ }
+
#if LOADER_HEAP_DEBUG_BOUNDARY > 0
// Don't fill the entire memory - we assume it is all zeroed -just the memory after our alloc
memset(pAllocatedBytes + dwRequestedSize, 0xee, LOADER_HEAP_DEBUG_BOUNDARY);
@@ -1656,7 +1676,7 @@ void *UnlockedLoaderHeap::UnlockedAllocAlignedMem_NoThrow(size_t dwRequestedSiz
if (!m_fExplicitControl)
{
- LoaderHeapValidationTag *pTag = AllocMem_GetTag(((BYTE*)pResult) - extra, dwRequestedSize + extra);
+ LoaderHeapValidationTag *pTag = AllocMem_GetTag(pAllocatedBytes - extra, dwRequestedSize + extra);
pTag->m_allocationType = kAllocMem;
pTag->m_dwRequestedSize = dwRequestedSize + extra;
pTag->m_szFile = szFile;
diff --git a/src/coreclr/utilcode/pedecoder.cpp b/src/coreclr/utilcode/pedecoder.cpp
index 3b46c56a415879..639cb02099bf4d 100644
--- a/src/coreclr/utilcode/pedecoder.cpp
+++ b/src/coreclr/utilcode/pedecoder.cpp
@@ -1034,14 +1034,29 @@ CHECK PEDecoder::CheckCorHeader() const
IMAGE_COR20_HEADER *pCor = GetCorHeader();
+ // Composite R2R images currently omit some information; for example, the version is 0.0.
+ // We may want to change that to something more conforming and explicit.
+ // For now, for compatibility, we accept it as a valid format.
+ bool possiblyCompositeR2R =
+ pCor->MinorRuntimeVersion == 0 &&
+ pCor->MajorRuntimeVersion == 0;
+
//CHECK(((ULONGLONG)pCor & 0x3)==0);
// If the file is COM+ 1.0, which by definition has nothing the runtime can
// use, or if the file requires a newer version of this engine than us,
// it cannot be run by this engine.
- CHECK(VAL16(pCor->MajorRuntimeVersion) > 1 && VAL16(pCor->MajorRuntimeVersion) <= COR_VERSION_MAJOR);
+ if (!possiblyCompositeR2R)
+ CHECK(VAL16(pCor->MajorRuntimeVersion) > 1 && VAL16(pCor->MajorRuntimeVersion) <= COR_VERSION_MAJOR);
+#ifdef HOST_WINDOWS
CHECK(CheckDirectory(&pCor->MetaData, IMAGE_SCN_MEM_WRITE, HasNativeHeader() ? NULL_OK : NULL_NOT_OK));
+#else
+ CHECK(CheckDirectory(
+ &pCor->MetaData,
+ possiblyCompositeR2R ? 0 : IMAGE_SCN_MEM_WRITE,
+ HasNativeHeader() ? NULL_OK : NULL_NOT_OK));
+#endif
CHECK(CheckDirectory(&pCor->Resources, IMAGE_SCN_MEM_WRITE, NULL_OK));
CHECK(CheckDirectory(&pCor->StrongNameSignature, IMAGE_SCN_MEM_WRITE, NULL_OK));
CHECK(CheckDirectory(&pCor->CodeManagerTable, IMAGE_SCN_MEM_WRITE, NULL_OK));
@@ -1083,7 +1098,7 @@ CHECK PEDecoder::CheckCorHeader() const
// IL library files (really a misnomer - these are native images or ReadyToRun images)
// only they can have a native image header
- if ((pCor->Flags&VAL32(COMIMAGE_FLAGS_IL_LIBRARY)) == 0)
+ if ((pCor->Flags&VAL32(COMIMAGE_FLAGS_IL_LIBRARY)) == 0 && !possiblyCompositeR2R)
{
CHECK(VAL32(pCor->ManagedNativeHeader.Size) == 0);
}
@@ -1769,7 +1784,7 @@ void PEDecoder::LayoutILOnly(void *base, bool enableExecution) const
PAGE_READONLY, &oldProtection))
ThrowLastError();
- // Finally, apply proper protection to copied sections
+ // Finally, apply proper protection to copied sections
for (section = sectionStart; section < sectionEnd; section++)
{
// Add appropriate page protection.
diff --git a/src/coreclr/vm/CMakeLists.txt b/src/coreclr/vm/CMakeLists.txt
index d38633d6951a46..1d682d2a428bbf 100644
--- a/src/coreclr/vm/CMakeLists.txt
+++ b/src/coreclr/vm/CMakeLists.txt
@@ -566,6 +566,10 @@ if(FEATURE_COMWRAPPERS OR FEATURE_OBJCMARSHAL)
if (FEATURE_COMWRAPPERS)
list(APPEND VM_SOURCES_WKS
interoplibinterface.cpp
+ rcwrefcache.cpp
+ )
+ list(APPEND VM_HEADERS_WKS
+ rcwrefcache.h
)
endif (FEATURE_COMWRAPPERS)
@@ -626,7 +630,6 @@ if(CLR_CMAKE_TARGET_WIN32)
mngstdinterfaces.cpp
notifyexternals.cpp
olecontexthelpers.cpp
- rcwrefcache.cpp
runtimecallablewrapper.cpp
stdinterfaces.cpp
stdinterfaces_wrapper.cpp
@@ -644,7 +647,6 @@ if(CLR_CMAKE_TARGET_WIN32)
mngstdinterfaces.h
notifyexternals.h
olecontexthelpers.h
- rcwrefcache.h
runtimecallablewrapper.h
stdinterfaces.h
stdinterfaces_internal.h
diff --git a/src/coreclr/vm/ClrEtwAll.man b/src/coreclr/vm/ClrEtwAll.man
index 79e1f17b069be8..d8a275c6da6295 100644
--- a/src/coreclr/vm/ClrEtwAll.man
+++ b/src/coreclr/vm/ClrEtwAll.man
[ClrEtwAll.man hunks at @@ -87 @@, @@ -429 @@, @@ -2896 @@, @@ -3921 @@, @@ -7383 @@, @@ -7712 @@, and @@ -7855 @@: the XML element markup was stripped during extraction and cannot be reconstructed. The surviving fragments show a new runtime keyword defined after CLR_TYPEDIAGNOSTIC_KEYWORD, a new JitInstrumentationData task with a verbose event (symbol JitInstrumentationDataVerbose, message $(string.RuntimePublisher.JitInstrumentationDataEventMessage)) carrying a two-value template (%1, %2), and keyword additions to several existing event definitions.]
diff --git a/src/coreclr/vm/amd64/cgenamd64.cpp b/src/coreclr/vm/amd64/cgenamd64.cpp
index 153993cb37c2dd..d00f7b74df0d48 100644
--- a/src/coreclr/vm/amd64/cgenamd64.cpp
+++ b/src/coreclr/vm/amd64/cgenamd64.cpp
@@ -450,7 +450,7 @@ void EncodeLoadAndJumpThunk (LPBYTE pBuffer, LPVOID pv, LPVOID pTarget)
_ASSERTE(DbgIsExecutable(pBuffer, 22));
}
-void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target)
+void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target)
{
CONTRACT_VOID
{
@@ -460,7 +460,8 @@ void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target)
}
CONTRACT_END;
- BYTE *pBuffer = (BYTE*)pCOMMethod - COMMETHOD_CALL_PRESTUB_SIZE;
+ BYTE *pBufferRX = (BYTE*)pCOMMethodRX - COMMETHOD_CALL_PRESTUB_SIZE;
+ BYTE *pBufferRW = (BYTE*)pCOMMethodRW - COMMETHOD_CALL_PRESTUB_SIZE;
// We need the target to be in a 64-bit aligned memory location and the call instruction
// to immediately precede the ComCallMethodDesc. We'll generate an indirect call to avoid
@@ -471,21 +472,21 @@ void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target)
// nop 90
// call [$ - 10] ff 15 f0 ff ff ff
- *((UINT64 *)&pBuffer[COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET]) = (UINT64)target;
+ *((UINT64 *)&pBufferRW[COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET]) = (UINT64)target;
- pBuffer[-2] = 0x90;
- pBuffer[-1] = 0x90;
+ pBufferRW[-2] = 0x90;
+ pBufferRW[-1] = 0x90;
- pBuffer[0] = 0xFF;
- pBuffer[1] = 0x15;
- *((UINT32 UNALIGNED *)&pBuffer[2]) = (UINT32)(COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET - COMMETHOD_CALL_PRESTUB_SIZE);
+ pBufferRW[0] = 0xFF;
+ pBufferRW[1] = 0x15;
+ *((UINT32 UNALIGNED *)&pBufferRW[2]) = (UINT32)(COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET - COMMETHOD_CALL_PRESTUB_SIZE);
- _ASSERTE(DbgIsExecutable(pBuffer, COMMETHOD_CALL_PRESTUB_SIZE));
+ _ASSERTE(DbgIsExecutable(pBufferRX, COMMETHOD_CALL_PRESTUB_SIZE));
RETURN;
}
-void emitJump(LPBYTE pBuffer, LPVOID target)
+void emitJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target)
{
CONTRACTL
{
@@ -493,25 +494,25 @@ void emitJump(LPBYTE pBuffer, LPVOID target)
GC_NOTRIGGER;
MODE_ANY;
- PRECONDITION(CheckPointer(pBuffer));
+ PRECONDITION(CheckPointer(pBufferRX));
}
CONTRACTL_END;
// mov rax, 123456789abcdef0h 48 b8 xx xx xx xx xx xx xx xx
// jmp rax ff e0
- pBuffer[0] = 0x48;
- pBuffer[1] = 0xB8;
+ pBufferRW[0] = 0x48;
+ pBufferRW[1] = 0xB8;
- *((UINT64 UNALIGNED *)&pBuffer[2]) = (UINT64)target;
+ *((UINT64 UNALIGNED *)&pBufferRW[2]) = (UINT64)target;
- pBuffer[10] = 0xFF;
- pBuffer[11] = 0xE0;
+ pBufferRW[10] = 0xFF;
+ pBufferRW[11] = 0xE0;
- _ASSERTE(DbgIsExecutable(pBuffer, 12));
+ _ASSERTE(DbgIsExecutable(pBufferRX, 12));
}
-void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
+void UMEntryThunkCode::Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam)
{
CONTRACTL
{
@@ -542,7 +543,7 @@ void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
m_jmpRAX[1] = 0xFF;
m_jmpRAX[2] = 0xE0;
- _ASSERTE(DbgIsExecutable(&m_movR10[0], &m_jmpRAX[3]-&m_movR10[0]));
+ _ASSERTE(DbgIsExecutable(&pEntryThunkCodeRX->m_movR10[0], &pEntryThunkCodeRX->m_jmpRAX[3]-&pEntryThunkCodeRX->m_movR10[0]));
}
void UMEntryThunkCode::Poison()
@@ -555,15 +556,18 @@ void UMEntryThunkCode::Poison()
}
CONTRACTL_END;
- m_execstub = (BYTE *)UMEntryThunk::ReportViolation;
+ ExecutableWriterHolder thunkWriterHolder(this, sizeof(UMEntryThunkCode));
+ UMEntryThunkCode *pThisRW = thunkWriterHolder.GetRW();
+
+ pThisRW->m_execstub = (BYTE *)UMEntryThunk::ReportViolation;
- m_movR10[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+ pThisRW->m_movR10[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
#ifdef _WIN32
// mov rcx, pUMEntryThunk // 48 b9 xx xx xx xx xx xx xx xx
- m_movR10[1] = 0xB9;
+ pThisRW->m_movR10[1] = 0xB9;
#else
// mov rdi, pUMEntryThunk // 48 bf xx xx xx xx xx xx xx xx
- m_movR10[1] = 0xBF;
+ pThisRW->m_movR10[1] = 0xBF;
#endif
ClrFlushInstructionCache(&m_movR10[0], &m_jmpRAX[3]-&m_movR10[0]);
@@ -647,7 +651,7 @@ INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDesc *pMe
return static_cast(offset);
}
-INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCODE jumpStubAddr, bool emitJump)
+INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCODE jumpStubAddrRX, PCODE jumpStubAddrRW, bool emitJump)
{
CONTRACTL
{
@@ -657,12 +661,12 @@ INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCO
CONTRACTL_END;
TADDR baseAddr = (TADDR)pRel32 + 4;
- _ASSERTE(FitsInI4(jumpStubAddr - baseAddr));
+ _ASSERTE(FitsInI4(jumpStubAddrRX - baseAddr));
INT_PTR offset = target - baseAddr;
if (!FitsInI4(offset) INDEBUG(|| PEDecoder::GetForceRelocs()))
{
- offset = jumpStubAddr - baseAddr;
+ offset = jumpStubAddrRX - baseAddr;
if (!FitsInI4(offset))
{
_ASSERTE(!"jump stub was not in expected range");
@@ -671,11 +675,11 @@ INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCO
if (emitJump)
{
- emitBackToBackJump((LPBYTE)jumpStubAddr, (LPVOID)target);
+ emitBackToBackJump((LPBYTE)jumpStubAddrRX, (LPBYTE)jumpStubAddrRW, (LPVOID)target);
}
else
{
- _ASSERTE(decodeBackToBackJump(jumpStubAddr) == target);
+ _ASSERTE(decodeBackToBackJump(jumpStubAddrRX) == target);
}
}
@@ -862,7 +866,9 @@ EXTERN_C PCODE VirtualMethodFixupWorker(TransitionBlock * pTransitionBlock, CORC
*(INT32 *)(pNewValue+1) = rel32UsingJumpStub((INT32*)(&pThunk->callJmp[1]), pCode, pMD, NULL);
_ASSERTE(IS_ALIGNED(pThunk, sizeof(INT64)));
- FastInterlockCompareExchangeLong((INT64*)pThunk, newValue, oldValue);
+
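+ // Patch the thunk through a writable (RW) mapping; the original RX address is what gets flushed below.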
+ ExecutableWriterHolder thunkWriterHolder((INT64*)pThunk, sizeof(INT64));
+ FastInterlockCompareExchangeLong(thunkWriterHolder.GetRW(), newValue, oldValue);
FlushInstructionCache(GetCurrentProcess(), pThunk, 8);
}
@@ -888,14 +894,17 @@ EXTERN_C PCODE VirtualMethodFixupWorker(TransitionBlock * pTransitionBlock, CORC
#define BEGIN_DYNAMIC_HELPER_EMIT(size) \
SIZE_T cb = size; \
SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \
- BYTE * pStart = (BYTE *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
+ BYTE * pStartRX = (BYTE *)(void*)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
+ ExecutableWriterHolder startWriterHolder(pStartRX, cbAligned); \
+ BYTE * pStart = startWriterHolder.GetRW(); \
+ size_t rxOffset = pStartRX - pStart; \
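+ /* rxOffset maps a pointer in the RW buffer back to its executable (RX) address for rel32 fixups */ \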
BYTE * p = pStart;
#define END_DYNAMIC_HELPER_EMIT() \
_ASSERTE(pStart + cb == p); \
while (p < pStart + cbAligned) *p++ = X86_INSTR_INT3; \
- ClrFlushInstructionCache(pStart, cbAligned); \
- return (PCODE)pStart
+ ClrFlushInstructionCache(pStartRX, cbAligned); \
+ return (PCODE)pStartRX
PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
@@ -913,13 +922,13 @@ PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCOD
p += 8;
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator);
p += 4;
END_DYNAMIC_HELPER_EMIT();
}
-void DynamicHelpers::EmitHelperWithArg(BYTE*& p, LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+void DynamicHelpers::EmitHelperWithArg(BYTE*& p, size_t rxOffset, LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
CONTRACTL
{
@@ -940,7 +949,7 @@ void DynamicHelpers::EmitHelperWithArg(BYTE*& p, LoaderAllocator * pAllocator, T
p += 8;
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator);
p += 4;
}
@@ -948,7 +957,7 @@ PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR ar
{
BEGIN_DYNAMIC_HELPER_EMIT(15);
- EmitHelperWithArg(p, pAllocator, arg, target);
+ EmitHelperWithArg(p, rxOffset, pAllocator, arg, target);
END_DYNAMIC_HELPER_EMIT();
}
@@ -976,7 +985,7 @@ PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, TADD
p += 8;
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator);
p += 4;
END_DYNAMIC_HELPER_EMIT();
@@ -1005,7 +1014,7 @@ PCODE DynamicHelpers::CreateHelperArgMove(LoaderAllocator * pAllocator, TADDR ar
p += 8;
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator);
p += 4;
END_DYNAMIC_HELPER_EMIT();
@@ -1071,7 +1080,7 @@ PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADD
p += 8;
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator);
p += 4;
END_DYNAMIC_HELPER_EMIT();
@@ -1100,7 +1109,7 @@ PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADD
p += 8;
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator);
p += 4;
END_DYNAMIC_HELPER_EMIT();
@@ -1117,9 +1126,10 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
GetEEFuncEntryPoint(JIT_GenericHandleClassWithSlotAndModule));
GenericHandleArgs * pArgs = (GenericHandleArgs *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(sizeof(GenericHandleArgs), DYNAMIC_HELPER_ALIGNMENT);
- pArgs->dictionaryIndexAndSlot = dictionaryIndexAndSlot;
- pArgs->signature = pLookup->signature;
- pArgs->module = (CORINFO_MODULE_HANDLE)pModule;
+ ExecutableWriterHolder argsWriterHolder(pArgs, sizeof(GenericHandleArgs));
+ argsWriterHolder.GetRW()->dictionaryIndexAndSlot = dictionaryIndexAndSlot;
+ argsWriterHolder.GetRW()->signature = pLookup->signature;
+ argsWriterHolder.GetRW()->module = (CORINFO_MODULE_HANDLE)pModule;
WORD slotOffset = (WORD)(dictionaryIndexAndSlot & 0xFFFF) * sizeof(Dictionary*);
@@ -1131,7 +1141,7 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
// rcx/rdi contains the generic context parameter
// mov rdx/rsi,pArgs
// jmp helperAddress
- EmitHelperWithArg(p, pAllocator, (TADDR)pArgs, helperAddress);
+ EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress);
END_DYNAMIC_HELPER_EMIT();
}
@@ -1238,7 +1248,7 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
// mov rdx|rsi,pArgs
// jmp helperAddress
- EmitHelperWithArg(p, pAllocator, (TADDR)pArgs, helperAddress);
+ EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress);
}
}
diff --git a/src/coreclr/vm/amd64/cgencpu.h b/src/coreclr/vm/amd64/cgencpu.h
index 7312ad0a019fef..6300876fa330e3 100644
--- a/src/coreclr/vm/amd64/cgencpu.h
+++ b/src/coreclr/vm/amd64/cgencpu.h
@@ -370,11 +370,11 @@ INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDesc *pMe
LoaderAllocator *pLoaderAllocator = NULL, bool throwOnOutOfMemoryWithinRange = true);
// Get Rel32 destination, emit jumpStub if necessary into a preallocated location
-INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCODE jumpStubAddr, bool emitJump);
+INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCODE jumpStubAddr, PCODE jumpStubAddrRW, bool emitJump);
-void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target);
+void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target);
-void emitJump(LPBYTE pBuffer, LPVOID target);
+void emitJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target);
BOOL isJumpRel32(PCODE pCode);
PCODE decodeJump32(PCODE pCode);
@@ -388,11 +388,11 @@ PCODE decodeJump64(PCODE pCode);
// For all other platforms back to back jumps don't require anything special
// That is why we have these two wrapper functions that call emitJump and decodeJump
//
-inline void emitBackToBackJump(LPBYTE pBuffer, LPVOID target)
+inline void emitBackToBackJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target)
{
WRAPPER_NO_CONTRACT;
- emitJump(pBuffer, target);
+ emitJump(pBufferRX, pBufferRW, target);
}
inline BOOL isBackToBackJump(PCODE pCode)
@@ -438,7 +438,7 @@ struct DECLSPEC_ALIGN(8) UMEntryThunkCode
BYTE m_jmpRAX[3]; // JMP RAX
BYTE m_padding2[5];
- void Encode(BYTE* pTargetCode, void* pvSecretParam);
+ void Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam);
void Poison();
LPCBYTE GetEntryPoint() const
@@ -610,19 +610,19 @@ class CallCountingStubShort : public CallCountingStub
#ifndef DACCESS_COMPILE
public:
- CallCountingStubShort(CallCount *remainingCallCountCell, PCODE targetForMethod)
+ CallCountingStubShort(CallCountingStubShort* stubRX, CallCount *remainingCallCountCell, PCODE targetForMethod)
: m_part0{ 0x48, 0xb8}, // mov rax,
m_remainingCallCountCell(remainingCallCountCell), //
m_part1{ 0x66, 0xff, 0x08, // dec word ptr [rax]
0x0f, 0x85}, // jnz
m_rel32TargetForMethod( //
GetRelative32BitOffset(
- &m_rel32TargetForMethod,
+ &stubRX->m_rel32TargetForMethod,
targetForMethod)),
m_part2{ 0xe8}, // call
m_rel32TargetForThresholdReached( //
GetRelative32BitOffset(
- &m_rel32TargetForThresholdReached,
+ &stubRX->m_rel32TargetForThresholdReached,
TargetForThresholdReached)),
// (rip == stub-identifying token)
m_alignmentPadding{}
diff --git a/src/coreclr/vm/amd64/virtualcallstubcpu.hpp b/src/coreclr/vm/amd64/virtualcallstubcpu.hpp
index 860a681e213527..70b2de5813438e 100644
--- a/src/coreclr/vm/amd64/virtualcallstubcpu.hpp
+++ b/src/coreclr/vm/amd64/virtualcallstubcpu.hpp
@@ -97,7 +97,7 @@ struct LookupHolder
{
static void InitializeStatic();
- void Initialize(PCODE resolveWorkerTarget, size_t dispatchToken);
+ void Initialize(LookupHolder* pLookupHolderRX, PCODE resolveWorkerTarget, size_t dispatchToken);
LookupStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
@@ -317,7 +317,7 @@ struct DispatchHolder
{
static void InitializeStatic();
- void Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT,
+ void Initialize(DispatchHolder* pDispatchHolderRX, PCODE implTarget, PCODE failTarget, size_t expectedMT,
DispatchStub::DispatchStubType type);
static size_t GetHolderSize(DispatchStub::DispatchStubType type)
@@ -453,7 +453,8 @@ struct ResolveHolder
{
static void InitializeStatic();
- void Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
+ void Initialize(ResolveHolder* pResolveHolderRX,
+ PCODE resolveWorkerTarget, PCODE patcherTarget,
size_t dispatchToken, UINT32 hashedToken,
void * cacheAddr, INT32* counterAddr);
@@ -573,7 +574,7 @@ void LookupHolder::InitializeStatic()
lookupInit.part3 [1] = 0xE0;
}
-void LookupHolder::Initialize(PCODE resolveWorkerTarget, size_t dispatchToken)
+void LookupHolder::Initialize(LookupHolder* pLookupHolderRX, PCODE resolveWorkerTarget, size_t dispatchToken)
{
_stub = lookupInit;
@@ -632,7 +633,7 @@ void DispatchHolder::InitializeStatic()
dispatchLongInit.part5 [1] = 0xE0;
};
-void DispatchHolder::Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT,
+void DispatchHolder::Initialize(DispatchHolder* pDispatchHolderRX, PCODE implTarget, PCODE failTarget, size_t expectedMT,
DispatchStub::DispatchStubType type)
{
//
@@ -650,17 +651,18 @@ void DispatchHolder::Initialize(PCODE implTarget, PCODE failTarget, size_t expe
//
if (type == DispatchStub::e_TYPE_SHORT)
{
- DispatchStubShort *shortStub = const_cast(stub()->getShortStub());
+ DispatchStubShort *shortStubRW = const_cast(stub()->getShortStub());
+ DispatchStubShort *shortStubRX = const_cast(pDispatchHolderRX->stub()->getShortStub());
// initialize the static data
- *shortStub = dispatchShortInit;
+ *shortStubRW = dispatchShortInit;
// fill in the dynamic data
- size_t displ = (failTarget - ((PCODE) &shortStub->_failDispl + sizeof(DISPL)));
+ size_t displ = (failTarget - ((PCODE) &shortStubRX->_failDispl + sizeof(DISPL)));
CONSISTENCY_CHECK(FitsInI4(displ));
- shortStub->_failDispl = (DISPL) displ;
- shortStub->_implTarget = (size_t) implTarget;
- CONSISTENCY_CHECK((PCODE)&shortStub->_failDispl + sizeof(DISPL) + shortStub->_failDispl == failTarget);
+ shortStubRW->_failDispl = (DISPL) displ;
+ shortStubRW->_implTarget = (size_t) implTarget;
+ CONSISTENCY_CHECK((PCODE)&shortStubRX->_failDispl + sizeof(DISPL) + shortStubRX->_failDispl == failTarget);
}
else
{
@@ -769,7 +771,8 @@ void ResolveHolder::InitializeStatic()
resolveInit.part10 [1] = 0xE0;
};
-void ResolveHolder::Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
+void ResolveHolder::Initialize(ResolveHolder* pResolveHolderRX,
+ PCODE resolveWorkerTarget, PCODE patcherTarget,
size_t dispatchToken, UINT32 hashedToken,
void * cacheAddr, INT32* counterAddr)
{
diff --git a/src/coreclr/vm/appdomain.cpp b/src/coreclr/vm/appdomain.cpp
index 8fa02c4f12179b..f7bf57c5b52cf1 100644
--- a/src/coreclr/vm/appdomain.cpp
+++ b/src/coreclr/vm/appdomain.cpp
@@ -47,10 +47,13 @@
#include "runtimecallablewrapper.h"
#include "mngstdinterfaces.h"
#include "olevariant.h"
-#include "rcwrefcache.h"
#include "olecontexthelpers.h"
#endif // FEATURE_COMINTEROP
+#if defined(FEATURE_COMWRAPPERS)
+#include "rcwrefcache.h"
+#endif // FEATURE_COMWRAPPERS
+
#include "typeequivalencehash.hpp"
#include "appdomain.inl"
@@ -1502,7 +1505,14 @@ void SystemDomain::LoadBaseSystemClasses()
g_pThreadClass = CoreLibBinder::GetClass(CLASS__THREAD);
#ifdef FEATURE_COMINTEROP
- g_pBaseCOMObject = CoreLibBinder::GetClass(CLASS__COM_OBJECT);
+ if (g_pConfig->IsBuiltInCOMSupported())
+ {
+ g_pBaseCOMObject = CoreLibBinder::GetClass(CLASS__COM_OBJECT);
+ }
+ else
+ {
+ g_pBaseCOMObject = NULL;
+ }
#endif
g_pIDynamicInterfaceCastableInterface = CoreLibBinder::GetClass(CLASS__IDYNAMICINTERFACECASTABLE);
@@ -2024,31 +2034,31 @@ void SystemDomain::NotifyProfilerStartup()
CONTRACTL_END;
{
- BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ BEGIN_PROFILER_CALLBACK(CORProfilerTrackAppDomainLoads());
_ASSERTE(System());
- g_profControlBlock.pProfInterface->AppDomainCreationStarted((AppDomainID) System());
- END_PIN_PROFILER();
+ (&g_profControlBlock)->AppDomainCreationStarted((AppDomainID) System());
+ END_PROFILER_CALLBACK();
}
{
- BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ BEGIN_PROFILER_CALLBACK(CORProfilerTrackAppDomainLoads());
_ASSERTE(System());
- g_profControlBlock.pProfInterface->AppDomainCreationFinished((AppDomainID) System(), S_OK);
- END_PIN_PROFILER();
+ (&g_profControlBlock)->AppDomainCreationFinished((AppDomainID) System(), S_OK);
+ END_PROFILER_CALLBACK();
}
{
- BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ BEGIN_PROFILER_CALLBACK(CORProfilerTrackAppDomainLoads());
_ASSERTE(System()->DefaultDomain());
- g_profControlBlock.pProfInterface->AppDomainCreationStarted((AppDomainID) System()->DefaultDomain());
- END_PIN_PROFILER();
+ (&g_profControlBlock)->AppDomainCreationStarted((AppDomainID) System()->DefaultDomain());
+ END_PROFILER_CALLBACK();
}
{
- BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ BEGIN_PROFILER_CALLBACK(CORProfilerTrackAppDomainLoads());
_ASSERTE(System()->DefaultDomain());
- g_profControlBlock.pProfInterface->AppDomainCreationFinished((AppDomainID) System()->DefaultDomain(), S_OK);
- END_PIN_PROFILER();
+ (&g_profControlBlock)->AppDomainCreationFinished((AppDomainID) System()->DefaultDomain(), S_OK);
+ END_PROFILER_CALLBACK();
}
}
@@ -2063,31 +2073,31 @@ HRESULT SystemDomain::NotifyProfilerShutdown()
CONTRACTL_END;
{
- BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ BEGIN_PROFILER_CALLBACK(CORProfilerTrackAppDomainLoads());
_ASSERTE(System());
- g_profControlBlock.pProfInterface->AppDomainShutdownStarted((AppDomainID) System());
- END_PIN_PROFILER();
+ (&g_profControlBlock)->AppDomainShutdownStarted((AppDomainID) System());
+ END_PROFILER_CALLBACK();
}
{
- BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ BEGIN_PROFILER_CALLBACK(CORProfilerTrackAppDomainLoads());
_ASSERTE(System());
- g_profControlBlock.pProfInterface->AppDomainShutdownFinished((AppDomainID) System(), S_OK);
- END_PIN_PROFILER();
+ (&g_profControlBlock)->AppDomainShutdownFinished((AppDomainID) System(), S_OK);
+ END_PROFILER_CALLBACK();
}
{
- BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ BEGIN_PROFILER_CALLBACK(CORProfilerTrackAppDomainLoads());
_ASSERTE(System()->DefaultDomain());
- g_profControlBlock.pProfInterface->AppDomainShutdownStarted((AppDomainID) System()->DefaultDomain());
- END_PIN_PROFILER();
+ (&g_profControlBlock)->AppDomainShutdownStarted((AppDomainID) System()->DefaultDomain());
+ END_PROFILER_CALLBACK();
}
{
- BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ BEGIN_PROFILER_CALLBACK(CORProfilerTrackAppDomainLoads());
_ASSERTE(System()->DefaultDomain());
- g_profControlBlock.pProfInterface->AppDomainShutdownFinished((AppDomainID) System()->DefaultDomain(), S_OK);
- END_PIN_PROFILER();
+ (&g_profControlBlock)->AppDomainShutdownFinished((AppDomainID) System()->DefaultDomain(), S_OK);
+ END_PROFILER_CALLBACK();
}
return (S_OK);
}
@@ -2113,8 +2123,10 @@ AppDomain::AppDomain()
m_dwFlags = 0;
#ifdef FEATURE_COMINTEROP
m_pRCWCache = NULL;
+#endif //FEATURE_COMINTEROP
+#ifdef FEATURE_COMWRAPPERS
m_pRCWRefCache = NULL;
-#endif // FEATURE_COMINTEROP
+#endif // FEATURE_COMWRAPPERS
m_handleStore = NULL;
@@ -4418,7 +4430,7 @@ void AppDomain::NotifyDebuggerUnload()
#ifndef CROSSGEN_COMPILE
-#ifdef FEATURE_COMINTEROP
+#ifdef FEATURE_COMWRAPPERS
RCWRefCache *AppDomain::GetRCWRefCache()
{
@@ -4440,6 +4452,9 @@ RCWRefCache *AppDomain::GetRCWRefCache()
}
RETURN m_pRCWRefCache;
}
+#endif // FEATURE_COMWRAPPERS
+
+#ifdef FEATURE_COMINTEROP
RCWCache *AppDomain::CreateRCWCache()
{
diff --git a/src/coreclr/vm/appdomain.hpp b/src/coreclr/vm/appdomain.hpp
index 84101004f84f4a..0487299c5f26c9 100644
--- a/src/coreclr/vm/appdomain.hpp
+++ b/src/coreclr/vm/appdomain.hpp
@@ -48,8 +48,10 @@ class TypeEquivalenceHashTable;
#ifdef FEATURE_COMINTEROP
class RCWCache;
+#endif //FEATURE_COMINTEROP
+#ifdef FEATURE_COMWRAPPERS
class RCWRefCache;
-#endif // FEATURE_COMINTEROP
+#endif // FEATURE_COMWRAPPERS
#ifdef _MSC_VER
#pragma warning(push)
@@ -1972,9 +1974,13 @@ class AppDomain : public BaseDomain
return m_pRCWCache;
}
- RCWRefCache *GetRCWRefCache();
#endif // FEATURE_COMINTEROP
+#ifdef FEATURE_COMWRAPPERS
+public:
+ RCWRefCache *GetRCWRefCache();
+#endif // FEATURE_COMWRAPPERS
+
TPIndex GetTPIndex()
{
LIMITED_METHOD_CONTRACT;
@@ -2240,10 +2246,11 @@ class AppDomain : public BaseDomain
#ifdef FEATURE_COMINTEROP
// this cache stores the RCWs in this domain
RCWCache *m_pRCWCache;
-
+#endif //FEATURE_COMINTEROP
+#ifdef FEATURE_COMWRAPPERS
// this cache stores the RCW -> CCW references in this domain
RCWRefCache *m_pRCWRefCache;
-#endif // FEATURE_COMINTEROP
+#endif // FEATURE_COMWRAPPERS
// The thread-pool index of this app domain among existing app domains (starting from 1)
TPIndex m_tpIndex;
diff --git a/src/coreclr/vm/arm/cgencpu.h b/src/coreclr/vm/arm/cgencpu.h
index 4f6e1deb4fe45f..88d0c6802b69df 100644
--- a/src/coreclr/vm/arm/cgencpu.h
+++ b/src/coreclr/vm/arm/cgencpu.h
@@ -232,7 +232,7 @@ inline void ClearITState(T_CONTEXT *context) {
}
#ifdef FEATURE_COMINTEROP
-void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target);
+void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target);
#endif // FEATURE_COMINTEROP
//------------------------------------------------------------------------
@@ -283,14 +283,14 @@ inline int16_t decodeUnconditionalBranchThumb(LPBYTE pBuffer)
}
//------------------------------------------------------------------------
-inline void emitJump(LPBYTE pBuffer, LPVOID target)
+inline void emitJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target)
{
LIMITED_METHOD_CONTRACT;
// The PC-relative load we emit below requires 4-byte alignment for the offset to be calculated correctly.
- _ASSERTE(((UINT_PTR)pBuffer & 3) == 0);
+ _ASSERTE(((UINT_PTR)pBufferRX & 3) == 0);
- DWORD * pCode = (DWORD *)pBuffer;
+ DWORD * pCode = (DWORD *)pBufferRW;
// ldr pc, [pc, #0]
pCode[0] = 0xf000f8df;
@@ -335,10 +335,10 @@ inline BOOL isBackToBackJump(PCODE pBuffer)
}
//------------------------------------------------------------------------
-inline void emitBackToBackJump(LPBYTE pBuffer, LPVOID target)
+inline void emitBackToBackJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target)
{
WRAPPER_NO_CONTRACT;
- emitJump(pBuffer, target);
+ emitJump(pBufferRX, pBufferRW, target);
}
//------------------------------------------------------------------------
@@ -943,7 +943,7 @@ struct DECLSPEC_ALIGN(4) UMEntryThunkCode
TADDR m_pTargetCode;
TADDR m_pvSecretParam;
- void Encode(BYTE* pTargetCode, void* pvSecretParam);
+ void Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam);
void Poison();
LPCBYTE GetEntryPoint() const
@@ -1055,7 +1055,7 @@ struct StubPrecode {
TADDR m_pTarget;
TADDR m_pMethodDesc;
- void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+ void Init(StubPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
TADDR GetMethodDesc()
{
@@ -1078,7 +1078,8 @@ struct StubPrecode {
}
CONTRACTL_END;
- InterlockedExchange((LONG*)&m_pTarget, (LONG)GetPreStubEntryPoint());
+ ExecutableWriterHolder precodeWriterHolder(this, sizeof(StubPrecode));
+ InterlockedExchange((LONG*)&precodeWriterHolder.GetRW()->m_pTarget, (LONG)GetPreStubEntryPoint());
}
BOOL SetTargetInterlocked(TADDR target, TADDR expected)
@@ -1090,8 +1091,9 @@ struct StubPrecode {
}
CONTRACTL_END;
+ ExecutableWriterHolder precodeWriterHolder(this, sizeof(StubPrecode));
return (TADDR)InterlockedCompareExchange(
- (LONG*)&m_pTarget, (LONG)target, (LONG)expected) == expected;
+ (LONG*)&precodeWriterHolder.GetRW()->m_pTarget, (LONG)target, (LONG)expected) == expected;
}
#ifdef FEATURE_PREJIT
@@ -1114,7 +1116,7 @@ struct NDirectImportPrecode {
// takes advantage of this to detect NDirectImportPrecode.
TADDR m_pTarget;
- void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+ void Init(NDirectImportPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
TADDR GetMethodDesc()
{
@@ -1155,7 +1157,7 @@ struct FixupPrecode {
BYTE m_PrecodeChunkIndex;
TADDR m_pTarget;
- void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex = 0, int iPrecodeChunkIndex = 0);
+ void Init(FixupPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex = 0, int iPrecodeChunkIndex = 0);
TADDR GetBase()
{
@@ -1182,7 +1184,8 @@ struct FixupPrecode {
}
CONTRACTL_END;
- InterlockedExchange((LONG*)&m_pTarget, (LONG)GetEEFuncEntryPoint(PrecodeFixupThunk));
+ ExecutableWriterHolder precodeWriterHolder(this, sizeof(FixupPrecode));
+ InterlockedExchange((LONG*)&precodeWriterHolder.GetRW()->m_pTarget, (LONG)GetEEFuncEntryPoint(PrecodeFixupThunk));
}
BOOL SetTargetInterlocked(TADDR target, TADDR expected)
@@ -1194,8 +1197,9 @@ struct FixupPrecode {
}
CONTRACTL_END;
+ ExecutableWriterHolder precodeWriterHolder(this, sizeof(FixupPrecode));
return (TADDR)InterlockedCompareExchange(
- (LONG*)&m_pTarget, (LONG)target, (LONG)expected) == expected;
+ (LONG*)&precodeWriterHolder.GetRW()->m_pTarget, (LONG)target, (LONG)expected) == expected;
}
static BOOL IsFixupPrecodeByASM(PCODE addr)
@@ -1261,7 +1265,8 @@ struct ThisPtrRetBufPrecode {
}
CONTRACTL_END;
- return FastInterlockCompareExchange((LONG*)&m_pTarget, (LONG)target, (LONG)expected) == (LONG)expected;
+ ExecutableWriterHolder precodeWriterHolder(this, sizeof(ThisPtrRetBufPrecode));
+ return FastInterlockCompareExchange((LONG*)&precodeWriterHolder.GetRW()->m_pTarget, (LONG)target, (LONG)expected) == (LONG)expected;
}
};
typedef DPTR(ThisPtrRetBufPrecode) PTR_ThisPtrRetBufPrecode;
@@ -1364,7 +1369,7 @@ class CallCountingStubShort : public CallCountingStub
#ifndef DACCESS_COMPILE
public:
- CallCountingStubShort(CallCount *remainingCallCountCell, PCODE targetForMethod)
+ CallCountingStubShort(CallCountingStubShort* stubRX, CallCount *remainingCallCountCell, PCODE targetForMethod)
: m_part0{ 0xb401, // push {r0}
0xf8df, 0xc01c, // ldr r12, [pc, #(m_remainingCallCountCell)]
0xf8bc, 0x0000, // ldrh r0, [r12]
diff --git a/src/coreclr/vm/arm/stubs.cpp b/src/coreclr/vm/arm/stubs.cpp
index 1ca6fd09642d03..aac3e25b18146f 100644
--- a/src/coreclr/vm/arm/stubs.cpp
+++ b/src/coreclr/vm/arm/stubs.cpp
@@ -98,7 +98,7 @@ class ThumbCondJump : public InstructionFormat
//Encoding 1|0|1|1|op|0|i|1|imm5|Rn
//op = Bit3(variation)
//Rn = Bits2-0(variation)
- virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT variationCode, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT
@@ -109,8 +109,8 @@ class ThumbCondJump : public InstructionFormat
_ASSERTE((fixedUpReference & 0x1) == 0);
- pOutBuffer[0] = static_cast(((0x3e & fixedUpReference) << 2) | (0x7 & variationCode));
- pOutBuffer[1] = static_cast(0xb1 | (0x8 & variationCode)| ((0x40 & fixedUpReference)>>5));
+ pOutBufferRW[0] = static_cast(((0x3e & fixedUpReference) << 2) | (0x7 & variationCode));
+ pOutBufferRW[1] = static_cast(0xb1 | (0x8 & variationCode)| ((0x40 & fixedUpReference)>>5));
}
};
@@ -138,7 +138,7 @@ class ThumbNearJump : public InstructionFormat
return 0;
}
- virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT cond, BYTE *pDataBuffer)
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT cond, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT
@@ -155,8 +155,8 @@ class ThumbNearJump : public InstructionFormat
_ASSERTE(!"Expected refSize to be 2");
//Emit T2 encoding of B