diff --git a/clang/cmake/caches/Fuchsia-stage2-instrumented.cmake b/clang/cmake/caches/Fuchsia-stage2-instrumented.cmake new file mode 100644 index 0000000000000..b3c3b63066363 --- /dev/null +++ b/clang/cmake/caches/Fuchsia-stage2-instrumented.cmake @@ -0,0 +1,44 @@ +# This file sets up a CMakeCache for the second stage of a Fuchsia toolchain build. + +include(${CMAKE_CURRENT_LIST_DIR}/Fuchsia-stage2.cmake) + +if(NOT APPLE) + set(BOOTSTRAP_LLVM_ENABLE_LLD ON CACHE BOOL "") +endif() + +set(CLANG_BOOTSTRAP_TARGETS + check-all + check-clang + check-lld + check-llvm + clang + clang-test-depends + toolchain-distribution + install-toolchain-distribution + install-toolchain-distribution-stripped + install-toolchain-distribution-toolchain + lld-test-depends + llvm-config + llvm-test-depends + test-depends + test-suite CACHE STRING "") + +get_cmake_property(variableNames VARIABLES) +foreach(variableName ${variableNames}) + if(variableName MATCHES "^STAGE2_") + string(REPLACE "STAGE2_" "" new_name ${variableName}) + list(APPEND EXTRA_ARGS "-D${new_name}=${${variableName}}") + endif() +endforeach() + +set(CLANG_PGO_TRAINING_DEPS + builtins + runtimes + CACHE STRING "") + +# Setup the bootstrap build. +set(CLANG_ENABLE_BOOTSTRAP ON CACHE BOOL "") +set(CLANG_BOOTSTRAP_CMAKE_ARGS + ${EXTRA_ARGS} + -C ${CMAKE_CURRENT_LIST_DIR}/Fuchsia-stage2.cmake + CACHE STRING "") diff --git a/clang/cmake/caches/Fuchsia.cmake b/clang/cmake/caches/Fuchsia.cmake index 83336589da305..373b7ddd6e344 100644 --- a/clang/cmake/caches/Fuchsia.cmake +++ b/clang/cmake/caches/Fuchsia.cmake @@ -126,6 +126,16 @@ else() set(LIBCXX_ENABLE_STATIC_ABI_LIBRARY ON CACHE BOOL "") set(LIBCXX_HARDENING_MODE "none" CACHE STRING "") set(LIBCXX_USE_COMPILER_RT ON CACHE BOOL "") + set(COMPILER_RT_BUILD_LIBFUZZER OFF CACHE BOOL "") + set(COMPILER_RT_BUILD_PROFILE ON CACHE BOOL "") + set(COMPILER_RT_BUILD_SANITIZERS OFF CACHE BOOL "") + set(COMPILER_RT_BUILD_XRAY OFF CACHE BOOL "") + set(COMPILER_RT_USE_BUILTINS_LIBRARY ON CACHE BOOL "") + set(COMPILER_RT_DEFAULT_TARGET_ONLY ON CACHE BOOL "") + set(SANITIZER_CXX_ABI "libc++" CACHE STRING "") + set(SANITIZER_CXX_ABI_INTREE ON CACHE BOOL "") + set(SANITIZER_TEST_CXX "libc++" CACHE STRING "") + set(SANITIZER_TEST_CXX_INTREE ON CACHE BOOL "") set(LLVM_ENABLE_RUNTIMES "compiler-rt;libcxx;libcxxabi;libunwind" CACHE STRING "") set(RUNTIMES_CMAKE_ARGS "-DCMAKE_OSX_DEPLOYMENT_TARGET=10.13;-DCMAKE_OSX_ARCHITECTURES=arm64|x86_64" CACHE STRING "") endif() @@ -164,34 +174,29 @@ endif() set(BOOTSTRAP_LLVM_ENABLE_LLD ON CACHE BOOL "") set(BOOTSTRAP_LLVM_ENABLE_LTO ON CACHE BOOL "") +set(BOOTSTRAP_LLVM_BUILD_INSTRUMENTED ON CACHE BOOL "") set(_FUCHSIA_BOOTSTRAP_TARGETS - check-all - check-clang - check-lld - check-llvm - check-polly - llvm-config - clang-test-depends - lld-test-depends - llvm-test-depends - test-suite - test-depends - toolchain-distribution - install-toolchain-distribution - install-toolchain-distribution-stripped - install-toolchain-distribution-toolchain - clang) + generate-profdata + stage2 + stage2-toolchain-distribution + stage2-install-toolchain-distribution + stage2-install-toolchain-distribution-stripped + stage2-install-toolchain-distribution-toolchain + stage2-check-all + stage2-check-lld + stage2-check-llvm + stage2-check-clang + stage2-test-suite) if(FUCHSIA_ENABLE_LLDB) list(APPEND _FUCHSIA_ENABLE_PROJECTS lldb) list(APPEND _FUCHSIA_BOOTSTRAP_TARGETS - check-lldb - lldb-test-depends - debugger-distribution - install-debugger-distribution - install-debugger-distribution-stripped - 
install-debugger-distribution-toolchain) + stage2-check-lldb + stage2-debugger-distribution + stage2-install-debugger-distribution + stage2-install-debugger-distribution-stripped + stage2-install-debugger-distribution-toolchain) endif() set(LLVM_ENABLE_PROJECTS ${_FUCHSIA_ENABLE_PROJECTS} CACHE STRING "") @@ -200,6 +205,7 @@ set(CLANG_BOOTSTRAP_TARGETS ${_FUCHSIA_BOOTSTRAP_TARGETS} CACHE STRING "") get_cmake_property(variableNames VARIABLES) foreach(variableName ${variableNames}) if(variableName MATCHES "^STAGE2_") + list(APPEND EXTRA_ARGS "-D${variableName}=${${variableName}}") string(REPLACE "STAGE2_" "" new_name ${variableName}) string(REPLACE ";" "|" value "${${variableName}}") list(APPEND EXTRA_ARGS "-D${new_name}=${value}") @@ -209,6 +215,9 @@ endforeach() # TODO: This is a temporary workaround until we figure out the right solution. set(BOOTSTRAP_LLVM_ENABLE_RUNTIMES "compiler-rt;libcxx;libcxxabi;libunwind" CACHE STRING "") +set(LLVM_BUILTIN_TARGETS "default" CACHE STRING "") +set(LLVM_RUNTIME_TARGETS "default" CACHE STRING "") + # Setup the bootstrap build. set(CLANG_ENABLE_BOOTSTRAP ON CACHE BOOL "") set(CLANG_BOOTSTRAP_EXTRA_DEPS @@ -217,5 +226,5 @@ set(CLANG_BOOTSTRAP_EXTRA_DEPS CACHE STRING "") set(CLANG_BOOTSTRAP_CMAKE_ARGS ${EXTRA_ARGS} - -C ${CMAKE_CURRENT_LIST_DIR}/Fuchsia-stage2.cmake + -C ${CMAKE_CURRENT_LIST_DIR}/Fuchsia-stage2-instrumented.cmake CACHE STRING "") diff --git a/clang/docs/BoundsSafety.rst b/clang/docs/BoundsSafety.rst index e24c69d8c7855..8635bec6e17c7 100644 --- a/clang/docs/BoundsSafety.rst +++ b/clang/docs/BoundsSafety.rst @@ -959,7 +959,8 @@ that has the define. #if defined(__has_feature) && __has_feature(bounds_safety) #define __counted_by(T) __attribute__((__counted_by__(T))) // ... other bounds annotations - #else #define __counted_by(T) // defined as nothing + #else + #define __counted_by(T) // defined as nothing // ... other bounds annotations #endif @@ -987,7 +988,7 @@ and it does not guarantee other types of memory safety properties. Consequently, it may not prevent some of the secondary bounds safety violations caused by other types of safety violations such as type confusion. For instance, ``-fbounds-safety`` does not perform type-safety checks on conversions between -`__single`` pointers of different pointee types (e.g., ``char *__single`` → +``__single`` pointers of different pointee types (e.g., ``char *__single`` → ``void *__single`` → ``int *__single``) beyond what the foundation languages (C/C++) already offer. @@ -1003,4 +1004,4 @@ Try it out Your feedback on the programming model is valuable. You may want to follow the instruction in :doc:`BoundsSafetyAdoptionGuide` to play with ``-fbounds-safety`` -and please send your feedback to `Yeoul Na `_. \ No newline at end of file +and please send your feedback to `Yeoul Na `_. diff --git a/clang/include/clang/AST/OperationKinds.def b/clang/include/clang/AST/OperationKinds.def index 8788b8ff0ef0a..b3dc7c3d8dc77 100644 --- a/clang/include/clang/AST/OperationKinds.def +++ b/clang/include/clang/AST/OperationKinds.def @@ -367,6 +367,9 @@ CAST_OPERATION(HLSLVectorTruncation) // Non-decaying array RValue cast (HLSL only). CAST_OPERATION(HLSLArrayRValue) +// Aggregate by Value cast (HLSL only). +CAST_OPERATION(HLSLElementwiseCast) + //===- Binary Operations -------------------------------------------------===// // Operators listed in order of precedence. 
// Note that additions to this should also update the StmtVisitor class, diff --git a/clang/include/clang/ASTMatchers/ASTMatchers.h b/clang/include/clang/ASTMatchers/ASTMatchers.h index 239fcba4e5e05..0f7e3a8a01762 100644 --- a/clang/include/clang/ASTMatchers/ASTMatchers.h +++ b/clang/include/clang/ASTMatchers/ASTMatchers.h @@ -2489,7 +2489,28 @@ extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral; -/// Matches fixed point literals +/// Matches fixed-point literals, e.g. +/// 0.5r, 0.5hr, 0.5lr, 0.5uhr, 0.5ur, 0.5ulr +/// 1.0k, 1.0hk, 1.0lk, 1.0uhk, 1.0uk, 1.0ulk +/// Exponents 1.0e10k +/// Hexadecimal numbers 0x0.2p2r +/// +/// Does not match implicit conversions such as the first two lines: +/// \code +/// short _Accum sa = 2; +/// _Accum a = 12.5; +/// _Accum b = 1.25hk; +/// _Fract c = 0.25hr; +/// _Fract v = 0.35uhr; +/// _Accum g = 1.45uhk; +/// _Accum decexp1 = 1.575e1k; +/// \endcode +/// \compile_args{-ffixed-point;-std=c99} +/// +/// The matcher \matcher{fixedPointLiteral()} matches +/// \match{1.25hk}, \match{0.25hr}, \match{0.35uhr}, +/// \match{1.45uhk}, \match{1.575e1k}, but does not +/// match \nomatch{12.5} and \nomatch{2} from the code block. extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral> fixedPointLiteral; diff --git a/clang/include/clang/Sema/SemaHLSL.h b/clang/include/clang/Sema/SemaHLSL.h index 20376e980ab35..6e8ca2e4710de 100644 --- a/clang/include/clang/Sema/SemaHLSL.h +++ b/clang/include/clang/Sema/SemaHLSL.h @@ -141,6 +141,9 @@ class SemaHLSL : public SemaBase { // Diagnose whether the input ID is uint/uint2/uint3 type. bool diagnoseInputIDType(QualType T, const ParsedAttr &AL); + bool CanPerformScalarCast(QualType SrcTy, QualType DestTy); + bool ContainsBitField(QualType BaseTy); + bool CanPerformElementwiseCast(Expr *Src, QualType DestType); ExprResult ActOnOutParamExpr(ParmVarDecl *Param, Expr *Arg); QualType getInoutParameterType(QualType Ty); diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp index 1e1e96a1c4782..1f0e022edcd76 100644 --- a/clang/lib/AST/ByteCode/Compiler.cpp +++ b/clang/lib/AST/ByteCode/Compiler.cpp @@ -6234,9 +6234,6 @@ bool Compiler<Emitter>::visitDeclRef(const ValueDecl *D, const Expr *E) { return this->emitGetPtrParam(It->second.Offset, E); } - - if (D->getType()->isReferenceType()) - return this->emitDummyPtr(D, E); } // In case we need to re-visit a declaration.
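For orientation before the codegen plumbing below: the new CK_HLSLElementwiseCast kind models HLSL's C-style "flat" cast between aggregates. Both operands are flattened depth-first in declaration order, and each element pair is converted like an ordinary scalar cast. A minimal HLSL sketch of what this patch accepts (hypothetical snippet; the names are placeholders, not part of the patch):

    struct S { int X; float Y; };
    export void example() {
      int Vals[2] = {1, 2};
      S s = (S)Vals;    // s.X <- Vals[0], s.Y <- (float)Vals[1]
      int2 V = (int2)s; // V.x <- s.X, V.y <- (int)s.Y
    }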
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp index 4fc62919fde94..c22aa66ba2cfb 100644 --- a/clang/lib/AST/Expr.cpp +++ b/clang/lib/AST/Expr.cpp @@ -1956,6 +1956,7 @@ bool CastExpr::CastConsistency() const { case CK_FixedPointToBoolean: case CK_HLSLArrayRValue: case CK_HLSLVectorTruncation: + case CK_HLSLElementwiseCast: CheckNoBasePath: assert(path_empty() && "Cast kind should not have a base path!"); break; diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp index 37019b5235f56..192b679b4c995 100644 --- a/clang/lib/AST/ExprConstant.cpp +++ b/clang/lib/AST/ExprConstant.cpp @@ -15047,6 +15047,7 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) { case CK_NoOp: case CK_LValueToRValueBitCast: case CK_HLSLArrayRValue: + case CK_HLSLElementwiseCast: return ExprEvaluatorBaseTy::VisitCastExpr(E); case CK_MemberPointerToBoolean: @@ -15905,6 +15906,7 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) { case CK_IntegralToFixedPoint: case CK_MatrixCast: case CK_HLSLVectorTruncation: + case CK_HLSLElementwiseCast: llvm_unreachable("invalid cast kind for complex value"); case CK_LValueToRValue: diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index bf8df2789f58d..1e233c42c8782 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -5338,6 +5338,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { case CK_MatrixCast: case CK_HLSLVectorTruncation: case CK_HLSLArrayRValue: + case CK_HLSLElementwiseCast: return EmitUnsupportedLValue(E, "unexpected cast lvalue"); case CK_Dependent: @@ -6376,3 +6377,75 @@ RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E, LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) { return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV; } + +void CodeGenFunction::FlattenAccessAndType( + Address Addr, QualType AddrType, + SmallVectorImpl<std::pair<Address, llvm::Value *>> &AccessList, + SmallVectorImpl<QualType> &FlatTypes) { + // WorkList holds each type we are processing together with the index list + // used to access that type's fields in Addr via a GEP + llvm::SmallVector<std::pair<QualType, llvm::SmallVector<llvm::Value *, 4>>, + 16> + WorkList; + llvm::IntegerType *IdxTy = llvm::IntegerType::get(getLLVMContext(), 32); + // Addr should be a pointer so we need to 'dereference' it + WorkList.push_back({AddrType, {llvm::ConstantInt::get(IdxTy, 0)}}); + + while (!WorkList.empty()) { + auto [T, IdxList] = WorkList.pop_back_val(); + T = T.getCanonicalType().getUnqualifiedType(); + assert(!isa<MatrixType>(T) && "Matrix types not yet supported in HLSL"); + if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) { + uint64_t Size = CAT->getZExtSize(); + for (int64_t I = Size - 1; I > -1; I--) { + llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList; + IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I)); + WorkList.emplace_back(CAT->getElementType(), IdxListCopy); + } + } else if (const auto *RT = dyn_cast<RecordType>(T)) { + const RecordDecl *Record = RT->getDecl(); + assert(!Record->isUnion() && "Union types not supported in flat cast."); + + const CXXRecordDecl *CXXD = dyn_cast<CXXRecordDecl>(Record); + + llvm::SmallVector<QualType, 16> FieldTypes; + if (CXXD && CXXD->isStandardLayout()) + Record = CXXD->getStandardLayoutBaseWithFields(); + + // Deal with potential base classes + if (CXXD && !CXXD->isStandardLayout()) { + for (auto &Base : CXXD->bases()) + FieldTypes.push_back(Base.getType()); + } + + for (auto *FD : Record->fields()) + FieldTypes.push_back(FD->getType()); + + for (int64_t I = FieldTypes.size() - 1; I > -1; I--) { + llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList; + 
IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I)); + WorkList.insert(WorkList.end(), {FieldTypes[I], IdxListCopy}); + } + } else if (const auto *VT = dyn_cast<VectorType>(T)) { + llvm::Type *LLVMT = ConvertTypeForMem(T); + CharUnits Align = getContext().getTypeAlignInChars(T); + Address GEP = + Builder.CreateInBoundsGEP(Addr, IdxList, LLVMT, Align, "vector.gep"); + for (unsigned I = 0, E = VT->getNumElements(); I < E; I++) { + llvm::Value *Idx = llvm::ConstantInt::get(IdxTy, I); + // GEP on vector fields is not recommended, so combine the GEP with an + // extract/insert + AccessList.emplace_back(GEP, Idx); + FlatTypes.push_back(VT->getElementType()); + } + } else { + // A scalar/builtin type + llvm::Type *LLVMT = ConvertTypeForMem(T); + CharUnits Align = getContext().getTypeAlignInChars(T); + Address GEP = + Builder.CreateInBoundsGEP(Addr, IdxList, LLVMT, Align, "gep"); + AccessList.emplace_back(GEP, nullptr); + FlatTypes.push_back(T); + } + } +} diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp index 2ad6587089f10..c3f1cbed6b39f 100644 --- a/clang/lib/CodeGen/CGExprAgg.cpp +++ b/clang/lib/CodeGen/CGExprAgg.cpp @@ -491,6 +491,79 @@ static bool isTrivialFiller(Expr *E) { return false; } +// Emit a flat cast where the RHS is a scalar, including vector +static void EmitHLSLScalarFlatCast(CodeGenFunction &CGF, Address DestVal, + QualType DestTy, llvm::Value *SrcVal, + QualType SrcTy, SourceLocation Loc) { + // Flatten our destination + SmallVector<QualType, 16> DestTypes; // Flattened type + SmallVector<std::pair<Address, llvm::Value *>, 16> StoreGEPList; + // ^^ Flattened accesses to DestVal we want to store into + CGF.FlattenAccessAndType(DestVal, DestTy, StoreGEPList, DestTypes); + + assert(SrcTy->isVectorType() && "HLSL Flat cast doesn't handle splatting."); + const VectorType *VT = SrcTy->getAs<VectorType>(); + SrcTy = VT->getElementType(); + assert(StoreGEPList.size() <= VT->getNumElements() && + "Cannot perform HLSL flat cast when vector source \ + object has fewer elements than flattened destination \ + object."); + for (unsigned I = 0, Size = StoreGEPList.size(); I < Size; I++) { + llvm::Value *Load = CGF.Builder.CreateExtractElement(SrcVal, I, "vec.load"); + llvm::Value *Cast = + CGF.EmitScalarConversion(Load, SrcTy, DestTypes[I], Loc); + + // store back + llvm::Value *Idx = StoreGEPList[I].second; + if (Idx) { + llvm::Value *V = + CGF.Builder.CreateLoad(StoreGEPList[I].first, "load.for.insert"); + Cast = CGF.Builder.CreateInsertElement(V, Cast, Idx); + } + CGF.Builder.CreateStore(Cast, StoreGEPList[I].first); + } + return; +} + +// Emit a flat cast where the RHS is an aggregate +static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, Address DestVal, + QualType DestTy, Address SrcVal, + QualType SrcTy, SourceLocation Loc) { + // Flatten our destination + SmallVector<QualType, 16> DestTypes; // Flattened type + SmallVector<std::pair<Address, llvm::Value *>, 16> StoreGEPList; + // ^^ Flattened accesses to DestVal we want to store into + CGF.FlattenAccessAndType(DestVal, DestTy, StoreGEPList, DestTypes); + // Flatten our src + SmallVector<QualType, 16> SrcTypes; // Flattened type + SmallVector<std::pair<Address, llvm::Value *>, 16> LoadGEPList; + // ^^ Flattened accesses to SrcVal we want to load from + CGF.FlattenAccessAndType(SrcVal, SrcTy, LoadGEPList, SrcTypes); + + assert(StoreGEPList.size() <= LoadGEPList.size() && + "Cannot perform HLSL flat cast when flattened source object \ + has fewer elements than flattened destination object."); + // Apply casts to what we load from LoadGEPList + // and store the result in Dest + for (unsigned I = 0, E = StoreGEPList.size(); I < E; I++) { + llvm::Value *Idx = LoadGEPList[I].second;
+ llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[I].first, "load"); + Load = + Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract") : Load; + llvm::Value *Cast = + CGF.EmitScalarConversion(Load, SrcTypes[I], DestTypes[I], Loc); + + // store back + Idx = StoreGEPList[I].second; + if (Idx) { + llvm::Value *V = + CGF.Builder.CreateLoad(StoreGEPList[I].first, "load.for.insert"); + Cast = CGF.Builder.CreateInsertElement(V, Cast, Idx); + } + CGF.Builder.CreateStore(Cast, StoreGEPList[I].first); + } +} + /// Emit initialization of an array from an initializer list. ExprToVisit must /// be either an InitListExpr or a CXXParenInitListExpr. void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, @@ -890,7 +963,25 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { case CK_HLSLArrayRValue: Visit(E->getSubExpr()); break; - + case CK_HLSLElementwiseCast: { + Expr *Src = E->getSubExpr(); + QualType SrcTy = Src->getType(); + RValue RV = CGF.EmitAnyExpr(Src); + QualType DestTy = E->getType(); + Address DestVal = Dest.getAddress(); + SourceLocation Loc = E->getExprLoc(); + + if (RV.isScalar()) { + llvm::Value *SrcVal = RV.getScalarVal(); + EmitHLSLScalarFlatCast(CGF, DestVal, DestTy, SrcVal, SrcTy, Loc); + } else { + assert(RV.isAggregate() && + "Can't perform HLSL Aggregate cast on a complex type."); + Address SrcVal = RV.getAggregateAddress(); + EmitHLSLElementwiseCast(CGF, DestVal, DestTy, SrcVal, SrcTy, Loc); + } + break; + } case CK_NoOp: case CK_UserDefinedConversion: case CK_ConstructorConversion: @@ -1461,6 +1552,7 @@ static bool castPreservesZero(const CastExpr *CE) { case CK_NonAtomicToAtomic: case CK_AtomicToNonAtomic: case CK_HLSLVectorTruncation: + case CK_HLSLElementwiseCast: return true; case CK_BaseToDerivedMemberPointer: diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp index ac31dff11b585..c2679ea92dc97 100644 --- a/clang/lib/CodeGen/CGExprComplex.cpp +++ b/clang/lib/CodeGen/CGExprComplex.cpp @@ -610,6 +610,7 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op, case CK_MatrixCast: case CK_HLSLVectorTruncation: case CK_HLSLArrayRValue: + case CK_HLSLElementwiseCast: llvm_unreachable("invalid cast kind for complex value"); case CK_FloatingRealToComplex: diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp index 655fc3dc954c8..ef11798869d3b 100644 --- a/clang/lib/CodeGen/CGExprConstant.cpp +++ b/clang/lib/CodeGen/CGExprConstant.cpp @@ -1335,6 +1335,7 @@ class ConstExprEmitter case CK_MatrixCast: case CK_HLSLVectorTruncation: case CK_HLSLArrayRValue: + case CK_HLSLElementwiseCast: return nullptr; } llvm_unreachable("Invalid CastKind"); diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp index df850421c72c6..80daed7e53951 100644 --- a/clang/lib/CodeGen/CGExprScalar.cpp +++ b/clang/lib/CodeGen/CGExprScalar.cpp @@ -2269,6 +2269,42 @@ bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) { return true; } +// RHS is an aggregate type +static Value *EmitHLSLElementwiseCast(CodeGenFunction &CGF, Address RHSVal, + QualType RHSTy, QualType LHSTy, + SourceLocation Loc) { + SmallVector<std::pair<Address, llvm::Value *>, 16> LoadGEPList; + SmallVector<QualType, 16> SrcTypes; // Flattened type + CGF.FlattenAccessAndType(RHSVal, RHSTy, LoadGEPList, SrcTypes); + // LHS is either a vector or a builtin?
+ // If it's a vector, create a temp alloca to store into and return that + if (auto *VecTy = LHSTy->getAs<VectorType>()) { + assert(SrcTypes.size() >= VecTy->getNumElements() && + "Flattened type on RHS must have at least as many elements as the vector on the LHS."); + llvm::Value *V = + CGF.Builder.CreateLoad(CGF.CreateIRTemp(LHSTy, "flatcast.tmp")); + // write to V. + for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) { + llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[I].first, "load"); + llvm::Value *Idx = LoadGEPList[I].second; + Load = Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract") + : Load; + llvm::Value *Cast = CGF.EmitScalarConversion( + Load, SrcTypes[I], VecTy->getElementType(), Loc); + V = CGF.Builder.CreateInsertElement(V, Cast, I); + } + return V; + } + // If it's a builtin, just do an extractelement or a load. + assert(LHSTy->isBuiltinType() && + "Destination type must be a vector or builtin type."); + llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[0].first, "load"); + llvm::Value *Idx = LoadGEPList[0].second; + Load = + Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract") : Load; + return CGF.EmitScalarConversion(Load, SrcTypes[0], LHSTy, Loc); +} + // VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts // have to handle a broader range of conversions than explicit casts, as they // handle things like function to ptr-to-function decay etc. @@ -2759,7 +2795,16 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy); return Builder.CreateExtractElement(Vec, Zero, "cast.vtrunc"); } + case CK_HLSLElementwiseCast: { + RValue RV = CGF.EmitAnyExpr(E); + SourceLocation Loc = CE->getExprLoc(); + QualType SrcTy = E->getType(); + assert(RV.isAggregate() && "Not a valid HLSL Flat Cast."); + // RHS is an aggregate + Address SrcVal = RV.getAggregateAddress(); + return EmitHLSLElementwiseCast(CGF, SrcVal, SrcTy, DestTy, Loc); + } } // end of switch llvm_unreachable("unknown scalar cast"); diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index ced3484fbd2b6..e7a5100a9fa29 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -4439,6 +4439,11 @@ class CodeGenFunction : public CodeGenTypeCache { AggValueSlot slot = AggValueSlot::ignored()); LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e); + void FlattenAccessAndType( + Address Addr, QualType AddrTy, + SmallVectorImpl<std::pair<Address, llvm::Value *>> &AccessList, + SmallVectorImpl<QualType> &FlatTypes); + llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar); llvm::Value *EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 045cae0a2c468..62d4336c6be59 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -9187,7 +9187,7 @@ void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA, OPT_fno_lto, OPT_flto, OPT_flto_EQ}; - const llvm::DenseSet<unsigned> LinkerOptions{OPT_mllvm}; + const llvm::DenseSet<unsigned> LinkerOptions{OPT_mllvm, OPT_Zlinker_input}; auto ShouldForward = [&](const llvm::DenseSet<unsigned> &Set, Arg *A) { return Set.contains(A->getOption().getID()) || (A->getOption().getGroup().isValid() && @@ -9205,7 +9205,9 @@ void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA, ArgStringList CompilerArgs; ArgStringList LinkerArgs; for (Arg *A : C.getArgsForToolChain(TC, /*BoundArch=*/"", Kind)) { - if 
(ShouldForward(CompilerOptions, A)) + if (A->getOption().matches(OPT_Zlinker_input)) + LinkerArgs.emplace_back(A->getValue()); + else if (ShouldForward(CompilerOptions, A)) A->render(Args, CompilerArgs); else if (ShouldForward(LinkerOptions, A)) A->render(Args, LinkerArgs); diff --git a/clang/lib/Edit/RewriteObjCFoundationAPI.cpp b/clang/lib/Edit/RewriteObjCFoundationAPI.cpp index 81797c8c4dc75..32f5ebb55155e 100644 --- a/clang/lib/Edit/RewriteObjCFoundationAPI.cpp +++ b/clang/lib/Edit/RewriteObjCFoundationAPI.cpp @@ -1085,6 +1085,7 @@ static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg, llvm_unreachable("OpenCL-specific cast in Objective-C?"); case CK_HLSLVectorTruncation: + case CK_HLSLElementwiseCast: llvm_unreachable("HLSL-specific cast in Objective-C?"); break; diff --git a/clang/lib/Sema/SemaCast.cpp b/clang/lib/Sema/SemaCast.cpp index 54bc52fa2ac40..23be71ad8e2ae 100644 --- a/clang/lib/Sema/SemaCast.cpp +++ b/clang/lib/Sema/SemaCast.cpp @@ -23,6 +23,7 @@ #include "clang/Basic/TargetInfo.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Initialization.h" +#include "clang/Sema/SemaHLSL.h" #include "clang/Sema/SemaObjC.h" #include "clang/Sema/SemaRISCV.h" #include "llvm/ADT/SmallVector.h" @@ -2772,6 +2773,22 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle, return; } + CheckedConversionKind CCK = FunctionalStyle + ? CheckedConversionKind::FunctionalCast + : CheckedConversionKind::CStyleCast; + // This case should not trigger on a regular vector splat, vector cast, + // vector truncation, or the special HLSL splat cases + QualType SrcTy = SrcExpr.get()->getType(); + if (Self.getLangOpts().HLSL && + Self.HLSL().CanPerformElementwiseCast(SrcExpr.get(), DestType)) { + if (SrcTy->isConstantArrayType()) + SrcExpr = Self.ImpCastExprToType( + SrcExpr.get(), Self.Context.getArrayParameterType(SrcTy), + CK_HLSLArrayRValue, VK_PRValue, nullptr, CCK); + Kind = CK_HLSLElementwiseCast; + return; + } + if (ValueKind == VK_PRValue && !DestType->isRecordType() && !isPlaceholder(BuiltinType::Overload)) { SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get()); @@ -2824,9 +2841,6 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle, if (isValidCast(tcr)) Kind = CK_NoOp; - CheckedConversionKind CCK = FunctionalStyle - ? 
CheckedConversionKind::FunctionalCast - : CheckedConversionKind::CStyleCast; if (tcr == TC_NotApplicable) { tcr = TryAddressSpaceCast(Self, SrcExpr, DestType, /*CStyle*/ true, msg, Kind); diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp index 2cb389eefaac2..ec6b5b45de42b 100644 --- a/clang/lib/Sema/SemaHLSL.cpp +++ b/clang/lib/Sema/SemaHLSL.cpp @@ -2706,6 +2706,150 @@ bool SemaHLSL::CheckCompatibleParameterABI(FunctionDecl *New, return HadError; } +// Generally follows PerformScalarCast, with cases reordered for +// clarity about which types are supported +bool SemaHLSL::CanPerformScalarCast(QualType SrcTy, QualType DestTy) { + + if (SemaRef.getASTContext().hasSameUnqualifiedType(SrcTy, DestTy)) + return true; + + switch (SrcTy->getScalarTypeKind()) { + case Type::STK_Bool: // casting from bool is like casting from an integer + case Type::STK_Integral: + switch (DestTy->getScalarTypeKind()) { + case Type::STK_Bool: + case Type::STK_Integral: + case Type::STK_Floating: + return true; + case Type::STK_CPointer: + case Type::STK_ObjCObjectPointer: + case Type::STK_BlockPointer: + case Type::STK_MemberPointer: + llvm_unreachable("HLSL doesn't support pointers."); + case Type::STK_IntegralComplex: + case Type::STK_FloatingComplex: + llvm_unreachable("HLSL doesn't support complex types."); + case Type::STK_FixedPoint: + llvm_unreachable("HLSL doesn't support fixed point types."); + } + llvm_unreachable("Should have returned before this"); + + case Type::STK_Floating: + switch (DestTy->getScalarTypeKind()) { + case Type::STK_Floating: + case Type::STK_Bool: + case Type::STK_Integral: + return true; + case Type::STK_FloatingComplex: + case Type::STK_IntegralComplex: + llvm_unreachable("HLSL doesn't support complex types."); + case Type::STK_FixedPoint: + llvm_unreachable("HLSL doesn't support fixed point types."); + case Type::STK_CPointer: + case Type::STK_ObjCObjectPointer: + case Type::STK_BlockPointer: + case Type::STK_MemberPointer: + llvm_unreachable("HLSL doesn't support pointers."); + } + llvm_unreachable("Should have returned before this"); + + case Type::STK_MemberPointer: + case Type::STK_CPointer: + case Type::STK_BlockPointer: + case Type::STK_ObjCObjectPointer: + llvm_unreachable("HLSL doesn't support pointers."); + + case Type::STK_FixedPoint: + llvm_unreachable("HLSL doesn't support fixed point types."); + + case Type::STK_FloatingComplex: + case Type::STK_IntegralComplex: + llvm_unreachable("HLSL doesn't support complex types."); + } + + llvm_unreachable("Unhandled scalar cast"); +} + +// Detect if a type contains a bitfield. This will be removed once +// bitfield support is added to HLSLElementwiseCast +bool SemaHLSL::ContainsBitField(QualType BaseTy) { + llvm::SmallVector<QualType> WorkList; + WorkList.push_back(BaseTy); + while (!WorkList.empty()) { + QualType T = WorkList.pop_back_val(); + T = T.getCanonicalType().getUnqualifiedType(); + // Only check aggregate types + if (const auto *AT = dyn_cast<ConstantArrayType>(T)) { + WorkList.push_back(AT->getElementType()); + continue; + } + if (const auto *RT = dyn_cast<RecordType>(T)) { + const RecordDecl *RD = RT->getDecl(); + if (RD->isUnion()) + continue; + + const CXXRecordDecl *CXXD = dyn_cast<CXXRecordDecl>(RD); + + if (CXXD && CXXD->isStandardLayout()) + RD = CXXD->getStandardLayoutBaseWithFields(); + + for (const auto *FD : RD->fields()) { + if (FD->isBitField()) + return true; + WorkList.push_back(FD->getType()); + } + continue; + } + } + return false; +} + +// Can we perform an HLSL Elementwise cast? 
+// TODO: update this code when matrices are added; see issue #88060 +bool SemaHLSL::CanPerformElementwiseCast(Expr *Src, QualType DestTy) { + + // Don't handle casts where LHS and RHS are any combination of scalar/vector; + // there must be an aggregate somewhere + QualType SrcTy = Src->getType(); + if (SrcTy->isScalarType()) // always a splat and this cast doesn't handle that + return false; + + if (SrcTy->isVectorType() && + (DestTy->isScalarType() || DestTy->isVectorType())) + return false; + + if (ContainsBitField(DestTy) || ContainsBitField(SrcTy)) + return false; + + llvm::SmallVector<QualType> DestTypes; + BuildFlattenedTypeList(DestTy, DestTypes); + llvm::SmallVector<QualType> SrcTypes; + BuildFlattenedTypeList(SrcTy, SrcTypes); + + // The size of SrcTypes must be greater than or equal to the size of + // DestTypes. + if (SrcTypes.size() < DestTypes.size()) + return false; + + unsigned SrcSize = SrcTypes.size(); + unsigned DstSize = DestTypes.size(); + unsigned I; + for (I = 0; I < DstSize && I < SrcSize; I++) { + if (SrcTypes[I]->isUnionType() || DestTypes[I]->isUnionType()) + return false; + if (!CanPerformScalarCast(SrcTypes[I], DestTypes[I])) { + return false; + } + } + + // Check the rest of the source types for unions. + for (; I < SrcSize; I++) { + if (SrcTypes[I]->isUnionType()) + return false; + } + return true; +} + ExprResult SemaHLSL::ActOnOutParamExpr(ParmVarDecl *Param, Expr *Arg) { assert(Param->hasAttr<HLSLParamModifierAttr>() && "We should not get here without a parameter modifier expression"); diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp index 7a900780384a9..3a983421358c7 100644 --- a/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp +++ b/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp @@ -522,6 +522,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex, case CK_ToUnion: case CK_MatrixCast: case CK_VectorSplat: + case CK_HLSLElementwiseCast: case CK_HLSLVectorTruncation: { QualType resultType = CastE->getType(); if (CastE->isGLValue()) diff --git a/clang/test/CodeGenHLSL/BasicFeatures/ArrayElementwiseCast.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/ArrayElementwiseCast.hlsl new file mode 100644 index 0000000000000..18f82bff3b308 --- /dev/null +++ b/clang/test/CodeGenHLSL/BasicFeatures/ArrayElementwiseCast.hlsl @@ -0,0 +1,145 @@ +// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -disable-llvm-passes -emit-llvm -finclude-default-header -o - %s | FileCheck %s + +// array truncation to a scalar +// CHECK-LABEL: define void {{.*}}call0 +// CHECK: [[A:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: [[B:%.*]] = alloca float, align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G1]], align 4 +// CHECK-NEXT: [[C:%.*]] = sitofp i32 [[L]] to float +// CHECK-NEXT: store float [[C]], ptr [[B]], align 4 +export void call0() { + int A[2] = {0,1}; + float B = (float)A; +} + +// array truncation +// CHECK-LABEL: define void {{.*}}call1 +// CHECK: [[A:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: [[B:%.*]] = alloca [1 x i32], align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4
[[A]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[B]], ptr align 4 {{.*}}, i32 4, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [1 x i32], ptr [[B]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G2]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[G1]], align 4 +export void call1() { + int A[2] = {0,1}; + int B[1] = {4}; + B = (int[1])A; +} + +// just a cast +// CHECK-LABEL: define void {{.*}}call2 +// CHECK: [[A:%.*]] = alloca [1 x i32], align 4 +// CHECK-NEXT: [[B:%.*]] = alloca [1 x float], align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [1 x i32], align 4 +// CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 4 [[A]], i8 0, i32 4, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[B]], ptr align 4 {{.*}}, i32 4, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 4, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [1 x float], ptr [[B]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [1 x i32], ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G2]], align 4 +// CHECK-NEXT: [[C:%.*]] = sitofp i32 [[L]] to float +// CHECK-NEXT: store float [[C]], ptr [[G1]], align 4 +export void call2() { + int A[1] = {0}; + float B[1] = {1.0}; + B = (float[1])A; +} + +// vector to array +// CHECK-LABEL: define void {{.*}}call3 +// CHECK: [[A:%.*]] = alloca <1 x float>, align 4 +// CHECK-NEXT: [[B:%.*]] = alloca [1 x i32], align 4 +// CHECK-NEXT: store <1 x float> splat (float 0x3FF3333340000000), ptr [[A]], align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[B]], ptr align 4 {{.*}}, i32 4, i1 false) +// CHECK-NEXT: [[C:%.*]] = load <1 x float>, ptr [[A]], align 4 +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [1 x i32], ptr [[B]], i32 0, i32 0 +// CHECK-NEXT: [[V:%.*]] = extractelement <1 x float> [[C]], i64 0 +// CHECK-NEXT: [[C:%.*]] = fptosi float [[V]] to i32 +// CHECK-NEXT: store i32 [[C]], ptr [[G1]], align 4 +export void call3() { + float1 A = {1.2}; + int B[1] = {1}; + B = (int[1])A; +} + +// flatten array of vector to array with cast +// CHECK-LABEL: define void {{.*}}call5 +// CHECK: [[A:%.*]] = alloca [1 x <2 x float>], align 8 +// CHECK-NEXT: [[B:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [1 x <2 x float>], align 8 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 8 [[A]], ptr align 8 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[B]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 8 [[Tmp]], ptr align 8 [[A]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i32 0, i32 1 +// CHECK-NEXT: [[VG:%.*]] = getelementptr inbounds [1 x <2 x float>], ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[L:%.*]] = load <2 x float>, ptr [[VG]], align 8 +// CHECK-NEXT: [[VL:%.*]] = extractelement <2 x float> [[L]], i32 0 +// CHECK-NEXT: [[C:%.*]] = fptosi float [[VL]] to i32 +// CHECK-NEXT: store i32 [[C]], ptr [[G1]], align 4 +// CHECK-NEXT: [[L4:%.*]] = 
load <2 x float>, ptr [[VG]], align 8 +// CHECK-NEXT: [[VL5:%.*]] = extractelement <2 x float> [[L4]], i32 1 +// CHECK-NEXT: [[C6:%.*]] = fptosi float [[VL5]] to i32 +// CHECK-NEXT: store i32 [[C6]], ptr [[G2]], align 4 +export void call5() { + float2 A[1] = {{1.2,3.4}}; + int B[2] = {1,2}; + B = (int[2])A; +} + +// flatten 2d array +// CHECK-LABEL: define void {{.*}}call6 +// CHECK: [[A:%.*]] = alloca [2 x [1 x i32]], align 4 +// CHECK-NEXT: [[B:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [2 x [1 x i32]], align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[B]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i32 0, i32 1 +// CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds [2 x [1 x i32]], ptr [[Tmp]], i32 0, i32 0, i32 0 +// CHECK-NEXT: [[G4:%.*]] = getelementptr inbounds [2 x [1 x i32]], ptr [[Tmp]], i32 0, i32 1, i32 0 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G3]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[G1]], align 4 +// CHECK-NEXT: [[L4:%.*]] = load i32, ptr [[G4]], align 4 +// CHECK-NEXT: store i32 [[L4]], ptr [[G2]], align 4 +export void call6() { + int A[2][1] = {{1},{3}}; + int B[2] = {1,2}; + B = (int[2])A; +} + +struct S { + int X; + float Y; +}; + +// flatten and truncate from a struct +// CHECK-LABEL: define void {{.*}}call7 +// CHECK: [[s:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: [[A:%.*]] = alloca [1 x i32], align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[s]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 4, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[s]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [1 x i32], ptr [[A]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G2]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[G1]], align 4 +export void call7() { + S s = {1, 2.9}; + int A[1] = {1}; + A = (int[1])s; +} + diff --git a/clang/test/CodeGenHLSL/BasicFeatures/StructElementwiseCast.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/StructElementwiseCast.hlsl new file mode 100644 index 0000000000000..26fde37c901dd --- /dev/null +++ b/clang/test/CodeGenHLSL/BasicFeatures/StructElementwiseCast.hlsl @@ -0,0 +1,140 @@ +// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.3-library -x hlsl -emit-llvm -disable-llvm-passes -o - %s | FileCheck %s + +struct S { + int X; + float Y; +}; + +// struct truncation to a scalar +// CHECK-LABEL: define void {{.*}}call0 +// CHECK: [[s:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[s]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[s]], i32 8, i1 false) +// CHECK-NEXT: 
[[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G1]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[A]], align 4 +export void call0() { + S s = {1,2}; + int A = (int)s; +} + +// struct from vector +// CHECK-LABEL: define void {{.*}}call1 +// CHECK: [[A:%.*]] = alloca <2 x i32>, align 8 +// CHECK-NEXT: [[s:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: store <2 x i32> <i32 1, i32 2>, ptr [[A]], align 8 +// CHECK-NEXT: [[L:%.*]] = load <2 x i32>, ptr [[A]], align 8 +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 1 +// CHECK-NEXT: [[VL:%.*]] = extractelement <2 x i32> [[L]], i64 0 +// CHECK-NEXT: store i32 [[VL]], ptr [[G1]], align 4 +// CHECK-NEXT: [[VL2:%.*]] = extractelement <2 x i32> [[L]], i64 1 +// CHECK-NEXT: [[C:%.*]] = sitofp i32 [[VL2]] to float +// CHECK-NEXT: store float [[C]], ptr [[G2]], align 4 +export void call1() { + int2 A = {1,2}; + S s = (S)A; +} + + +// struct from array +// CHECK-LABEL: define void {{.*}}call2 +// CHECK: [[A:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: [[s:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 1 +// CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G4:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G3]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[G1]], align 4 +// CHECK-NEXT: [[L4:%.*]] = load i32, ptr [[G4]], align 4 +// CHECK-NEXT: [[C:%.*]] = sitofp i32 [[L4]] to float +// CHECK-NEXT: store float [[C]], ptr [[G2]], align 4 +export void call2() { + int A[2] = {1,2}; + S s = (S)A; +} + +struct Q { + int Z; +}; + +struct R { + Q q; + float F; +}; + +// struct from nested struct? +// CHECK-LABEL: define void {{.*}}call6 +// CHECK: [[r:%.*]] = alloca %struct.R, align 4 +// CHECK-NEXT: [[s:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.R, align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[r]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[r]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 1 +// CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds %struct.R, ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G4:%.*]] = getelementptr inbounds %struct.R, ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G3]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[G1]], align 4 +// CHECK-NEXT: [[L4:%.*]] = load float, ptr [[G4]], align 4 +// CHECK-NEXT: store float [[L4]], ptr [[G2]], align 4 +export void call6() { + R r = {{1}, 2.0}; + S s = (S)r; +} + +// nested struct from array? 
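// (For orientation: the cast flattens both sides depth-first in declaration
// order, so the nested Q contributes q.Z first; in call7 below, r.q.Z takes
// A[0] and r.F takes sitofp(A[1]).)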
+// CHECK-LABEL: define void {{.*}}call7 +// CHECK: [[A:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: [[r:%.*]] = alloca %struct.R, align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.R, ptr [[r]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.R, ptr [[r]], i32 0, i32 1 +// CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G4:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G3]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[G1]], align 4 +// CHECK-NEXT: [[L4:%.*]] = load i32, ptr [[G4]], align 4 +// CHECK-NEXT: [[C:%.*]] = sitofp i32 [[L4]] to float +// CHECK-NEXT: store float [[C]], ptr [[G2]], align 4 +export void call7() { + int A[2] = {1,2}; + R r = (R)A; +} + +struct T { + int A; + int B; + int C; +}; + +// struct truncation +// CHECK-LABEL: define void {{.*}}call8 +// CHECK: [[t:%.*]] = alloca %struct.T, align 4 +// CHECK-NEXT: [[s:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.T, align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[t]], ptr align 4 {{.*}}, i32 12, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[t]], i32 12, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 1 +// CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds %struct.T, ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: %gep3 = getelementptr inbounds %struct.T, ptr %agg-temp, i32 0, i32 1 +// CHECK-NEXT: %gep4 = getelementptr inbounds %struct.T, ptr %agg-temp, i32 0, i32 2 +// CHECK-NEXT: %load = load i32, ptr %gep2, align 4 +// CHECK-NEXT: store i32 %load, ptr %gep, align 4 +// CHECK-NEXT: %load5 = load i32, ptr %gep3, align 4 +// CHECK-NEXT: %conv = sitofp i32 %load5 to float +// CHECK-NEXT: store float %conv, ptr %gep1, align 4 +export void call8() { + T t = {1,2,3}; + S s = (S)t; +} diff --git a/clang/test/CodeGenHLSL/BasicFeatures/VectorElementwiseCast.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/VectorElementwiseCast.hlsl new file mode 100644 index 0000000000000..f579dfb377de5 --- /dev/null +++ b/clang/test/CodeGenHLSL/BasicFeatures/VectorElementwiseCast.hlsl @@ -0,0 +1,81 @@ +// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.3-library -x hlsl -emit-llvm -disable-llvm-passes -o - %s | FileCheck %s + +// vector flat cast from array +// CHECK-LABEL: define void {{.*}}call2 +// CHECK: [[A:%.*]] = alloca [2 x [1 x i32]], align 4 +// CHECK-NEXT: [[B:%.*]] = alloca <2 x i32>, align 8 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [2 x [1 x i32]], align 4 +// CHECK-NEXT: [[Tmp2:%.*]] = alloca <2 x i32>, align 8 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [2 x [1 x i32]], ptr [[Tmp]], i32 0, i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [2 x [1 x i32]], ptr [[Tmp]], i32 0, i32 1, i32 0 +// CHECK-NEXT: [[C:%.*]] = load <2 x i32>, ptr 
[[Tmp2]], align 8 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G1]], align 4 +// CHECK-NEXT: [[D:%.*]] = insertelement <2 x i32> [[C]], i32 [[L]], i64 0 +// CHECK-NEXT: [[L2:%.*]] = load i32, ptr [[G2]], align 4 +// CHECK-NEXT: [[E:%.*]] = insertelement <2 x i32> [[D]], i32 [[L2]], i64 1 +// CHECK-NEXT: store <2 x i32> [[E]], ptr [[B]], align 8 +export void call2() { + int A[2][1] = {{1},{2}}; + int2 B = (int2)A; +} + +struct S { + int X; + float Y; +}; + +// vector flat cast from struct +// CHECK-LABEL: define void {{.*}}call3 +// CHECK: [[s:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: [[A:%.*]] = alloca <2 x i32>, align 8 +// CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: [[Tmp2:%.*]] = alloca <2 x i32>, align 8 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[s]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[s]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[B:%.*]] = load <2 x i32>, ptr [[Tmp2]], align 8 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G1]], align 4 +// CHECK-NEXT: [[C:%.*]] = insertelement <2 x i32> [[B]], i32 [[L]], i64 0 +// CHECK-NEXT: [[L2:%.*]] = load float, ptr [[G2]], align 4 +// CHECK-NEXT: [[D:%.*]] = fptosi float [[L2]] to i32 +// CHECK-NEXT: [[E:%.*]] = insertelement <2 x i32> [[C]], i32 [[D]], i64 1 +// CHECK-NEXT: store <2 x i32> [[E]], ptr [[A]], align 8 +export void call3() { + S s = {1, 2.0}; + int2 A = (int2)s; +} + +// truncate array to scalar +// CHECK-LABEL: define void {{.*}}call4 +// CHECK: [[A:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: [[B:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca [2 x i32], align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[A]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G1]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[B]], align 4 +export void call4() { + int A[2] = {1,2}; + int B = (int)A; +} + +// truncate struct to scalar +// CHECK-LABEL: define void {{.*}}call5 +// CHECK: [[s:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.S, align 4 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[s]], ptr align 4 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[Tmp]], ptr align 4 [[s]], i32 8, i1 false) +// CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 0 +// CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[Tmp]], i32 0, i32 1 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G1]], align 4 +// CHECK-NEXT: store i32 [[L]], ptr [[A]], align 4 +export void call5() { + S s = {1, 2.0}; + int A = (int)s; +} diff --git a/clang/test/CodeGenHLSL/resource-bindings.hlsl b/clang/test/CodeGenHLSL/resource-bindings.hlsl index bfa7896bd9811..57e8cc29572b1 100644 --- a/clang/test/CodeGenHLSL/resource-bindings.hlsl +++ b/clang/test/CodeGenHLSL/resource-bindings.hlsl @@ -2,14 +2,17 @@ // CHECK: define internal void @_init_resource_U0S0() // 
CHECK: %U0S0_h = call target("dx.TypedBuffer", <4 x float>, 1, 0, 0) @llvm.dx.resource.handlefrombinding.tdx.TypedBuffer_v4f32_1_0_0t(i32 0, i32 0, i32 1, i32 0, i1 false) +// CHECK: store target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %U0S0_h, ptr @U0S0, align 4 RWBuffer<float4> U0S0 : register(u0); // CHECK: define internal void @_init_resource_U5S3() // CHECK: %U5S3_h = call target("dx.TypedBuffer", float, 1, 0, 0) @llvm.dx.resource.handlefrombinding.tdx.TypedBuffer_f32_1_0_0t(i32 3, i32 5, i32 1, i32 0, i1 false) +// CHECK: store target("dx.TypedBuffer", float, 1, 0, 0) %U5S3_h, ptr @U5S3, align 4 RWBuffer<float> U5S3 : register(u5, space3); // CHECK: define internal void @_init_resource_T2S2() // CHECK: %T2S2_h = call target("dx.RawBuffer", i32, 0, 0) @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i32_0_0t(i32 2, i32 2, i32 1, i32 0, i1 false) +// CHECK: store target("dx.RawBuffer", i32, 0, 0) %T2S2_h, ptr @T2S2, align 4 StructuredBuffer<int> T2S2 : register(t2, space2); struct S { float4 f; @@ -18,6 +21,7 @@ struct S { // CHECK: define internal void @_init_resource_T3S0() // CHECK: %T3S0_h = call target("dx.RawBuffer", %struct.S, 0, 0) @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_s_struct.Ss_0_0t(i32 0, i32 3, i32 1, i32 0, i1 false) +// CHECK: store target("dx.RawBuffer", %struct.S, 0, 0) %T3S0_h, ptr @T3S0, align 4 StructuredBuffer<S> T3S0 : register(t3); // CHECK: define void @main() diff --git a/clang/test/Driver/offload-Xarch.c b/clang/test/Driver/offload-Xarch.c index 17db891b06834..18c68f2acc884 100644 --- a/clang/test/Driver/offload-Xarch.c +++ b/clang/test/Driver/offload-Xarch.c @@ -14,7 +14,7 @@ // RUN: --target=x86_64-unknown-linux-gnu -Xopenmp-target=nvptx64-nvidia-cuda --offload-arch=sm_52,sm_60 -nogpuinc \ // RUN: -Xopenmp-target=amdgcn-amd-amdhsa --offload-arch=gfx90a,gfx1030 -ccc-print-bindings -### %s 2>&1 \ // RUN: | FileCheck -check-prefix=OPENMP %s -// + // OPENMP: # "x86_64-unknown-linux-gnu" - "clang", inputs: ["[[INPUT:.+]]"], output: "[[HOST_BC:.+]]" // OPENMP: # "amdgcn-amd-amdhsa" - "clang", inputs: ["[[INPUT]]", "[[HOST_BC]]"], output: "[[GFX1030_BC:.+]]" // OPENMP: # "amdgcn-amd-amdhsa" - "clang", inputs: ["[[INPUT]]", "[[HOST_BC]]"], output: "[[GFX90A_BC:.+]]" @@ -32,3 +32,12 @@ // CUDA: "-cc1" "-triple" "nvptx64-nvidia-cuda" {{.*}}"-target-cpu" "sm_52" {{.*}}"-O3" // CUDA: "-cc1" "-triple" "nvptx64-nvidia-cuda" {{.*}}"-target-cpu" "sm_60" {{.*}}"-O0" // CUDA: "-cc1" "-triple" "x86_64-unknown-linux-gnu" {{.*}}"-O3" + +// Make sure that `-Xarch_amdgcn` forwards libraries to the device linker. +// RUN: %clang -fopenmp=libomp --offload-arch=gfx90a -nogpulib -nogpuinc \ +// RUN: -Xarch_amdgcn -Wl,-lfoo -### %s 2>&1 \ +// RUN: | FileCheck -check-prefix=LIBS %s +// RUN: %clang -fopenmp=libomp --offload-arch=gfx90a -nogpulib -nogpuinc \ +// RUN: -Xoffload-linker-amdgcn-amd-amdhsa -lfoo -### %s 2>&1 \ +// RUN: | FileCheck -check-prefix=LIBS %s +// LIBS: "--device-linker=amdgcn-amd-amdhsa=-lfoo" diff --git a/clang/test/SemaCXX/unique_object_duplication.cpp b/clang/test/SemaCXX/unique_object_duplication.cpp index 8a19fb7b81187..4b41bfbfdc2f7 100644 --- a/clang/test/SemaCXX/unique_object_duplication.cpp +++ b/clang/test/SemaCXX/unique_object_duplication.cpp @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -fsyntax-only -verify=hidden -Wunique-object-duplication -fvisibility=hidden -Wno-unused-value %s // RUN: %clang_cc1 -fsyntax-only -verify -Wunique-object-duplication -Wno-unused-value %s -// The check is currently disabled on windows.
The test should fail because we're not getting the expected warnings. -// XFAIL: target={{.*}}-windows{{.*}}, {{.*}}-ps{{(4|5)(-.+)?}} +// The check is currently disabled on windows in MSVC-like environments. The test should fail because we're not getting the expected warnings. +// XFAIL: target={{.*}}-windows-msvc, {{.*}}-ps{{(4|5)(-.+)?}} #include "unique_object_duplication.h" diff --git a/clang/test/SemaHLSL/BuiltIns/vector-constructors-erros.hlsl b/clang/test/SemaHLSL/BuiltIns/vector-constructors-erros.hlsl index 7f6bdc7e67836..b004acdc7c502 100644 --- a/clang/test/SemaHLSL/BuiltIns/vector-constructors-erros.hlsl +++ b/clang/test/SemaHLSL/BuiltIns/vector-constructors-erros.hlsl @@ -17,6 +17,4 @@ void entry() { // These _should_ work in HLSL but aren't yet supported. S s; float2 GettingStrange = float2(s, s); // expected-error{{no viable conversion from 'S' to 'float'}} expected-error{{no viable conversion from 'S' to 'float'}} - S2 s2; - float2 EvenStranger = float2(s2); // expected-error{{cannot convert 'S2' to 'float2' (vector of 2 'float' values) without a conversion operator}} } diff --git a/clang/test/SemaHLSL/Language/ElementwiseCast-errors.hlsl b/clang/test/SemaHLSL/Language/ElementwiseCast-errors.hlsl new file mode 100644 index 0000000000000..c900c83a063a0 --- /dev/null +++ b/clang/test/SemaHLSL/Language/ElementwiseCast-errors.hlsl @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -verify + +export void cantCast() { + int A[3] = {1,2,3}; + int B[4] = {1,2,3,4}; + B = (int[4])A; + // expected-error@-1 {{C-style cast from 'int *' to 'int[4]' is not allowed}} +} + +struct S { +// expected-note@-1 {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'int2' (aka 'vector<int, 2>') to 'const S' for 1st argument}} +// expected-note@-2 {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'int2' (aka 'vector<int, 2>') to 'S' for 1st argument}} +// expected-note@-3 {{candidate constructor (the implicit default constructor) not viable: requires 0 arguments, but 1 was provided}} + int A : 8; + int B; +}; + +// Casting types which contain bitfields is not yet supported. 
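// For contrast, a bitfield-free struct of the same shape casts fine; a
// minimal sketch under that assumption (hypothetical type, not part of this
// test):
//   struct Plain { int A; int B; };
//   export void canCast() {
//     Plain p = {1, 2};
//     int2 C = (int2)p; // OK: no bitfields, two int elements on each side
//   }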
+export void cantCast2() { + S s = {1,2}; + int2 C = (int2)s; + // expected-error@-1 {{cannot convert 'S' to 'int2' (aka 'vector<int, 2>') without a conversion operator}} +} + +export void cantCast3() { + int2 C = {1,2}; + S s = (S)C; + // expected-error@-1 {{no matching conversion for C-style cast from 'int2' (aka 'vector<int, 2>') to 'S'}} +} diff --git a/clang/test/SemaHLSL/Language/ElementwiseCasts.hlsl b/clang/test/SemaHLSL/Language/ElementwiseCasts.hlsl new file mode 100644 index 0000000000000..563d3f02a1485 --- /dev/null +++ b/clang/test/SemaHLSL/Language/ElementwiseCasts.hlsl @@ -0,0 +1,23 @@ +// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.3-library -finclude-default-header -fnative-half-type %s -ast-dump | FileCheck %s + +// truncation +// CHECK-LABEL: call1 +// CHECK: CStyleCastExpr {{.*}} 'int[1]' <HLSLElementwiseCast> +// CHECK-NEXT: ImplicitCastExpr {{.*}} 'int[2]' <HLSLArrayRValue> part_of_explicit_cast +// CHECK-NEXT: DeclRefExpr {{.*}} 'int[2]' lvalue Var {{.*}} 'A' 'int[2]' +export void call1() { + int A[2] = {0,1}; + int B[1] = {4}; + B = (int[1])A; +} + +// flat cast of equal size +// CHECK-LABEL: call2 +// CHECK: CStyleCastExpr {{.*}} 'float[1]' <HLSLElementwiseCast> +// CHECK-NEXT: ImplicitCastExpr {{.*}} 'int[1]' <HLSLArrayRValue> part_of_explicit_cast +// CHECK-NEXT: DeclRefExpr {{.*}} 'int[1]' lvalue Var {{.*}} 'A' 'int[1]' +export void call2() { + int A[1] = {0}; + float B[1] = {1.0}; + B = (float[1])A; +} diff --git a/clang/test/Tooling/clang-linker-wrapper-spirv-elf.cpp b/clang/test/Tooling/clang-linker-wrapper-spirv-elf.cpp index 50457f47868a0..4f8658064e857 100644 --- a/clang/test/Tooling/clang-linker-wrapper-spirv-elf.cpp +++ b/clang/test/Tooling/clang-linker-wrapper-spirv-elf.cpp @@ -1,6 +1,4 @@ // Verify the ELF packaging of OpenMP SPIR-V device images. -// FIXME: Re-enable when spirv-tools feature detection fixed -// UNSUPPORTED: system-linux // REQUIRES: system-linux // REQUIRES: spirv-tools // RUN: mkdir -p %t_tmp diff --git a/clang/test/lit.site.cfg.py.in b/clang/test/lit.site.cfg.py.in index ce10e9128a1df..6890da5327cb9 100644 --- a/clang/test/lit.site.cfg.py.in +++ b/clang/test/lit.site.cfg.py.in @@ -43,7 +43,7 @@ config.llvm_external_lit = path(r"@LLVM_EXTERNAL_LIT@") config.standalone_build = @CLANG_BUILT_STANDALONE@ config.ppc_linux_default_ieeelongdouble = @PPC_LINUX_DEFAULT_IEEELONGDOUBLE@ config.have_llvm_driver = @LLVM_TOOL_LLVM_DRIVER_BUILD@ -config.spirv_tools_tests = "@LLVM_INCLUDE_SPIRV_TOOLS_TESTS@" +config.spirv_tools_tests = @LLVM_INCLUDE_SPIRV_TOOLS_TESTS@ config.substitutions.append(("%llvm-version-major", "@LLVM_VERSION_MAJOR@")) import lit.llvm diff --git a/clang/tools/clang-format/clang-format.el b/clang/tools/clang-format/clang-format.el index 54ab2e1fd4c61..b356e1bfa1436 100644 --- a/clang/tools/clang-format/clang-format.el +++ b/clang/tools/clang-format/clang-format.el @@ -32,6 +32,7 @@ (require 'cl-lib) (require 'xml) +(require 'vc-git) (defgroup clang-format nil "Format code using clang-format."
diff --git a/clang/utils/TableGen/MveEmitter.cpp b/clang/utils/TableGen/MveEmitter.cpp index e77679876a3af..014b20667e03e 100644 --- a/clang/utils/TableGen/MveEmitter.cpp +++ b/clang/utils/TableGen/MveEmitter.cpp @@ -209,9 +209,7 @@ class PointerType : public Type { Name = "const " + Name; return Name + " *"; } - std::string llvmName() const override { - return "llvm::PointerType::getUnqual(" + Pointee->llvmName() + ")"; - } + std::string llvmName() const override { return "Builder.getPtrTy()"; } const Type *getPointeeType() const { return Pointee; } static bool classof(const Type *T) { diff --git a/flang/include/flang/Evaluate/target.h b/flang/include/flang/Evaluate/target.h index f4595dfe4e433..ead4481c32e12 100644 --- a/flang/include/flang/Evaluate/target.h +++ b/flang/include/flang/Evaluate/target.h @@ -143,9 +143,10 @@ class TargetCharacteristics { std::string compilerOptionsString_; std::string compilerVersionString_; IeeeFeatures ieeeFeatures_{IeeeFeature::Denormal, IeeeFeature::Divide, - IeeeFeature::Flags, IeeeFeature::Inf, IeeeFeature::Io, IeeeFeature::NaN, - IeeeFeature::Rounding, IeeeFeature::Sqrt, IeeeFeature::Standard, - IeeeFeature::Subnormal, IeeeFeature::UnderflowControl}; + IeeeFeature::Flags, IeeeFeature::Halting, IeeeFeature::Inf, + IeeeFeature::Io, IeeeFeature::NaN, IeeeFeature::Rounding, + IeeeFeature::Sqrt, IeeeFeature::Standard, IeeeFeature::Subnormal, + IeeeFeature::UnderflowControl}; }; } // namespace Fortran::evaluate diff --git a/flang/include/flang/Tools/TargetSetup.h b/flang/include/flang/Tools/TargetSetup.h index 5d23df6823a94..d167f44fe2fd7 100644 --- a/flang/include/flang/Tools/TargetSetup.h +++ b/flang/include/flang/Tools/TargetSetup.h @@ -25,8 +25,6 @@ namespace Fortran::tools { const llvm::Triple &targetTriple{targetMachine.getTargetTriple()}; - targetCharacteristics.set_ieeeFeature(evaluate::IeeeFeature::Halting, true); - if (targetTriple.getArch() == llvm::Triple::ArchType::x86_64) { targetCharacteristics.set_hasSubnormalFlushingControl(/*kind=*/3); targetCharacteristics.set_hasSubnormalFlushingControl(/*kind=*/4); @@ -37,6 +35,8 @@ namespace Fortran::tools { targetCharacteristics.set_haltingSupportIsUnknownAtCompileTime(); targetCharacteristics.set_ieeeFeature( evaluate::IeeeFeature::Halting, false); + targetCharacteristics.set_ieeeFeature( + evaluate::IeeeFeature::Standard, false); targetCharacteristics.set_hasSubnormalFlushingControl(/*kind=*/3); targetCharacteristics.set_hasSubnormalFlushingControl(/*kind=*/4); targetCharacteristics.set_hasSubnormalFlushingControl(/*kind=*/8); diff --git a/flang/test/Evaluate/fold-ieee.f90 b/flang/test/Evaluate/fold-ieee.f90 index 99f8526fd23db..a393fcc6b4297 100644 --- a/flang/test/Evaluate/fold-ieee.f90 +++ b/flang/test/Evaluate/fold-ieee.f90 @@ -54,9 +54,6 @@ module m logical, parameter :: test_sq_all = ieee_support_sqrt() logical, parameter :: test_sq_4 = ieee_support_sqrt(1.) logical, parameter :: test_sq_8 = ieee_support_sqrt(1.d0) - logical, parameter :: test_std_all = ieee_support_standard() - logical, parameter :: test_std_4 = ieee_support_standard(1.) - logical, parameter :: test_std_8 = ieee_support_standard(1.d0) logical, parameter :: test_sn_all = ieee_support_subnormal() logical, parameter :: test_sn_4 = ieee_support_subnormal(1.) logical, parameter :: test_sn_8 = ieee_support_subnormal(1.d0) @@ -64,5 +61,8 @@ module m logical, parameter :: test_uc_all = .not. ieee_support_underflow_control() logical, parameter :: test_uc_4 = ieee_support_underflow_control(1.) 
logical, parameter :: test_uc_8 = ieee_support_underflow_control(1.d0) + logical, parameter :: test_std_all = ieee_support_standard() + logical, parameter :: test_std_4 = ieee_support_standard(1.) + logical, parameter :: test_std_8 = ieee_support_standard(1.d0) #endif end diff --git a/flang/test/Evaluate/folding18.f90 b/flang/test/Evaluate/folding18.f90 index 52aeb6a3532d0..a27eeabefae55 100644 --- a/flang/test/Evaluate/folding18.f90 +++ b/flang/test/Evaluate/folding18.f90 @@ -51,6 +51,7 @@ module m .and. ieee_support_sqrt(1.0_8) & .and. ieee_support_sqrt(1.0_10) & .and. ieee_support_sqrt(1.0_16) +#if __x86_64__ logical, parameter :: test_ieee_support_standard = ieee_support_standard() & .and. ieee_support_standard(1.0_2) & .and. ieee_support_standard(1.0_3) & @@ -58,6 +59,7 @@ module m .and. ieee_support_standard(1.0_8) & .and. ieee_support_standard(1.0_10) & .and. ieee_support_standard(1.0_16) +#endif logical, parameter :: test_ieee_support_subnormal = ieee_support_subnormal() & .and. ieee_support_subnormal(1.0_2) & .and. ieee_support_subnormal(1.0_3) & diff --git a/flang/test/Lower/PowerPC/ppc-vec-load-elem-order.f90 b/flang/test/Lower/PowerPC/ppc-vec-load-elem-order.f90 index 214fe423628d6..355fd6c3a742a 100644 --- a/flang/test/Lower/PowerPC/ppc-vec-load-elem-order.f90 +++ b/flang/test/Lower/PowerPC/ppc-vec-load-elem-order.f90 @@ -719,8 +719,8 @@ subroutine vec_xlds_testi64a(arg1, arg2, res) ! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]] ! LLVMIR: %[[ld:.*]] = load i64, ptr %[[addr]], align 8 -! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> undef, i64 %[[ld]], i32 0 -! LLVMIR: %[[shflv:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> poison, i64 %[[ld]], i32 0 +! LLVMIR: %[[shflv:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[shflv]], ptr %2, align 16 end subroutine vec_xlds_testi64a @@ -734,8 +734,8 @@ subroutine vec_xlds_testf64a(arg1, arg2, res) ! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]] ! LLVMIR: %[[ld:.*]] = load i64, ptr %[[addr]], align 8 -! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> undef, i64 %[[ld]], i32 0 -! LLVMIR: %[[shflv:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> poison, i64 %[[ld]], i32 0 +! LLVMIR: %[[shflv:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: %[[bc:.*]] = bitcast <2 x i64> %[[shflv]] to <2 x double> ! LLVMIR: store <2 x double> %[[bc]], ptr %2, align 16 end subroutine vec_xlds_testf64a diff --git a/flang/test/Lower/PowerPC/ppc-vec-load.f90 b/flang/test/Lower/PowerPC/ppc-vec-load.f90 index a81ed055ce08c..f2c918ecf5bfe 100644 --- a/flang/test/Lower/PowerPC/ppc-vec-load.f90 +++ b/flang/test/Lower/PowerPC/ppc-vec-load.f90 @@ -683,8 +683,8 @@ subroutine vec_xlds_testi64a(arg1, arg2, res) ! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]] ! LLVMIR: %[[ld:.*]] = load i64, ptr %[[addr]], align 8 -! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> undef, i64 %[[ld]], i32 0 -! LLVMIR: %[[shfl:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> poison, i64 %[[ld]], i32 0 +! 
LLVMIR: %[[shfl:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[shfl]], ptr %2, align 16 end subroutine vec_xlds_testi64a @@ -698,8 +698,8 @@ subroutine vec_xlds_testf64a(arg1, arg2, res) ! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]] ! LLVMIR: %[[ld:.*]] = load i64, ptr %[[addr]], align 8 -! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> undef, i64 %[[ld]], i32 0 -! LLVMIR: %[[shfl:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> poison, i64 %[[ld]], i32 0 +! LLVMIR: %[[shfl:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: %[[bc:.*]] = bitcast <2 x i64> %[[shfl]] to <2 x double> ! LLVMIR: store <2 x double> %[[bc]], ptr %2, align 16 end subroutine vec_xlds_testf64a diff --git a/flang/test/Lower/PowerPC/ppc-vec-splat-elem-order.f90 b/flang/test/Lower/PowerPC/ppc-vec-splat-elem-order.f90 index d95e9828531cd..50604e1f720f3 100644 --- a/flang/test/Lower/PowerPC/ppc-vec-splat-elem-order.f90 +++ b/flang/test/Lower/PowerPC/ppc-vec-splat-elem-order.f90 @@ -8,8 +8,8 @@ subroutine vec_splat_testf32i64(x) ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16 ! LLVMIR: %[[ele:.*]] = extractelement <4 x float> %[[x]], i64 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x float> poison, float %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf32i64 @@ -20,7 +20,7 @@ subroutine vec_splat_testu8i16(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu8i16 diff --git a/flang/test/Lower/PowerPC/ppc-vec-splat.f90 b/flang/test/Lower/PowerPC/ppc-vec-splat.f90 index 17558926afd5f..f3c1f19d5877d 100644 --- a/flang/test/Lower/PowerPC/ppc-vec-splat.f90 +++ b/flang/test/Lower/PowerPC/ppc-vec-splat.f90 @@ -14,8 +14,8 @@ subroutine vec_splat_testi8i8(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i8 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi8i8 @@ -27,8 +27,8 @@ subroutine vec_splat_testi8i16(x) ! 
LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi8i16 @@ -40,8 +40,8 @@ subroutine vec_splat_testi8i32(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i32 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi8i32 @@ -53,8 +53,8 @@ subroutine vec_splat_testi8i64(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i64 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi8i64 @@ -66,8 +66,8 @@ subroutine vec_splat_testi16i8(x) ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i8 7 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi16i8 @@ -79,8 +79,8 @@ subroutine vec_splat_testi16i16(x) ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i16 7 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi16i16 @@ -92,8 +92,8 @@ subroutine vec_splat_testi16i32(x) ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16 ! 
LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i32 7 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi16i32 @@ -105,8 +105,8 @@ subroutine vec_splat_testi16i64(x) ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i64 7 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi16i64 @@ -118,8 +118,8 @@ subroutine vec_splat_testi32i8(x) ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i8 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi32i8 @@ -131,8 +131,8 @@ subroutine vec_splat_testi32i16(x) ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i16 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi32i16 @@ -144,8 +144,8 @@ subroutine vec_splat_testi32i32(x) ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i32 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi32i32 @@ -157,8 +157,8 @@ subroutine vec_splat_testi32i64(x) ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16 ! 
LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i64 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi32i64 @@ -170,8 +170,8 @@ subroutine vec_splat_testi64i8(x) ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i8 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi64i8 @@ -183,8 +183,8 @@ subroutine vec_splat_testi64i16(x) ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i16 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi64i16 @@ -196,8 +196,8 @@ subroutine vec_splat_testi64i32(x) ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i32 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi64i32 @@ -209,8 +209,8 @@ subroutine vec_splat_testi64i64(x) ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i64 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testi64i64 @@ -222,8 +222,8 @@ subroutine vec_splat_testf32i8(x) ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16 ! 
LLVMIR-LE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i8 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x float> poison, float %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf32i8 @@ -235,8 +235,8 @@ subroutine vec_splat_testf32i16(x) ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i16 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x float> poison, float %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf32i16 @@ -248,8 +248,8 @@ subroutine vec_splat_testf32i32(x) ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i32 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x float> poison, float %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf32i32 @@ -261,8 +261,8 @@ subroutine vec_splat_testf32i64(x) ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i64 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x float> poison, float %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf32i64 @@ -274,8 +274,8 @@ subroutine vec_splat_testf64i8(x) ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i8 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x double> poison, double %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf64i8 @@ -287,8 +287,8 @@ subroutine vec_splat_testf64i16(x) ! 
LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i16 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x double> poison, double %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf64i16 @@ -300,8 +300,8 @@ subroutine vec_splat_testf64i32(x) ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i32 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x double> poison, double %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf64i32 @@ -313,8 +313,8 @@ subroutine vec_splat_testf64i64(x) ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i64 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x double> poison, double %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testf64i64 @@ -326,8 +326,8 @@ subroutine vec_splat_testu8i8(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i8 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu8i8 @@ -339,8 +339,8 @@ subroutine vec_splat_testu8i16(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! 
LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu8i16 @@ -352,8 +352,8 @@ subroutine vec_splat_testu8i32(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i32 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu8i32 @@ -365,8 +365,8 @@ subroutine vec_splat_testu8i64(x) ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i64 15 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu8i64 @@ -378,8 +378,8 @@ subroutine vec_splat_testu16i8(x) ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i8 7 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu16i8 @@ -391,8 +391,8 @@ subroutine vec_splat_testu16i16(x) ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i16 7 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu16i16 @@ -404,8 +404,8 @@ subroutine vec_splat_testu16i32(x) ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i32 7 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! 
LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu16i32 @@ -417,8 +417,8 @@ subroutine vec_splat_testu16i64(x) ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i64 7 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu16i64 @@ -430,8 +430,8 @@ subroutine vec_splat_testu32i8(x) ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i8 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu32i8 @@ -443,8 +443,8 @@ subroutine vec_splat_testu32i16(x) ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i16 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu32i16 @@ -456,8 +456,8 @@ subroutine vec_splat_testu32i32(x) ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i32 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu32i32 @@ -469,8 +469,8 @@ subroutine vec_splat_testu32i64(x) ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i64 3 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! 
LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu32i64 @@ -482,8 +482,8 @@ subroutine vec_splat_testu64i8(x) ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i8 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i8 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu64i8 @@ -495,8 +495,8 @@ subroutine vec_splat_testu64i16(x) ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i16 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i16 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu64i16 @@ -508,8 +508,8 @@ subroutine vec_splat_testu64i32(x) ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i32 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i32 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu64i32 @@ -521,8 +521,8 @@ subroutine vec_splat_testu64i64(x) ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16 ! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i64 0 ! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i64 1 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[ele]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splat_testu64i64 @@ -537,8 +537,8 @@ subroutine vec_splats_testi8(x) y = vec_splats(x) ! LLVMIR: %[[x:.*]] = load i8, ptr %{{[0-9]}}, align 1 -! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[x]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> poison, i8 %[[x]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> poison, <16 x i32> zeroinitializer ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splats_testi8 @@ -549,8 +549,8 @@ subroutine vec_splats_testi16(x) y = vec_splats(x) ! 
LLVMIR: %[[x:.*]] = load i16, ptr %{{[0-9]}}, align 2 -! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[x]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> poison, i16 %[[x]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> poison, <8 x i32> zeroinitializer ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splats_testi16 @@ -561,8 +561,8 @@ subroutine vec_splats_testi32(x) y = vec_splats(x) ! LLVMIR: %[[x:.*]] = load i32, ptr %{{[0-9]}}, align 4 -! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[x]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> poison, i32 %[[x]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splats_testi32 @@ -573,8 +573,8 @@ subroutine vec_splats_testi64(x) y = vec_splats(x) ! LLVMIR: %[[x:.*]] = load i64, ptr %{{[0-9]}}, align 8 -! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[x]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> poison, i64 %[[x]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splats_testi64 @@ -585,8 +585,8 @@ subroutine vec_splats_testf32(x) y = vec_splats(x) ! LLVMIR: %[[x:.*]] = load float, ptr %{{[0-9]}}, align 4 -! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[x]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <4 x float> poison, float %[[x]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> poison, <4 x i32> zeroinitializer ! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splats_testf32 @@ -597,8 +597,8 @@ subroutine vec_splats_testf64(x) y = vec_splats(x) ! LLVMIR: %[[x:.*]] = load double, ptr %{{[0-9]}}, align 8 -! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[x]], i32 0 -! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer +! LLVMIR: %[[ins:.*]] = insertelement <2 x double> poison, double %[[x]], i32 0 +! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> poison, <2 x i32> zeroinitializer ! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16 end subroutine vec_splats_testf64 diff --git a/libc/CMakeLists.txt b/libc/CMakeLists.txt index c061e2a05ebd8..1c4c0cd5aa22b 100644 --- a/libc/CMakeLists.txt +++ b/libc/CMakeLists.txt @@ -51,7 +51,8 @@ set(LIBC_KERNEL_HEADERS "/usr/include" CACHE STRING "Path to Linux kernel header # Defining a global namespace to enclose all libc functions. 
set(default_namespace "__llvm_libc") if(LLVM_VERSION_MAJOR) - set(default_namespace "__llvm_libc_${LLVM_VERSION_MAJOR}_${LLVM_VERSION_MINOR}_${LLVM_VERSION_PATCH}_${LLVM_VERSION_SUFFIX}") + string(REPLACE "-" "" NS_LLVM_VERSION_SUFFIX ${LLVM_VERSION_SUFFIX}) + set(default_namespace "__llvm_libc_${LLVM_VERSION_MAJOR}_${LLVM_VERSION_MINOR}_${LLVM_VERSION_PATCH}_${NS_LLVM_VERSION_SUFFIX}") endif() set(LIBC_NAMESPACE ${default_namespace} CACHE STRING "The namespace to use to enclose internal implementations. Must start with '__llvm_libc'." diff --git a/libc/config/baremetal/arm/entrypoints.txt b/libc/config/baremetal/arm/entrypoints.txt index 694cd7b1993ca..351f727389e3a 100644 --- a/libc/config/baremetal/arm/entrypoints.txt +++ b/libc/config/baremetal/arm/entrypoints.txt @@ -469,6 +469,18 @@ if(LIBC_COMPILER_HAS_FIXED_POINT) libc.src.stdfix.ukbits libc.src.stdfix.lkbits libc.src.stdfix.ulkbits + libc.src.stdfix.countlshr + libc.src.stdfix.countlsr + libc.src.stdfix.countlslr + libc.src.stdfix.countlshk + libc.src.stdfix.countlsk + libc.src.stdfix.countlslk + libc.src.stdfix.countlsuhr + libc.src.stdfix.countlsur + libc.src.stdfix.countlsulr + libc.src.stdfix.countlsuhk + libc.src.stdfix.countlsuk + libc.src.stdfix.countlsulk ) endif() diff --git a/libc/config/baremetal/riscv/entrypoints.txt b/libc/config/baremetal/riscv/entrypoints.txt index 667ab40dca999..39c70a22a21e0 100644 --- a/libc/config/baremetal/riscv/entrypoints.txt +++ b/libc/config/baremetal/riscv/entrypoints.txt @@ -464,6 +464,18 @@ if(LIBC_COMPILER_HAS_FIXED_POINT) libc.src.stdfix.ukbits libc.src.stdfix.lkbits libc.src.stdfix.ulkbits + libc.src.stdfix.countlshr + libc.src.stdfix.countlsr + libc.src.stdfix.countlslr + libc.src.stdfix.countlshk + libc.src.stdfix.countlsk + libc.src.stdfix.countlslk + libc.src.stdfix.countlsuhr + libc.src.stdfix.countlsur + libc.src.stdfix.countlsulr + libc.src.stdfix.countlsuhk + libc.src.stdfix.countlsuk + libc.src.stdfix.countlsulk ) endif() diff --git a/libc/config/linux/riscv/entrypoints.txt b/libc/config/linux/riscv/entrypoints.txt index 6e67ea559d57b..a9ba0c257755b 100644 --- a/libc/config/linux/riscv/entrypoints.txt +++ b/libc/config/linux/riscv/entrypoints.txt @@ -749,6 +749,18 @@ if(LIBC_COMPILER_HAS_FIXED_POINT) # TODO: https://github.com/llvm/llvm-project/issues/115778 libc.src.stdfix.lkbits libc.src.stdfix.ulkbits + libc.src.stdfix.countlshr + libc.src.stdfix.countlsr + libc.src.stdfix.countlslr + libc.src.stdfix.countlshk + libc.src.stdfix.countlsk + libc.src.stdfix.countlslk + libc.src.stdfix.countlsuhr + libc.src.stdfix.countlsur + libc.src.stdfix.countlsulr + libc.src.stdfix.countlsuhk + libc.src.stdfix.countlsuk + libc.src.stdfix.countlsulk ) endif() diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt index 81dceb74a1774..2a4c17a56f377 100644 --- a/libc/config/linux/x86_64/entrypoints.txt +++ b/libc/config/linux/x86_64/entrypoints.txt @@ -874,6 +874,18 @@ if(LIBC_COMPILER_HAS_FIXED_POINT) libc.src.stdfix.ukbits libc.src.stdfix.lkbits libc.src.stdfix.ulkbits + libc.src.stdfix.countlshr + libc.src.stdfix.countlsr + libc.src.stdfix.countlslr + libc.src.stdfix.countlshk + libc.src.stdfix.countlsk + libc.src.stdfix.countlslk + libc.src.stdfix.countlsuhr + libc.src.stdfix.countlsur + libc.src.stdfix.countlsulr + libc.src.stdfix.countlsuhk + libc.src.stdfix.countlsuk + libc.src.stdfix.countlsulk ) endif() diff --git a/libc/docs/headers/math/stdfix.rst b/libc/docs/headers/math/stdfix.rst index 58052f000995c..4507f2b608bf1 100644 --- 
a/libc/docs/headers/math/stdfix.rst +++ b/libc/docs/headers/math/stdfix.rst @@ -73,7 +73,7 @@ The following functions are included in the ISO/IEC TR 18037:2008 standard. +---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+ | \*bits | | | | | | | | | | | | | +---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+ -| countls | | | | | | | | | | | | | +| countls | |check| | |check| | |check| | |check| | |check| | |check| | |check| | |check| | |check| | |check| | |check| | |check| | +---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+ | divi | | | | | | | | | | | | | +---------------+----------------+-------------+---------------+------------+----------------+-------------+----------------+-------------+---------------+------------+----------------+-------------+ diff --git a/libc/hdr/types/CMakeLists.txt b/libc/hdr/types/CMakeLists.txt index dfc90009ef54a..84a2647ba664d 100644 --- a/libc/hdr/types/CMakeLists.txt +++ b/libc/hdr/types/CMakeLists.txt @@ -250,15 +250,6 @@ add_proxy_header_library( libc.include.locale ) -add_proxy_header_library( - sighandler_t - HDRS - sighandler_t.h - FULL_BUILD_DEPENDS - libc.include.llvm-libc-types.__sighandler_t - libc.include.signal -) - add_proxy_header_library( stack_t HDRS diff --git a/libc/hdr/types/sighandler_t.h b/libc/hdr/types/sighandler_t.h deleted file mode 100644 index bc40dd8b4c8f4..0000000000000 --- a/libc/hdr/types/sighandler_t.h +++ /dev/null @@ -1,24 +0,0 @@ -//===-- Definition of macros from __sighandler_t.h ------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_HDR_TYPES_SIGHANDLER_T_H
-#define LLVM_LIBC_HDR_TYPES_SIGHANDLER_T_H
-
-#ifdef LIBC_FULL_BUILD
-
-#include "include/llvm-libc-types/__sighandler_t.h"
-
-using sighandler_t = __sighandler_t;
-
-#else // overlay mode
-
-#include <signal.h>
-
-#endif // LLVM_LIBC_FULL_BUILD
-
-#endif // LLVM_LIBC_HDR_TYPES_SIGHANDLER_T_H
diff --git a/libc/include/CMakeLists.txt b/libc/include/CMakeLists.txt
index 63745542662d5..867bd1e5ee20f 100644
--- a/libc/include/CMakeLists.txt
+++ b/libc/include/CMakeLists.txt
@@ -284,13 +284,14 @@ add_header_macro(
   signal.h
   DEPENDS
     .llvm-libc-macros.signal_macros
+    .llvm-libc-types.pid_t
     .llvm-libc-types.sig_atomic_t
+    .llvm-libc-types.sighandler_t
+    .llvm-libc-types.siginfo_t
     .llvm-libc-types.sigset_t
+    .llvm-libc-types.stack_t
     .llvm-libc-types.struct_sigaction
     .llvm-libc-types.union_sigval
-    .llvm-libc-types.siginfo_t
-    .llvm-libc-types.stack_t
-    .llvm-libc-types.pid_t
 )
 
 add_header_macro(
diff --git a/libc/include/llvm-libc-macros/gpu/signal-macros.h b/libc/include/llvm-libc-macros/gpu/signal-macros.h
index 2d8159240de8b..f0d49ea34fe0e 100644
--- a/libc/include/llvm-libc-macros/gpu/signal-macros.h
+++ b/libc/include/llvm-libc-macros/gpu/signal-macros.h
@@ -16,9 +16,9 @@
 #define SIGSEGV 11
 #define SIGTERM 15
 
-#define SIG_DFL ((__sighandler_t)(0))
-#define SIG_IGN ((__sighandler_t)(1))
-#define SIG_ERR ((__sighandler_t)(-1))
+#define SIG_DFL ((void (*)(int))(0))
+#define SIG_IGN ((void (*)(int))(1))
+#define SIG_ERR ((void (*)(int))(-1))
 
 // Max signal number
 #define NSIG 64
diff --git a/libc/include/llvm-libc-macros/linux/signal-macros.h b/libc/include/llvm-libc-macros/linux/signal-macros.h
index 0b7317ebc9b80..d220241a38206 100644
--- a/libc/include/llvm-libc-macros/linux/signal-macros.h
+++ b/libc/include/llvm-libc-macros/linux/signal-macros.h
@@ -86,9 +86,9 @@
 #error "Signal stack sizes not defined for your platform."
#endif -#define SIG_DFL ((__sighandler_t)0) -#define SIG_IGN ((__sighandler_t)1) -#define SIG_ERR ((__sighandler_t)-1) +#define SIG_DFL ((void (*)(int))0) +#define SIG_IGN ((void (*)(int))1) +#define SIG_ERR ((void (*)(int))(-1)) // SIGCHLD si_codes #define CLD_EXITED 1 // child has exited diff --git a/libc/include/llvm-libc-types/CMakeLists.txt b/libc/include/llvm-libc-types/CMakeLists.txt index 9e8d2f818d4ed..7ed69ab1af6d9 100644 --- a/libc/include/llvm-libc-types/CMakeLists.txt +++ b/libc/include/llvm-libc-types/CMakeLists.txt @@ -15,7 +15,6 @@ add_header(__pthread_start_t HDR __pthread_start_t.h) add_header(__pthread_tss_dtor_t HDR __pthread_tss_dtor_t.h) add_header(__qsortcompare_t HDR __qsortcompare_t.h) add_header(__qsortrcompare_t HDR __qsortrcompare_t.h) -add_header(__sighandler_t HDR __sighandler_t.h) add_header(__thread_type HDR __thread_type.h) add_header(blkcnt_t HDR blkcnt_t.h) add_header(blksize_t HDR blksize_t.h) @@ -66,6 +65,7 @@ if(LIBC_TYPES_TIME_T_IS_32_BIT) else() add_header(time_t HDR time_t_64.h DEST_HDR time_t.h) endif() +add_header(sighandler_t HDR sighandler_t.h) add_header(stack_t HDR stack_t.h DEPENDS .size_t) add_header(suseconds_t HDR suseconds_t.h) add_header(struct_dirent HDR struct_dirent.h DEPENDS .ino_t .off_t) diff --git a/libc/include/llvm-libc-types/sighandler_t.h b/libc/include/llvm-libc-types/sighandler_t.h new file mode 100644 index 0000000000000..f39ab04685200 --- /dev/null +++ b/libc/include/llvm-libc-types/sighandler_t.h @@ -0,0 +1,17 @@ +//===-- Definition of sighandler_t ----------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_TYPES_SIGHANDLER_T_H +#define LLVM_LIBC_TYPES_SIGHANDLER_T_H + +#ifdef __linux__ +// For compatibility with glibc. +typedef void (*sighandler_t)(int); +#endif + +#endif // LLVM_LIBC_TYPES_SIGHANDLER_T_H diff --git a/libc/include/llvm-libc-types/struct_sigaction.h b/libc/include/llvm-libc-types/struct_sigaction.h index b4d0c965a4c63..907418b5e0f9a 100644 --- a/libc/include/llvm-libc-types/struct_sigaction.h +++ b/libc/include/llvm-libc-types/struct_sigaction.h @@ -25,6 +25,4 @@ struct sigaction { #endif }; -typedef void (*__sighandler_t)(int); - #endif // LLVM_LIBC_TYPES_STRUCT_SIGACTION_H diff --git a/libc/include/signal.yaml b/libc/include/signal.yaml index 576e77576ac74..6fdd8c97ccbe2 100644 --- a/libc/include/signal.yaml +++ b/libc/include/signal.yaml @@ -3,12 +3,13 @@ header_template: signal.h.def macros: [] types: - type_name: pid_t - - type_name: stack_t + - type_name: sig_atomic_t + - type_name: sighandler_t - type_name: siginfo_t - - type_name: struct_sigaction - type_name: sigset_t + - type_name: stack_t + - type_name: struct_sigaction - type_name: union_sigval - - type_name: sig_atomic_t enums: [] objects: [] functions: @@ -69,10 +70,15 @@ functions: - name: signal standards: - stdc - return_type: __sighandler_t + # May the Geneva Convention have mercy on my soul... Why this insanity? + # Well: signal returns a function pointer to a function with no return + # value and which accepts an int. The parameter list appears on the far + # right of the declaration. i.e. 
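+    # (Illustrative note: the next comment line shows the raw declarator; with
+    #  a hypothetical typedef it would collapse to
+    #      typedef void (*handler)(int);
+    #      handler signal(int, handler);
+    #  but the yaml splits the raw form across return_type and arguments.)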
+    #   void (*signal(int, void (*)(int)))(int);
+    return_type: void (*
     arguments:
       - type: int
-      - type: __sighandler_t
+      - type: void (*)(int)))(int
   - name: sigprocmask
     standards:
       - POSIX
diff --git a/libc/include/stdfix.yaml b/libc/include/stdfix.yaml
index 9663ac0c7df4d..0abf2f3a9b3b6 100644
--- a/libc/include/stdfix.yaml
+++ b/libc/include/stdfix.yaml
@@ -306,3 +306,87 @@ functions:
     arguments:
      - type: unsigned int
    guard: LIBC_COMPILER_HAS_FIXED_POINT
+  - name: countlshr
+    standards:
+      - stdc_ext
+    return_type: int
+    arguments:
+      - type: short fract
+    guard: LIBC_COMPILER_HAS_FIXED_POINT
+  - name: countlsr
+    standards:
+      - stdc_ext
+    return_type: int
+    arguments:
+      - type: fract
+    guard: LIBC_COMPILER_HAS_FIXED_POINT
+  - name: countlslr
+    standards:
+      - stdc_ext
+    return_type: int
+    arguments:
+      - type: long fract
+    guard: LIBC_COMPILER_HAS_FIXED_POINT
+  - name: countlshk
+    standards:
+      - stdc_ext
+    return_type: int
+    arguments:
+      - type: short accum
+    guard: LIBC_COMPILER_HAS_FIXED_POINT
+  - name: countlsk
+    standards:
+      - stdc_ext
+    return_type: int
+    arguments:
+      - type: accum
+    guard: LIBC_COMPILER_HAS_FIXED_POINT
+  - name: countlslk
+    standards:
+      - stdc_ext
+    return_type: int
+    arguments:
+      - type: long accum
+    guard: LIBC_COMPILER_HAS_FIXED_POINT
+  - name: countlsuhr
+    standards:
+      - stdc_ext
+    return_type: int
+    arguments:
+      - type: unsigned short fract
+    guard: LIBC_COMPILER_HAS_FIXED_POINT
+  - name: countlsur
+    standards:
+      - stdc_ext
+    return_type: int
+    arguments:
+      - type: unsigned fract
+    guard: LIBC_COMPILER_HAS_FIXED_POINT
+  - name: countlsulr
+    standards:
+      - stdc_ext
+    return_type: int
+    arguments:
+      - type: unsigned long fract
+    guard: LIBC_COMPILER_HAS_FIXED_POINT
+  - name: countlsuhk
+    standards:
+      - stdc_ext
+    return_type: int
+    arguments:
+      - type: unsigned short accum
+    guard: LIBC_COMPILER_HAS_FIXED_POINT
+  - name: countlsuk
+    standards:
+      - stdc_ext
+    return_type: int
+    arguments:
+      - type: unsigned accum
+    guard: LIBC_COMPILER_HAS_FIXED_POINT
+  - name: countlsulk
+    standards:
+      - stdc_ext
+    return_type: int
+    arguments:
+      - type: unsigned long accum
+    guard: LIBC_COMPILER_HAS_FIXED_POINT
diff --git a/libc/src/__support/fixed_point/CMakeLists.txt b/libc/src/__support/fixed_point/CMakeLists.txt
index 3b744081765e4..b415e2c00c488 100644
--- a/libc/src/__support/fixed_point/CMakeLists.txt
+++ b/libc/src/__support/fixed_point/CMakeLists.txt
@@ -19,6 +19,7 @@ add_header_library(
     libc.src.__support.macros.optimization
     libc.src.__support.CPP.type_traits
     libc.src.__support.CPP.bit
+    libc.src.__support.CPP.limits
     libc.src.__support.math_extras
 )
 
diff --git a/libc/src/__support/fixed_point/fx_bits.h b/libc/src/__support/fixed_point/fx_bits.h
index 225ea417760a0..21985e6442534 100644
--- a/libc/src/__support/fixed_point/fx_bits.h
+++ b/libc/src/__support/fixed_point/fx_bits.h
@@ -11,9 +11,10 @@
 #include "include/llvm-libc-macros/stdfix-macros.h"
 
 #include "src/__support/CPP/bit.h"
+#include "src/__support/CPP/limits.h" // numeric_limits
 #include "src/__support/CPP/type_traits.h"
-#include "src/__support/macros/attributes.h" // LIBC_INLINE
-#include "src/__support/macros/config.h"
+#include "src/__support/macros/attributes.h"   // LIBC_INLINE
+#include "src/__support/macros/config.h"       // LIBC_NAMESPACE_DECL
 #include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
 #include "src/__support/math_extras.h"
 
@@ -50,6 +51,12 @@ template <typename T> struct FXBits {
   static constexpr StorageType SIGN_MASK =
       (fx_rep::SIGN_LEN == 0 ? 0 : StorageType(1) << SIGN_OFFSET);
 
+  // mask for <integral | fraction>
+  static constexpr StorageType VALUE_MASK = INTEGRAL_MASK | FRACTION_MASK;
+
+  // mask for <sign | integral | fraction>
+  static constexpr StorageType TOTAL_MASK = SIGN_MASK | VALUE_MASK;
+
 public:
   LIBC_INLINE constexpr FXBits() = default;
 
@@ -74,6 +81,12 @@ template <typename T> struct FXBits {
     return (value & INTEGRAL_MASK) >> INTEGRAL_OFFSET;
   }
 
+  // returns the complete bitstring representation of the fixed point number
+  // the bitstring is of the form: padding | sign | integral | fraction
+  LIBC_INLINE constexpr StorageType get_bits() {
+    return (value & TOTAL_MASK) >> FRACTION_OFFSET;
+  }
+
   // TODO: replace bool with Sign
   LIBC_INLINE constexpr bool get_sign() {
     return static_cast<bool>((value & SIGN_MASK) >> SIGN_OFFSET);
@@ -163,6 +176,26 @@ template <typename T> LIBC_INLINE constexpr T round(T x, int n) {
   return bit_and((x + round_bit), rounding_mask);
 }
 
+// count leading sign bits
+template <typename T>
+LIBC_INLINE constexpr cpp::enable_if_t<cpp::is_fixed_point_v<T>, int>
+countls(T f) {
+  using FXRep = FXRep<T>;
+  using BitType = typename FXRep::StorageType;
+  using FXBits = FXBits<T>;
+
+  constexpr int CONTAIN_LEN = cpp::numeric_limits<BitType>::digits;
+  constexpr int PADDING_LEN = CONTAIN_LEN - FXRep::TOTAL_LEN;
+
+  if constexpr (FXRep::SIGN_LEN != 0) {
+    if (f < 0)
+      f = bit_not(f); // negative: count leading ones via the complement
+  }
+
+  BitType value_bits = FXBits(f).get_bits();
+  return cpp::countl_zero(value_bits) - PADDING_LEN;
+}
+
 } // namespace fixed_point
 } // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/signal/linux/CMakeLists.txt b/libc/src/signal/linux/CMakeLists.txt
index f7457d31cf4f8..c0dd61e473881 100644
--- a/libc/src/signal/linux/CMakeLists.txt
+++ b/libc/src/signal/linux/CMakeLists.txt
@@ -127,7 +127,6 @@ add_entrypoint_object(
   DEPENDS
     .sigaction
     libc.hdr.signal_macros
-    libc.hdr.types.sighandler_t
 )
 
 add_entrypoint_object(
diff --git a/libc/src/signal/linux/signal.cpp b/libc/src/signal/linux/signal.cpp
index 1da0ef8c97a20..7c8ea16c6cd2e 100644
--- a/libc/src/signal/linux/signal.cpp
+++ b/libc/src/signal/linux/signal.cpp
@@ -8,14 +8,17 @@
 #include "src/signal/signal.h"
 
 #include "hdr/signal_macros.h"
-#include "hdr/types/sighandler_t.h"
 #include "src/__support/common.h"
 #include "src/__support/macros/config.h"
 #include "src/signal/sigaction.h"
 
 namespace LIBC_NAMESPACE_DECL {
 
-LLVM_LIBC_FUNCTION(sighandler_t, signal, (int signum, sighandler_t handler)) {
+// Our LLVM_LIBC_FUNCTION macro doesn't handle function pointer return types.
+using signal_handler = void (*)(int); + +LLVM_LIBC_FUNCTION(signal_handler, signal, + (int signum, signal_handler handler)) { struct sigaction action, old; action.sa_handler = handler; action.sa_flags = SA_RESTART; diff --git a/libc/src/signal/signal.h b/libc/src/signal/signal.h index 06e77e11bf0bd..e1f31a8e126c5 100644 --- a/libc/src/signal/signal.h +++ b/libc/src/signal/signal.h @@ -9,12 +9,11 @@ #ifndef LLVM_LIBC_SRC_SIGNAL_SIGNAL_H #define LLVM_LIBC_SRC_SIGNAL_SIGNAL_H -#include "hdr/types/sighandler_t.h" #include "src/__support/macros/config.h" namespace LIBC_NAMESPACE_DECL { -sighandler_t signal(int signum, sighandler_t handler); +void (*signal(int signum, void (*handler)(int)))(int); } // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/stdfix/CMakeLists.txt b/libc/src/stdfix/CMakeLists.txt index 815f739d23efa..3f6f9125a086b 100644 --- a/libc/src/stdfix/CMakeLists.txt +++ b/libc/src/stdfix/CMakeLists.txt @@ -53,6 +53,18 @@ foreach(suffix IN ITEMS hr r lr hk k lk uhr ur ulr uhk uk ulk) libc.src.__support.CPP.bit libc.src.__support.fixed_point.fx_bits ) + + add_entrypoint_object( + countls${suffix} + HDRS + countls${suffix}.h + SRCS + countls${suffix}.cpp + COMPILE_OPTIONS + ${libc_opt_high_flag} + DEPENDS + libc.src.__support.fixed_point.fx_bits + ) endforeach() add_entrypoint_object( diff --git a/libc/src/stdfix/countlshk.cpp b/libc/src/stdfix/countlshk.cpp new file mode 100644 index 0000000000000..f94728beff1cb --- /dev/null +++ b/libc/src/stdfix/countlshk.cpp @@ -0,0 +1,20 @@ +//===-- Implementation for countlshk function ----------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "countlshk.h" +#include "src/__support/common.h" +#include "src/__support/fixed_point/fx_bits.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +LLVM_LIBC_FUNCTION(int, countlshk, (short accum f)) { + return fixed_point::countls(f); +} + +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/stdfix/countlshk.h b/libc/src/stdfix/countlshk.h new file mode 100644 index 0000000000000..ab334244e166a --- /dev/null +++ b/libc/src/stdfix/countlshk.h @@ -0,0 +1,21 @@ +//===-- Implementation header for countlshk function ------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_STDFIX_COUNTLSHK_H +#define LLVM_LIBC_SRC_STDFIX_COUNTLSHK_H + +#include "include/llvm-libc-macros/stdfix-macros.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +int countlshk(short accum f); + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC_STDFIX_COUNTLSHK_H diff --git a/libc/src/stdfix/countlshr.cpp b/libc/src/stdfix/countlshr.cpp new file mode 100644 index 0000000000000..d77d3e9a3c22a --- /dev/null +++ b/libc/src/stdfix/countlshr.cpp @@ -0,0 +1,20 @@ +//===-- Implementation for countlshr function ----------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "countlshr.h" +#include "src/__support/common.h" +#include "src/__support/fixed_point/fx_bits.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +LLVM_LIBC_FUNCTION(int, countlshr, (short fract f)) { + return fixed_point::countls(f); +} + +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/stdfix/countlshr.h b/libc/src/stdfix/countlshr.h new file mode 100644 index 0000000000000..579b7b680406e --- /dev/null +++ b/libc/src/stdfix/countlshr.h @@ -0,0 +1,21 @@ +//===-- Implementation header for countlshr function ------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_STDFIX_COUNTLSHR_H +#define LLVM_LIBC_SRC_STDFIX_COUNTLSHR_H + +#include "include/llvm-libc-macros/stdfix-macros.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +int countlshr(short fract f); + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC_STDFIX_COUNTLSHR_H diff --git a/libc/src/stdfix/countlsk.cpp b/libc/src/stdfix/countlsk.cpp new file mode 100644 index 0000000000000..b6f56adee16a6 --- /dev/null +++ b/libc/src/stdfix/countlsk.cpp @@ -0,0 +1,18 @@ +//===-- Implementation for countlsk function -----------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "countlsk.h" +#include "src/__support/common.h" +#include "src/__support/fixed_point/fx_bits.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +LLVM_LIBC_FUNCTION(int, countlsk, (accum f)) { return fixed_point::countls(f); } + +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/stdfix/countlsk.h b/libc/src/stdfix/countlsk.h new file mode 100644 index 0000000000000..d0c893bc078d5 --- /dev/null +++ b/libc/src/stdfix/countlsk.h @@ -0,0 +1,21 @@ +//===-- Implementation header for countlsk function -------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_STDFIX_COUNTLSK_H +#define LLVM_LIBC_SRC_STDFIX_COUNTLSK_H + +#include "include/llvm-libc-macros/stdfix-macros.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +int countlsk(accum f); + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC_STDFIX_COUNTLSK_H diff --git a/libc/src/stdfix/countlslk.cpp b/libc/src/stdfix/countlslk.cpp new file mode 100644 index 0000000000000..9bf30ff34c6ee --- /dev/null +++ b/libc/src/stdfix/countlslk.cpp @@ -0,0 +1,20 @@ +//===-- Implementation for countlslk function ----------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "countlslk.h" +#include "src/__support/common.h" +#include "src/__support/fixed_point/fx_bits.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +LLVM_LIBC_FUNCTION(int, countlslk, (long accum f)) { + return fixed_point::countls(f); +} + +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/stdfix/countlslk.h b/libc/src/stdfix/countlslk.h new file mode 100644 index 0000000000000..60fa469797b7a --- /dev/null +++ b/libc/src/stdfix/countlslk.h @@ -0,0 +1,21 @@ +//===-- Implementation header for countlslk function ------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_STDFIX_COUNTLSLK_H +#define LLVM_LIBC_SRC_STDFIX_COUNTLSLK_H + +#include "include/llvm-libc-macros/stdfix-macros.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +int countlslk(long accum f); + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC_STDFIX_COUNTLSLK_H diff --git a/libc/src/stdfix/countlslr.cpp b/libc/src/stdfix/countlslr.cpp new file mode 100644 index 0000000000000..774023c734a37 --- /dev/null +++ b/libc/src/stdfix/countlslr.cpp @@ -0,0 +1,20 @@ +//===-- Implementation for countlslr function ----------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "countlslr.h" +#include "src/__support/common.h" +#include "src/__support/fixed_point/fx_bits.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +LLVM_LIBC_FUNCTION(int, countlslr, (long fract f)) { + return fixed_point::countls(f); +} + +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/stdfix/countlslr.h b/libc/src/stdfix/countlslr.h new file mode 100644 index 0000000000000..c909551e77a1a --- /dev/null +++ b/libc/src/stdfix/countlslr.h @@ -0,0 +1,21 @@ +//===-- Implementation header for countlslr function ------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_STDFIX_COUNTLSLR_H +#define LLVM_LIBC_SRC_STDFIX_COUNTLSLR_H + +#include "include/llvm-libc-macros/stdfix-macros.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +int countlslr(long fract f); + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC_STDFIX_COUNTLSLR_H diff --git a/libc/src/stdfix/countlsr.cpp b/libc/src/stdfix/countlsr.cpp new file mode 100644 index 0000000000000..14563127ad5e9 --- /dev/null +++ b/libc/src/stdfix/countlsr.cpp @@ -0,0 +1,18 @@ +//===-- Implementation for countlsr function -----------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "countlsr.h" +#include "src/__support/common.h" +#include "src/__support/fixed_point/fx_bits.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +LLVM_LIBC_FUNCTION(int, countlsr, (fract f)) { return fixed_point::countls(f); } + +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/stdfix/countlsr.h b/libc/src/stdfix/countlsr.h new file mode 100644 index 0000000000000..75dcf4aff0ca3 --- /dev/null +++ b/libc/src/stdfix/countlsr.h @@ -0,0 +1,21 @@ +//===-- Implementation header for countlsr function -------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_STDFIX_COUNTLSR_H +#define LLVM_LIBC_SRC_STDFIX_COUNTLSR_H + +#include "include/llvm-libc-macros/stdfix-macros.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +int countlsr(fract f); + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC_STDFIX_COUNTLSR_H diff --git a/libc/src/stdfix/countlsuhk.cpp b/libc/src/stdfix/countlsuhk.cpp new file mode 100644 index 0000000000000..2cc266f47da1f --- /dev/null +++ b/libc/src/stdfix/countlsuhk.cpp @@ -0,0 +1,20 @@ +//===-- Implementation for countlsuhk function ---------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "countlsuhk.h" +#include "src/__support/common.h" +#include "src/__support/fixed_point/fx_bits.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +LLVM_LIBC_FUNCTION(int, countlsuhk, (unsigned short accum f)) { + return fixed_point::countls(f); +} + +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/stdfix/countlsuhk.h b/libc/src/stdfix/countlsuhk.h new file mode 100644 index 0000000000000..fcb2fec3500d4 --- /dev/null +++ b/libc/src/stdfix/countlsuhk.h @@ -0,0 +1,21 @@ +//===-- Implementation header for countlsuhk function -----------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_STDFIX_COUNTLSUHK_H +#define LLVM_LIBC_SRC_STDFIX_COUNTLSUHK_H + +#include "include/llvm-libc-macros/stdfix-macros.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +int countlsuhk(unsigned short accum f); + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC_STDFIX_COUNTLSUHK_H diff --git a/libc/src/stdfix/countlsuhr.cpp b/libc/src/stdfix/countlsuhr.cpp new file mode 100644 index 0000000000000..f30b0dd731aa9 --- /dev/null +++ b/libc/src/stdfix/countlsuhr.cpp @@ -0,0 +1,20 @@ +//===-- Implementation for countlsuhr function ---------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "countlsuhr.h"
+#include "src/__support/common.h"
+#include "src/__support/fixed_point/fx_bits.h"
+#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(int, countlsuhr, (unsigned short fract f)) {
+  return fixed_point::countls(f);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/stdfix/countlsuhr.h b/libc/src/stdfix/countlsuhr.h
new file mode 100644
index 0000000000000..c6ce001d38b11
--- /dev/null
+++ b/libc/src/stdfix/countlsuhr.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for countlsuhr function -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDFIX_COUNTLSUHR_H
+#define LLVM_LIBC_SRC_STDFIX_COUNTLSUHR_H
+
+#include "include/llvm-libc-macros/stdfix-macros.h"
+#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL
+
+namespace LIBC_NAMESPACE_DECL {
+
+int countlsuhr(unsigned short fract f);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_STDFIX_COUNTLSUHR_H
diff --git a/libc/src/stdfix/countlsuk.cpp b/libc/src/stdfix/countlsuk.cpp
new file mode 100644
index 0000000000000..3f32ba0815b6e
--- /dev/null
+++ b/libc/src/stdfix/countlsuk.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation for countlsuk function ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "countlsuk.h"
+#include "src/__support/common.h"
+#include "src/__support/fixed_point/fx_bits.h"
+#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(int, countlsuk, (unsigned accum f)) {
+  return fixed_point::countls(f);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/stdfix/countlsuk.h b/libc/src/stdfix/countlsuk.h
new file mode 100644
index 0000000000000..7ad0e701b927b
--- /dev/null
+++ b/libc/src/stdfix/countlsuk.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for countlsuk function ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_STDFIX_COUNTLSUK_H +#define LLVM_LIBC_SRC_STDFIX_COUNTLSUK_H + +#include "include/llvm-libc-macros/stdfix-macros.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +int countlsuk(unsigned accum f); + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC_STDFIX_COUNTLSUK_H diff --git a/libc/src/stdfix/countlsulk.cpp b/libc/src/stdfix/countlsulk.cpp new file mode 100644 index 0000000000000..04090dd86c732 --- /dev/null +++ b/libc/src/stdfix/countlsulk.cpp @@ -0,0 +1,20 @@ +//===-- Implementation for countlsulk function ---------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "countlsulk.h" +#include "src/__support/common.h" +#include "src/__support/fixed_point/fx_bits.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +LLVM_LIBC_FUNCTION(int, countlsulk, (unsigned long accum f)) { + return fixed_point::countls(f); +} + +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/stdfix/countlsulk.h b/libc/src/stdfix/countlsulk.h new file mode 100644 index 0000000000000..55ca9d2e20ff0 --- /dev/null +++ b/libc/src/stdfix/countlsulk.h @@ -0,0 +1,21 @@ +//===-- Implementation header for countlsulk function -----------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_STDFIX_COUNTLSULK_H +#define LLVM_LIBC_SRC_STDFIX_COUNTLSULK_H + +#include "include/llvm-libc-macros/stdfix-macros.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +int countlsulk(unsigned long accum f); + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC_STDFIX_COUNTLSULK_H diff --git a/libc/src/stdfix/countlsulr.cpp b/libc/src/stdfix/countlsulr.cpp new file mode 100644 index 0000000000000..d9d6ff404c211 --- /dev/null +++ b/libc/src/stdfix/countlsulr.cpp @@ -0,0 +1,20 @@ +//===-- Implementation for countlsulr function ---------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "countlsulr.h" +#include "src/__support/common.h" +#include "src/__support/fixed_point/fx_bits.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +LLVM_LIBC_FUNCTION(int, countlsulr, (unsigned long fract f)) { + return fixed_point::countls(f); +} + +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/stdfix/countlsulr.h b/libc/src/stdfix/countlsulr.h new file mode 100644 index 0000000000000..59e7d726d01b9 --- /dev/null +++ b/libc/src/stdfix/countlsulr.h @@ -0,0 +1,21 @@ +//===-- Implementation header for countlsulr function -----------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_STDFIX_COUNTLSULR_H +#define LLVM_LIBC_SRC_STDFIX_COUNTLSULR_H + +#include "include/llvm-libc-macros/stdfix-macros.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +int countlsulr(unsigned long fract f); + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC_STDFIX_COUNTLSULR_H diff --git a/libc/src/stdfix/countlsur.cpp b/libc/src/stdfix/countlsur.cpp new file mode 100644 index 0000000000000..777e5f387aadf --- /dev/null +++ b/libc/src/stdfix/countlsur.cpp @@ -0,0 +1,20 @@ +//===-- Implementation for countlsur function ----------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "countlsur.h" +#include "src/__support/common.h" +#include "src/__support/fixed_point/fx_bits.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +LLVM_LIBC_FUNCTION(int, countlsur, (unsigned fract f)) { + return fixed_point::countls(f); +} + +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/stdfix/countlsur.h b/libc/src/stdfix/countlsur.h new file mode 100644 index 0000000000000..1d34e971a52b3 --- /dev/null +++ b/libc/src/stdfix/countlsur.h @@ -0,0 +1,21 @@ +//===-- Implementation header for countlsur function ------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_STDFIX_COUNTLSUR_H +#define LLVM_LIBC_SRC_STDFIX_COUNTLSUR_H + +#include "include/llvm-libc-macros/stdfix-macros.h" +#include "src/__support/macros/config.h" // LIBC_NAMESPACE_DECL + +namespace LIBC_NAMESPACE_DECL { + +int countlsur(unsigned fract f); + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC_STDFIX_COUNTLSUR_H diff --git a/libc/test/UnitTest/FPExceptMatcher.cpp b/libc/test/UnitTest/FPExceptMatcher.cpp index 119a06985b8f1..d66066023984e 100644 --- a/libc/test/UnitTest/FPExceptMatcher.cpp +++ b/libc/test/UnitTest/FPExceptMatcher.cpp @@ -37,7 +37,7 @@ static void sigfpeHandler(int sig) { } FPExceptMatcher::FPExceptMatcher(FunctionCaller *func) { - sighandler_t oldSIGFPEHandler = signal(SIGFPE, &sigfpeHandler); + auto *oldSIGFPEHandler = signal(SIGFPE, &sigfpeHandler); caughtExcept = false; fenv_t oldEnv; diff --git a/libc/test/src/signal/CMakeLists.txt b/libc/test/src/signal/CMakeLists.txt index a27f5b8f1000e..f86ce2ae96857 100644 --- a/libc/test/src/signal/CMakeLists.txt +++ b/libc/test/src/signal/CMakeLists.txt @@ -74,7 +74,6 @@ add_libc_unittest( SRCS signal_test.cpp DEPENDS - libc.hdr.types.sighandler_t libc.src.errno.errno libc.src.signal.raise libc.src.signal.signal diff --git a/libc/test/src/signal/signal_test.cpp b/libc/test/src/signal/signal_test.cpp index 4b57311eee2d8..bac9c3b8b68bb 100644 --- a/libc/test/src/signal/signal_test.cpp +++ b/libc/test/src/signal/signal_test.cpp @@ -13,14 +13,12 @@ #include "test/UnitTest/ErrnoSetterMatcher.h" #include "test/UnitTest/Test.h" -#include "hdr/types/sighandler_t.h" - using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Fails; using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Succeeds; TEST(LlvmLibcSignal, Invalid) { LIBC_NAMESPACE::libc_errno = 0; - sighandler_t valid = +[](int) {}; + auto *valid = +[](int) {}; EXPECT_THAT((void *)LIBC_NAMESPACE::signal(0, valid), Fails(EINVAL, (void *)SIG_ERR)); EXPECT_THAT((void *)LIBC_NAMESPACE::signal(65, valid), diff --git a/libc/test/src/stdfix/CMakeLists.txt b/libc/test/src/stdfix/CMakeLists.txt index 90d20438edb4b..c8c4fd96bc2b3 100644 --- a/libc/test/src/stdfix/CMakeLists.txt +++ b/libc/test/src/stdfix/CMakeLists.txt @@ -73,6 +73,22 @@ foreach(suffix IN ITEMS hr r lr hk k lk uhr ur ulr uhk uk ulk) libc.src.__support.CPP.bit libc.src.__support.fixed_point.fx_bits ) + + add_libc_test( + countls${suffix}_test + SUITE + libc-stdfix-tests + HDRS + CountlsTest.h + SRCS + countls${suffix}_test.cpp + COMPILE_OPTIONS + -O3 + DEPENDS + libc.src.stdfix.countls${suffix} + libc.src.__support.fixed_point.fx_rep + libc.src.__support.fixed_point.fx_bits + ) endforeach() add_libc_test( diff --git a/libc/test/src/stdfix/CountlsTest.h b/libc/test/src/stdfix/CountlsTest.h new file mode 100644 index 0000000000000..fe3917754a251 --- /dev/null +++ b/libc/test/src/stdfix/CountlsTest.h @@ -0,0 +1,62 @@ +//===-- Utility class to test countls -------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "test/UnitTest/Test.h"
+
+#include "src/__support/fixed_point/fx_rep.h"
+
+template <typename T> class CountlsTest : public LIBC_NAMESPACE::testing::Test {
+
+  using FXRep = LIBC_NAMESPACE::fixed_point::FXRep<T>;
+  static constexpr T zero = FXRep::ZERO();
+  static constexpr T max = FXRep::MAX();
+  static constexpr T min = FXRep::MIN();
+  static constexpr T one_half = FXRep::ONE_HALF();
+  static constexpr T one_fourth = FXRep::ONE_FOURTH();
+  static constexpr T eps = FXRep::EPS();
+
+  static constexpr auto value_len = FXRep::INTEGRAL_LEN + FXRep::FRACTION_LEN;
+
+public:
+  typedef int (*CountlsFunc)(T);
+
+  void testSpecialNumbers(CountlsFunc func) {
+    constexpr bool is_signed = (FXRep::SIGN_LEN > 0);
+
+    EXPECT_EQ(FXRep::INTEGRAL_LEN, func(one_half));
+    EXPECT_EQ(FXRep::INTEGRAL_LEN + 1, func(one_fourth));
+    EXPECT_EQ(value_len, func(zero));
+    EXPECT_EQ(value_len - 1, func(eps));
+    EXPECT_EQ(0, func(max));
+    // If signed, left shifting the minimum value will overflow, so countls = 0.
+    // If unsigned, the minimum value is zero, so countls is the number of value
+    // bits according to ISO/IEC TR 18037.
+    EXPECT_EQ(is_signed ? 0 : value_len, func(min));
+
+    if (10 <= static_cast<int>(max)) {
+      EXPECT_EQ(FXRep::INTEGRAL_LEN - 4, func(10));
+    }
+
+    if (static_cast<int>(min) <= -10) {
+      EXPECT_EQ(FXRep::INTEGRAL_LEN - 4, func(-10));
+    }
+
+    if constexpr (is_signed) {
+      EXPECT_EQ(value_len, func(-eps));
+      EXPECT_EQ(FXRep::INTEGRAL_LEN + 1, func(-one_half));
+      if (FXRep::FRACTION_LEN >= 2) {
+        EXPECT_EQ(FXRep::INTEGRAL_LEN + 2, func(-one_fourth));
+      }
+    }
+  }
+};
+
+#define LIST_COUNTLS_TESTS(T, func)                                            \
+  using LlvmLibcCountlsTest = CountlsTest<T>;                                  \
+  TEST_F(LlvmLibcCountlsTest, SpecialNumbers) { testSpecialNumbers(&func); }   \
+  static_assert(true, "Require semicolon.")
diff --git a/libc/include/llvm-libc-types/__sighandler_t.h b/libc/test/src/stdfix/countlshk_test.cpp
similarity index 57%
rename from libc/include/llvm-libc-types/__sighandler_t.h
rename to libc/test/src/stdfix/countlshk_test.cpp
index 9c1ac997fc4ee..659f869706b5f 100644
--- a/libc/include/llvm-libc-types/__sighandler_t.h
+++ b/libc/test/src/stdfix/countlshk_test.cpp
@@ -1,4 +1,4 @@
-//===-- Definition of struct __sighandler_t -------------------------------===//
+//===-- Unittests for countlshk -------------------------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -6,9 +6,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef LLVM_LIBC_TYPES___SIGHANDLER_T_H
-#define LLVM_LIBC_TYPES___SIGHANDLER_T_H
+#include "CountlsTest.h"
 
-typedef void (*__sighandler_t)(int);
+#include "src/stdfix/countlshk.h"
 
-#endif // LLVM_LIBC_TYPES___SIGHANDLER_T_H
+LIST_COUNTLS_TESTS(short accum, LIBC_NAMESPACE::countlshk);
diff --git a/libc/test/src/stdfix/countlshr_test.cpp b/libc/test/src/stdfix/countlshr_test.cpp
new file mode 100644
index 0000000000000..361d4acab3b11
--- /dev/null
+++ b/libc/test/src/stdfix/countlshr_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for countlshr -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "CountlsTest.h" + +#include "src/stdfix/countlshr.h" + +LIST_COUNTLS_TESTS(short fract, LIBC_NAMESPACE::countlshr); diff --git a/libc/test/src/stdfix/countlsk_test.cpp b/libc/test/src/stdfix/countlsk_test.cpp new file mode 100644 index 0000000000000..74cb519ec78de --- /dev/null +++ b/libc/test/src/stdfix/countlsk_test.cpp @@ -0,0 +1,13 @@ +//===-- Unittests for countlsk --------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "CountlsTest.h" + +#include "src/stdfix/countlsk.h" + +LIST_COUNTLS_TESTS(accum, LIBC_NAMESPACE::countlsk); diff --git a/libc/test/src/stdfix/countlslk_test.cpp b/libc/test/src/stdfix/countlslk_test.cpp new file mode 100644 index 0000000000000..006939db3c87e --- /dev/null +++ b/libc/test/src/stdfix/countlslk_test.cpp @@ -0,0 +1,13 @@ +//===-- Unittests for countlslk -------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "CountlsTest.h" + +#include "src/stdfix/countlslk.h" + +LIST_COUNTLS_TESTS(long accum, LIBC_NAMESPACE::countlslk); diff --git a/libc/test/src/stdfix/countlslr_test.cpp b/libc/test/src/stdfix/countlslr_test.cpp new file mode 100644 index 0000000000000..896cf9259c3ea --- /dev/null +++ b/libc/test/src/stdfix/countlslr_test.cpp @@ -0,0 +1,13 @@ +//===-- Unittests for countlslr -------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "CountlsTest.h" + +#include "src/stdfix/countlslr.h" + +LIST_COUNTLS_TESTS(long fract, LIBC_NAMESPACE::countlslr); diff --git a/libc/test/src/stdfix/countlsr_test.cpp b/libc/test/src/stdfix/countlsr_test.cpp new file mode 100644 index 0000000000000..d7ae91ccd6a92 --- /dev/null +++ b/libc/test/src/stdfix/countlsr_test.cpp @@ -0,0 +1,13 @@ +//===-- Unittests for countlsr --------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "CountlsTest.h" + +#include "src/stdfix/countlsr.h" + +LIST_COUNTLS_TESTS(fract, LIBC_NAMESPACE::countlsr); diff --git a/libc/test/src/stdfix/countlsuhk_test.cpp b/libc/test/src/stdfix/countlsuhk_test.cpp new file mode 100644 index 0000000000000..d8e68d65160e7 --- /dev/null +++ b/libc/test/src/stdfix/countlsuhk_test.cpp @@ -0,0 +1,13 @@ +//===-- Unittests for countlsuhk ------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "CountlsTest.h" + +#include "src/stdfix/countlsuhk.h" + +LIST_COUNTLS_TESTS(unsigned short accum, LIBC_NAMESPACE::countlsuhk); diff --git a/libc/test/src/stdfix/countlsuhr_test.cpp b/libc/test/src/stdfix/countlsuhr_test.cpp new file mode 100644 index 0000000000000..7dbc590d4a552 --- /dev/null +++ b/libc/test/src/stdfix/countlsuhr_test.cpp @@ -0,0 +1,13 @@ +//===-- Unittests for countlsuhr ------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "CountlsTest.h" + +#include "src/stdfix/countlsuhr.h" + +LIST_COUNTLS_TESTS(unsigned short fract, LIBC_NAMESPACE::countlsuhr); diff --git a/libc/test/src/stdfix/countlsuk_test.cpp b/libc/test/src/stdfix/countlsuk_test.cpp new file mode 100644 index 0000000000000..20f78d8c942b6 --- /dev/null +++ b/libc/test/src/stdfix/countlsuk_test.cpp @@ -0,0 +1,13 @@ +//===-- Unittests for countlsuk -------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "CountlsTest.h" + +#include "src/stdfix/countlsuk.h" + +LIST_COUNTLS_TESTS(unsigned accum, LIBC_NAMESPACE::countlsuk); diff --git a/libc/test/src/stdfix/countlsulk_test.cpp b/libc/test/src/stdfix/countlsulk_test.cpp new file mode 100644 index 0000000000000..81ae208055cd9 --- /dev/null +++ b/libc/test/src/stdfix/countlsulk_test.cpp @@ -0,0 +1,13 @@ +//===-- Unittests for countlsulk ------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CountlsTest.h"
+
+#include "src/stdfix/countlsulk.h"
+
+LIST_COUNTLS_TESTS(unsigned long accum, LIBC_NAMESPACE::countlsulk);
diff --git a/libc/test/src/stdfix/countlsulr_test.cpp b/libc/test/src/stdfix/countlsulr_test.cpp
new file mode 100644
index 0000000000000..5b9b047f7fd74
--- /dev/null
+++ b/libc/test/src/stdfix/countlsulr_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for countlsulr ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CountlsTest.h"
+
+#include "src/stdfix/countlsulr.h"
+
+LIST_COUNTLS_TESTS(unsigned long fract, LIBC_NAMESPACE::countlsulr);
diff --git a/libc/test/src/stdfix/countlsur_test.cpp b/libc/test/src/stdfix/countlsur_test.cpp
new file mode 100644
index 0000000000000..67e32d7b56217
--- /dev/null
+++ b/libc/test/src/stdfix/countlsur_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for countlsur -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CountlsTest.h"
+
+#include "src/stdfix/countlsur.h"
+
+LIST_COUNTLS_TESTS(unsigned fract, LIBC_NAMESPACE::countlsur);
diff --git a/libcxx/include/__type_traits/conjunction.h b/libcxx/include/__type_traits/conjunction.h
index 6b6717a50a468..ad9656acd47ec 100644
--- a/libcxx/include/__type_traits/conjunction.h
+++ b/libcxx/include/__type_traits/conjunction.h
@@ -10,6 +10,8 @@
 #define _LIBCPP___TYPE_TRAITS_CONJUNCTION_H
 
 #include <__config>
+#include <__type_traits/conditional.h>
+#include <__type_traits/enable_if.h>
 #include <__type_traits/integral_constant.h>
 #include <__type_traits/is_same.h>
 
@@ -19,29 +21,22 @@
 
 _LIBCPP_BEGIN_NAMESPACE_STD
 
-template <bool>
-struct _AndImpl;
+template <class...>
+using __expand_to_true _LIBCPP_NODEBUG = true_type;
 
-template <>
-struct _AndImpl<true> {
-  template <class _Res, class _First, class... _Rest>
-  using _Result _LIBCPP_NODEBUG =
-      typename _AndImpl<_First::value && sizeof...(_Rest) != 0>::template _Result<_First, _Rest...>;
-};
+template <class... _Pred>
+__expand_to_true<__enable_if_t<_Pred::value>...> __and_helper(int);
 
-template <>
-struct _AndImpl<false> {
-  template <class _Res, class...>
-  using _Result _LIBCPP_NODEBUG = _Res;
-};
+template <class...>
+false_type __and_helper(...);
 
 // _And always performs lazy evaluation of its arguments.
 //
 // However, `_And<_Pred...>` itself will evaluate its result immediately (without having to
 // be instantiated) since it is an alias, unlike `conjunction<_Pred...>`, which is a struct.
 // If you want to defer the evaluation of `_And<_Pred...>` itself, use `_Lazy<_And, _Pred...>`.
-template <class... _Pred>
-using _And _LIBCPP_NODEBUG = typename _AndImpl<sizeof...(_Pred) != 0>::template _Result<true_type, _Pred...>;
+template <class... _Pred>
+using _And _LIBCPP_NODEBUG = decltype(std::__and_helper<_Pred...>(0));
 
 template <bool... _Preds>
 struct __all_dummy;
 
@@ -51,11 +46,22 @@ struct __all : _IsSame<__all_dummy<_Pred...>, __all_dummy<((void)_Pred, true)...
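The new `_And` relies on ordinary overload resolution plus SFINAE rather than recursive template instantiation: when every `_Pred::value` is true, each `__enable_if_t<_Pred::value>` substitutes successfully and the `int` overload returning `true_type` wins; as soon as one substitution fails, only the variadic `false_type` fallback remains, and in practice substitution proceeds left to right and stops at the first failure, which preserves the lazy behavior the comment describes. A rough standalone sketch of the same technique, using illustrative names rather than libc++'s reserved identifiers:

```cpp
#include <type_traits>

// Swallows any number of well-formed types and yields true_type.
template <class...>
using expand_to_true = std::true_type;

// Viable only if every enable_if_t<Preds::value> substitutes,
// i.e. every predicate is true.
template <class... Preds>
expand_to_true<std::enable_if_t<Preds::value>...> and_helper(int);

// Fallback selected when any substitution above fails.
template <class...>
std::false_type and_helper(...);

template <class... Preds>
using And = decltype(and_helper<Preds...>(0));

static_assert(And<std::true_type, std::true_type>::value, "all true");
static_assert(!And<std::true_type, std::false_type>::value, "one false");
```

The payoff is that a single `decltype` of an unevaluated call replaces a chain of class template instantiations, which matters for compile times in a trait used as pervasively as `_And`.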
#if _LIBCPP_STD_VER >= 17 -template -struct _LIBCPP_NO_SPECIALIZATIONS conjunction : _And<_Args...> {}; +template +struct _LIBCPP_NO_SPECIALIZATIONS conjunction : true_type {}; + +_LIBCPP_DIAGNOSTIC_PUSH +# if __has_warning("-Winvalid-specialization") +_LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Winvalid-specialization") +# endif +template +struct conjunction<_Arg> : _Arg {}; + +template +struct conjunction<_Arg, _Args...> : conditional_t> {}; +_LIBCPP_DIAGNOSTIC_POP template -_LIBCPP_NO_SPECIALIZATIONS inline constexpr bool conjunction_v = _And<_Args...>::value; +_LIBCPP_NO_SPECIALIZATIONS inline constexpr bool conjunction_v = conjunction<_Args...>::value; #endif // _LIBCPP_STD_VER >= 17 diff --git a/libcxx/test/benchmarks/containers/container_benchmarks.h b/libcxx/test/benchmarks/containers/container_benchmarks.h deleted file mode 100644 index e24bd767177e8..0000000000000 --- a/libcxx/test/benchmarks/containers/container_benchmarks.h +++ /dev/null @@ -1,609 +0,0 @@ -// -*- C++ -*- -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef TEST_BENCHMARKS_CONTAINERS_CONTAINER_BENCHMARKS_H -#define TEST_BENCHMARKS_CONTAINERS_CONTAINER_BENCHMARKS_H - -#include -#include -#include -#include // for std::from_range -#include -#include -#include - -#include "benchmark/benchmark.h" -#include "test_iterators.h" -#include "test_macros.h" -#include "../GenerateInput.h" - -namespace ContainerBenchmarks { - -template -void DoNotOptimizeData(Container& c) { - if constexpr (requires { c.data(); }) { - benchmark::DoNotOptimize(c.data()); - } else { - benchmark::DoNotOptimize(&c); - } -} - -// -// Sequence container operations -// -template -void BM_ctor_size(benchmark::State& st) { - auto size = st.range(0); - - for (auto _ : st) { - Container c(size); // we assume the destructor doesn't dominate the benchmark - DoNotOptimizeData(c); - } -} - -template -void BM_ctor_size_value(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const auto size = st.range(0); - ValueType value = gen(); - benchmark::DoNotOptimize(value); - - for (auto _ : st) { - Container c(size, value); // we assume the destructor doesn't dominate the benchmark - DoNotOptimizeData(c); - } -} - -template -void BM_ctor_iter_iter(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const auto size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - const auto begin = in.begin(); - const auto end = in.end(); - benchmark::DoNotOptimize(in); - - for (auto _ : st) { - Container c(begin, end); // we assume the destructor doesn't dominate the benchmark - DoNotOptimizeData(c); - } -} - -#if TEST_STD_VER >= 23 -template -void BM_ctor_from_range(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const auto size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - benchmark::DoNotOptimize(in); - - for (auto _ : st) { - Container c(std::from_range, in); // we assume the destructor doesn't dominate the benchmark - DoNotOptimizeData(c); - } -} -#endif - -template -void BM_ctor_copy(benchmark::State& st, Generator gen) { - auto size = st.range(0); 
- Container in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - - for (auto _ : st) { - Container c(in); // we assume the destructor doesn't dominate the benchmark - DoNotOptimizeData(c); - DoNotOptimizeData(in); - } -} - -template -void BM_assignment(benchmark::State& st, Generator gen) { - auto size = st.range(0); - Container in1, in2; - std::generate_n(std::back_inserter(in1), size, gen); - std::generate_n(std::back_inserter(in2), size, gen); - DoNotOptimizeData(in1); - DoNotOptimizeData(in2); - - // Assign from one of two containers in succession to avoid - // hitting a self-assignment corner-case - Container c(in1); - bool toggle = false; - for (auto _ : st) { - c = toggle ? in1 : in2; - toggle = !toggle; - DoNotOptimizeData(c); - DoNotOptimizeData(in1); - DoNotOptimizeData(in2); - } -} - -// Benchmark Container::assign(input-iter, input-iter) when the container already contains -// the same number of elements that we're assigning. The intent is to check whether the -// implementation basically creates a new container from scratch or manages to reuse the -// pre-existing storage. -template -void BM_assign_input_iter_full(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - auto size = st.range(0); - std::vector in1, in2; - std::generate_n(std::back_inserter(in1), size, gen); - std::generate_n(std::back_inserter(in2), size, gen); - DoNotOptimizeData(in1); - DoNotOptimizeData(in2); - - Container c(in1.begin(), in1.end()); - bool toggle = false; - for (auto _ : st) { - std::vector& in = toggle ? in1 : in2; - auto first = in.data(); - auto last = in.data() + in.size(); - c.assign(cpp17_input_iterator(first), cpp17_input_iterator(last)); - toggle = !toggle; - DoNotOptimizeData(c); - } -} - -template -void BM_insert_begin(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - - Container c(in.begin(), in.end()); - DoNotOptimizeData(c); - - ValueType value = gen(); - benchmark::DoNotOptimize(value); - - for (auto _ : st) { - c.insert(c.begin(), value); - DoNotOptimizeData(c); - - c.erase(std::prev(c.end())); // avoid growing indefinitely - } -} - -template - requires std::random_access_iterator -void BM_insert_middle(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - - Container c(in.begin(), in.end()); - DoNotOptimizeData(c); - - ValueType value = gen(); - benchmark::DoNotOptimize(value); - - for (auto _ : st) { - auto mid = c.begin() + (size / 2); // requires random-access iterators in order to make sense - c.insert(mid, value); - DoNotOptimizeData(c); - - c.erase(c.end() - 1); // avoid growing indefinitely - } -} - -// Insert at the start of a vector in a scenario where the vector already -// has enough capacity to hold all the elements we are inserting. 
-template -void BM_insert_begin_input_iter_with_reserve_no_realloc(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - auto first = in.data(); - auto last = in.data() + in.size(); - - const int small = 100; // arbitrary - Container c; - c.reserve(size + small); // ensure no reallocation - std::generate_n(std::back_inserter(c), small, gen); - - for (auto _ : st) { - c.insert(c.begin(), cpp17_input_iterator(first), cpp17_input_iterator(last)); - DoNotOptimizeData(c); - - st.PauseTiming(); - c.erase(c.begin() + small, c.end()); // avoid growing indefinitely - st.ResumeTiming(); - } -} - -// Insert at the start of a vector in a scenario where the vector already -// has almost enough capacity to hold all the elements we are inserting, -// but does need to reallocate. -template -void BM_insert_begin_input_iter_with_reserve_almost_no_realloc(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - auto first = in.data(); - auto last = in.data() + in.size(); - - const int overflow = size / 10; // 10% of elements won't fit in the vector when we insert - Container c; - for (auto _ : st) { - st.PauseTiming(); - c = Container(); - c.reserve(size); - std::generate_n(std::back_inserter(c), overflow, gen); - st.ResumeTiming(); - - c.insert(c.begin(), cpp17_input_iterator(first), cpp17_input_iterator(last)); - DoNotOptimizeData(c); - } -} - -// Insert at the start of a vector in a scenario where the vector can fit a few -// more elements, but needs to reallocate almost immediately to fit the remaining -// elements. 
-template -void BM_insert_begin_input_iter_with_reserve_near_full(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - auto first = in.data(); - auto last = in.data() + in.size(); - - const int overflow = 9 * (size / 10); // 90% of elements won't fit in the vector when we insert - Container c; - for (auto _ : st) { - st.PauseTiming(); - c = Container(); - c.reserve(size); - std::generate_n(std::back_inserter(c), overflow, gen); - st.ResumeTiming(); - - c.insert(c.begin(), cpp17_input_iterator(first), cpp17_input_iterator(last)); - DoNotOptimizeData(c); - } -} - -template -void BM_erase_begin(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - - Container c(in.begin(), in.end()); - DoNotOptimizeData(c); - - ValueType value = gen(); - benchmark::DoNotOptimize(value); - - for (auto _ : st) { - c.erase(c.begin()); - DoNotOptimizeData(c); - - c.insert(c.end(), value); // re-insert an element at the end to avoid needing a new container - } -} - -template - requires std::random_access_iterator -void BM_erase_middle(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - - Container c(in.begin(), in.end()); - DoNotOptimizeData(c); - - ValueType value = gen(); - benchmark::DoNotOptimize(value); - - for (auto _ : st) { - auto mid = c.begin() + (size / 2); - c.erase(mid); - DoNotOptimizeData(c); - - c.insert(c.end(), value); // re-insert an element at the end to avoid needing a new container - } -} - -template -void BM_push_back(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - - Container c; - DoNotOptimizeData(c); - while (st.KeepRunningBatch(size)) { - c.clear(); - for (int i = 0; i != size; ++i) { - c.push_back(in[i]); - } - DoNotOptimizeData(c); - } -} - -template -void BM_push_back_with_reserve(benchmark::State& st, Generator gen) { - using ValueType = typename Container::value_type; - const int size = st.range(0); - std::vector in; - std::generate_n(std::back_inserter(in), size, gen); - DoNotOptimizeData(in); - - Container c; - c.reserve(size); - DoNotOptimizeData(c); - while (st.KeepRunningBatch(size)) { - c.clear(); - for (int i = 0; i != size; ++i) { - c.push_back(in[i]); - } - DoNotOptimizeData(c); - } -} - -template -void sequence_container_benchmarks(std::string container) { - using ValueType = typename Container::value_type; - - using Generator = ValueType (*)(); - Generator cheap = [] { return Generate::cheap(); }; - Generator expensive = [] { return Generate::expensive(); }; - auto tostr = [&](Generator gen) { return gen == cheap ? 
" (cheap elements)" : " (expensive elements)"; }; - std::vector generators; - generators.push_back(cheap); - if constexpr (!std::is_integral_v) { - generators.push_back(expensive); - } - - // constructors - if constexpr (std::is_constructible_v) { - // not all containers provide this one - benchmark::RegisterBenchmark(container + "::ctor(size)", BM_ctor_size)->Arg(1024); - } - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::ctor(size, value_type)" + tostr(gen), [=](auto& st) { - BM_ctor_size_value(st, gen); - })->Arg(1024); - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::ctor(Iterator, Iterator)" + tostr(gen), [=](auto& st) { - BM_ctor_iter_iter(st, gen); - })->Arg(1024); -#if TEST_STD_VER >= 23 - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::ctor(Range)" + tostr(gen), [=](auto& st) { - BM_ctor_from_range(st, gen); - })->Arg(1024); -#endif - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::ctor(const&)" + tostr(gen), [=](auto& st) { - BM_ctor_copy(st, gen); - })->Arg(1024); - - // assignment - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::operator=(const&)" + tostr(gen), [=](auto& st) { - BM_assignment(st, gen); - })->Arg(1024); - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::assign(input-iter, input-iter) (full container)" + tostr(gen), - [=](auto& st) { BM_assign_input_iter_full(st, gen); }) - ->Arg(1024); - - // insert - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::insert(begin)" + tostr(gen), [=](auto& st) { - BM_insert_begin(st, gen); - })->Arg(1024); - if constexpr (std::random_access_iterator) { - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::insert(middle)" + tostr(gen), [=](auto& st) { - BM_insert_middle(st, gen); - })->Arg(1024); - } - if constexpr (requires(Container c) { c.reserve(0); }) { - for (auto gen : generators) - benchmark::RegisterBenchmark( - container + "::insert(begin, input-iter, input-iter) (no realloc)" + tostr(gen), - [=](auto& st) { BM_insert_begin_input_iter_with_reserve_no_realloc(st, gen); }) - ->Arg(1024); - for (auto gen : generators) - benchmark::RegisterBenchmark( - container + "::insert(begin, input-iter, input-iter) (half filled)" + tostr(gen), - [=](auto& st) { BM_insert_begin_input_iter_with_reserve_almost_no_realloc(st, gen); }) - ->Arg(1024); - for (auto gen : generators) - benchmark::RegisterBenchmark( - container + "::insert(begin, input-iter, input-iter) (near full)" + tostr(gen), - [=](auto& st) { BM_insert_begin_input_iter_with_reserve_near_full(st, gen); }) - ->Arg(1024); - } - - // erase - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::erase(begin)" + tostr(gen), [=](auto& st) { - BM_erase_begin(st, gen); - })->Arg(1024); - if constexpr (std::random_access_iterator) { - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::erase(middle)" + tostr(gen), [=](auto& st) { - BM_erase_middle(st, gen); - })->Arg(1024); - } - - // push_back (optional) - if constexpr (requires(Container c, ValueType v) { c.push_back(v); }) { - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::push_back()" + tostr(gen), [=](auto& st) { - BM_push_back(st, gen); - })->Arg(1024); - if constexpr (requires(Container c) { c.reserve(0); }) { - for (auto gen : generators) - benchmark::RegisterBenchmark(container + "::push_back() (with reserve)" + tostr(gen), [=](auto& st) { - 
BM_push_back_with_reserve(st, gen); - })->Arg(1024); - } - } -} - -// -// Misc operations -// -template -void BM_InsertValue(benchmark::State& st, Container c, GenInputs gen) { - auto in = gen(st.range(0)); - const auto end = in.end(); - while (st.KeepRunning()) { - c.clear(); - for (auto it = in.begin(); it != end; ++it) { - benchmark::DoNotOptimize(&(*c.insert(*it).first)); - } - benchmark::ClobberMemory(); - } -} - -template -void BM_InsertValueRehash(benchmark::State& st, Container c, GenInputs gen) { - auto in = gen(st.range(0)); - const auto end = in.end(); - while (st.KeepRunning()) { - c.clear(); - c.rehash(16); - for (auto it = in.begin(); it != end; ++it) { - benchmark::DoNotOptimize(&(*c.insert(*it).first)); - } - benchmark::ClobberMemory(); - } -} - -template -void BM_InsertDuplicate(benchmark::State& st, Container c, GenInputs gen) { - auto in = gen(st.range(0)); - const auto end = in.end(); - c.insert(in.begin(), in.end()); - benchmark::DoNotOptimize(c); - benchmark::DoNotOptimize(in); - while (st.KeepRunning()) { - for (auto it = in.begin(); it != end; ++it) { - benchmark::DoNotOptimize(&(*c.insert(*it).first)); - } - benchmark::ClobberMemory(); - } -} - -template -void BM_EmplaceDuplicate(benchmark::State& st, Container c, GenInputs gen) { - auto in = gen(st.range(0)); - const auto end = in.end(); - c.insert(in.begin(), in.end()); - benchmark::DoNotOptimize(c); - benchmark::DoNotOptimize(in); - while (st.KeepRunning()) { - for (auto it = in.begin(); it != end; ++it) { - benchmark::DoNotOptimize(&(*c.emplace(*it).first)); - } - benchmark::ClobberMemory(); - } -} - -template -void BM_Find(benchmark::State& st, Container c, GenInputs gen) { - auto in = gen(st.range(0)); - c.insert(in.begin(), in.end()); - benchmark::DoNotOptimize(&(*c.begin())); - const auto end = in.data() + in.size(); - while (st.KeepRunning()) { - for (auto it = in.data(); it != end; ++it) { - benchmark::DoNotOptimize(&(*c.find(*it))); - } - benchmark::ClobberMemory(); - } -} - -template -void BM_FindRehash(benchmark::State& st, Container c, GenInputs gen) { - c.rehash(8); - auto in = gen(st.range(0)); - c.insert(in.begin(), in.end()); - benchmark::DoNotOptimize(&(*c.begin())); - const auto end = in.data() + in.size(); - while (st.KeepRunning()) { - for (auto it = in.data(); it != end; ++it) { - benchmark::DoNotOptimize(&(*c.find(*it))); - } - benchmark::ClobberMemory(); - } -} - -template -void BM_Rehash(benchmark::State& st, Container c, GenInputs gen) { - auto in = gen(st.range(0)); - c.max_load_factor(3.0); - c.insert(in.begin(), in.end()); - benchmark::DoNotOptimize(c); - const auto bucket_count = c.bucket_count(); - while (st.KeepRunning()) { - c.rehash(bucket_count + 1); - c.rehash(bucket_count); - benchmark::ClobberMemory(); - } -} - -template -void BM_Compare_same_container(benchmark::State& st, Container, GenInputs gen) { - auto in = gen(st.range(0)); - Container c1(in.begin(), in.end()); - Container c2 = c1; - - benchmark::DoNotOptimize(&(*c1.begin())); - benchmark::DoNotOptimize(&(*c2.begin())); - while (st.KeepRunning()) { - bool res = c1 == c2; - benchmark::DoNotOptimize(&res); - benchmark::ClobberMemory(); - } -} - -template -void BM_Compare_different_containers(benchmark::State& st, Container, GenInputs gen) { - auto in1 = gen(st.range(0)); - auto in2 = gen(st.range(0)); - Container c1(in1.begin(), in1.end()); - Container c2(in2.begin(), in2.end()); - - benchmark::DoNotOptimize(&(*c1.begin())); - benchmark::DoNotOptimize(&(*c2.begin())); - while (st.KeepRunning()) { - bool res = c1 == c2; - 
benchmark::DoNotOptimize(&res);
-    benchmark::ClobberMemory();
-  }
-}
-
-} // namespace ContainerBenchmarks
-
-#endif // TEST_BENCHMARKS_CONTAINERS_CONTAINER_BENCHMARKS_H
diff --git a/libcxx/test/benchmarks/containers/deque.bench.cpp b/libcxx/test/benchmarks/containers/sequence/deque.bench.cpp
similarity index 73%
rename from libcxx/test/benchmarks/containers/deque.bench.cpp
rename to libcxx/test/benchmarks/containers/sequence/deque.bench.cpp
index 6a650fa4dce2a..e37c9fef4ac23 100644
--- a/libcxx/test/benchmarks/containers/deque.bench.cpp
+++ b/libcxx/test/benchmarks/containers/sequence/deque.bench.cpp
@@ -11,12 +11,12 @@
 #include <deque>
 #include <string>
 
-#include "container_benchmarks.h"
+#include "sequence_container_benchmarks.h"
 #include "benchmark/benchmark.h"
 
 int main(int argc, char** argv) {
-  ContainerBenchmarks::sequence_container_benchmarks<std::deque<int>>("std::deque<int>");
-  ContainerBenchmarks::sequence_container_benchmarks<std::deque<std::string>>("std::deque<std::string>");
+  support::sequence_container_benchmarks<std::deque<int>>("std::deque<int>");
+  support::sequence_container_benchmarks<std::deque<std::string>>("std::deque<std::string>");
 
   benchmark::Initialize(&argc, argv);
   benchmark::RunSpecifiedBenchmarks();
diff --git a/libcxx/test/benchmarks/containers/list.bench.cpp b/libcxx/test/benchmarks/containers/sequence/list.bench.cpp
similarity index 73%
rename from libcxx/test/benchmarks/containers/list.bench.cpp
rename to libcxx/test/benchmarks/containers/sequence/list.bench.cpp
index 2212affa02ba4..e40aae6cf9fa5 100644
--- a/libcxx/test/benchmarks/containers/list.bench.cpp
+++ b/libcxx/test/benchmarks/containers/sequence/list.bench.cpp
@@ -11,12 +11,12 @@
 #include <list>
 #include <string>
 
-#include "container_benchmarks.h"
+#include "sequence_container_benchmarks.h"
 #include "benchmark/benchmark.h"
 
 int main(int argc, char** argv) {
-  ContainerBenchmarks::sequence_container_benchmarks<std::list<int>>("std::list<int>");
-  ContainerBenchmarks::sequence_container_benchmarks<std::list<std::string>>("std::list<std::string>");
+  support::sequence_container_benchmarks<std::list<int>>("std::list<int>");
+  support::sequence_container_benchmarks<std::list<std::string>>("std::list<std::string>");
 
   benchmark::Initialize(&argc, argv);
   benchmark::RunSpecifiedBenchmarks();
diff --git a/libcxx/test/benchmarks/containers/sequence/sequence_container_benchmarks.h b/libcxx/test/benchmarks/containers/sequence/sequence_container_benchmarks.h
new file mode 100644
index 0000000000000..dcd251d6997dd
--- /dev/null
+++ b/libcxx/test/benchmarks/containers/sequence/sequence_container_benchmarks.h
@@ -0,0 +1,455 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TEST_BENCHMARKS_CONTAINERS_SEQUENCE_SEQUENCE_CONTAINER_BENCHMARKS_H
+#define TEST_BENCHMARKS_CONTAINERS_SEQUENCE_SEQUENCE_CONTAINER_BENCHMARKS_H
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <ranges> // for std::from_range
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+#include "test_iterators.h"
+#include "../../GenerateInput.h"
+
+namespace support {
+
+template <class Container>
+void DoNotOptimizeData(Container& c) {
+  if constexpr (requires { c.data(); }) {
+    benchmark::DoNotOptimize(c.data());
+  } else {
+    benchmark::DoNotOptimize(&c);
+  }
+}
+
+template <class Container>
+void sequence_container_benchmarks(std::string container) {
+  using ValueType = typename Container::value_type;
+
+  using Generator = ValueType (*)();
+  Generator cheap = [] { return Generate<ValueType>::cheap(); };
+  Generator expensive = [] { return Generate<ValueType>::expensive(); };
+  auto tostr = [&](Generator gen) -> std::string {
+    return gen == cheap ? " (cheap elements)" : " (expensive elements)";
+  };
+  std::vector<Generator> generators;
+  generators.push_back(cheap);
+  if constexpr (!std::is_integral_v<ValueType>) {
+    generators.push_back(expensive);
+  }
+
+  // Some of these benchmarks are structured to perform the operation being benchmarked
+  // a small number of times at each iteration, in order to offset the cost of
+  // PauseTiming() and ResumeTiming().
+  static constexpr std::size_t BatchSize = 32;
+
+  auto bench = [&](std::string operation, auto f) {
+    benchmark::RegisterBenchmark(container + "::" + operation, f)->Arg(32)->Arg(1024)->Arg(8192);
+  };
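To make the batching comment above concrete, here is a standalone sketch of the KeepRunningBatch idiom the header relies on (illustrative only, not part of the patch; BM_push_back_batched is an invented name):

    #include <vector>
    #include "benchmark/benchmark.h"

    // Perform 32 operations per measured iteration so that the cost of
    // PauseTiming()/ResumeTiming() is amortized over the whole batch.
    static void BM_push_back_batched(benchmark::State& st) {
      std::vector<int> v;
      while (st.KeepRunningBatch(32)) {
        for (int i = 0; i != 32; ++i)
          v.push_back(i); // timed work
        st.PauseTiming();
        v.clear();        // untimed reset between batches
        st.ResumeTiming();
      }
    }
    BENCHMARK(BM_push_back_batched);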
+
+  /////////////////////////
+  // Constructors
+  /////////////////////////
+  if constexpr (std::is_constructible_v<Container, std::size_t>) {
+    // not all containers provide this constructor
+    bench("ctor(size)", [](auto& st) {
+      auto const size = st.range(0);
+
+      for ([[maybe_unused]] auto _ : st) {
+        Container c(size); // we assume the destructor doesn't dominate the benchmark
+        DoNotOptimizeData(c);
+      }
+    });
+  }
+
+  for (auto gen : generators)
+    bench("ctor(size, value_type)" + tostr(gen), [gen](auto& st) {
+      auto const size = st.range(0);
+      ValueType value = gen();
+      benchmark::DoNotOptimize(value);
+
+      for ([[maybe_unused]] auto _ : st) {
+        Container c(size, value); // we assume the destructor doesn't dominate the benchmark
+        DoNotOptimizeData(c);
+      }
+    });
+
+  for (auto gen : generators)
+    bench("ctor(Iterator, Iterator)" + tostr(gen), [gen](auto& st) {
+      auto const size = st.range(0);
+      std::vector<ValueType> in;
+      std::generate_n(std::back_inserter(in), size, gen);
+      const auto begin = in.begin();
+      const auto end = in.end();
+      benchmark::DoNotOptimize(in);
+
+      for ([[maybe_unused]] auto _ : st) {
+        Container c(begin, end); // we assume the destructor doesn't dominate the benchmark
+        DoNotOptimizeData(c);
+      }
+    });
+
+#if defined(__cpp_lib_containers_ranges)
+  for (auto gen : generators)
+    bench("ctor(Range)" + tostr(gen), [gen](auto& st) {
+      auto const size = st.range(0);
+      std::vector<ValueType> in;
+      std::generate_n(std::back_inserter(in), size, gen);
+      benchmark::DoNotOptimize(in);
+
+      for ([[maybe_unused]] auto _ : st) {
+        Container c(std::from_range, in); // we assume the destructor doesn't dominate the benchmark
+        DoNotOptimizeData(c);
+      }
+    });
+#endif
+
+  for (auto gen : generators)
+    bench("ctor(const&)" + tostr(gen), [gen](auto& st) {
+      auto const size = st.range(0);
+      Container in;
+      std::generate_n(std::back_inserter(in), size, gen);
+      DoNotOptimizeData(in);
+
+      for ([[maybe_unused]] auto _ : st) {
+        Container c(in); // we assume the destructor doesn't dominate the benchmark
+        DoNotOptimizeData(c);
+        DoNotOptimizeData(in);
+      }
+    });
+
+  /////////////////////////
+  // Assignment
+  /////////////////////////
+  for (auto gen : generators)
+    bench("operator=(const&)" + tostr(gen), [gen](auto& st) {
+      auto const size = st.range(0);
+      Container in1, in2;
+      std::generate_n(std::back_inserter(in1), size, gen);
+      std::generate_n(std::back_inserter(in2), size, gen);
+      DoNotOptimizeData(in1);
+      DoNotOptimizeData(in2);
+
+      // Assign from one of two containers in succession to avoid
+      // hitting a self-assignment corner-case
+      Container c(in1);
+      bool toggle = false;
+      for ([[maybe_unused]] auto _ : st) {
+        c = toggle ? in1 : in2;
+        toggle = !toggle;
+        DoNotOptimizeData(c);
+        DoNotOptimizeData(in1);
+        DoNotOptimizeData(in2);
+      }
+    });
+
+  // Benchmark Container::assign(input-iter, input-iter) when the container already contains
+  // the same number of elements that we're assigning. The intent is to check whether the
+  // implementation basically creates a new container from scratch or manages to reuse the
+  // pre-existing storage.
+  for (auto gen : generators)
+    bench("assign(input-iter, input-iter) (full container)" + tostr(gen), [gen](auto& st) {
+      auto const size = st.range(0);
+      std::vector<ValueType> in1, in2;
+      std::generate_n(std::back_inserter(in1), size, gen);
+      std::generate_n(std::back_inserter(in2), size, gen);
+      DoNotOptimizeData(in1);
+      DoNotOptimizeData(in2);
+
+      Container c(in1.begin(), in1.end());
+      bool toggle = false;
+      for ([[maybe_unused]] auto _ : st) {
+        std::vector<ValueType>& in = toggle ? in1 : in2;
+        auto first = in.data();
+        auto last = in.data() + in.size();
+        c.assign(cpp17_input_iterator(first), cpp17_input_iterator(last));
+        toggle = !toggle;
+        DoNotOptimizeData(c);
+      }
+    });
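The "reuse the pre-existing storage" intent above is easiest to see in code. A minimal sketch of one way assign() can recycle existing elements when the length of an input-iterator range is unknown (illustrative only; assign_reusing_storage is an invented name, not the libc++ implementation):

    // Overwrite in place, then shrink or grow; the container's capacity and
    // most of its elements are reused instead of being rebuilt from scratch.
    template <class Container, class InputIt>
    void assign_reusing_storage(Container& c, InputIt first, InputIt last) {
      auto out = c.begin();
      for (; out != c.end() && first != last; ++out, ++first)
        *out = *first;
      if (first == last)
        c.erase(out, c.end());      // new range was shorter: drop the tail
      else
        for (; first != last; ++first)
          c.push_back(*first);      // new range was longer: append the rest
    }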
+
+  /////////////////////////
+  // Insertion
+  /////////////////////////
+  for (auto gen : generators)
+    bench("insert(begin)" + tostr(gen), [gen](auto& st) {
+      auto const size = st.range(0);
+      std::vector<ValueType> in;
+      std::generate_n(std::back_inserter(in), size, gen);
+      DoNotOptimizeData(in);
+
+      Container c(in.begin(), in.end());
+      DoNotOptimizeData(c);
+
+      ValueType value = gen();
+      benchmark::DoNotOptimize(value);
+
+      for ([[maybe_unused]] auto _ : st) {
+        c.insert(c.begin(), value);
+        DoNotOptimizeData(c);
+
+        c.erase(std::prev(c.end())); // avoid growing indefinitely
+      }
+    });
+
+  if constexpr (std::random_access_iterator<typename Container::iterator>) {
+    for (auto gen : generators)
+      bench("insert(middle)" + tostr(gen), [gen](auto& st) {
+        auto const size = st.range(0);
+        std::vector<ValueType> in;
+        std::generate_n(std::back_inserter(in), size, gen);
+        DoNotOptimizeData(in);
+
+        Container c(in.begin(), in.end());
+        DoNotOptimizeData(c);
+
+        ValueType value = gen();
+        benchmark::DoNotOptimize(value);
+
+        for ([[maybe_unused]] auto _ : st) {
+          auto mid = c.begin() + (size / 2); // requires random-access iterators in order to make sense
+          c.insert(mid, value);
+          DoNotOptimizeData(c);
+
+          c.erase(c.end() - 1); // avoid growing indefinitely
+        }
+      });
+  }
+
+  if constexpr (requires(Container c) { c.reserve(0); }) {
+    // Insert at the start of a vector in a scenario where the vector already
+    // has enough capacity to hold all the elements we are inserting.
+    for (auto gen : generators)
+      bench("insert(begin, input-iter, input-iter) (no realloc)" + tostr(gen), [gen](auto& st) {
+        auto const size = st.range(0);
+        std::vector<ValueType> in;
+        std::generate_n(std::back_inserter(in), size, gen);
+        DoNotOptimizeData(in);
+        auto first = in.data();
+        auto last = in.data() + in.size();
+
+        const int small = 100; // arbitrary
+        Container c;
+        c.reserve(size + small); // ensure no reallocation
+        std::generate_n(std::back_inserter(c), small, gen);
+
+        for ([[maybe_unused]] auto _ : st) {
+          c.insert(c.begin(), cpp17_input_iterator(first), cpp17_input_iterator(last));
+          DoNotOptimizeData(c);
+
+          st.PauseTiming();
+          c.erase(c.begin() + small, c.end()); // avoid growing indefinitely
+          st.ResumeTiming();
+        }
+      });
+
+    // Insert at the start of a vector in a scenario where the vector already
+    // has almost enough capacity to hold all the elements we are inserting,
+    // but does need to reallocate.
+    for (auto gen : generators)
+      bench("insert(begin, input-iter, input-iter) (half filled)" + tostr(gen), [gen](auto& st) {
+        auto const size = st.range(0);
+        std::vector<ValueType> in;
+        std::generate_n(std::back_inserter(in), size, gen);
+        DoNotOptimizeData(in);
+        auto first = in.data();
+        auto last = in.data() + in.size();
+
+        const int overflow = size / 10; // 10% of elements won't fit in the vector when we insert
+        Container c;
+        for ([[maybe_unused]] auto _ : st) {
+          st.PauseTiming();
+          c = Container();
+          c.reserve(size);
+          std::generate_n(std::back_inserter(c), overflow, gen);
+          st.ResumeTiming();
+
+          c.insert(c.begin(), cpp17_input_iterator(first), cpp17_input_iterator(last));
+          DoNotOptimizeData(c);
+        }
+      });
+
+    // Insert at the start of a vector in a scenario where the vector can fit a few
+    // more elements, but needs to reallocate almost immediately to fit the remaining
+    // elements.
+    for (auto gen : generators)
+      bench("insert(begin, input-iter, input-iter) (near full)" + tostr(gen), [gen](auto& st) {
+        auto const size = st.range(0);
+        std::vector<ValueType> in;
+        std::generate_n(std::back_inserter(in), size, gen);
+        DoNotOptimizeData(in);
+        auto first = in.data();
+        auto last = in.data() + in.size();
+
+        auto const overflow = 9 * (size / 10); // 90% of elements won't fit in the vector when we insert
+        Container c;
+        for ([[maybe_unused]] auto _ : st) {
+          st.PauseTiming();
+          c = Container();
+          c.reserve(size);
+          std::generate_n(std::back_inserter(c), overflow, gen);
+          st.ResumeTiming();
+
+          c.insert(c.begin(), cpp17_input_iterator(first), cpp17_input_iterator(last));
+          DoNotOptimizeData(c);
+        }
+      });
+  }
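All three reserve() scenarios above probe the same question: what a range-insert at begin() does when the element count behind a pair of input iterators cannot be known up front. One classic strategy is sketched below, assuming a vector-like container with push_back() and size() (insert_at_begin is an invented name, not libc++'s actual algorithm):

    #include <algorithm>
    #include <cstddef>
    #include <iterator>

    // Append the incoming elements (reallocating geometrically as needed),
    // then rotate them to the front. The scenarios above control how many of
    // those push_backs overflow the existing capacity.
    template <class Container, class InputIt>
    void insert_at_begin(Container& c, InputIt first, InputIt last) {
      auto const old_size = static_cast<std::ptrdiff_t>(c.size());
      for (; first != last; ++first)
        c.push_back(*first);
      std::rotate(c.begin(), std::next(c.begin(), old_size), c.end());
    }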
+
+  /////////////////////////
+  // Variations of push_back
+  /////////////////////////
+  static constexpr bool has_push_back = requires(Container c, ValueType v) { c.push_back(v); };
+  static constexpr bool has_capacity = requires(Container c) { c.capacity(); };
+  static constexpr bool has_reserve = requires(Container c) { c.reserve(0); };
+  if constexpr (has_push_back) {
+    if constexpr (has_capacity) {
+      // For containers where we can observe capacity(), push_back a single element
+      // without reserving to ensure the container needs to grow
+      for (auto gen : generators)
+        bench("push_back() (growing)" + tostr(gen), [gen](auto& st) {
+          auto const size = st.range(0);
+          std::vector<ValueType> in;
+          std::generate_n(std::back_inserter(in), size, gen);
+          DoNotOptimizeData(in);
+
+          auto at_capacity = [](Container c) {
+            while (c.size() < c.capacity())
+              c.push_back(c.back());
+            return c;
+          };
+
+          std::vector<Container> c(BatchSize, at_capacity(Container(in.begin(), in.end())));
+          std::vector<Container> const original = c;
+
+          while (st.KeepRunningBatch(BatchSize)) {
+            for (std::size_t i = 0; i != BatchSize; ++i) {
+              c[i].push_back(in[i]);
+              DoNotOptimizeData(c[i]);
+            }
+
+            st.PauseTiming();
+            for (std::size_t i = 0; i != BatchSize; ++i) {
+              c[i] = at_capacity(Container(in.begin(), in.end()));
+              assert(c[i].size() == c[i].capacity());
+            }
+            st.ResumeTiming();
+          }
+        });
+    }
+
+    // For containers where we can reserve, push_back a single element after reserving to
+    // ensure the container doesn't grow
+    if constexpr (has_reserve) {
+      for (auto gen : generators)
+        bench("push_back() (with reserve)" + tostr(gen), [gen](auto& st) {
+          auto const size = st.range(0);
+          std::vector<ValueType> in;
+          std::generate_n(std::back_inserter(in), size, gen);
+          DoNotOptimizeData(in);
+
+          Container c(in.begin(), in.end());
+          // Ensure the container has enough capacity
+          c.reserve(c.size() + BatchSize);
+          DoNotOptimizeData(c);
+
+          while (st.KeepRunningBatch(BatchSize)) {
+            for (std::size_t i = 0; i != BatchSize; ++i) {
+              c.push_back(in[i]);
+            }
+            DoNotOptimizeData(c);
+
+            st.PauseTiming();
+            c.erase(c.end() - BatchSize, c.end());
+            st.ResumeTiming();
+          }
+        });
+    }
+
+    // push_back many elements: this is amortized constant for std::vector but not all containers
+    for (auto gen : generators)
+      bench("push_back() (many elements)" + tostr(gen), [gen](auto& st) {
+        auto const size = st.range(0);
+        std::vector<ValueType> in;
+        std::generate_n(std::back_inserter(in), size, gen);
+        DoNotOptimizeData(in);
+
+        Container c;
+        DoNotOptimizeData(c);
+        while (st.KeepRunningBatch(size)) {
+          for (int i = 0; i != size; ++i) {
+            c.push_back(in[i]);
+          }
+          DoNotOptimizeData(c);
+
+          st.PauseTiming();
+          c.clear();
+          st.ResumeTiming();
+        }
+      });
+  }
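The "amortized constant" remark above can be verified directly. A standalone program counting buffer relocations, assuming the usual geometric growth policy of std::vector (an implementation detail, but true of the major standard libraries):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Geometric growth yields O(log n) reallocations over n insertions,
    // which is what makes each push_back amortized O(1).
    int main() {
      std::vector<int> v;
      std::size_t reallocations = 0;
      const int* previous = v.data();
      for (int i = 0; i != 1 << 20; ++i) {
        v.push_back(i);
        if (v.data() != previous) {
          ++reallocations;
          previous = v.data();
        }
      }
      std::printf("%zu reallocations for %d push_backs\n", reallocations, 1 << 20);
      return 0;
    }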
+
+  /////////////////////////
+  // Erasure
+  /////////////////////////
+  for (auto gen : generators)
+    bench("erase(begin)" + tostr(gen), [gen](auto& st) {
+      auto const size = st.range(0);
+      std::vector<ValueType> in;
+      std::generate_n(std::back_inserter(in), size, gen);
+      DoNotOptimizeData(in);
+
+      Container c(in.begin(), in.end());
+      DoNotOptimizeData(c);
+
+      ValueType value = gen();
+      benchmark::DoNotOptimize(value);
+
+      for ([[maybe_unused]] auto _ : st) {
+        c.erase(c.begin());
+        DoNotOptimizeData(c);
+
+        c.insert(c.end(), value); // re-insert an element at the end to avoid needing a new container
+      }
+    });
+
+  if constexpr (std::random_access_iterator<typename Container::iterator>) {
+    for (auto gen : generators)
+      bench("erase(middle)" + tostr(gen), [gen](auto& st) {
+        auto const size = st.range(0);
+        std::vector<ValueType> in;
+        std::generate_n(std::back_inserter(in), size, gen);
+        DoNotOptimizeData(in);
+
+        Container c(in.begin(), in.end());
+        DoNotOptimizeData(c);
+
+        ValueType value = gen();
+        benchmark::DoNotOptimize(value);
+
+        for ([[maybe_unused]] auto _ : st) {
+          auto mid = c.begin() + (size / 2);
+          c.erase(mid);
+          DoNotOptimizeData(c);
+
+          c.insert(c.end(), value); // re-insert an element at the end to avoid needing a new container
+        }
+      });
+  }
+}
+
+} // namespace support
+
+#endif // TEST_BENCHMARKS_CONTAINERS_SEQUENCE_SEQUENCE_CONTAINER_BENCHMARKS_H
diff --git a/libcxx/test/benchmarks/containers/vector.bench.cpp b/libcxx/test/benchmarks/containers/sequence/vector.bench.cpp
similarity index 73%
rename from libcxx/test/benchmarks/containers/vector.bench.cpp
rename to libcxx/test/benchmarks/containers/sequence/vector.bench.cpp
index eef23d2981642..599db1d90fa9a 100644
--- a/libcxx/test/benchmarks/containers/vector.bench.cpp
+++ b/libcxx/test/benchmarks/containers/sequence/vector.bench.cpp
@@ -11,12 +11,12 @@
 #include <string>
 #include <vector>
 
-#include "container_benchmarks.h"
+#include "sequence_container_benchmarks.h"
 #include "benchmark/benchmark.h"
 
 int main(int argc, char** argv) {
-  ContainerBenchmarks::sequence_container_benchmarks<std::vector<int>>("std::vector<int>");
-  ContainerBenchmarks::sequence_container_benchmarks<std::vector<std::string>>("std::vector<std::string>");
+  support::sequence_container_benchmarks<std::vector<int>>("std::vector<int>");
+  support::sequence_container_benchmarks<std::vector<std::string>>("std::vector<std::string>");
 
   benchmark::Initialize(&argc, argv);
   benchmark::RunSpecifiedBenchmarks();
diff --git a/lld/COFF/MinGW.cpp b/lld/COFF/MinGW.cpp
index 8f1c1ebcc3f13..0786353b06432 100644
--- a/lld/COFF/MinGW.cpp
+++ b/lld/COFF/MinGW.cpp
@@ -92,6 +92,7 @@ AutoExporter::AutoExporter(
       "__fmode",
       "_environ",
       "___dso_handle",
+      "__load_config_used",
       // These are the MinGW names that differ from the standard
       // ones (lacking an extra underscore).
       "_DllMain@12",
@@ -109,6 +110,7 @@ AutoExporter::AutoExporter(
       "_fmode",
       "environ",
       "__dso_handle",
+      "_load_config_used",
       // These are the MinGW names that differ from the standard
      // ones (lacking an extra underscore).
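      // Both spellings of the load config symbol are excluded because the
      // structure is consumed by the OS loader from the image itself, so
      // there is no reason to auto-export it.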
"DllMain", @@ -117,6 +119,10 @@ AutoExporter::AutoExporter( }; excludeSymbolPrefixes.insert("_head_"); } + if (symtab.isEC()) { + excludeSymbols.insert("__chpe_metadata"); + excludeSymbolPrefixes.insert("__os_arm64x_"); + } } void AutoExporter::addWholeArchive(StringRef path) { diff --git a/lld/test/COFF/arm64ec-patchable-thunks.test b/lld/test/COFF/arm64ec-patchable-thunks.test index 5cebe7cc27ad6..1e1ff1f7f2ee4 100644 --- a/lld/test/COFF/arm64ec-patchable-thunks.test +++ b/lld/test/COFF/arm64ec-patchable-thunks.test @@ -5,6 +5,7 @@ RUN: llvm-mc -filetype=obj -triple=arm64ec-windows arm64ec-patchable.s -o arm64e RUN: llvm-mc -filetype=obj -triple=arm64ec-windows arm64ec-alias.s -o arm64ec-alias.obj RUN: llvm-mc -filetype=obj -triple=arm64ec-windows test-sec.s -o test-sec.obj RUN: llvm-mc -filetype=obj -triple=arm64ec-windows %S/Inputs/loadconfig-arm64ec.s -o loadconfig-arm64ec.obj +RUN: llvm-mc -filetype=obj -triple=aarch64-windows %S/Inputs/loadconfig-arm64.s -o loadconfig-arm64.obj RUN: lld-link -out:test.dll -machine:arm64ec arm64ec-patchable.obj test-sec.obj loadconfig-arm64ec.obj -dll -noentry @@ -59,6 +60,18 @@ RUN: not lld-link -out:test4.dll -machine:arm64ec test-sec.obj loadconfig-arm64e ERR: error: undefined symbol: EXP+#patchable_func +RUN: lld-link -out:testx.dll -machine:arm64x arm64ec-patchable.obj test-sec.obj \ +RUN: loadconfig-arm64.obj loadconfig-arm64ec.obj -dll -noentry +RUN: llvm-objdump -d testx.dll | FileCheck -check-prefix=PATCH-DISASM %s +RUN: llvm-readobj --coff-load-config testx.dll | FileCheck -check-prefix=PATCH-CHPE %s + + +RUN: lld-link -out:testx2.dll -machine:arm64x arm64ec-alias.obj loadconfig-arm64.obj \ +RUN: loadconfig-arm64ec.obj -dll -noentry +RUN: llvm-objdump -d testx2.dll | FileCheck -check-prefix=PATCH-DISASM %s +RUN: llvm-readobj --coff-load-config testx2.dll | FileCheck -check-prefix=PATCH-CHPE %s + + #--- arm64ec-patchable.s .section ".text", "x", discard, "#patchable_func$hp_target" .globl "#patchable_func$hp_target" diff --git a/lld/test/COFF/arm64x-export-all.s b/lld/test/COFF/arm64x-export-all.s index 831edfe0b6f88..06ea9ec50259e 100644 --- a/lld/test/COFF/arm64x-export-all.s +++ b/lld/test/COFF/arm64x-export-all.s @@ -15,16 +15,11 @@ // EXP-NEXT: AddressSize: 64bit // EXP-NEXT: Export { // EXP-NEXT: Ordinal: 1 -// EXP-NEXT: Name: _load_config_used -// EXP-NEXT: RVA: -// EXP-NEXT: } -// EXP-NEXT: Export { -// EXP-NEXT: Ordinal: 2 // EXP-NEXT: Name: sym // EXP-NEXT: RVA: 0x2000 // EXP-NEXT: } // EXP-NEXT: Export { -// EXP-NEXT: Ordinal: 3 +// EXP-NEXT: Ordinal: 2 // EXP-NEXT: Name: sym2 // EXP-NEXT: RVA: 0x2004 // EXP-NEXT: } @@ -34,31 +29,11 @@ // EXP-NEXT: AddressSize: 64bit // EXP-NEXT: Export { // EXP-NEXT: Ordinal: 1 -// EXP-NEXT: Name: __chpe_metadata -// EXP-NEXT: RVA: -// EXP-NEXT: } -// EXP-NEXT: Export { -// EXP-NEXT: Ordinal: 2 -// EXP-NEXT: Name: __os_arm64x_dispatch_icall -// EXP-NEXT: RVA: 0x12B0 -// EXP-NEXT: } -// EXP-NEXT: Export { -// EXP-NEXT: Ordinal: 3 -// EXP-NEXT: Name: __os_arm64x_dispatch_ret -// EXP-NEXT: RVA: -// EXP-NEXT: } -// EXP-NEXT: Export { -// EXP-NEXT: Ordinal: 4 -// EXP-NEXT: Name: _load_config_used -// EXP-NEXT: RVA: -// EXP-NEXT: } -// EXP-NEXT: Export { -// EXP-NEXT: Ordinal: 5 // EXP-NEXT: Name: sym // EXP-NEXT: RVA: 0x2008 // EXP-NEXT: } // EXP-NEXT: Export { -// EXP-NEXT: Ordinal: 6 +// EXP-NEXT: Ordinal: 2 // EXP-NEXT: Name: sym2 // EXP-NEXT: RVA: 0x200C // EXP-NEXT: } diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst index 5f0ae5ce8614c..f57c29ccdd588 100644 --- a/llvm/docs/LangRef.rst 
+++ b/llvm/docs/LangRef.rst @@ -16162,9 +16162,11 @@ trapping or setting ``errno``. The first result is the fractional part of the operand and the second result is the integral part of the operand. Both results have the same sign as the operand. -Not including exceptional inputs (listed below), `llvm.modf.*` is semantically +Not including exceptional inputs (listed below), ``llvm.modf.*`` is semantically equivalent to: +:: + %fp = frem %x, 1.0 ; Fractional part %ip = fsub %x, %fp ; Integral part diff --git a/llvm/docs/NVPTXUsage.rst b/llvm/docs/NVPTXUsage.rst index dec6ad4e54115..dcd0a3ac3639b 100644 --- a/llvm/docs/NVPTXUsage.rst +++ b/llvm/docs/NVPTXUsage.rst @@ -1060,6 +1060,81 @@ flavors of the instruction respectively. For more information, refer to the PTX ISA ``_. +'``llvm.nvvm.tcgen05.commit``' +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Syntax: +""""""" + +.. code-block:: llvm + + declare void @llvm.nvvm.tcgen05.commit.{cg1,cg2}(ptr %mbar) + declare void @llvm.nvvm.tcgen05.commit.shared.{cg1,cg2}(ptr addrspace(3) %mbar) + declare void @llvm.nvvm.tcgen05.commit.mc.{cg1,cg2}(ptr %mbar, i16 %mc) + declare void @llvm.nvvm.tcgen05.commit.mc.shared.{cg1,cg2}(ptr addrspace(3) %mbar, i16 %mc) + +Overview: +""""""""" + +The '``@llvm.nvvm.tcgen05.commit.*``' intrinsics correspond to the +``tcgen05.commit.{cg1/cg2}.mbarrier::arrive::one.*`` set of PTX instructions. +The ``tcgen05.commit`` is an asynchronous instruction which makes the mbarrier +object (``%mbar``) track the completion of all prior asynchronous tcgen05 operations. +The ``.mc`` variants allow signaling on the mbarrier objects of multiple CTAs +(specified by ``%mc``) in the cluster. The ``.cg1`` and ``.cg2`` variants generate +``cta_group::1`` and ``cta_group::2`` flavors of the instruction respectively. + +For more information, refer to the PTX ISA +``_. + +'``llvm.nvvm.tcgen05.wait``' +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Syntax: +""""""" + +.. code-block:: llvm + + declare void @llvm.nvvm.tcgen05.wait.ld() + declare void @llvm.nvvm.tcgen05.wait.st() + +Overview: +""""""""" + +The '``@llvm.nvvm.tcgen05.wait.ld/st``' intrinsics correspond to +the ``tcgen05.wait::{ld/st}.sync.aligned`` pair of PTX instructions. +The ``tcgen05.wait::ld`` causes the executing thread to block until +all prior ``tcgen05.ld`` operations issued by the executing thread +have completed. The ``tcgen05.wait::st`` causes the executing thread +to block until all prior ``tcgen05.st`` operations issued by the +executing thread have completed. + +For more information, refer to the PTX ISA +``_. + +'``llvm.nvvm.tcgen05.fence``' +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Syntax: +""""""" + +.. code-block:: llvm + + declare void @llvm.nvvm.tcgen05.fence.before.thread.sync() + declare void @llvm.nvvm.tcgen05.fence.after.thread.sync() + +Overview: +""""""""" + +The '``@llvm.nvvm.tcgen05.fence.*``' intrinsics correspond to +the ``tcgen05.fence::{before/after}_thread_sync`` pair of PTX instructions. +These instructions act as code motion fences for asynchronous tcgen05 +operations. + +For more information, refer to the PTX ISA +``_. 
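A hedged sketch of driving the three intrinsic families documented above from C++ through IRBuilder; emitCommitAndWait is an invented helper, and the Intrinsic:: enum names are assumed to follow the usual int_* to Intrinsic::* TableGen mapping from the IntrinsicsNVVM.td changes below:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/IntrinsicsNVPTX.h"

    void emitCommitAndWait(llvm::IRBuilder<> &B, llvm::Value *MBarPtr) {
      // Make the mbarrier track completion of all prior asynchronous tcgen05
      // operations (cta_group::1 flavor).
      B.CreateIntrinsic(llvm::Intrinsic::nvvm_tcgen05_commit_cg1, {}, {MBarPtr});
      // Block the executing thread until its prior tcgen05.ld ops complete.
      B.CreateIntrinsic(llvm::Intrinsic::nvvm_tcgen05_wait_ld, {}, {});
    }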
+
+
 Other Intrinsics
 ----------------
 
diff --git a/llvm/include/llvm/CodeGen/RDFGraph.h b/llvm/include/llvm/CodeGen/RDFGraph.h
index cf7344e8c3e74..8a93afbcb5491 100644
--- a/llvm/include/llvm/CodeGen/RDFGraph.h
+++ b/llvm/include/llvm/CodeGen/RDFGraph.h
@@ -865,8 +865,9 @@ struct DataFlowGraph {
   using BlockRefsMap = RegisterAggrMap<NodeId>;
 
   void buildStmt(Block BA, MachineInstr &In);
-  void recordDefsForDF(BlockRefsMap &PhiM, Block BA);
-  void buildPhis(BlockRefsMap &PhiM, Block BA);
+  void recordDefsForDF(BlockRefsMap &PhiM, BlockRefsMap &PhiClobberM, Block BA);
+  void buildPhis(BlockRefsMap &PhiM, Block BA,
+                 const DefStackMap &DefM = DefStackMap());
   void removeUnusedPhis();
 
   void pushClobbers(Instr IA, DefStackMap &DM);
@@ -874,7 +875,7 @@ struct DataFlowGraph {
   template <typename T> void linkRefUp(Instr IA, NodeAddr<T> TA, DefStack &DS);
   template <typename Predicate> void linkStmtRefs(DefStackMap &DefM, Stmt SA, Predicate P);
-  void linkBlockRefs(DefStackMap &DefM, Block BA);
+  void linkBlockRefs(DefStackMap &DefM, BlockRefsMap &PhiClobberM, Block BA);
 
   void unlinkUseDF(Use UA);
   void unlinkDefDF(Def DA);
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index abbe25bf0040a..f299a145ac73b 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -5083,6 +5083,38 @@ foreach cta_group = ["cg1", "cg2"] in {
 
   def int_nvvm_tcgen05_relinq_alloc_permit_ # cta_group :
       Intrinsic<[], [], [IntrConvergent, IntrInaccessibleMemOnly]>;
+
+  def int_nvvm_tcgen05_commit_ # cta_group : Intrinsic<[],
+    [llvm_ptr_ty],                     // mbar_ptr
+    [IntrConvergent, IntrInaccessibleMemOrArgMemOnly,
+     NoCapture<ArgIndex<0>>]>;
+
+  def int_nvvm_tcgen05_commit_shared_ # cta_group : Intrinsic<[],
+    [llvm_shared_ptr_ty],              // mbar_ptr
+    [IntrConvergent, IntrInaccessibleMemOrArgMemOnly,
+     NoCapture<ArgIndex<0>>]>;
+
+  def int_nvvm_tcgen05_commit_mc_ # cta_group : Intrinsic<[],
+    [llvm_ptr_ty, llvm_i16_ty],        // mbar_ptr, cta_mask
+    [IntrConvergent, IntrInaccessibleMemOrArgMemOnly,
+     NoCapture<ArgIndex<0>>]>;
+
+  def int_nvvm_tcgen05_commit_mc_shared_ # cta_group : Intrinsic<[],
+    [llvm_shared_ptr_ty, llvm_i16_ty], // mbar_ptr, cta_mask
+    [IntrConvergent, IntrInaccessibleMemOrArgMemOnly,
+     NoCapture<ArgIndex<0>>]>;
 }
 
+// Tcgen05 wait_ld/st intrinsics
+def int_nvvm_tcgen05_wait_ld : Intrinsic<[], [],
+  [IntrConvergent, IntrInaccessibleMemOnly]>;
+def int_nvvm_tcgen05_wait_st : Intrinsic<[], [],
+  [IntrConvergent, IntrInaccessibleMemOnly]>;
+
+// Tcgen05 Fence intrinsics
+def int_nvvm_tcgen05_fence_before_thread_sync : Intrinsic<[], [],
+  [IntrNoMem, IntrHasSideEffects]>;
+def int_nvvm_tcgen05_fence_after_thread_sync : Intrinsic<[], [],
+  [IntrNoMem, IntrHasSideEffects]>;
+
 } // let TargetPrefix = "nvvm"
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 6eba6c0f08c3f..8a9ad55366ee7 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -8599,7 +8599,7 @@ bool llvm::isKnownInversion(const Value *X, const Value *Y) {
     return false;
 
   // They must both have samesign flag or not.
-  if (cast<ICmpInst>(X)->hasSameSign() != cast<ICmpInst>(Y)->hasSameSign())
+  if (Pred1.hasSameSign() != Pred2.hasSameSign())
     return false;
 
   if (B == C)
@@ -8611,8 +8611,7 @@ bool llvm::isKnownInversion(const Value *X, const Value *Y) {
     return false;
 
   // Sign bits of two RHSCs should match.
- if (cast(X)->hasSameSign() && - RHSC1->isNonNegative() != RHSC2->isNonNegative()) + if (Pred1.hasSameSign() && RHSC1->isNonNegative() != RHSC2->isNonNegative()) return false; const auto CR1 = ConstantRange::makeExactICmpRegion(Pred1, *RHSC1); diff --git a/llvm/lib/CodeGen/RDFGraph.cpp b/llvm/lib/CodeGen/RDFGraph.cpp index 483e61db788f4..805b0ee7be0bc 100644 --- a/llvm/lib/CodeGen/RDFGraph.cpp +++ b/llvm/lib/CodeGen/RDFGraph.cpp @@ -966,15 +966,18 @@ void DataFlowGraph::build(const Config &config) { // Build a map "PhiM" which will contain, for each block, the set // of references that will require phi definitions in that block. + // "PhiClobberM" map contains references that require phis for clobbering defs BlockRefsMap PhiM(getPRI()); + BlockRefsMap PhiClobberM(getPRI()); for (Block BA : Blocks) - recordDefsForDF(PhiM, BA); + recordDefsForDF(PhiM, PhiClobberM, BA); for (Block BA : Blocks) buildPhis(PhiM, BA); // Link all the refs. This will recursively traverse the dominator tree. + // Phis for clobbering defs are added here. DefStackMap DM; - linkBlockRefs(DM, EA); + linkBlockRefs(DM, PhiClobberM, EA); // Finally, remove all unused phi nodes. if (!(BuildCfg.Options & BuildOptions::KeepDeadPhis)) @@ -1378,7 +1381,9 @@ void DataFlowGraph::buildStmt(Block BA, MachineInstr &In) { // Scan all defs in the block node BA and record in PhiM the locations of // phi nodes corresponding to these defs. -void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM, Block BA) { +// Clobbering defs in BA are recorded in PhiClobberM +void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM, + BlockRefsMap &PhiClobberM, Block BA) { // Check all defs from block BA and record them in each block in BA's // iterated dominance frontier. This information will later be used to // create phi nodes. @@ -1394,11 +1399,17 @@ void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM, Block BA) { // This is done to make sure that each defined reference gets only one // phi node, even if it is defined multiple times. RegisterAggr Defs(getPRI()); + RegisterAggr ClobberDefs(getPRI()); for (Instr IA : BA.Addr->members(*this)) { for (Ref RA : IA.Addr->members_if(IsDef, *this)) { RegisterRef RR = RA.Addr->getRegRef(*this); - if (RR.isReg() && isTracked(RR)) + if (!isTracked(RR)) + continue; + if (RR.isReg()) Defs.insert(RR); + // Clobbering def + else if (RR.isMask()) + ClobberDefs.insert(RR); } } @@ -1416,12 +1427,14 @@ void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM, Block BA) { for (auto *DB : IDF) { Block DBA = findBlock(DB); PhiM[DBA.Id].insert(Defs); + PhiClobberM[DBA.Id].insert(ClobberDefs); } } // Given the locations of phi nodes in the map PhiM, create the phi nodes // that are located in the block node BA. -void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, Block BA) { +void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, Block BA, + const DefStackMap &DefM) { // Check if this blocks has any DF defs, i.e. if there are any defs // that this block is in the iterated dominance frontier of. 
auto HasDF = PhiM.find(BA.Id); @@ -1434,10 +1447,37 @@ void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, Block BA) { for (MachineBasicBlock *PB : MBB->predecessors()) Preds.push_back(findBlock(PB)); + RegisterAggr PhiDefs(getPRI()); + // DefM will be non empty when we are building phis + // for clobbering defs + if (!DefM.empty()) { + for (Instr IA : BA.Addr->members_if(IsPhi, *this)) { + for (Def DA : IA.Addr->members_if(IsDef, *this)) { + auto DR = DA.Addr->getRegRef(*this); + PhiDefs.insert(DR); + } + } + } + + MachineRegisterInfo &MRI = MF.getRegInfo(); const RegisterAggr &Defs = PhiM[BA.Id]; uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving; for (RegisterRef RR : Defs.refs()) { + if (!DefM.empty()) { + auto F = DefM.find(RR.Reg); + // Do not create a phi for unallocatable registers, or for registers + // that are never livein to BA. + // If a phi exists for RR, do not create another. + if (!MRI.isAllocatable(RR.Reg) || PhiDefs.hasCoverOf(RR) || + F == DefM.end() || F->second.empty()) + continue; + // Do not create a phi, if all reaching defs are clobbering + auto RDef = F->second.top(); + if (RDef->Addr->getFlags() & NodeAttrs::Clobbering) + continue; + PhiDefs.insert(RR); + } Phi PA = newPhi(BA); PA.Addr->addMember(newDef(PA, RR, PhiFlags), *this); @@ -1576,7 +1616,15 @@ void DataFlowGraph::linkStmtRefs(DefStackMap &DefM, Stmt SA, Predicate P) { // Create data-flow links for all instructions in the block node BA. This // will include updating any phi nodes in BA. -void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, Block BA) { +void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, BlockRefsMap &PhiClobberM, + Block BA) { + // Create phi nodes for clobbering defs. + // Since a huge number of registers can get clobbered, it would result in many + // phi nodes being created in the graph. Only create phi nodes that have a non + // clobbering reaching def. Use DefM to get not clobbering defs reaching a + // block. + buildPhis(PhiClobberM, BA, DefM); + // Push block delimiters. markBlock(BA.Id, DefM); @@ -1613,7 +1661,7 @@ void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, Block BA) { for (auto *I : *N) { MachineBasicBlock *SB = I->getBlock(); Block SBA = findBlock(SB); - linkBlockRefs(DefM, SBA); + linkBlockRefs(DefM, PhiClobberM, SBA); } // Link the phi uses from the successor blocks. 
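The machinery being extended above is the textbook iterated-dominance-frontier phi placement. A simplified standalone sketch with toy types (Block, RegSet and placePhis are invented for illustration; the real code works on the RDF node graph):

    #include <map>
    #include <set>
    #include <vector>

    using RegSet = std::set<unsigned>;

    struct Block {
      std::vector<Block*> idf; // iterated dominance frontier, precomputed
      RegSet defs;             // registers defined in this block
    };

    // Every block that defines a register R contributes a phi candidate for R
    // to each block in its IDF; duplicates collapse in the set, mirroring how
    // PhiM (and now PhiClobberM) accumulate per-block register aggregates.
    std::map<Block*, RegSet> placePhis(const std::vector<Block*>& blocks) {
      std::map<Block*, RegSet> phiM;
      for (Block* b : blocks)
        for (Block* df : b->idf)
          phiM[df].insert(b->defs.begin(), b->defs.end());
      return phiM;
    }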
diff --git a/llvm/lib/MC/ELFObjectWriter.cpp b/llvm/lib/MC/ELFObjectWriter.cpp index 5f586fe19a5bb..68e7f1785fa23 100644 --- a/llvm/lib/MC/ELFObjectWriter.cpp +++ b/llvm/lib/MC/ELFObjectWriter.cpp @@ -71,9 +71,12 @@ STATISTIC(StrtabBytes, "Total size of SHT_STRTAB sections"); STATISTIC(SymtabBytes, "Total size of SHT_SYMTAB sections"); STATISTIC(RelocationBytes, "Total size of relocation sections"); STATISTIC(DynsymBytes, "Total size of SHT_DYNSYM sections"); -STATISTIC(DebugBytes, "Total size of debug info sections"); +STATISTIC( + DebugBytes, + "Total size of debug info sections (not including those written to .dwo)"); STATISTIC(UnwindBytes, "Total size of unwind sections"); STATISTIC(OtherBytes, "Total size of uncategorized sections"); +STATISTIC(DwoBytes, "Total size of sections written to .dwo file"); } // namespace stats @@ -969,7 +972,9 @@ void ELFWriter::writeSectionHeaders(const MCAssembler &Asm) { return Section->getFlags() & Flag; }; - if (Section->getName().starts_with(".debug")) { + if (Mode == DwoOnly) { + stats::DwoBytes += Size; + } else if (Section->getName().starts_with(".debug")) { stats::DebugBytes += Size; } else if (Section->getName().starts_with(".eh_frame")) { stats::UnwindBytes += Size; diff --git a/llvm/lib/ObjectYAML/DWARFEmitter.cpp b/llvm/lib/ObjectYAML/DWARFEmitter.cpp index 421dfe7dfa30e..ec5e08082b0ca 100644 --- a/llvm/lib/ObjectYAML/DWARFEmitter.cpp +++ b/llvm/lib/ObjectYAML/DWARFEmitter.cpp @@ -96,12 +96,11 @@ Error DWARFYAML::emitDebugStr(raw_ostream &OS, const DWARFYAML::Data &DI) { StringRef DWARFYAML::Data::getAbbrevTableContentByIndex(uint64_t Index) const { assert(Index < DebugAbbrev.size() && "Index should be less than the size of DebugAbbrev array"); - auto It = AbbrevTableContents.find(Index); - if (It != AbbrevTableContents.cend()) + auto [It, Inserted] = AbbrevTableContents.try_emplace(Index); + if (!Inserted) return It->second; - std::string AbbrevTableBuffer; - raw_string_ostream OS(AbbrevTableBuffer); + raw_string_ostream OS(It->second); uint64_t AbbrevCode = 0; for (const DWARFYAML::Abbrev &AbbrevDecl : DebugAbbrev[Index].Table) { @@ -123,9 +122,7 @@ StringRef DWARFYAML::Data::getAbbrevTableContentByIndex(uint64_t Index) const { // consisting of a 0 byte for the abbreviation code. 
OS.write_zeros(1); - AbbrevTableContents.insert({Index, AbbrevTableBuffer}); - - return AbbrevTableContents[Index]; + return It->second; } Error DWARFYAML::emitDebugAbbrev(raw_ostream &OS, const DWARFYAML::Data &DI) { diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 7f6da4fa38f90..34464d317beaf 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -22363,6 +22363,9 @@ static SDValue performZExtDeinterleaveShuffleCombine(SDNode *N, if (!IsDeInterleave) IsUndefDeInterleave = Shuffle->getOperand(1).isUndef() && + all_of( + Shuffle->getMask().slice(ExtOffset, VT.getVectorNumElements() / 2), + [](int M) { return M < 0; }) && ShuffleVectorInst::isDeInterleaveMaskOfFactor( Shuffle->getMask().slice(ExtOffset + VT.getVectorNumElements() / 2, VT.getVectorNumElements() / 2), diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td index ce73e0ca361d9..afafc2ecccfaf 100644 --- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td +++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td @@ -212,7 +212,13 @@ defm V_BFE_U32 : VOP3Inst <"v_bfe_u32", VOP3_Profile, AMDGP defm V_BFE_I32 : VOP3Inst <"v_bfe_i32", VOP3_Profile, AMDGPUbfe_i32>; defm V_BFI_B32 : VOP3Inst <"v_bfi_b32", VOP3_Profile, AMDGPUbfi>; defm V_ALIGNBIT_B32 : VOP3Inst <"v_alignbit_b32", VOP3_Profile, fshr>; + +let True16Predicate = NotHasTrue16BitInsts in defm V_ALIGNBYTE_B32 : VOP3Inst <"v_alignbyte_b32", VOP3_Profile, int_amdgcn_alignbyte>; +let True16Predicate = UseRealTrue16Insts in +defm V_ALIGNBYTE_B32_t16 : VOP3Inst <"v_alignbyte_b32_t16", VOP3_Profile_True16>; +let True16Predicate = UseFakeTrue16Insts in +defm V_ALIGNBYTE_B32_fake16 : VOP3Inst <"v_alignbyte_b32_fake16", VOP3_Profile_Fake16>; // XXX - No FPException seems suspect but manual doesn't say it does let mayRaiseFPException = 0 in { @@ -250,6 +256,25 @@ let SchedRW = [WriteDoubleAdd], FPDPRounding = 1 in { } // End SchedRW = [WriteDoubleAdd], FPDPRounding = 1 } // End isReMaterializable = 1 +let True16Predicate = UseFakeTrue16Insts in +def : GCNPat < +(i32 (int_amdgcn_alignbyte (i32 (VOP3OpSelMods i32:$src0, i32:$src0_modifiers)), + (i32 (VOP3OpSelMods i32:$src1, i32:$src1_modifiers)), + (i32 (VOP3OpSelMods i32:$src2, i32:$src2_modifiers)))), +(V_ALIGNBYTE_B32_fake16_e64 i32:$src0_modifiers, VSrc_b32:$src0, + i32:$src1_modifiers, VSrc_b32:$src1, + i32:$src2_modifiers, VGPR_32:$src2) +>; + +let True16Predicate = UseRealTrue16Insts in +def : GCNPat < +(i32 (int_amdgcn_alignbyte (i32 (VOP3OpSelMods i32:$src0, i32:$src0_modifiers)), + (i32 (VOP3OpSelMods i32:$src1, i32:$src1_modifiers)), + (i32 (VOP3OpSelMods i32:$src2, i32:$src2_modifiers)))), +(V_ALIGNBYTE_B32_t16_e64 i32:$src0_modifiers, VSrc_b32:$src0, + i32:$src1_modifiers, VSrc_b32:$src1, + i32:$src2_modifiers, (i16 (EXTRACT_SUBREG VGPR_32:$src2, lo16))) +>; let mayRaiseFPException = 0 in { // Seems suspicious but manual doesn't say it does. 
let SchedRW = [WriteFloatFMA, WriteSALU] in @@ -1690,7 +1715,7 @@ defm V_FMA_F32 : VOP3_Realtriple_gfx11_gfx12<0x213>; defm V_FMA_F64 : VOP3_Real_Base_gfx11_gfx12<0x214>; defm V_LERP_U8 : VOP3_Realtriple_gfx11_gfx12<0x215>; defm V_ALIGNBIT_B32 : VOP3_Realtriple_gfx11_gfx12<0x216>; -defm V_ALIGNBYTE_B32 : VOP3_Realtriple_gfx11_gfx12<0x217>; +defm V_ALIGNBYTE_B32 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x217, "v_alignbyte_b32">; defm V_MULLIT_F32 : VOP3_Realtriple_gfx11_gfx12<0x218>; defm V_MIN3_F32 : VOP3_Realtriple_gfx11<0x219>; defm V_MIN3_I32 : VOP3_Realtriple_gfx11_gfx12<0x21a>; diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index dad91c6a969e8..325dfb33762a6 100644 --- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -8652,6 +8652,37 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst, "coprocessor must be configured as GCP"); break; } + + case ARM::VTOSHH: + case ARM::VTOUHH: + case ARM::VTOSLH: + case ARM::VTOULH: + case ARM::VTOSHS: + case ARM::VTOUHS: + case ARM::VTOSLS: + case ARM::VTOULS: + case ARM::VTOSHD: + case ARM::VTOUHD: + case ARM::VTOSLD: + case ARM::VTOULD: + case ARM::VSHTOH: + case ARM::VUHTOH: + case ARM::VSLTOH: + case ARM::VULTOH: + case ARM::VSHTOS: + case ARM::VUHTOS: + case ARM::VSLTOS: + case ARM::VULTOS: + case ARM::VSHTOD: + case ARM::VUHTOD: + case ARM::VSLTOD: + case ARM::VULTOD: { + if (Operands[MnemonicOpsEndInd]->getReg() != + Operands[MnemonicOpsEndInd + 1]->getReg()) + return Error(Operands[MnemonicOpsEndInd]->getStartLoc(), + "source and destination registers must be the same"); + break; + } } return false; diff --git a/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h b/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h index cb4a7a8c393fc..c5df02fa3b89c 100644 --- a/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h +++ b/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h @@ -70,12 +70,12 @@ class HexagonMachineFunctionInfo : public MachineFunctionInfo { PacketInfo[MI] |= Hexagon::EndPacket; } bool isStartPacket(const MachineInstr* MI) const { - return (PacketInfo.count(MI) && - (PacketInfo.find(MI)->second & Hexagon::StartPacket)); + auto It = PacketInfo.find(MI); + return It != PacketInfo.end() && (It->second & Hexagon::StartPacket); } bool isEndPacket(const MachineInstr* MI) const { - return (PacketInfo.count(MI) && - (PacketInfo.find(MI)->second & Hexagon::EndPacket)); + auto It = PacketInfo.find(MI); + return It != PacketInfo.end() && (It->second & Hexagon::EndPacket); } void setHasClobberLR(bool v) { HasClobberLR = v; } bool hasClobberLR() const { return HasClobberLR; } diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td index 06c629c01d9ab..5331f36ad0999 100644 --- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td +++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td @@ -7622,4 +7622,51 @@ multiclass TCGEN05_RELINQ_PERMIT_INTR { defm TCGEN05_RELINQ_CG1: TCGEN05_RELINQ_PERMIT_INTR<"1", int_nvvm_tcgen05_relinq_alloc_permit_cg1>; defm TCGEN05_RELINQ_CG2: TCGEN05_RELINQ_PERMIT_INTR<"2", int_nvvm_tcgen05_relinq_alloc_permit_cg2>; +def tcgen05_wait_ld: NVPTXInst<(outs), (ins), "tcgen05.wait::ld.sync.aligned;", + [(int_nvvm_tcgen05_wait_ld)]>, + Requires<[hasTcgen05Instructions]>; + +def tcgen05_wait_st: NVPTXInst<(outs), (ins), "tcgen05.wait::st.sync.aligned;", + [(int_nvvm_tcgen05_wait_st)]>, + Requires<[hasTcgen05Instructions]>; + +multiclass TCGEN05_COMMIT_INTR { + defvar prefix = 
"tcgen05.commit.cta_group::" # num; + defvar suffix = ".mbarrier::arrive::one.shared::cluster"; + + defvar intr_suffix = !if(!eq(AS, "shared"), "_shared", "") # "_cg" # num; + defvar Intr = !cast("int_nvvm_tcgen05_commit" # intr_suffix); + defvar IntrMC = !cast("int_nvvm_tcgen05_commit_mc" # intr_suffix); + + def NAME : NVPTXInst<(outs), (ins rc:$mbar), + !strconcat(prefix, suffix, ".b64 [$mbar];"), + [(Intr rc:$mbar)]>, + Requires<[hasTcgen05Instructions]>; + def NAME # _MC : NVPTXInst<(outs), (ins rc:$mbar, Int16Regs:$mc), + !strconcat(prefix, suffix, ".multicast::cluster.b64 [$mbar], $mc;"), + [(IntrMC rc:$mbar, Int16Regs:$mc)]>, + Requires<[hasTcgen05Instructions]>; +} + +defm TCGEN05_COMMIT_CG1 : TCGEN05_COMMIT_INTR; +defm TCGEN05_COMMIT_CG2 : TCGEN05_COMMIT_INTR; +defm TCGEN05_COMMIT_S64_CG1 : TCGEN05_COMMIT_INTR; +defm TCGEN05_COMMIT_S64_CG2 : TCGEN05_COMMIT_INTR; +defm TCGEN05_COMMIT_S32_CG1 : TCGEN05_COMMIT_INTR; +defm TCGEN05_COMMIT_S32_CG2 : TCGEN05_COMMIT_INTR; + } // isConvergent + +let hasSideEffects = 1 in { + +def tcgen05_fence_before_thread_sync: NVPTXInst<(outs), (ins), + "tcgen05.fence::before_thread_sync;", + [(int_nvvm_tcgen05_fence_before_thread_sync)]>, + Requires<[hasTcgen05Instructions]>; + +def tcgen05_fence_after_thread_sync: NVPTXInst<(outs), (ins), + "tcgen05.fence::after_thread_sync;", + [(int_nvvm_tcgen05_fence_after_thread_sync)]>, + Requires<[hasTcgen05Instructions]>; + +} // hasSideEffects diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp index d050194142a47..ea6ca3b8f9a2d 100644 --- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp +++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp @@ -1116,18 +1116,21 @@ struct RISCVOperand final : public MCParsedAsmOperand { switch (Kind) { case KindTy::Immediate: - OS << *getImm(); + OS << ""; break; case KindTy::FPImmediate: + OS << ""; break; case KindTy::Register: - OS << ""; + OS << "" : ")>"); break; case KindTy::Token: OS << "'" << getToken() << "'"; break; case KindTy::SystemRegister: - OS << "'; + OS << ""; break; case KindTy::VType: OS << "getIntrinsicID() == Intrinsic::spv_resource_getpointer) { auto *ImageType = cast(II->getOperand(0)->getType()); assert(ImageType->getTargetExtName() == "spirv.Image"); - Ty = ImageType->getTypeParameter(0); + (void)ImageType; + if (II->hasOneUse()) { + auto *U = *II->users().begin(); + Ty = cast(U)->getAccessType(); + assert(Ty && "Unable to get type for resource pointer."); + } } else if (Function *CalledF = CI->getCalledFunction()) { std::string DemangledName = getOclOrSpirvBuiltinDemangledName(CalledF->getName()); diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp index c6f015112e59d..1c2e1531e47d8 100644 --- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp +++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp @@ -564,21 +564,20 @@ unsigned int GVNHoist::rank(const Value *V) const { } bool GVNHoist::hasEH(const BasicBlock *BB) { - auto It = BBSideEffects.find(BB); - if (It != BBSideEffects.end()) + auto [It, Inserted] = BBSideEffects.try_emplace(BB); + if (!Inserted) return It->second; if (BB->isEHPad() || BB->hasAddressTaken()) { - BBSideEffects[BB] = true; + It->second = true; return true; } if (BB->getTerminator()->mayThrow()) { - BBSideEffects[BB] = true; + It->second = true; return true; } - BBSideEffects[BB] = false; return false; } diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index 
a1b78fa244e17..bc80c5ea0b1b2 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -720,6 +720,23 @@ Value *VPInstruction::generate(VPTransformState &State) {
 
 InstructionCost VPInstruction::computeCost(ElementCount VF,
                                            VPCostContext &Ctx) const {
+  if (Instruction::isBinaryOp(getOpcode())) {
+    if (!getUnderlyingValue()) {
+      // TODO: Compute cost for VPInstructions without underlying values once
+      // the legacy cost model has been retired.
+      return 0;
+    }
+
+    assert(!doesGeneratePerAllLanes() &&
+           "Should only generate a vector value or single scalar, not scalars "
+           "for all lanes.");
+    Type *ResTy = Ctx.Types.inferScalarType(this);
+    if (!vputils::onlyFirstLaneUsed(this))
+      ResTy = toVectorTy(ResTy, VF);
+
+    return Ctx.TTI.getArithmeticInstrCost(getOpcode(), ResTy, Ctx.CostKind);
+  }
+
   switch (getOpcode()) {
   case VPInstruction::AnyOf: {
     auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
@@ -727,7 +744,10 @@ InstructionCost VPInstruction::computeCost(ElementCount VF,
         Instruction::Or, cast<VectorType>(VecTy), std::nullopt, Ctx.CostKind);
   }
   default:
-    // TODO: Fill out other opcodes!
+    // TODO: Compute the cost of other VPInstructions once the legacy cost
+    // model has been retired.
+    assert(!getUnderlyingValue() &&
+           "unexpected VPInstruction with underlying value");
     return 0;
   }
 }
diff --git a/llvm/test/Analysis/ScalarEvolution/infer-via-ranges.ll b/llvm/test/Analysis/ScalarEvolution/infer-via-ranges.ll
index 9aa096b952be5..9cf3e06d58a88 100644
--- a/llvm/test/Analysis/ScalarEvolution/infer-via-ranges.ll
+++ b/llvm/test/Analysis/ScalarEvolution/infer-via-ranges.ll
@@ -1,22 +1,31 @@
-; RUN: opt -passes=indvars -S < %s  | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes='print<scalar-evolution>' \
+; RUN:   -scalar-evolution-classify-expressions=0 -disable-output %s 2>&1 | FileCheck %s
 
 define void @infer_via_ranges(ptr %arr, i32 %n) {
-; CHECK-LABEL: @infer_via_ranges
+; CHECK-LABEL: 'infer_via_ranges'
+; CHECK-NEXT:  Determining loop execution counts for: @infer_via_ranges
+; CHECK-NEXT:  Loop %loop: backedge-taken count is ((-1 + %n) umin %n)
+; CHECK-NEXT:    exit count for loop: %n
+; CHECK-NEXT:    exit count for in.bounds: (-1 + %n)
+; CHECK-NEXT:  Loop %loop: constant max backedge-taken count is i32 -2147483648
+; CHECK-NEXT:  Loop %loop: symbolic max backedge-taken count is ((-1 + %n) umin %n)
+; CHECK-NEXT:    symbolic max exit count for loop: %n
+; CHECK-NEXT:    symbolic max exit count for in.bounds: (-1 + %n)
+; CHECK-NEXT:  Loop %loop: Trip multiple is 1
+;
 entry:
   %first.itr.check = icmp sgt i32 %n, 0
   %start = sub i32 %n, 1
   br i1 %first.itr.check, label %loop, label %exit
 
 loop:
-; CHECK-LABEL: loop:
   %idx = phi i32 [ %start, %entry ] , [ %idx.dec, %in.bounds ]
   %idx.dec = sub i32 %idx, 1
   %abc = icmp sge i32 %idx, 0
-; CHECK: br i1 true, label %in.bounds, label %out.of.bounds
   br i1 %abc, label %in.bounds, label %out.of.bounds
 
 in.bounds:
-; CHECK-LABEL: in.bounds:
   %addr = getelementptr i32, ptr %arr, i32 %idx
   store i32 0, ptr %addr
   %next = icmp sgt i32 %idx.dec, -1
diff --git a/llvm/test/CodeGen/AArch64/zext-shuffle.ll b/llvm/test/CodeGen/AArch64/zext-shuffle.ll
index 2965996ddcb02..20d2071d7fe54 100644
--- a/llvm/test/CodeGen/AArch64/zext-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/zext-shuffle.ll
@@ -543,3 +543,146 @@ define <8 x double> @uitofp_load_fadd(ptr %p) {
   ret <8 x double> %c
 }
 
+define <4 x i32> @isUndefDeInterleave_b0(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL:
isUndefDeInterleave_b0: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.2d, #0x00ffff0000ffff +; CHECK-NEXT: uzp1 v0.4s, v0.4s, v0.4s +; CHECK-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_b1(<8 x i16> %a) { +; CHECK-LABEL: isUndefDeInterleave_b1: +; CHECK: // %bb.0: +; CHECK-NEXT: uzp1 v0.4s, v0.4s, v0.4s +; CHECK-NEXT: ushr v0.4s, v0.4s, #16 +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_b2(<8 x i16> %a) { +; CHECK-LABEL: isUndefDeInterleave_b2: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.2d, #0x00ffff0000ffff +; CHECK-NEXT: uzp2 v0.4s, v0.4s, v0.4s +; CHECK-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_b3(<8 x i16> %a) { +; CHECK-LABEL: isUndefDeInterleave_b3: +; CHECK: // %bb.0: +; CHECK-NEXT: uzp2 v0.4s, v0.4s, v0.4s +; CHECK-NEXT: ushr v0.4s, v0.4s, #16 +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_t0(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: isUndefDeInterleave_t0: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.2d, #0x00ffff0000ffff +; CHECK-NEXT: uzp1 v0.4s, v0.4s, v0.4s +; CHECK-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_t1(<8 x i16> %a) { +; CHECK-LABEL: isUndefDeInterleave_t1: +; CHECK: // %bb.0: +; CHECK-NEXT: uzp1 v0.4s, v0.4s, v0.4s +; CHECK-NEXT: ushr v0.4s, v0.4s, #16 +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_t2(<8 x i16> %a) { +; CHECK-LABEL: isUndefDeInterleave_t2: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.2d, #0x00ffff0000ffff +; CHECK-NEXT: uzp2 v0.4s, v0.4s, v0.4s +; CHECK-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_t3(<8 x i16> %a) { +; CHECK-LABEL: isUndefDeInterleave_t3: +; CHECK: // %bb.0: +; CHECK-NEXT: uzp2 v0.4s, v0.4s, v0.4s +; CHECK-NEXT: ushr v0.4s, v0.4s, #16 +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_b0_bad(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: isUndefDeInterleave_b0_bad: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI40_0 +; 
CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI40_0] +; CHECK-NEXT: tbl v0.16b, { v0.16b }, v1.16b +; CHECK-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define <4 x i32> @isUndefDeInterleave_t1_bad(<8 x i16> %a) { +; CHECK-LABEL: isUndefDeInterleave_t1_bad: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI41_0 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI41_0] +; CHECK-NEXT: tbl v0.16b, { v0.16b }, v1.16b +; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0 +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %s2 = shufflevector <8 x i16> %2, <8 x i16> poison, <4 x i32> + %3 = zext <4 x i16> %s2 to <4 x i32> + ret <4 x i32> %3 +} + +define i16 @undeftop(<8 x i16> %0) { +; CHECK-LABEL: undeftop: +; CHECK: // %bb.0: +; CHECK-NEXT: dup v0.8h, v0.h[4] +; CHECK-NEXT: uaddl v0.4s, v0.4h, v0.4h +; CHECK-NEXT: xtn v0.4h, v0.4s +; CHECK-NEXT: umov w0, v0.h[0] +; CHECK-NEXT: ret + %2 = shufflevector <8 x i16> %0, <8 x i16> zeroinitializer, <8 x i32> + %3 = zext <8 x i16> %2 to <8 x i64> + %new0 = add <8 x i64> %3, %3 + %last = trunc <8 x i64> %new0 to <8 x i16> + %4 = extractelement <8 x i16> %last, i32 0 + ret i16 %4 +} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.alignbyte.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.alignbyte.ll index 8b16fef915a79..07421afde7622 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.alignbyte.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.alignbyte.ll @@ -1,14 +1,128 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s declare i32 @llvm.amdgcn.alignbyte(i32, i32, i32) #0 -; GCN-LABEL: {{^}}v_alignbyte_b32: -; GCN: v_alignbyte_b32 {{[vs][0-9]+}}, {{[vs][0-9]+}}, {{[vs][0-9]+}} define amdgpu_kernel void @v_alignbyte_b32(ptr addrspace(1) %out, i32 %src1, i32 %src2, i32 %src3) #1 { +; GCN-LABEL: v_alignbyte_b32: +; GCN: ; %bb.0: +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0xb +; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x9 +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: v_mov_b32_e32 v0, s1 +; GCN-NEXT: v_mov_b32_e32 v1, s2 +; GCN-NEXT: v_alignbyte_b32 v0, s0, v0, v1 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_endpgm +; +; GFX11-TRUE16-LABEL: v_alignbyte_b32: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c +; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x24 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, 0 +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, s2 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_alignbyte_b32 v0, s0, s1, v0.l +; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[4:5] +; GFX11-TRUE16-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: v_alignbyte_b32: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c +; GFX11-FAKE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; 
GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_alignbyte_b32 v0, s0, s1, v0 +; GFX11-FAKE16-NEXT: global_store_b32 v1, v0, s[4:5] +; GFX11-FAKE16-NEXT: s_endpgm %val = call i32 @llvm.amdgcn.alignbyte(i32 %src1, i32 %src2, i32 %src3) #0 store i32 %val, ptr addrspace(1) %out ret void } +define amdgpu_kernel void @v_alignbyte_b32_2(ptr addrspace(1) %out, ptr addrspace(1) %src1, ptr addrspace(1) %src2, i32 %src3) #1 { +; GCN-LABEL: v_alignbyte_b32_2: +; GCN: ; %bb.0: +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GCN-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd +; GCN-NEXT: s_load_dword s16, s[4:5], 0xf +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s14, 0 +; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GCN-NEXT: v_mov_b32_e32 v1, 0 +; GCN-NEXT: s_mov_b32 s15, s7 +; GCN-NEXT: s_mov_b64 s[10:11], s[14:15] +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b64 s[12:13], s[2:3] +; GCN-NEXT: buffer_load_dword v2, v[0:1], s[12:15], 0 addr64 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_mov_b32 s4, s0 +; GCN-NEXT: s_mov_b32 s5, s1 +; GCN-NEXT: v_alignbyte_b32 v0, v2, v0, s16 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_endpgm +; +; GFX11-TRUE16-LABEL: v_alignbyte_b32_2: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-TRUE16-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: global_load_b32 v1, v0, s[2:3] glc dlc +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: global_load_b32 v2, v0, s[6:7] glc dlc +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: s_load_b32 s2, s[4:5], 0x3c +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, s2 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_alignbyte_b32 v0, v1, v2, v0.l +; GFX11-TRUE16-NEXT: global_store_b32 v3, v0, s[0:1] +; GFX11-TRUE16-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: v_alignbyte_b32_2: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, 0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: global_load_b32 v1, v0, s[2:3] glc dlc +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: global_load_b32 v0, v0, s[6:7] glc dlc +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: s_load_b32 s2, s[4:5], 0x3c +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_alignbyte_b32 v0, v1, v0, s2 +; GFX11-FAKE16-NEXT: global_store_b32 v2, v0, s[0:1] +; GFX11-FAKE16-NEXT: s_endpgm + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %a.gep = getelementptr inbounds i32, ptr addrspace(1) %src1, i32 %tid + %b.gep = getelementptr inbounds i32, ptr addrspace(1) %src2, i32 %tid + %a.val = load volatile i32, ptr addrspace(1) %a.gep + %b.val = load volatile i32, ptr addrspace(1) %b.gep + 
+ %val = call i32 @llvm.amdgcn.alignbyte(i32 %a.val, i32 %b.val, i32 %src3) #0 + store i32 %val, ptr addrspace(1) %out + ret void +} + attributes #0 = { nounwind readnone } attributes #1 = { nounwind } diff --git a/llvm/test/CodeGen/Hexagon/rdf-copy-clobber.mir b/llvm/test/CodeGen/Hexagon/rdf-copy-clobber.mir new file mode 100644 index 0000000000000..e0676a143eefe --- /dev/null +++ b/llvm/test/CodeGen/Hexagon/rdf-copy-clobber.mir @@ -0,0 +1,143 @@ +# RUN: llc -march=hexagon -run-pass=hexagon-rdf-opt -hexagon-rdf-dump -verify-machineinstrs -o /dev/null %s 2>&1 | FileCheck %s + +# Check that the RDF graph has a phi node for the R28 register in bb.3 and bb.4. +# R28 is clobbered by the memcpy call, so the clobbering def must be present in bb.4's IDF. +# This phi node should prevent $r27 from being replaced by $r28 by RDF copy propagation. + +#CHECK-LABEL: Starting copy propagation on: foo + +#CHECK-LABEL: --- %bb.3 --- +#CHECK: p{{[0-9]+}}: phi [+d{{[0-9]+}} + +#CHECK-LABEL: --- %bb.4 --- +#CHECK: p{{[0-9]+}}: phi [+d{{[0-9]+}} + +#CHECK-LABEL: After Hexagon RDF optimizations +#CHECK-LABEL: bb.3: +#CHECK: renamable $r0 = A2_add renamable $r27 + +--- | + define internal fastcc void @foo() unnamed_addr { + entry: + ret void + } + + declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg) + +--- +name: foo +alignment: 16 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +failedISel: false +tracksRegLiveness: true +hasWinCFI: false +callsEHReturn: false +callsUnwindInit: false +hasEHCatchret: false +hasEHScopes: false +hasEHFunclets: false +isOutlined: false +debugInstrRef: false +failsVerification: false +tracksDebugUserValues: true +registers: [] +liveins: + - { reg: '$d0', virtual-reg: '' } + - { reg: '$d3', virtual-reg: '' } + - { reg: '$r23', virtual-reg: '' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 8 + adjustsStack: true + hasCalls: true + stackProtector: '' + functionContext: '' + maxCallFrameSize: 4294967295 + cvBytesOfCalleeSavedRegisters: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false + hasTailCall: false + isCalleeSavedInfoValid: false + localFrameSize: 0 + savePoint: '' + restorePoint: '' +fixedStack: + - { id: 0, type: default, offset: 40, size: 8, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: + - { id: 0, name: '', type: spill-slot, offset: 0, size: 8, alignment: 8, + stack-id: default, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 1, name: '', type: spill-slot, offset: 0, size: 8, alignment: 8, + stack-id: default, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 2, name: '', type: spill-slot, offset: 0, size: 8, alignment: 8, + stack-id: default, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 3, name: '', type: spill-slot, offset: 0, size: 8, alignment: 8, + stack-id: default, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: 
'', debug-info-location: '' } +entry_values: [] +callSites: [] +debugValueSubstitutions: [] +constants: [] +machineFunctionInfo: {} +body: | + bb.0.entry: + successors: %bb.1 + liveins: $d0, $d3, $r23 + + J2_jump %bb.1, implicit-def dead $pc + + bb.1: + successors: %bb.2 + liveins: $d0:0x0000000000000003, $d3:0x0000000000000003, $r23 + + renamable $r28 = L2_loadri_io %fixed-stack.0, 0 :: (load (s32) from %fixed-stack.0) + renamable $r27 = COPY killed renamable $r28 + + bb.2: + successors: %bb.3 + liveins: $d0:0x0000000000000003, $d3:0x0000000000000003, $r23, $r27 + + renamable $d10 = L2_loadrd_io %stack.0, 0 :: (load (s64) from %stack.0) + renamable $d11 = L2_loadrd_io %stack.1, 0 :: (load (s64) from %stack.1) + + bb.3: + successors: %bb.4, %bb.3 + liveins: $d0:0x0000000000000003, $d3:0x0000000000000003, $d10:0x0000000000000003, $d11:0x0000000000000002, $r23, $r27 + + ADJCALLSTACKDOWN 0, 0, implicit-def $r29, implicit-def dead $r30, implicit $r31, implicit $r30, implicit $r29 + renamable $r1 = A2_add renamable $r23, killed renamable $r0 + $r2 = COPY renamable $r22 + renamable $r0 = A2_add renamable $r27, killed renamable $r6 + J2_call &memcpy, hexagoncsr, implicit-def dead $pc, implicit-def dead $r31, implicit $r29, implicit $r0, implicit $r1, implicit $r2, implicit-def $r29, implicit-def dead $r0 + renamable $p0 = C2_cmpgtp renamable $d11, renamable $d10 + ADJCALLSTACKUP 0, 0, implicit-def dead $r29, implicit-def dead $r30, implicit-def dead $r31, implicit $r29 + J2_jumpt killed renamable $p0, %bb.3, implicit-def dead $pc + J2_jump %bb.4, implicit-def dead $pc + + bb.4: + successors: %bb.5, %bb.2 + liveins: $d10:0x0000000000000003, $d11:0x0000000000000002, $r23, $r27 + + renamable $d0 = L2_loadrd_io %stack.2, 0 :: (load (s64) from %stack.2) + renamable $d3 = L2_loadrd_io %stack.3, 0 :: (load (s64) from %stack.3) + renamable $p0 = C2_cmpgtp killed renamable $d0, killed renamable $d3 + J2_jumpt killed renamable $p0, %bb.2, implicit-def dead $pc + J2_jump %bb.5, implicit-def dead $pc + + bb.5: + PS_jmpret $r31, implicit-def dead $pc + +... diff --git a/llvm/test/CodeGen/Hexagon/rdf-phi-clobber.mir b/llvm/test/CodeGen/Hexagon/rdf-phi-clobber.mir new file mode 100644 index 0000000000000..d49cc3403d644 --- /dev/null +++ b/llvm/test/CodeGen/Hexagon/rdf-phi-clobber.mir @@ -0,0 +1,102 @@ +# RUN: llc -march=hexagon -run-pass=hexagon-rdf-opt \ +# RUN: -hexagon-rdf-dump -verify-machineinstrs -o /dev/null %s 2>&1 \ +# RUN: | FileCheck %s + +# Check that phi nodes that only have clobbering reaching defs are not created +# during graph construction. Check that there are no phi nodes for HVX registers + +#CHECK-LABEL: --- %bb.1 --- +#CHECK-NOT: p{{[0-9]+}}: phi [+d{{[0-9]+}} + +--- | + @.str.3 = private unnamed_addr constant [2 x i8] c"%d", align 8 + @.str.4 = private unnamed_addr constant [2 x i8] c"%d", align 8 + + define internal fastcc void @foo() unnamed_addr { + entry: + ret void + } + + declare dso_local noundef i32 @printf(ptr nocapture noundef readonly, ...) 
local_unnamed_addr + +--- +name: foo +alignment: 16 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +failedISel: false +tracksRegLiveness: true +hasWinCFI: false +callsEHReturn: false +callsUnwindInit: false +hasEHCatchret: false +hasEHScopes: false +hasEHFunclets: false +isOutlined: false +debugInstrRef: false +failsVerification: false +tracksDebugUserValues: true +registers: [] +liveins: + - { reg: '$d0', virtual-reg: '' } + - { reg: '$d3', virtual-reg: '' } + - { reg: '$r23', virtual-reg: '' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 8 + adjustsStack: true + hasCalls: true + stackProtector: '' + functionContext: '' + maxCallFrameSize: 4294967295 + cvBytesOfCalleeSavedRegisters: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false + hasTailCall: false + isCalleeSavedInfoValid: false + localFrameSize: 0 + savePoint: '' + restorePoint: '' +entry_values: [] +callSites: [] +debugValueSubstitutions: [] +constants: [] +machineFunctionInfo: {} +body: | + bb.0.entry: + successors: %bb.1 + liveins: $r25, $r26, $d11 + + renamable $r16 = A2_tfrsi 0 + S2_storerd_io $r29, 0, renamable $d11 :: (store (s64) into stack) + $r0 = A2_tfrsi @.str.3 + J2_call @printf, hexagoncsr, implicit-def dead $pc, implicit-def dead $r31, implicit $r29, implicit $r0, implicit-def $r29, implicit-def dead $r0 + J2_jump %bb.1, implicit-def dead $pc + + bb.1: + successors: %bb.2, %bb.1 + liveins: $r16, $r25, $r26 + + S2_storeri_io $r29, 0, killed renamable $r25 :: (store (s32) into stack) + $r0 = A2_tfrsi @.str.4 + S2_storeri_io $r29, 8, killed renamable $r26 :: (store (s64) into stack + 8) + J2_call @printf, hexagoncsr, implicit-def dead $pc, implicit-def dead $r31, implicit $r29, implicit $r0, implicit-def $r29, implicit-def dead $r0 + renamable $p0 = C2_cmpgti renamable $r16, 4 + renamable $r16 = nsw A2_addi killed renamable $r16, 1 + J2_jumpf killed renamable $p0, %bb.2, implicit-def dead $pc + J2_jump %bb.1, implicit-def dead $pc + + bb.2: + liveins: $r16, $r25, $r26 + + PS_jmpret $r31, implicit-def dead $pc + +... 
diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-commit.ll b/llvm/test/CodeGen/NVPTX/tcgen05-commit.ll new file mode 100644 index 0000000000000..6e0ec6bcf4465 --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/tcgen05-commit.ll @@ -0,0 +1,135 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | FileCheck --check-prefixes=CHECK_PTX64 %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 --nvptx-short-ptr | FileCheck --check-prefixes=CHECK_PTX64_SHARED32 %s +; RUN: %if ptxas-12.8 %{ llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | %ptxas-verify -arch=sm_100a %} +; RUN: %if ptxas-12.8 %{ llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 --nvptx-short-ptr | %ptxas-verify -arch=sm_100a %} + +declare void @llvm.nvvm.tcgen05.commit.cg1(ptr %bar_addr) +declare void @llvm.nvvm.tcgen05.commit.cg2(ptr %bar_addr) +declare void @llvm.nvvm.tcgen05.commit.shared.cg1(ptr addrspace(3) %bar_addr) +declare void @llvm.nvvm.tcgen05.commit.shared.cg2(ptr addrspace(3) %bar_addr) + +; CHECK-LABEL: test_tcgen05_commit +define void @test_tcgen05_commit(ptr %bar_addr) { +; CHECK_PTX64-LABEL: test_tcgen05_commit( +; CHECK_PTX64: { +; CHECK_PTX64-NEXT: .reg .b64 %rd<2>; +; CHECK_PTX64-EMPTY: +; CHECK_PTX64-NEXT: // %bb.0: +; CHECK_PTX64-NEXT: ld.param.u64 %rd1, [test_tcgen05_commit_param_0]; +; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.b64 [%rd1]; +; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.b64 [%rd1]; +; CHECK_PTX64-NEXT: ret; +; +; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit( +; CHECK_PTX64_SHARED32: { +; CHECK_PTX64_SHARED32-NEXT: .reg .b64 %rd<2>; +; CHECK_PTX64_SHARED32-EMPTY: +; CHECK_PTX64_SHARED32-NEXT: // %bb.0: +; CHECK_PTX64_SHARED32-NEXT: ld.param.u64 %rd1, [test_tcgen05_commit_param_0]; +; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.b64 [%rd1]; +; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.b64 [%rd1]; +; CHECK_PTX64_SHARED32-NEXT: ret; + call void @llvm.nvvm.tcgen05.commit.cg1(ptr %bar_addr) + + call void @llvm.nvvm.tcgen05.commit.cg2(ptr %bar_addr) + + ret void +} + +; CHECK-LABEL: test_tcgen05_commit_shared +define void @test_tcgen05_commit_shared(ptr addrspace(3) %bar_addr) { +; CHECK_PTX64-LABEL: test_tcgen05_commit_shared( +; CHECK_PTX64: { +; CHECK_PTX64-NEXT: .reg .b64 %rd<2>; +; CHECK_PTX64-EMPTY: +; CHECK_PTX64-NEXT: // %bb.0: +; CHECK_PTX64-NEXT: ld.param.u64 %rd1, [test_tcgen05_commit_shared_param_0]; +; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.b64 [%rd1]; +; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.b64 [%rd1]; +; CHECK_PTX64-NEXT: ret; +; +; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_shared( +; CHECK_PTX64_SHARED32: { +; CHECK_PTX64_SHARED32-NEXT: .reg .b32 %r<2>; +; CHECK_PTX64_SHARED32-EMPTY: +; CHECK_PTX64_SHARED32-NEXT: // %bb.0: +; CHECK_PTX64_SHARED32-NEXT: ld.param.u32 %r1, [test_tcgen05_commit_shared_param_0]; +; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.b64 [%r1]; +; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.b64 [%r1]; +; CHECK_PTX64_SHARED32-NEXT: ret; + call void @llvm.nvvm.tcgen05.commit.shared.cg1(ptr addrspace(3) %bar_addr) + + call void @llvm.nvvm.tcgen05.commit.shared.cg2(ptr addrspace(3) 
%bar_addr) + + ret void +} + +declare void @llvm.nvvm.tcgen05.commit.mc.cg1(ptr %bar_addr, i16 %cta_mask) +declare void @llvm.nvvm.tcgen05.commit.mc.cg2(ptr %bar_addr, i16 %cta_mask) +declare void @llvm.nvvm.tcgen05.commit.mc.shared.cg1(ptr addrspace(3) %bar_addr, i16 %cta_mask) +declare void @llvm.nvvm.tcgen05.commit.mc.shared.cg2(ptr addrspace(3) %bar_addr, i16 %cta_mask) + +; CHECK-LABEL: test_tcgen05_commit_mc +define void @test_tcgen05_commit_mc(ptr %bar_addr, i16 %cta_mask) { +; CHECK_PTX64-LABEL: test_tcgen05_commit_mc( +; CHECK_PTX64: { +; CHECK_PTX64-NEXT: .reg .b16 %rs<2>; +; CHECK_PTX64-NEXT: .reg .b64 %rd<2>; +; CHECK_PTX64-EMPTY: +; CHECK_PTX64-NEXT: // %bb.0: +; CHECK_PTX64-NEXT: ld.param.u64 %rd1, [test_tcgen05_commit_mc_param_0]; +; CHECK_PTX64-NEXT: ld.param.u16 %rs1, [test_tcgen05_commit_mc_param_1]; +; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1; +; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1; +; CHECK_PTX64-NEXT: ret; +; +; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_mc( +; CHECK_PTX64_SHARED32: { +; CHECK_PTX64_SHARED32-NEXT: .reg .b16 %rs<2>; +; CHECK_PTX64_SHARED32-NEXT: .reg .b64 %rd<2>; +; CHECK_PTX64_SHARED32-EMPTY: +; CHECK_PTX64_SHARED32-NEXT: // %bb.0: +; CHECK_PTX64_SHARED32-NEXT: ld.param.u64 %rd1, [test_tcgen05_commit_mc_param_0]; +; CHECK_PTX64_SHARED32-NEXT: ld.param.u16 %rs1, [test_tcgen05_commit_mc_param_1]; +; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1; +; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1; +; CHECK_PTX64_SHARED32-NEXT: ret; + call void @llvm.nvvm.tcgen05.commit.mc.cg1(ptr %bar_addr, i16 %cta_mask) + + call void @llvm.nvvm.tcgen05.commit.mc.cg2(ptr %bar_addr, i16 %cta_mask) + + ret void +} + +; CHECK-LABEL: test_tcgen05_commit_mc_shared +define void @test_tcgen05_commit_mc_shared(ptr addrspace(3) %bar_addr, i16 %cta_mask) { +; CHECK_PTX64-LABEL: test_tcgen05_commit_mc_shared( +; CHECK_PTX64: { +; CHECK_PTX64-NEXT: .reg .b16 %rs<2>; +; CHECK_PTX64-NEXT: .reg .b64 %rd<2>; +; CHECK_PTX64-EMPTY: +; CHECK_PTX64-NEXT: // %bb.0: +; CHECK_PTX64-NEXT: ld.param.u64 %rd1, [test_tcgen05_commit_mc_shared_param_0]; +; CHECK_PTX64-NEXT: ld.param.u16 %rs1, [test_tcgen05_commit_mc_shared_param_1]; +; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1; +; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1; +; CHECK_PTX64-NEXT: ret; +; +; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_mc_shared( +; CHECK_PTX64_SHARED32: { +; CHECK_PTX64_SHARED32-NEXT: .reg .b16 %rs<2>; +; CHECK_PTX64_SHARED32-NEXT: .reg .b32 %r<2>; +; CHECK_PTX64_SHARED32-EMPTY: +; CHECK_PTX64_SHARED32-NEXT: // %bb.0: +; CHECK_PTX64_SHARED32-NEXT: ld.param.u32 %r1, [test_tcgen05_commit_mc_shared_param_0]; +; CHECK_PTX64_SHARED32-NEXT: ld.param.u16 %rs1, [test_tcgen05_commit_mc_shared_param_1]; +; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%r1], %rs1; +; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%r1], %rs1; +; CHECK_PTX64_SHARED32-NEXT: ret; + call void 
@llvm.nvvm.tcgen05.commit.mc.shared.cg1(ptr addrspace(3) %bar_addr, i16 %cta_mask) + + call void @llvm.nvvm.tcgen05.commit.mc.shared.cg2(ptr addrspace(3) %bar_addr, i16 %cta_mask) + + ret void +} diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-fence.ll b/llvm/test/CodeGen/NVPTX/tcgen05-fence.ll new file mode 100644 index 0000000000000..07c62671d2fbd --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/tcgen05-fence.ll @@ -0,0 +1,42 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | FileCheck --check-prefixes=CHECK %s +; RUN: %if ptxas-12.8 %{ llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | %ptxas-verify -arch=sm_100a %} + +declare void @llvm.nvvm.tcgen05.fence.before.thread.sync() +declare void @llvm.nvvm.tcgen05.fence.after.thread.sync() +declare void @llvm.nvvm.tcgen05.wait.ld() +declare void @llvm.nvvm.tcgen05.wait.st() + +; CHECK-LABEL: test_tcgen05_fence +define void @test_tcgen05_fence() { +; CHECK-LABEL: test_tcgen05_fence( +; CHECK: { +; CHECK-EMPTY: +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: tcgen05.fence::before_thread_sync; +; CHECK-NEXT: tcgen05.fence::after_thread_sync; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.fence.before.thread.sync() + + call void @llvm.nvvm.tcgen05.fence.after.thread.sync() + + ret void +} + +; CHECK-LABEL: test_tcgen05_wait +define void @test_tcgen05_wait() { +; CHECK-LABEL: test_tcgen05_wait( +; CHECK: { +; CHECK-EMPTY: +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: tcgen05.wait::ld.sync.aligned; +; CHECK-NEXT: tcgen05.wait::st.sync.aligned; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.wait.ld() + + call void @llvm.nvvm.tcgen05.wait.st() + + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/BufferLoadStore.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/BufferLoadStore.ll index 25dcc90cb61cd..d810ef9ccecc4 100644 --- a/llvm/test/CodeGen/SPIRV/hlsl-resources/BufferLoadStore.ll +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/BufferLoadStore.ll @@ -2,6 +2,7 @@ ; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-vulkan-library %s -o - -filetype=obj | spirv-val %} ; CHECK-DAG: [[float:%[0-9]+]] = OpTypeFloat 32 +; CHECK-DAG: [[v2float:%[0-9]+]] = OpTypeVector [[float]] 2 ; CHECK-DAG: [[v4float:%[0-9]+]] = OpTypeVector [[float]] 4 ; CHECK-DAG: [[int:%[0-9]+]] = OpTypeInt 32 0 ; CHECK-DAG: [[zero:%[0-9]+]] = OpConstant [[int]] 0 @@ -10,10 +11,11 @@ ; CHECK-DAG: [[twenty_three:%[0-9]+]] = OpConstant [[int]] 23 ; CHECK-DAG: [[ImageType:%[0-9]+]] = OpTypeImage [[float]] Buffer 2 0 0 2 Rgba32f ; CHECK-DAG: [[ImagePtr:%[0-9]+]] = OpTypePointer UniformConstant [[ImageType]] -; CHECK: [[Var:%[0-9]+]] = OpVariable [[ImagePtr]] UniformConstant +; CHECK-DAG: [[Var:%[0-9]+]] = OpVariable [[ImagePtr]] UniformConstant ; Function Attrs: mustprogress nofree noinline norecurse nosync nounwind willreturn memory(readwrite, inaccessiblemem: none) -define void @main() local_unnamed_addr #0 { +; CHECK: OpFunction +define void @main_scalar() local_unnamed_addr #0 { entry: ; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] %s_h.i = tail call target("spirv.Image", float, 5, 2, 0, 0, 2, 1) @llvm.spv.resource.handlefrombinding.tspirv.Image_f32_5_2_0_0_2_0t(i32 3, i32 5, i32 1, i32 0, i1 false) @@ -50,6 +52,86 @@ bb_both: ret void } +; Function Attrs: mustprogress nofree noinline norecurse nosync nounwind willreturn memory(readwrite, inaccessiblemem: none) +; CHECK: OpFunction +define void @main_vector2() local_unnamed_addr #0 { +entry: 
+; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] + %s_h.i = tail call target("spirv.Image", float, 5, 2, 0, 0, 2, 1) @llvm.spv.resource.handlefrombinding.tspirv.Image_f32_5_2_0_0_2_0t(i32 3, i32 5, i32 1, i32 0, i1 false) + +; CHECK: [[R:%[0-9]+]] = OpImageRead [[v4float]] [[H]] [[one]] +; CHECK: [[E0:%[0-9]+]] = OpCompositeExtract [[float]] [[R]] 0 +; CHECK: [[E1:%[0-9]+]] = OpCompositeExtract [[float]] [[R]] 1 +; CHECK: [[V:%[0-9]+]] = OpCompositeConstruct [[v2float]] [[E0]] [[E1]] + %0 = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %s_h.i, i32 1) + %1 = load <2 x float>, ptr %0, align 4 +; CHECK: OpBranch [[bb_store:%[0-9]+]] + br label %bb_store + +; CHECK: [[bb_store]] = OpLabel +bb_store: + +; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] +; CHECK: OpImageWrite [[H]] [[zero]] [[V]] + %2 = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %s_h.i, i32 0) + store <2 x float> %1, ptr %2, align 4 +; CHECK: OpBranch [[bb_both:%[0-9]+]] + br label %bb_both + +; CHECK: [[bb_both]] = OpLabel +bb_both: +; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] +; CHECK: [[R:%[0-9]+]] = OpImageRead [[v4float]] [[H]] [[twenty_three]] +; CHECK: [[E0:%[0-9]+]] = OpCompositeExtract [[float]] [[R]] 0 +; CHECK: [[E1:%[0-9]+]] = OpCompositeExtract [[float]] [[R]] 1 +; CHECK: [[V:%[0-9]+]] = OpCompositeConstruct [[v2float]] [[E0]] [[E1]] + %3 = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %s_h.i, i32 23) + %4 = load <2 x float>, ptr %3, align 4 + +; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] +; CHECK: OpImageWrite [[H]] [[twenty]] [[V]] + %5 = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %s_h.i, i32 20) + store <2 x float> %4, ptr %5, align 4 + ret void +} + +; Function Attrs: mustprogress nofree noinline norecurse nosync nounwind willreturn memory(readwrite, inaccessiblemem: none) +; CHECK: OpFunction +define void @main_vector4() local_unnamed_addr #0 { +entry: +; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] + %s_h.i = tail call target("spirv.Image", float, 5, 2, 0, 0, 2, 1) @llvm.spv.resource.handlefrombinding.tspirv.Image_f32_5_2_0_0_2_0t(i32 3, i32 5, i32 1, i32 0, i1 false) + +; CHECK: [[R:%[0-9]+]] = OpImageRead [[v4float]] [[H]] [[one]] + %0 = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %s_h.i, i32 1) + %1 = load <4 x float>, ptr %0, align 4 +; CHECK: OpBranch [[bb_store:%[0-9]+]] + br label %bb_store + +; CHECK: [[bb_store]] = OpLabel +bb_store: + +; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] +; CHECK: OpImageWrite [[H]] [[zero]] [[R]] + %2 = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %s_h.i, i32 0) + store <4 x float> %1, ptr %2, align 4 +; CHECK: OpBranch [[bb_both:%[0-9]+]] + br label %bb_both + +; CHECK: [[bb_both]] = OpLabel +bb_both: +; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] +; CHECK: [[R:%[0-9]+]] = OpImageRead [[v4float]] 
[[H]] [[twenty_three]] + %3 = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %s_h.i, i32 23) + %4 = load <4 x float>, ptr %3, align 4 + +; CHECK: [[H:%[0-9]+]] = OpLoad [[ImageType]] [[Var]] +; CHECK: OpImageWrite [[H]] [[twenty]] [[R]] + %5 = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %s_h.i, i32 20) + store <4 x float> %4, ptr %5, align 4 + ret void +} + ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(none) declare ptr @llvm.spv.resource.getpointer.p0.tspirv.Image_f32_5_2_0_0_2_0t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1), i32) #1 diff --git a/llvm/test/CodeGen/X86/dwo-stats.ll b/llvm/test/CodeGen/X86/dwo-stats.ll new file mode 100644 index 0000000000000..fccfd55029c8b --- /dev/null +++ b/llvm/test/CodeGen/X86/dwo-stats.ll @@ -0,0 +1,30 @@ +; REQUIRES: asserts +; RUN: llc %s -mtriple=x86_64-linux --split-dwarf-file=%t.dwo --split-dwarf-output=%t.dwo --filetype=obj -o /dev/null -stats 2>&1 | FileCheck %s --check-prefixes=SPLIT,CHECK +; RUN: llc %s -mtriple=x86_64-linux --filetype=obj -o /dev/null -stats 2>&1 | FileCheck %s --check-prefixes=NOTSPLIT,CHECK + +; NOTSPLIT-NOT: {{[0-9]+}} elf-object-writer - Total size of sections written to .dwo file +; CHECK-DAG: {{[0-9]+}} elf-object-writer - Total size of debug info sections +; SPLIT-DAG: {{[0-9]+}} elf-object-writer - Total size of sections written to .dwo file +; NOTSPLIT-NOT: {{[0-9]+}} elf-object-writer - Total size of sections written to .dwo file + +define void @banana() !dbg !8 { + ret void, !dbg !12 +} + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!3, !4, !5, !6} +!llvm.ident = !{!7} + +!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 11.0.1", isOptimized: true, runtimeVersion: 0, splitDebugFilename: "test.dwo", emissionKind: FullDebug, enums: !2, splitDebugInlining: false, nameTableKind: GNU) +!1 = !DIFile(filename: "/tmp/test.c", directory: "/tmp") +!2 = !{} +!3 = !{i32 7, !"Dwarf Version", i32 4} +!4 = !{i32 2, !"Debug Info Version", i32 3} +!5 = !{i32 1, !"wchar_size", i32 4} +!6 = !{i32 7, !"PIC Level", i32 2} +!7 = !{!"clang version 11.0.1"} +!8 = distinct !DISubprogram(name: "banana", scope: !9, file: !9, line: 1, type: !10, scopeLine: 1, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2) +!9 = !DIFile(filename: "test.c", directory: "/tmp") +!10 = !DISubroutineType(types: !11) +!11 = !{null} +!12 = !DILocation(line: 1, column: 20, scope: !8) diff --git a/llvm/test/CodeGen/X86/swifttailcc-store-ret-address-aliasing-stack-slot.ll b/llvm/test/CodeGen/X86/swifttailcc-store-ret-address-aliasing-stack-slot.ll new file mode 100644 index 0000000000000..78e810bb67f45 --- /dev/null +++ b/llvm/test/CodeGen/X86/swifttailcc-store-ret-address-aliasing-stack-slot.ll @@ -0,0 +1,44 @@ +; RUN: llc %s -o - | FileCheck %s + +target triple = "x86_64-apple-macosx" + +declare swifttailcc void @tc_fn(ptr swiftasync, i64, ptr, i8, ptr swiftself) + +declare void @foo() + +; FIXME: Currently the return address is written to the stack before loading the +; argument from an aliasing stack slot. 
+define swifttailcc void @test(ptr %0, ptr swiftasync %1, i64 %2, i64 %3, ptr %4, ptr %5, i64 %6, ptr %7, i8 %8) { +; CHECK-LABEL: test: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: pushq %r15 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: pushq %rbx +; CHECK-NEXT: .cfi_def_cfa_offset 24 +; CHECK-NEXT: pushq %rax +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: .cfi_offset %rbx, -24 +; CHECK-NEXT: .cfi_offset %r15, -16 +; CHECK-NEXT: movq %r9, %r13 +; CHECK-NEXT: movq %r8, %rbx +; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %r15 +; CHECK-NEXT: callq _foo +; CHECK-NEXT: movq %r14, (%rax) +; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rcx +; CHECK-NEXT: movq %rcx, [[OFF:[0-9]+]](%rsp) +; CHECK-NEXT: movl [[OFF]](%rsp), %edx +; CHECK-NEXT: movq %rax, %r14 +; CHECK-NEXT: movq %r13, %rdi +; CHECK-NEXT: movq %r15, %rsi +; CHECK-NEXT: movq %rbx, %r13 +; CHECK-NEXT: addq $8, %rsp +; CHECK-NEXT: popq %rbx +; CHECK-NEXT: popq %r15 +; CHECK-NEXT: addq $16, %rsp +; CHECK-NEXT: jmp _tc_fn ## TAILCALL +entry: + %res = tail call ptr @foo() + store ptr %1, ptr %res, align 8 + musttail call swifttailcc void @tc_fn(ptr swiftasync %res, i64 %6, ptr %7, i8 %8, ptr swiftself %5) + ret void +} diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s index e55fbfc6e18c8..857a1359b00d9 100644 --- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s +++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s @@ -461,11 +461,11 @@ v_alignbyte_b32 v5, s1, v255, s3 v_alignbyte_b32 v5, s105, s105, s105 // GFX11: v_alignbyte_b32 v5, s105, s105, s105 ; encoding: [0x05,0x00,0x17,0xd6,0x69,0xd2,0xa4,0x01] -v_alignbyte_b32 v5, vcc_lo, ttmp15, v3 -// GFX11: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +v_alignbyte_b32 v5, vcc_lo, ttmp15, v3.l +// GFX11: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] -v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 -// GFX11: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.l +// GFX11: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.l ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] v_alignbyte_b32 v5, ttmp15, src_scc, ttmp15 // GFX11: v_alignbyte_b32 v5, ttmp15, src_scc, ttmp15 ; encoding: [0x05,0x00,0x17,0xd6,0x7b,0xfa,0xed,0x01] @@ -494,6 +494,9 @@ v_alignbyte_b32 v5, src_scc, vcc_lo, -1 v_alignbyte_b32 v255, 0xaf123456, vcc_hi, null // GFX11: v_alignbyte_b32 v255, 0xaf123456, vcc_hi, null ; encoding: [0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf] +v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.h +// GFX11: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] + v_and_b16 v5.l, v1.l, v2.l // GFX11: v_and_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00] diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s index 3a3f6c178cbde..1864996b26028 100644 --- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s +++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s @@ -363,22 +363,22 @@ v_alignbit_b32_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bou v_alignbit_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 // GFX11: v_alignbit_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: 
[0xff,0x00,0x16,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30] -v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] -// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[3,2,1,0] +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] -v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] -// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[0,1,2,3] +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] -v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror -// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_mirror row_mask:0xf bank_mask:0xf +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] -v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror -// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_half_mirror row_mask:0xf bank_mask:0xf +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] -v_alignbyte_b32_e64_dpp v5, v1, v2, v255 row_shl:1 -// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l row_shl:1 row_mask:0xf bank_mask:0xf +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] -v_alignbyte_b32_e64_dpp v5, v1, v2, s105 row_shl:15 +v_alignbyte_b32_e64_dpp v5, v1, v2, s105 row_shl:15 row_mask:0xf bank_mask:0xf // GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, s105 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff] v_alignbyte_b32_e64_dpp v5, v1, v2, vcc_hi row_shr:1 @@ -387,7 +387,7 @@ v_alignbyte_b32_e64_dpp v5, v1, v2, vcc_hi row_shr:1 v_alignbyte_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:15 // GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff] -v_alignbyte_b32_e64_dpp v5, v1, v2, ttmp15 row_ror:1 +v_alignbyte_b32_e64_dpp v5, v1, v2, ttmp15 row_ror:1 row_mask:0xf bank_mask:0xf // GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, ttmp15 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff] v_alignbyte_b32_e64_dpp v5, v1, v2, exec_hi row_ror:15 @@ -405,6 +405,24 @@ v_alignbyte_b32_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bo v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 
row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 // GFX11: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30] +v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l row_mirror +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] + +v_alignbyte_b32_e64_dpp v5, v1, v2, s3 row_half_mirror +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, s3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x41,0x01,0xff] + +v_alignbyte_b32_e64_dpp v5, v1, v2, s105 row_shl:1 +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff] + +v_alignbyte_b32_e64_dpp v5, v1, v2, ttmp15 row_shl:15 +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, ttmp15 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xee,0x01,0x01,0x0f,0x01,0xff] + +v_alignbyte_b32_e64_dpp v5, v1, v2, m0 row_ror:1 +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, m0 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xf6,0x01,0x01,0x21,0x01,0xff] + +v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h row_mirror +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] + v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] // GFX11: v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff] diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s index 1f9173518d412..300e5ef22f5ae 100644 --- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s +++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s @@ -187,11 +187,11 @@ v_alignbit_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1 v_alignbit_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0 // GFX11: v_alignbit_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x00,0x16,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00] -v_alignbyte_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] -// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l dpp8:[7,6,5,4,3,2,1,0] +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] -v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] -// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l dpp8:[7,6,5,4,3,2,1,0] +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] v_alignbyte_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] // GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05] @@ -220,6 +220,15 @@ v_alignbyte_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1 v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc 
dpp8:[0,0,0,0,0,0,0,0] fi:0 // GFX11: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x00,0x17,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00] +v_alignbyte_b32_e64_dpp v5, v1, v2, s3 dpp8:[7,6,5,4,3,2,1,0] +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x00,0x01,0x77,0x39,0x05] + +v_alignbyte_b32_e64_dpp v5, v1, v2, m0 dpp8:[7,6,5,4,3,2,1,0] +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, m0 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xf6,0x01,0x01,0x77,0x39,0x05] + +v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h dpp8:[7,6,5,4,3,2,1,0] +// GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] + v_and_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] // GFX11: v_and_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05] diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s index 016ad18637bbb..72d201e060df7 100644 --- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s +++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s @@ -452,6 +452,9 @@ v_alignbyte_b32 v5, src_scc, vcc_lo, -1 v_alignbyte_b32 v255, 0xaf123456, vcc_hi, null // GFX12: v_alignbyte_b32 v255, 0xaf123456, vcc_hi, null ; encoding: [0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf] +v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.h +// GFX12: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] + v_and_b16 v5.l, v1.l, v2.l // GFX12: v_and_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00] diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s index d21c64f25f6a3..08be9225890c2 100644 --- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s +++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s @@ -485,6 +485,9 @@ v_alignbyte_b32_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bo v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 // GFX12: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30] +v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h row_mirror +// GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] + v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] // GFX12: v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff] diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s index cae8423b3a655..0523b0fe8c9a4 100644 --- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s +++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s @@ -288,6 +288,9 @@ v_alignbyte_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1 v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0 // GFX12: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x00,0x17,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00] +v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h dpp8:[7,6,5,4,3,2,1,0] +// GFX12: 
v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] + v_and_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] // GFX12: v_and_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05] diff --git a/llvm/test/MC/ARM/vcvt-fixed-point-errors.s b/llvm/test/MC/ARM/vcvt-fixed-point-errors.s new file mode 100644 index 0000000000000..90e9da054a908 --- /dev/null +++ b/llvm/test/MC/ARM/vcvt-fixed-point-errors.s @@ -0,0 +1,51 @@ +// RUN: not llvm-mc -triple=armv8a-none-eabi -mattr=+fullfp16 < %s 2>&1 | FileCheck %s + + vcvt.u16.f16 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.s16.f16 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.u32.f16 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.s32.f16 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.u16.f32 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.s16.f32 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.u32.f32 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.s32.f32 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.u16.f64 d0, d1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.s16.f64 d0, d1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.u32.f64 d0, d1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.s32.f64 d0, d1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f16.u16 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f16.s16 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f16.u32 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f16.s32 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f32.u16 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f32.s16 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f32.u32 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f32.s32 s0, s1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f64.u16 d0, d1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f64.s16 d0, d1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f64.u32 d0, d1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + vcvt.f64.s32 d0, d1, #1 +// CHECK: [[@LINE-1]]{{.*}}error: source and destination registers must be the same + diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt index fc0ae333b1745..b74128b21f563 100644 --- 
a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt @@ -508,10 +508,16 @@ # GFX11: v_alignbyte_b32 v5, s105, s105, s105 ; encoding: [0x05,0x00,0x17,0xd6,0x69,0xd2,0xa4,0x01] 0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04 -# GFX11: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +# W32-REAL16: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +# W32-FAKE16: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +# W64-REAL16: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +# W64-FAKE16: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] 0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf -# GFX11: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W32-REAL16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.l ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W32-FAKE16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W64-REAL16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.l ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W64-FAKE16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] 0x05,0x00,0x17,0xd6,0x7b,0xfa,0xed,0x01 # GFX11: v_alignbyte_b32 v5, ttmp15, src_scc, ttmp15 ; encoding: [0x05,0x00,0x17,0xd6,0x7b,0xfa,0xed,0x01] @@ -540,6 +546,12 @@ 0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf # GFX11: v_alignbyte_b32 v255, 0xaf123456, vcc_hi, null ; encoding: [0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf] +0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf +# W32-REAL16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W32-FAKE16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W64-REAL16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W64-FAKE16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] + 0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00 # W32-REAL16: v_and_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00] # W32-FAKE16: v_and_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt index 8a9ab3da5e4e0..8e7122b902326 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt @@ -228,19 +228,34 @@ # GFX11: v_alignbit_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x16,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30] 0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff -# GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +# 
W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff -# GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff -# GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff -# GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff -# GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 
row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff # GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, s105 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff] @@ -269,6 +284,12 @@ 0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30 # GFX11: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30] +0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] + 0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff # W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff] # W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt index 89f7c606152fb..f67eb32385407 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt @@ -135,10 +135,16 @@ # GFX11: v_alignbit_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x16,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00] 0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05 -# GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l dpp8:[7,6,5,4,3,2,1,0] ; 
encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] 0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05 -# GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] 0x05,0x00,0x17,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05 # GFX11: v_alignbyte_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05] @@ -167,6 +173,12 @@ 0xff,0x00,0x17,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00 # GFX11: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x17,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00] +0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05 +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] + 0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05 # W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05] # W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt index c2e1288c05cc4..3d5e78c86bc22 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt @@ -472,10 +472,16 @@ # GFX12: v_alignbyte_b32 v5, s105, s105, s105 ; encoding: [0x05,0x00,0x17,0xd6,0x69,0xd2,0xa4,0x01] 0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04 -# GFX12: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +# W32-REAL16: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +# W32-FAKE16: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +# W64-REAL16: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] +# W64-FAKE16: v_alignbyte_b32 v5, vcc_lo, ttmp15, v3 ; encoding: 
[0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04] 0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf -# GFX12: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W32-REAL16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.l ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W32-FAKE16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W64-REAL16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.l ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W64-FAKE16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] 0x05,0x00,0x17,0xd6,0x7b,0xfa,0xed,0x01 # GFX12: v_alignbyte_b32 v5, ttmp15, src_scc, ttmp15 ; encoding: [0x05,0x00,0x17,0xd6,0x7b,0xfa,0xed,0x01] @@ -504,6 +510,12 @@ 0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf # GFX12: v_alignbyte_b32 v255, 0xaf123456, vcc_hi, null ; encoding: [0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf] +0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf +# W32-REAL16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W32-FAKE16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W64-REAL16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] +# W64-FAKE16: v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf] + 0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00 # W32-REAL16: v_and_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00] # W32-FAKE16: v_and_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt index f0328615aacb0..c63c09b6b24c5 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt @@ -240,22 +240,40 @@ # GFX12: v_alignbit_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x16,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30] 0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff -# GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff -# GFX12: 
v_alignbyte_b32_e64_dpp v5, v1, s3, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, s3, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, s3, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, s3, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, s3, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff -# GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff -# GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff -# GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: 
[0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff -# GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] 0x05,0x00,0x17,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff # GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, s105 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff] @@ -284,6 +302,12 @@ 0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30 # GFX12: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30] +0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x40,0x01,0xff] + 0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff # W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff] # W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt index fc429adcbefb2..4182cd93f813c 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt @@ -147,13 +147,22 @@ # GFX12: v_alignbit_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x16,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00] 0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05 -# GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 
dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05] 0x05,0x00,0x17,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05 -# GFX12: v_alignbyte_b32_e64_dpp v5, v1, s3, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, s3, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, s3, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, s3, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, s3, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05] 0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05 -# GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] 0x05,0x00,0x17,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05 # GFX12: v_alignbyte_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05] @@ -182,6 +191,12 @@ 0xff,0x00,0x17,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00 # GFX12: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x17,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00] +0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05 +# W32-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W32-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W64-REAL16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] +# W64-FAKE16: v_alignbyte_b32_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05] + 0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05 # W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05] # W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: 
[0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05] diff --git a/llvm/test/Transforms/IndVarSimplify/infer-via-ranges.ll b/llvm/test/Transforms/IndVarSimplify/infer-via-ranges.ll new file mode 100644 index 0000000000000..effae2322dba3 --- /dev/null +++ b/llvm/test/Transforms/IndVarSimplify/infer-via-ranges.ll @@ -0,0 +1,51 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -passes=indvars -S < %s | FileCheck %s + +define void @infer_via_ranges(ptr %arr, i32 %n) { +; CHECK-LABEL: define void @infer_via_ranges( +; CHECK-SAME: ptr [[ARR:%.*]], i32 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[FIRST_ITR_CHECK:%.*]] = icmp sgt i32 [[N]], 0 +; CHECK-NEXT: [[START:%.*]] = sub i32 [[N]], 1 +; CHECK-NEXT: br i1 [[FIRST_ITR_CHECK]], label %[[LOOP_PREHEADER:.*]], label %[[EXIT:.*]] +; CHECK: [[LOOP_PREHEADER]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IDX:%.*]] = phi i32 [ [[IDX_DEC:%.*]], %[[IN_BOUNDS:.*]] ], [ [[START]], %[[LOOP_PREHEADER]] ] +; CHECK-NEXT: [[IDX_DEC]] = sub nsw i32 [[IDX]], 1 +; CHECK-NEXT: br i1 true, label %[[IN_BOUNDS]], label %[[OUT_OF_BOUNDS:.*]] +; CHECK: [[IN_BOUNDS]]: +; CHECK-NEXT: [[ADDR:%.*]] = getelementptr i32, ptr [[ARR]], i32 [[IDX]] +; CHECK-NEXT: store i32 0, ptr [[ADDR]], align 4 +; CHECK-NEXT: [[NEXT:%.*]] = icmp sgt i32 [[IDX_DEC]], -1 +; CHECK-NEXT: br i1 [[NEXT]], label %[[LOOP]], label %[[EXIT_LOOPEXIT:.*]] +; CHECK: [[OUT_OF_BOUNDS]]: +; CHECK-NEXT: ret void +; CHECK: [[EXIT_LOOPEXIT]]: +; CHECK-NEXT: br label %[[EXIT]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; + entry: + %first.itr.check = icmp sgt i32 %n, 0 + %start = sub i32 %n, 1 + br i1 %first.itr.check, label %loop, label %exit + + loop: + %idx = phi i32 [ %start, %entry ] , [ %idx.dec, %in.bounds ] + %idx.dec = sub i32 %idx, 1 + %abc = icmp sge i32 %idx, 0 + br i1 %abc, label %in.bounds, label %out.of.bounds + + in.bounds: + %addr = getelementptr i32, ptr %arr, i32 %idx + store i32 0, ptr %addr + %next = icmp sgt i32 %idx.dec, -1 + br i1 %next, label %loop, label %exit + + out.of.bounds: + ret void + + exit: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/X86/CostModel/vpinstruction-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/CostModel/vpinstruction-cost.ll new file mode 100644 index 0000000000000..bb85b88f181f7 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/X86/CostModel/vpinstruction-cost.ll @@ -0,0 +1,74 @@ +; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --filter "Cost of" +; RUN: opt -S -passes=loop-vectorize -mcpu=skylake-avx512 -mtriple=x86_64-apple-macosx -debug -disable-output -S %s 2>&1 | FileCheck %s + +; REQUIRES: asserts + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" + +define void @wide_or_replaced_with_add_vpinstruction(ptr %src, ptr noalias %dst) { +; CHECK-LABEL: 'wide_or_replaced_with_add_vpinstruction' +; CHECK: Cost of 1 for VF 2: induction instruction %iv.next = add nuw nsw i64 %iv, 1 +; CHECK: Cost of 0 for VF 2: induction instruction %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] +; CHECK: Cost of 1 for VF 2: exit condition instruction %exitcond = icmp eq i64 %iv.next, 32 +; CHECK: Cost of 0 for VF 2: EMIT vp<%3> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK: Cost of 0 for VF 2: ir<%iv> = WIDEN-INDUCTION ir<0>, ir<1>, vp<%0> +; CHECK: Cost of 0 for VF 2: vp<%4> = SCALAR-STEPS vp<%3>, ir<1> +; CHECK: Cost of 0 for VF 
2: CLONE ir<%g.src> = getelementptr inbounds ir<%src>, vp<%4> +; CHECK: Cost of 0 for VF 2: vp<%5> = vector-pointer ir<%g.src> +; CHECK: Cost of 1 for VF 2: WIDEN ir<%l> = load vp<%5> +; CHECK: Cost of 1 for VF 2: WIDEN ir<%iv.4> = add ir<%iv>, ir<4> +; CHECK: Cost of 1 for VF 2: WIDEN ir<%c> = icmp ule ir<%l>, ir<128> +; CHECK: Cost of 1 for VF 2: EMIT ir<%or> = add ir<%iv.4>, ir<1> +; CHECK: Cost of 0 for VF 2: CLONE ir<%g.dst> = getelementptr ir<%dst>, ir<%or> +; CHECK: Cost of 0 for VF 2: vp<%6> = vector-pointer ir<%g.dst> +; CHECK: Cost of 1 for VF 2: WIDEN store vp<%6>, ir<%iv.4>, ir<%c> +; CHECK: Cost of 0 for VF 2: EMIT vp<%index.next> = add nuw vp<%3>, vp<%1> +; CHECK: Cost of 0 for VF 2: EMIT branch-on-count vp<%index.next>, vp<%2> +; CHECK: Cost of 0 for VF 2: vector loop backedge +; CHECK: Cost of 1 for VF 4: induction instruction %iv.next = add nuw nsw i64 %iv, 1 +; CHECK: Cost of 0 for VF 4: induction instruction %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] +; CHECK: Cost of 1 for VF 4: exit condition instruction %exitcond = icmp eq i64 %iv.next, 32 +; CHECK: Cost of 0 for VF 4: EMIT vp<%3> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK: Cost of 0 for VF 4: ir<%iv> = WIDEN-INDUCTION ir<0>, ir<1>, vp<%0> +; CHECK: Cost of 0 for VF 4: vp<%4> = SCALAR-STEPS vp<%3>, ir<1> +; CHECK: Cost of 0 for VF 4: CLONE ir<%g.src> = getelementptr inbounds ir<%src>, vp<%4> +; CHECK: Cost of 0 for VF 4: vp<%5> = vector-pointer ir<%g.src> +; CHECK: Cost of 1 for VF 4: WIDEN ir<%l> = load vp<%5> +; CHECK: Cost of 1 for VF 4: WIDEN ir<%iv.4> = add ir<%iv>, ir<4> +; CHECK: Cost of 1 for VF 4: WIDEN ir<%c> = icmp ule ir<%l>, ir<128> +; CHECK: Cost of 1 for VF 4: EMIT ir<%or> = add ir<%iv.4>, ir<1> +; CHECK: Cost of 0 for VF 4: CLONE ir<%g.dst> = getelementptr ir<%dst>, ir<%or> +; CHECK: Cost of 0 for VF 4: vp<%6> = vector-pointer ir<%g.dst> +; CHECK: Cost of 1 for VF 4: WIDEN store vp<%6>, ir<%iv.4>, ir<%c> +; CHECK: Cost of 0 for VF 4: EMIT vp<%index.next> = add nuw vp<%3>, vp<%1> +; CHECK: Cost of 0 for VF 4: EMIT branch-on-count vp<%index.next>, vp<%2> +; CHECK: Cost of 0 for VF 4: vector loop backedge +; CHECK: Cost of 1 for VF 4: induction instruction %iv.next = add nuw nsw i64 %iv, 1 +; CHECK: Cost of 0 for VF 4: induction instruction %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] +; CHECK: Cost of 1 for VF 4: exit condition instruction %exitcond = icmp eq i64 %iv.next, 32 +; +entry: + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] + %g.src = getelementptr inbounds i64, ptr %src, i64 %iv + %l = load i64, ptr %g.src + %iv.4 = add nuw nsw i64 %iv, 4 + %c = icmp ule i64 %l, 128 + br i1 %c, label %loop.then, label %loop.latch + +loop.then: + %or = or disjoint i64 %iv.4, 1 + %g.dst = getelementptr inbounds i64, ptr %dst, i64 %or + store i64 %iv.4, ptr %g.dst, align 4 + br label %loop.latch + +loop.latch: + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond = icmp eq i64 %iv.next, 32 + br i1 %exitcond, label %exit, label %loop.header + +exit: + ret void +} diff --git a/llvm/test/tools/llvm-mca/ARM/m55-fp.s b/llvm/test/tools/llvm-mca/ARM/m55-fp.s index 6318cfa9d6e9c..1668f58c7937f 100644 --- a/llvm/test/tools/llvm-mca/ARM/m55-fp.s +++ b/llvm/test/tools/llvm-mca/ARM/m55-fp.s @@ -21,30 +21,30 @@ vcmpe.f32 s1, #0.0 vcmpe.f64 d1, #0.0 vcvt.f32.f64 s1, d2 vcvt.f64.f32 d1, s1 -vcvt.f16.u16 s1, s2, #8 -vcvt.f16.s16 s1, s2, #8 -vcvt.f16.u32 s1, s2, #8 -vcvt.f16.s32 s1, s2, #8 -vcvt.u16.f16 s1, s2, #8 -vcvt.s16.f16 s1, s2, #8 -vcvt.u32.f16 
s1, s2, #8 -vcvt.s32.f16 s1, s2, #8 -vcvt.f32.u16 s1, s2, #8 -vcvt.f32.s16 s1, s2, #8 -vcvt.f32.u32 s1, s2, #8 -vcvt.f32.s32 s1, s2, #8 -vcvt.u16.f32 s1, s2, #8 -vcvt.s16.f32 s1, s2, #8 -vcvt.u32.f32 s1, s2, #8 -vcvt.s32.f32 s1, s2, #8 -vcvt.f64.u16 d1, d2, #8 -vcvt.f64.s16 d1, d2, #8 -vcvt.f64.u32 d1, d2, #8 -vcvt.f64.s32 d1, d2, #8 -vcvt.u16.f64 d1, d2, #8 -vcvt.s16.f64 d1, d2, #8 -vcvt.u32.f64 d1, d2, #8 -vcvt.s32.f64 d1, d2, #8 +vcvt.f16.u16 s1, s1, #8 +vcvt.f16.s16 s1, s1, #8 +vcvt.f16.u32 s1, s1, #8 +vcvt.f16.s32 s1, s1, #8 +vcvt.u16.f16 s1, s1, #8 +vcvt.s16.f16 s1, s1, #8 +vcvt.u32.f16 s1, s1, #8 +vcvt.s32.f16 s1, s1, #8 +vcvt.f32.u16 s1, s1, #8 +vcvt.f32.s16 s1, s1, #8 +vcvt.f32.u32 s1, s1, #8 +vcvt.f32.s32 s1, s1, #8 +vcvt.u16.f32 s1, s1, #8 +vcvt.s16.f32 s1, s1, #8 +vcvt.u32.f32 s1, s1, #8 +vcvt.s32.f32 s1, s1, #8 +vcvt.f64.u16 d1, d1, #8 +vcvt.f64.s16 d1, d1, #8 +vcvt.f64.u32 d1, d1, #8 +vcvt.f64.s32 d1, d1, #8 +vcvt.u16.f64 d1, d1, #8 +vcvt.s16.f64 d1, d1, #8 +vcvt.u32.f64 d1, d1, #8 +vcvt.s32.f64 d1, d1, #8 vcvt.u32.f16 s1, s2 vcvt.s32.f16 s1, s2 vcvt.u32.f32 s1, s2 diff --git a/llvm/test/tools/llvm-mca/ARM/m7-fp.s b/llvm/test/tools/llvm-mca/ARM/m7-fp.s index dcf9723461dec..dba7ff92f30cb 100644 --- a/llvm/test/tools/llvm-mca/ARM/m7-fp.s +++ b/llvm/test/tools/llvm-mca/ARM/m7-fp.s @@ -9,22 +9,22 @@ vcmp.f32 s1, s2 vcmp.f64 d1, d2 vcvt.f32.f64 s1, d2 vcvt.f64.f32 d1, s1 -vcvt.f32.u16 s1, s2, #8 -vcvt.f32.s16 s1, s2, #8 -vcvt.f32.u32 s1, s2, #8 -vcvt.f32.s32 s1, s2, #8 -vcvt.u16.f32 s1, s2, #8 -vcvt.s16.f32 s1, s2, #8 -vcvt.u32.f32 s1, s2, #8 -vcvt.s32.f32 s1, s2, #8 -vcvt.f64.u16 d1, d2, #8 -vcvt.f64.s16 d1, d2, #8 -vcvt.f64.u32 d1, d2, #8 -vcvt.f64.s32 d1, d2, #8 -vcvt.u16.f64 d1, d2, #8 -vcvt.s16.f64 d1, d2, #8 -vcvt.u32.f64 d1, d2, #8 -vcvt.s32.f64 d1, d2, #8 +vcvt.f32.u16 s1, s1, #8 +vcvt.f32.s16 s1, s1, #8 +vcvt.f32.u32 s1, s1, #8 +vcvt.f32.s32 s1, s1, #8 +vcvt.u16.f32 s1, s1, #8 +vcvt.s16.f32 s1, s1, #8 +vcvt.u32.f32 s1, s1, #8 +vcvt.s32.f32 s1, s1, #8 +vcvt.f64.u16 d1, d1, #8 +vcvt.f64.s16 d1, d1, #8 +vcvt.f64.u32 d1, d1, #8 +vcvt.f64.s32 d1, d1, #8 +vcvt.u16.f64 d1, d1, #8 +vcvt.s16.f64 d1, d1, #8 +vcvt.u32.f64 d1, d1, #8 +vcvt.s32.f64 d1, d1, #8 vcvt.u32.f32 s1, s2 vcvt.s32.f32 s1, s2 vcvt.u32.f64 s1, d2 diff --git a/llvm/test/tools/llvm-mca/ARM/m85-fp.s b/llvm/test/tools/llvm-mca/ARM/m85-fp.s index edc46060fe0f3..0fc1b394de2dc 100644 --- a/llvm/test/tools/llvm-mca/ARM/m85-fp.s +++ b/llvm/test/tools/llvm-mca/ARM/m85-fp.s @@ -21,30 +21,30 @@ vcmpe.f32 s1, #0.0 vcmpe.f64 d1, #0.0 vcvt.f32.f64 s1, d2 vcvt.f64.f32 d1, s1 -vcvt.f16.u16 s1, s2, #8 -vcvt.f16.s16 s1, s2, #8 -vcvt.f16.u32 s1, s2, #8 -vcvt.f16.s32 s1, s2, #8 -vcvt.u16.f16 s1, s2, #8 -vcvt.s16.f16 s1, s2, #8 -vcvt.u32.f16 s1, s2, #8 -vcvt.s32.f16 s1, s2, #8 -vcvt.f32.u16 s1, s2, #8 -vcvt.f32.s16 s1, s2, #8 -vcvt.f32.u32 s1, s2, #8 -vcvt.f32.s32 s1, s2, #8 -vcvt.u16.f32 s1, s2, #8 -vcvt.s16.f32 s1, s2, #8 -vcvt.u32.f32 s1, s2, #8 -vcvt.s32.f32 s1, s2, #8 -vcvt.f64.u16 d1, d2, #8 -vcvt.f64.s16 d1, d2, #8 -vcvt.f64.u32 d1, d2, #8 -vcvt.f64.s32 d1, d2, #8 -vcvt.u16.f64 d1, d2, #8 -vcvt.s16.f64 d1, d2, #8 -vcvt.u32.f64 d1, d2, #8 -vcvt.s32.f64 d1, d2, #8 +vcvt.f16.u16 s1, s1, #8 +vcvt.f16.s16 s1, s1, #8 +vcvt.f16.u32 s1, s1, #8 +vcvt.f16.s32 s1, s1, #8 +vcvt.u16.f16 s1, s1, #8 +vcvt.s16.f16 s1, s1, #8 +vcvt.u32.f16 s1, s1, #8 +vcvt.s32.f16 s1, s1, #8 +vcvt.f32.u16 s1, s1, #8 +vcvt.f32.s16 s1, s1, #8 +vcvt.f32.u32 s1, s1, #8 +vcvt.f32.s32 s1, s1, #8 +vcvt.u16.f32 s1, s1, #8 +vcvt.s16.f32 s1, s1, #8 +vcvt.u32.f32 s1, s1, 
#8 +vcvt.s32.f32 s1, s1, #8 +vcvt.f64.u16 d1, d1, #8 +vcvt.f64.s16 d1, d1, #8 +vcvt.f64.u32 d1, d1, #8 +vcvt.f64.s32 d1, d1, #8 +vcvt.u16.f64 d1, d1, #8 +vcvt.s16.f64 d1, d1, #8 +vcvt.u32.f64 d1, d1, #8 +vcvt.s32.f64 d1, d1, #8 vcvt.u32.f16 s1, s2 vcvt.s32.f16 s1, s2 vcvt.u32.f32 s1, s2 diff --git a/llvm/utils/TableGen/AsmWriterEmitter.cpp b/llvm/utils/TableGen/AsmWriterEmitter.cpp index e7606b9df4626..795185c677e30 100644 --- a/llvm/utils/TableGen/AsmWriterEmitter.cpp +++ b/llvm/utils/TableGen/AsmWriterEmitter.cpp @@ -967,12 +967,11 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) { IAP.addOperand(ROName, MIOpNum, PrintMethodIdx); // There might be an additional predicate on the MCOperand - unsigned Entry = MCOpPredicateMap[Rec]; + unsigned &Entry = MCOpPredicateMap[Rec]; if (!Entry) { if (!Rec->isValueUnset("MCOperandPredicate")) { MCOpPredicates.push_back(Rec); Entry = MCOpPredicates.size(); - MCOpPredicateMap[Rec] = Entry; } else break; // No conditions on this operand at all } diff --git a/llvm/utils/gn/secondary/clang/test/BUILD.gn b/llvm/utils/gn/secondary/clang/test/BUILD.gn index f333d457a0f99..c83c21ac7b549 100644 --- a/llvm/utils/gn/secondary/clang/test/BUILD.gn +++ b/llvm/utils/gn/secondary/clang/test/BUILD.gn @@ -75,6 +75,7 @@ write_lit_config("lit_site_cfg") { "Python3_EXECUTABLE=$python_path", "USE_Z3_SOLVER=", "PPC_LINUX_DEFAULT_IEEELONGDOUBLE=0", + "LLVM_INCLUDE_SPIRV_TOOLS_TESTS=0", ] if (clang_enable_static_analyzer) { diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td index 3d378751e798f..11226dae2c3f3 100644 --- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td +++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td @@ -463,7 +463,17 @@ def NVVM_BarrierOp : NVVM_Op<"barrier", [AttrSizedOperandSegments]> { } }]; let hasVerifier = 1; + let assemblyFormat = "(`id` `=` $barrierId^)? (`number_of_threads` `=` $numberOfThreads^)? attr-dict"; + + let builders = [ + OpBuilder<(ins), [{ + return build($_builder, $_state, Value{}, Value{}); + }]>, + OpBuilder<(ins "Value":$barrierId), [{ + return build($_builder, $_state, barrierId, Value{}); + }]> + ]; } def NVVM_BarrierArriveOp : NVVM_PTXBuilder_Op<"barrier.arrive"> diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td index e9922b6fedb12..6b2e4189aea02 100644 --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td @@ -4301,238 +4301,239 @@ class SPIRV_OpCode { // Begin opcode section. Generated from SPIR-V spec; DO NOT MODIFY! 
-def SPIRV_OC_OpNop : I32EnumAttrCase<"OpNop", 0>; -def SPIRV_OC_OpUndef : I32EnumAttrCase<"OpUndef", 1>; -def SPIRV_OC_OpSourceContinued : I32EnumAttrCase<"OpSourceContinued", 2>; -def SPIRV_OC_OpSource : I32EnumAttrCase<"OpSource", 3>; -def SPIRV_OC_OpSourceExtension : I32EnumAttrCase<"OpSourceExtension", 4>; -def SPIRV_OC_OpName : I32EnumAttrCase<"OpName", 5>; -def SPIRV_OC_OpMemberName : I32EnumAttrCase<"OpMemberName", 6>; -def SPIRV_OC_OpString : I32EnumAttrCase<"OpString", 7>; -def SPIRV_OC_OpLine : I32EnumAttrCase<"OpLine", 8>; -def SPIRV_OC_OpExtension : I32EnumAttrCase<"OpExtension", 10>; -def SPIRV_OC_OpExtInstImport : I32EnumAttrCase<"OpExtInstImport", 11>; -def SPIRV_OC_OpExtInst : I32EnumAttrCase<"OpExtInst", 12>; -def SPIRV_OC_OpMemoryModel : I32EnumAttrCase<"OpMemoryModel", 14>; -def SPIRV_OC_OpEntryPoint : I32EnumAttrCase<"OpEntryPoint", 15>; -def SPIRV_OC_OpExecutionMode : I32EnumAttrCase<"OpExecutionMode", 16>; -def SPIRV_OC_OpCapability : I32EnumAttrCase<"OpCapability", 17>; -def SPIRV_OC_OpTypeVoid : I32EnumAttrCase<"OpTypeVoid", 19>; -def SPIRV_OC_OpTypeBool : I32EnumAttrCase<"OpTypeBool", 20>; -def SPIRV_OC_OpTypeInt : I32EnumAttrCase<"OpTypeInt", 21>; -def SPIRV_OC_OpTypeFloat : I32EnumAttrCase<"OpTypeFloat", 22>; -def SPIRV_OC_OpTypeVector : I32EnumAttrCase<"OpTypeVector", 23>; -def SPIRV_OC_OpTypeMatrix : I32EnumAttrCase<"OpTypeMatrix", 24>; -def SPIRV_OC_OpTypeImage : I32EnumAttrCase<"OpTypeImage", 25>; -def SPIRV_OC_OpTypeSampledImage : I32EnumAttrCase<"OpTypeSampledImage", 27>; -def SPIRV_OC_OpTypeArray : I32EnumAttrCase<"OpTypeArray", 28>; -def SPIRV_OC_OpTypeRuntimeArray : I32EnumAttrCase<"OpTypeRuntimeArray", 29>; -def SPIRV_OC_OpTypeStruct : I32EnumAttrCase<"OpTypeStruct", 30>; -def SPIRV_OC_OpTypePointer : I32EnumAttrCase<"OpTypePointer", 32>; -def SPIRV_OC_OpTypeFunction : I32EnumAttrCase<"OpTypeFunction", 33>; -def SPIRV_OC_OpTypeForwardPointer : I32EnumAttrCase<"OpTypeForwardPointer", 39>; -def SPIRV_OC_OpConstantTrue : I32EnumAttrCase<"OpConstantTrue", 41>; -def SPIRV_OC_OpConstantFalse : I32EnumAttrCase<"OpConstantFalse", 42>; -def SPIRV_OC_OpConstant : I32EnumAttrCase<"OpConstant", 43>; -def SPIRV_OC_OpConstantComposite : I32EnumAttrCase<"OpConstantComposite", 44>; -def SPIRV_OC_OpConstantNull : I32EnumAttrCase<"OpConstantNull", 46>; -def SPIRV_OC_OpSpecConstantTrue : I32EnumAttrCase<"OpSpecConstantTrue", 48>; -def SPIRV_OC_OpSpecConstantFalse : I32EnumAttrCase<"OpSpecConstantFalse", 49>; -def SPIRV_OC_OpSpecConstant : I32EnumAttrCase<"OpSpecConstant", 50>; -def SPIRV_OC_OpSpecConstantComposite : I32EnumAttrCase<"OpSpecConstantComposite", 51>; -def SPIRV_OC_OpSpecConstantOp : I32EnumAttrCase<"OpSpecConstantOp", 52>; -def SPIRV_OC_OpFunction : I32EnumAttrCase<"OpFunction", 54>; -def SPIRV_OC_OpFunctionParameter : I32EnumAttrCase<"OpFunctionParameter", 55>; -def SPIRV_OC_OpFunctionEnd : I32EnumAttrCase<"OpFunctionEnd", 56>; -def SPIRV_OC_OpFunctionCall : I32EnumAttrCase<"OpFunctionCall", 57>; -def SPIRV_OC_OpVariable : I32EnumAttrCase<"OpVariable", 59>; -def SPIRV_OC_OpLoad : I32EnumAttrCase<"OpLoad", 61>; -def SPIRV_OC_OpStore : I32EnumAttrCase<"OpStore", 62>; -def SPIRV_OC_OpCopyMemory : I32EnumAttrCase<"OpCopyMemory", 63>; -def SPIRV_OC_OpAccessChain : I32EnumAttrCase<"OpAccessChain", 65>; -def SPIRV_OC_OpPtrAccessChain : I32EnumAttrCase<"OpPtrAccessChain", 67>; -def SPIRV_OC_OpInBoundsPtrAccessChain : I32EnumAttrCase<"OpInBoundsPtrAccessChain", 70>; -def SPIRV_OC_OpDecorate : I32EnumAttrCase<"OpDecorate", 71>; -def SPIRV_OC_OpMemberDecorate : 
I32EnumAttrCase<"OpMemberDecorate", 72>; -def SPIRV_OC_OpVectorExtractDynamic : I32EnumAttrCase<"OpVectorExtractDynamic", 77>; -def SPIRV_OC_OpVectorInsertDynamic : I32EnumAttrCase<"OpVectorInsertDynamic", 78>; -def SPIRV_OC_OpVectorShuffle : I32EnumAttrCase<"OpVectorShuffle", 79>; -def SPIRV_OC_OpCompositeConstruct : I32EnumAttrCase<"OpCompositeConstruct", 80>; -def SPIRV_OC_OpCompositeExtract : I32EnumAttrCase<"OpCompositeExtract", 81>; -def SPIRV_OC_OpCompositeInsert : I32EnumAttrCase<"OpCompositeInsert", 82>; -def SPIRV_OC_OpTranspose : I32EnumAttrCase<"OpTranspose", 84>; -def SPIRV_OC_OpImageDrefGather : I32EnumAttrCase<"OpImageDrefGather", 97>; -def SPIRV_OC_OpImageWrite : I32EnumAttrCase<"OpImageWrite", 99>; -def SPIRV_OC_OpImage : I32EnumAttrCase<"OpImage", 100>; -def SPIRV_OC_OpImageQuerySize : I32EnumAttrCase<"OpImageQuerySize", 104>; -def SPIRV_OC_OpConvertFToU : I32EnumAttrCase<"OpConvertFToU", 109>; -def SPIRV_OC_OpConvertFToS : I32EnumAttrCase<"OpConvertFToS", 110>; -def SPIRV_OC_OpConvertSToF : I32EnumAttrCase<"OpConvertSToF", 111>; -def SPIRV_OC_OpConvertUToF : I32EnumAttrCase<"OpConvertUToF", 112>; -def SPIRV_OC_OpUConvert : I32EnumAttrCase<"OpUConvert", 113>; -def SPIRV_OC_OpSConvert : I32EnumAttrCase<"OpSConvert", 114>; -def SPIRV_OC_OpFConvert : I32EnumAttrCase<"OpFConvert", 115>; -def SPIRV_OC_OpConvertPtrToU : I32EnumAttrCase<"OpConvertPtrToU", 117>; -def SPIRV_OC_OpConvertUToPtr : I32EnumAttrCase<"OpConvertUToPtr", 120>; -def SPIRV_OC_OpPtrCastToGeneric : I32EnumAttrCase<"OpPtrCastToGeneric", 121>; -def SPIRV_OC_OpGenericCastToPtr : I32EnumAttrCase<"OpGenericCastToPtr", 122>; -def SPIRV_OC_OpGenericCastToPtrExplicit : I32EnumAttrCase<"OpGenericCastToPtrExplicit", 123>; -def SPIRV_OC_OpBitcast : I32EnumAttrCase<"OpBitcast", 124>; -def SPIRV_OC_OpSNegate : I32EnumAttrCase<"OpSNegate", 126>; -def SPIRV_OC_OpFNegate : I32EnumAttrCase<"OpFNegate", 127>; -def SPIRV_OC_OpIAdd : I32EnumAttrCase<"OpIAdd", 128>; -def SPIRV_OC_OpFAdd : I32EnumAttrCase<"OpFAdd", 129>; -def SPIRV_OC_OpISub : I32EnumAttrCase<"OpISub", 130>; -def SPIRV_OC_OpFSub : I32EnumAttrCase<"OpFSub", 131>; -def SPIRV_OC_OpIMul : I32EnumAttrCase<"OpIMul", 132>; -def SPIRV_OC_OpFMul : I32EnumAttrCase<"OpFMul", 133>; -def SPIRV_OC_OpUDiv : I32EnumAttrCase<"OpUDiv", 134>; -def SPIRV_OC_OpSDiv : I32EnumAttrCase<"OpSDiv", 135>; -def SPIRV_OC_OpFDiv : I32EnumAttrCase<"OpFDiv", 136>; -def SPIRV_OC_OpUMod : I32EnumAttrCase<"OpUMod", 137>; -def SPIRV_OC_OpSRem : I32EnumAttrCase<"OpSRem", 138>; -def SPIRV_OC_OpSMod : I32EnumAttrCase<"OpSMod", 139>; -def SPIRV_OC_OpFRem : I32EnumAttrCase<"OpFRem", 140>; -def SPIRV_OC_OpFMod : I32EnumAttrCase<"OpFMod", 141>; -def SPIRV_OC_OpVectorTimesScalar : I32EnumAttrCase<"OpVectorTimesScalar", 142>; -def SPIRV_OC_OpMatrixTimesScalar : I32EnumAttrCase<"OpMatrixTimesScalar", 143>; -def SPIRV_OC_OpVectorTimesMatrix : I32EnumAttrCase<"OpVectorTimesMatrix", 144>; -def SPIRV_OC_OpMatrixTimesVector : I32EnumAttrCase<"OpMatrixTimesVector", 145>; -def SPIRV_OC_OpMatrixTimesMatrix : I32EnumAttrCase<"OpMatrixTimesMatrix", 146>; -def SPIRV_OC_OpDot : I32EnumAttrCase<"OpDot", 148>; -def SPIRV_OC_OpIAddCarry : I32EnumAttrCase<"OpIAddCarry", 149>; -def SPIRV_OC_OpISubBorrow : I32EnumAttrCase<"OpISubBorrow", 150>; -def SPIRV_OC_OpUMulExtended : I32EnumAttrCase<"OpUMulExtended", 151>; -def SPIRV_OC_OpSMulExtended : I32EnumAttrCase<"OpSMulExtended", 152>; -def SPIRV_OC_OpIsNan : I32EnumAttrCase<"OpIsNan", 156>; -def SPIRV_OC_OpIsInf : I32EnumAttrCase<"OpIsInf", 157>; -def SPIRV_OC_OpOrdered : 
I32EnumAttrCase<"OpOrdered", 162>; -def SPIRV_OC_OpUnordered : I32EnumAttrCase<"OpUnordered", 163>; -def SPIRV_OC_OpLogicalEqual : I32EnumAttrCase<"OpLogicalEqual", 164>; -def SPIRV_OC_OpLogicalNotEqual : I32EnumAttrCase<"OpLogicalNotEqual", 165>; -def SPIRV_OC_OpLogicalOr : I32EnumAttrCase<"OpLogicalOr", 166>; -def SPIRV_OC_OpLogicalAnd : I32EnumAttrCase<"OpLogicalAnd", 167>; -def SPIRV_OC_OpLogicalNot : I32EnumAttrCase<"OpLogicalNot", 168>; -def SPIRV_OC_OpSelect : I32EnumAttrCase<"OpSelect", 169>; -def SPIRV_OC_OpIEqual : I32EnumAttrCase<"OpIEqual", 170>; -def SPIRV_OC_OpINotEqual : I32EnumAttrCase<"OpINotEqual", 171>; -def SPIRV_OC_OpUGreaterThan : I32EnumAttrCase<"OpUGreaterThan", 172>; -def SPIRV_OC_OpSGreaterThan : I32EnumAttrCase<"OpSGreaterThan", 173>; -def SPIRV_OC_OpUGreaterThanEqual : I32EnumAttrCase<"OpUGreaterThanEqual", 174>; -def SPIRV_OC_OpSGreaterThanEqual : I32EnumAttrCase<"OpSGreaterThanEqual", 175>; -def SPIRV_OC_OpULessThan : I32EnumAttrCase<"OpULessThan", 176>; -def SPIRV_OC_OpSLessThan : I32EnumAttrCase<"OpSLessThan", 177>; -def SPIRV_OC_OpULessThanEqual : I32EnumAttrCase<"OpULessThanEqual", 178>; -def SPIRV_OC_OpSLessThanEqual : I32EnumAttrCase<"OpSLessThanEqual", 179>; -def SPIRV_OC_OpFOrdEqual : I32EnumAttrCase<"OpFOrdEqual", 180>; -def SPIRV_OC_OpFUnordEqual : I32EnumAttrCase<"OpFUnordEqual", 181>; -def SPIRV_OC_OpFOrdNotEqual : I32EnumAttrCase<"OpFOrdNotEqual", 182>; -def SPIRV_OC_OpFUnordNotEqual : I32EnumAttrCase<"OpFUnordNotEqual", 183>; -def SPIRV_OC_OpFOrdLessThan : I32EnumAttrCase<"OpFOrdLessThan", 184>; -def SPIRV_OC_OpFUnordLessThan : I32EnumAttrCase<"OpFUnordLessThan", 185>; -def SPIRV_OC_OpFOrdGreaterThan : I32EnumAttrCase<"OpFOrdGreaterThan", 186>; -def SPIRV_OC_OpFUnordGreaterThan : I32EnumAttrCase<"OpFUnordGreaterThan", 187>; -def SPIRV_OC_OpFOrdLessThanEqual : I32EnumAttrCase<"OpFOrdLessThanEqual", 188>; -def SPIRV_OC_OpFUnordLessThanEqual : I32EnumAttrCase<"OpFUnordLessThanEqual", 189>; -def SPIRV_OC_OpFOrdGreaterThanEqual : I32EnumAttrCase<"OpFOrdGreaterThanEqual", 190>; -def SPIRV_OC_OpFUnordGreaterThanEqual : I32EnumAttrCase<"OpFUnordGreaterThanEqual", 191>; -def SPIRV_OC_OpShiftRightLogical : I32EnumAttrCase<"OpShiftRightLogical", 194>; -def SPIRV_OC_OpShiftRightArithmetic : I32EnumAttrCase<"OpShiftRightArithmetic", 195>; -def SPIRV_OC_OpShiftLeftLogical : I32EnumAttrCase<"OpShiftLeftLogical", 196>; -def SPIRV_OC_OpBitwiseOr : I32EnumAttrCase<"OpBitwiseOr", 197>; -def SPIRV_OC_OpBitwiseXor : I32EnumAttrCase<"OpBitwiseXor", 198>; -def SPIRV_OC_OpBitwiseAnd : I32EnumAttrCase<"OpBitwiseAnd", 199>; -def SPIRV_OC_OpNot : I32EnumAttrCase<"OpNot", 200>; -def SPIRV_OC_OpBitFieldInsert : I32EnumAttrCase<"OpBitFieldInsert", 201>; -def SPIRV_OC_OpBitFieldSExtract : I32EnumAttrCase<"OpBitFieldSExtract", 202>; -def SPIRV_OC_OpBitFieldUExtract : I32EnumAttrCase<"OpBitFieldUExtract", 203>; -def SPIRV_OC_OpBitReverse : I32EnumAttrCase<"OpBitReverse", 204>; -def SPIRV_OC_OpBitCount : I32EnumAttrCase<"OpBitCount", 205>; -def SPIRV_OC_OpEmitVertex : I32EnumAttrCase<"OpEmitVertex", 218>; -def SPIRV_OC_OpEndPrimitive : I32EnumAttrCase<"OpEndPrimitive", 219>; -def SPIRV_OC_OpControlBarrier : I32EnumAttrCase<"OpControlBarrier", 224>; -def SPIRV_OC_OpMemoryBarrier : I32EnumAttrCase<"OpMemoryBarrier", 225>; -def SPIRV_OC_OpAtomicExchange : I32EnumAttrCase<"OpAtomicExchange", 229>; -def SPIRV_OC_OpAtomicCompareExchange : I32EnumAttrCase<"OpAtomicCompareExchange", 230>; -def SPIRV_OC_OpAtomicCompareExchangeWeak : I32EnumAttrCase<"OpAtomicCompareExchangeWeak", 231>; 
-def SPIRV_OC_OpAtomicIIncrement : I32EnumAttrCase<"OpAtomicIIncrement", 232>; -def SPIRV_OC_OpAtomicIDecrement : I32EnumAttrCase<"OpAtomicIDecrement", 233>; -def SPIRV_OC_OpAtomicIAdd : I32EnumAttrCase<"OpAtomicIAdd", 234>; -def SPIRV_OC_OpAtomicISub : I32EnumAttrCase<"OpAtomicISub", 235>; -def SPIRV_OC_OpAtomicSMin : I32EnumAttrCase<"OpAtomicSMin", 236>; -def SPIRV_OC_OpAtomicUMin : I32EnumAttrCase<"OpAtomicUMin", 237>; -def SPIRV_OC_OpAtomicSMax : I32EnumAttrCase<"OpAtomicSMax", 238>; -def SPIRV_OC_OpAtomicUMax : I32EnumAttrCase<"OpAtomicUMax", 239>; -def SPIRV_OC_OpAtomicAnd : I32EnumAttrCase<"OpAtomicAnd", 240>; -def SPIRV_OC_OpAtomicOr : I32EnumAttrCase<"OpAtomicOr", 241>; -def SPIRV_OC_OpAtomicXor : I32EnumAttrCase<"OpAtomicXor", 242>; -def SPIRV_OC_OpPhi : I32EnumAttrCase<"OpPhi", 245>; -def SPIRV_OC_OpLoopMerge : I32EnumAttrCase<"OpLoopMerge", 246>; -def SPIRV_OC_OpSelectionMerge : I32EnumAttrCase<"OpSelectionMerge", 247>; -def SPIRV_OC_OpLabel : I32EnumAttrCase<"OpLabel", 248>; -def SPIRV_OC_OpBranch : I32EnumAttrCase<"OpBranch", 249>; -def SPIRV_OC_OpBranchConditional : I32EnumAttrCase<"OpBranchConditional", 250>; -def SPIRV_OC_OpReturn : I32EnumAttrCase<"OpReturn", 253>; -def SPIRV_OC_OpReturnValue : I32EnumAttrCase<"OpReturnValue", 254>; -def SPIRV_OC_OpUnreachable : I32EnumAttrCase<"OpUnreachable", 255>; -def SPIRV_OC_OpGroupBroadcast : I32EnumAttrCase<"OpGroupBroadcast", 263>; -def SPIRV_OC_OpGroupIAdd : I32EnumAttrCase<"OpGroupIAdd", 264>; -def SPIRV_OC_OpGroupFAdd : I32EnumAttrCase<"OpGroupFAdd", 265>; -def SPIRV_OC_OpGroupFMin : I32EnumAttrCase<"OpGroupFMin", 266>; -def SPIRV_OC_OpGroupUMin : I32EnumAttrCase<"OpGroupUMin", 267>; -def SPIRV_OC_OpGroupSMin : I32EnumAttrCase<"OpGroupSMin", 268>; -def SPIRV_OC_OpGroupFMax : I32EnumAttrCase<"OpGroupFMax", 269>; -def SPIRV_OC_OpGroupUMax : I32EnumAttrCase<"OpGroupUMax", 270>; -def SPIRV_OC_OpGroupSMax : I32EnumAttrCase<"OpGroupSMax", 271>; -def SPIRV_OC_OpNoLine : I32EnumAttrCase<"OpNoLine", 317>; -def SPIRV_OC_OpModuleProcessed : I32EnumAttrCase<"OpModuleProcessed", 330>; -def SPIRV_OC_OpGroupNonUniformElect : I32EnumAttrCase<"OpGroupNonUniformElect", 333>; -def SPIRV_OC_OpGroupNonUniformBroadcast : I32EnumAttrCase<"OpGroupNonUniformBroadcast", 337>; -def SPIRV_OC_OpGroupNonUniformBallot : I32EnumAttrCase<"OpGroupNonUniformBallot", 339>; -def SPIRV_OC_OpGroupNonUniformBallotFindLSB : I32EnumAttrCase<"OpGroupNonUniformBallotFindLSB", 343>; -def SPIRV_OC_OpGroupNonUniformBallotFindMSB : I32EnumAttrCase<"OpGroupNonUniformBallotFindMSB", 344>; -def SPIRV_OC_OpGroupNonUniformShuffle : I32EnumAttrCase<"OpGroupNonUniformShuffle", 345>; -def SPIRV_OC_OpGroupNonUniformShuffleXor : I32EnumAttrCase<"OpGroupNonUniformShuffleXor", 346>; -def SPIRV_OC_OpGroupNonUniformShuffleUp : I32EnumAttrCase<"OpGroupNonUniformShuffleUp", 347>; -def SPIRV_OC_OpGroupNonUniformShuffleDown : I32EnumAttrCase<"OpGroupNonUniformShuffleDown", 348>; -def SPIRV_OC_OpGroupNonUniformIAdd : I32EnumAttrCase<"OpGroupNonUniformIAdd", 349>; -def SPIRV_OC_OpGroupNonUniformFAdd : I32EnumAttrCase<"OpGroupNonUniformFAdd", 350>; -def SPIRV_OC_OpGroupNonUniformIMul : I32EnumAttrCase<"OpGroupNonUniformIMul", 351>; -def SPIRV_OC_OpGroupNonUniformFMul : I32EnumAttrCase<"OpGroupNonUniformFMul", 352>; -def SPIRV_OC_OpGroupNonUniformSMin : I32EnumAttrCase<"OpGroupNonUniformSMin", 353>; -def SPIRV_OC_OpGroupNonUniformUMin : I32EnumAttrCase<"OpGroupNonUniformUMin", 354>; -def SPIRV_OC_OpGroupNonUniformFMin : I32EnumAttrCase<"OpGroupNonUniformFMin", 355>; -def 
SPIRV_OC_OpGroupNonUniformSMax : I32EnumAttrCase<"OpGroupNonUniformSMax", 356>; -def SPIRV_OC_OpGroupNonUniformUMax : I32EnumAttrCase<"OpGroupNonUniformUMax", 357>; -def SPIRV_OC_OpGroupNonUniformFMax : I32EnumAttrCase<"OpGroupNonUniformFMax", 358>; -def SPIRV_OC_OpGroupNonUniformBitwiseAnd : I32EnumAttrCase<"OpGroupNonUniformBitwiseAnd", 359>; -def SPIRV_OC_OpGroupNonUniformBitwiseOr : I32EnumAttrCase<"OpGroupNonUniformBitwiseOr", 360>; -def SPIRV_OC_OpGroupNonUniformBitwiseXor : I32EnumAttrCase<"OpGroupNonUniformBitwiseXor", 361>; -def SPIRV_OC_OpGroupNonUniformLogicalAnd : I32EnumAttrCase<"OpGroupNonUniformLogicalAnd", 362>; -def SPIRV_OC_OpGroupNonUniformLogicalOr : I32EnumAttrCase<"OpGroupNonUniformLogicalOr", 363>; -def SPIRV_OC_OpGroupNonUniformLogicalXor : I32EnumAttrCase<"OpGroupNonUniformLogicalXor", 364>; -def SPIRV_OC_OpSubgroupBallotKHR : I32EnumAttrCase<"OpSubgroupBallotKHR", 4421>; -def SPIRV_OC_OpSDot : I32EnumAttrCase<"OpSDot", 4450>; -def SPIRV_OC_OpUDot : I32EnumAttrCase<"OpUDot", 4451>; -def SPIRV_OC_OpSUDot : I32EnumAttrCase<"OpSUDot", 4452>; -def SPIRV_OC_OpSDotAccSat : I32EnumAttrCase<"OpSDotAccSat", 4453>; -def SPIRV_OC_OpUDotAccSat : I32EnumAttrCase<"OpUDotAccSat", 4454>; -def SPIRV_OC_OpSUDotAccSat : I32EnumAttrCase<"OpSUDotAccSat", 4455>; -def SPIRV_OC_OpTypeCooperativeMatrixKHR : I32EnumAttrCase<"OpTypeCooperativeMatrixKHR", 4456>; -def SPIRV_OC_OpCooperativeMatrixLoadKHR : I32EnumAttrCase<"OpCooperativeMatrixLoadKHR", 4457>; -def SPIRV_OC_OpCooperativeMatrixStoreKHR : I32EnumAttrCase<"OpCooperativeMatrixStoreKHR", 4458>; -def SPIRV_OC_OpCooperativeMatrixMulAddKHR : I32EnumAttrCase<"OpCooperativeMatrixMulAddKHR", 4459>; -def SPIRV_OC_OpCooperativeMatrixLengthKHR : I32EnumAttrCase<"OpCooperativeMatrixLengthKHR", 4460>; -def SPIRV_OC_OpSubgroupBlockReadINTEL : I32EnumAttrCase<"OpSubgroupBlockReadINTEL", 5575>; -def SPIRV_OC_OpSubgroupBlockWriteINTEL : I32EnumAttrCase<"OpSubgroupBlockWriteINTEL", 5576>; -def SPIRV_OC_OpAssumeTrueKHR : I32EnumAttrCase<"OpAssumeTrueKHR", 5630>; -def SPIRV_OC_OpAtomicFAddEXT : I32EnumAttrCase<"OpAtomicFAddEXT", 6035>; -def SPIRV_OC_OpConvertFToBF16INTEL : I32EnumAttrCase<"OpConvertFToBF16INTEL", 6116>; -def SPIRV_OC_OpConvertBF16ToFINTEL : I32EnumAttrCase<"OpConvertBF16ToFINTEL", 6117>; -def SPIRV_OC_OpControlBarrierArriveINTEL : I32EnumAttrCase<"OpControlBarrierArriveINTEL", 6142>; -def SPIRV_OC_OpControlBarrierWaitINTEL : I32EnumAttrCase<"OpControlBarrierWaitINTEL", 6143>; -def SPIRV_OC_OpGroupIMulKHR : I32EnumAttrCase<"OpGroupIMulKHR", 6401>; -def SPIRV_OC_OpGroupFMulKHR : I32EnumAttrCase<"OpGroupFMulKHR", 6402>; +def SPIRV_OC_OpNop : I32EnumAttrCase<"OpNop", 0>; +def SPIRV_OC_OpUndef : I32EnumAttrCase<"OpUndef", 1>; +def SPIRV_OC_OpSourceContinued : I32EnumAttrCase<"OpSourceContinued", 2>; +def SPIRV_OC_OpSource : I32EnumAttrCase<"OpSource", 3>; +def SPIRV_OC_OpSourceExtension : I32EnumAttrCase<"OpSourceExtension", 4>; +def SPIRV_OC_OpName : I32EnumAttrCase<"OpName", 5>; +def SPIRV_OC_OpMemberName : I32EnumAttrCase<"OpMemberName", 6>; +def SPIRV_OC_OpString : I32EnumAttrCase<"OpString", 7>; +def SPIRV_OC_OpLine : I32EnumAttrCase<"OpLine", 8>; +def SPIRV_OC_OpExtension : I32EnumAttrCase<"OpExtension", 10>; +def SPIRV_OC_OpExtInstImport : I32EnumAttrCase<"OpExtInstImport", 11>; +def SPIRV_OC_OpExtInst : I32EnumAttrCase<"OpExtInst", 12>; +def SPIRV_OC_OpMemoryModel : I32EnumAttrCase<"OpMemoryModel", 14>; +def SPIRV_OC_OpEntryPoint : I32EnumAttrCase<"OpEntryPoint", 15>; +def SPIRV_OC_OpExecutionMode : 
I32EnumAttrCase<"OpExecutionMode", 16>; +def SPIRV_OC_OpCapability : I32EnumAttrCase<"OpCapability", 17>; +def SPIRV_OC_OpTypeVoid : I32EnumAttrCase<"OpTypeVoid", 19>; +def SPIRV_OC_OpTypeBool : I32EnumAttrCase<"OpTypeBool", 20>; +def SPIRV_OC_OpTypeInt : I32EnumAttrCase<"OpTypeInt", 21>; +def SPIRV_OC_OpTypeFloat : I32EnumAttrCase<"OpTypeFloat", 22>; +def SPIRV_OC_OpTypeVector : I32EnumAttrCase<"OpTypeVector", 23>; +def SPIRV_OC_OpTypeMatrix : I32EnumAttrCase<"OpTypeMatrix", 24>; +def SPIRV_OC_OpTypeImage : I32EnumAttrCase<"OpTypeImage", 25>; +def SPIRV_OC_OpTypeSampledImage : I32EnumAttrCase<"OpTypeSampledImage", 27>; +def SPIRV_OC_OpTypeArray : I32EnumAttrCase<"OpTypeArray", 28>; +def SPIRV_OC_OpTypeRuntimeArray : I32EnumAttrCase<"OpTypeRuntimeArray", 29>; +def SPIRV_OC_OpTypeStruct : I32EnumAttrCase<"OpTypeStruct", 30>; +def SPIRV_OC_OpTypePointer : I32EnumAttrCase<"OpTypePointer", 32>; +def SPIRV_OC_OpTypeFunction : I32EnumAttrCase<"OpTypeFunction", 33>; +def SPIRV_OC_OpTypeForwardPointer : I32EnumAttrCase<"OpTypeForwardPointer", 39>; +def SPIRV_OC_OpConstantTrue : I32EnumAttrCase<"OpConstantTrue", 41>; +def SPIRV_OC_OpConstantFalse : I32EnumAttrCase<"OpConstantFalse", 42>; +def SPIRV_OC_OpConstant : I32EnumAttrCase<"OpConstant", 43>; +def SPIRV_OC_OpConstantComposite : I32EnumAttrCase<"OpConstantComposite", 44>; +def SPIRV_OC_OpConstantNull : I32EnumAttrCase<"OpConstantNull", 46>; +def SPIRV_OC_OpSpecConstantTrue : I32EnumAttrCase<"OpSpecConstantTrue", 48>; +def SPIRV_OC_OpSpecConstantFalse : I32EnumAttrCase<"OpSpecConstantFalse", 49>; +def SPIRV_OC_OpSpecConstant : I32EnumAttrCase<"OpSpecConstant", 50>; +def SPIRV_OC_OpSpecConstantComposite : I32EnumAttrCase<"OpSpecConstantComposite", 51>; +def SPIRV_OC_OpSpecConstantOp : I32EnumAttrCase<"OpSpecConstantOp", 52>; +def SPIRV_OC_OpFunction : I32EnumAttrCase<"OpFunction", 54>; +def SPIRV_OC_OpFunctionParameter : I32EnumAttrCase<"OpFunctionParameter", 55>; +def SPIRV_OC_OpFunctionEnd : I32EnumAttrCase<"OpFunctionEnd", 56>; +def SPIRV_OC_OpFunctionCall : I32EnumAttrCase<"OpFunctionCall", 57>; +def SPIRV_OC_OpVariable : I32EnumAttrCase<"OpVariable", 59>; +def SPIRV_OC_OpLoad : I32EnumAttrCase<"OpLoad", 61>; +def SPIRV_OC_OpStore : I32EnumAttrCase<"OpStore", 62>; +def SPIRV_OC_OpCopyMemory : I32EnumAttrCase<"OpCopyMemory", 63>; +def SPIRV_OC_OpAccessChain : I32EnumAttrCase<"OpAccessChain", 65>; +def SPIRV_OC_OpPtrAccessChain : I32EnumAttrCase<"OpPtrAccessChain", 67>; +def SPIRV_OC_OpInBoundsPtrAccessChain : I32EnumAttrCase<"OpInBoundsPtrAccessChain", 70>; +def SPIRV_OC_OpDecorate : I32EnumAttrCase<"OpDecorate", 71>; +def SPIRV_OC_OpMemberDecorate : I32EnumAttrCase<"OpMemberDecorate", 72>; +def SPIRV_OC_OpVectorExtractDynamic : I32EnumAttrCase<"OpVectorExtractDynamic", 77>; +def SPIRV_OC_OpVectorInsertDynamic : I32EnumAttrCase<"OpVectorInsertDynamic", 78>; +def SPIRV_OC_OpVectorShuffle : I32EnumAttrCase<"OpVectorShuffle", 79>; +def SPIRV_OC_OpCompositeConstruct : I32EnumAttrCase<"OpCompositeConstruct", 80>; +def SPIRV_OC_OpCompositeExtract : I32EnumAttrCase<"OpCompositeExtract", 81>; +def SPIRV_OC_OpCompositeInsert : I32EnumAttrCase<"OpCompositeInsert", 82>; +def SPIRV_OC_OpTranspose : I32EnumAttrCase<"OpTranspose", 84>; +def SPIRV_OC_OpImageDrefGather : I32EnumAttrCase<"OpImageDrefGather", 97>; +def SPIRV_OC_OpImageWrite : I32EnumAttrCase<"OpImageWrite", 99>; +def SPIRV_OC_OpImage : I32EnumAttrCase<"OpImage", 100>; +def SPIRV_OC_OpImageQuerySize : I32EnumAttrCase<"OpImageQuerySize", 104>; +def SPIRV_OC_OpConvertFToU : 
I32EnumAttrCase<"OpConvertFToU", 109>; +def SPIRV_OC_OpConvertFToS : I32EnumAttrCase<"OpConvertFToS", 110>; +def SPIRV_OC_OpConvertSToF : I32EnumAttrCase<"OpConvertSToF", 111>; +def SPIRV_OC_OpConvertUToF : I32EnumAttrCase<"OpConvertUToF", 112>; +def SPIRV_OC_OpUConvert : I32EnumAttrCase<"OpUConvert", 113>; +def SPIRV_OC_OpSConvert : I32EnumAttrCase<"OpSConvert", 114>; +def SPIRV_OC_OpFConvert : I32EnumAttrCase<"OpFConvert", 115>; +def SPIRV_OC_OpConvertPtrToU : I32EnumAttrCase<"OpConvertPtrToU", 117>; +def SPIRV_OC_OpConvertUToPtr : I32EnumAttrCase<"OpConvertUToPtr", 120>; +def SPIRV_OC_OpPtrCastToGeneric : I32EnumAttrCase<"OpPtrCastToGeneric", 121>; +def SPIRV_OC_OpGenericCastToPtr : I32EnumAttrCase<"OpGenericCastToPtr", 122>; +def SPIRV_OC_OpGenericCastToPtrExplicit : I32EnumAttrCase<"OpGenericCastToPtrExplicit", 123>; +def SPIRV_OC_OpBitcast : I32EnumAttrCase<"OpBitcast", 124>; +def SPIRV_OC_OpSNegate : I32EnumAttrCase<"OpSNegate", 126>; +def SPIRV_OC_OpFNegate : I32EnumAttrCase<"OpFNegate", 127>; +def SPIRV_OC_OpIAdd : I32EnumAttrCase<"OpIAdd", 128>; +def SPIRV_OC_OpFAdd : I32EnumAttrCase<"OpFAdd", 129>; +def SPIRV_OC_OpISub : I32EnumAttrCase<"OpISub", 130>; +def SPIRV_OC_OpFSub : I32EnumAttrCase<"OpFSub", 131>; +def SPIRV_OC_OpIMul : I32EnumAttrCase<"OpIMul", 132>; +def SPIRV_OC_OpFMul : I32EnumAttrCase<"OpFMul", 133>; +def SPIRV_OC_OpUDiv : I32EnumAttrCase<"OpUDiv", 134>; +def SPIRV_OC_OpSDiv : I32EnumAttrCase<"OpSDiv", 135>; +def SPIRV_OC_OpFDiv : I32EnumAttrCase<"OpFDiv", 136>; +def SPIRV_OC_OpUMod : I32EnumAttrCase<"OpUMod", 137>; +def SPIRV_OC_OpSRem : I32EnumAttrCase<"OpSRem", 138>; +def SPIRV_OC_OpSMod : I32EnumAttrCase<"OpSMod", 139>; +def SPIRV_OC_OpFRem : I32EnumAttrCase<"OpFRem", 140>; +def SPIRV_OC_OpFMod : I32EnumAttrCase<"OpFMod", 141>; +def SPIRV_OC_OpVectorTimesScalar : I32EnumAttrCase<"OpVectorTimesScalar", 142>; +def SPIRV_OC_OpMatrixTimesScalar : I32EnumAttrCase<"OpMatrixTimesScalar", 143>; +def SPIRV_OC_OpVectorTimesMatrix : I32EnumAttrCase<"OpVectorTimesMatrix", 144>; +def SPIRV_OC_OpMatrixTimesVector : I32EnumAttrCase<"OpMatrixTimesVector", 145>; +def SPIRV_OC_OpMatrixTimesMatrix : I32EnumAttrCase<"OpMatrixTimesMatrix", 146>; +def SPIRV_OC_OpDot : I32EnumAttrCase<"OpDot", 148>; +def SPIRV_OC_OpIAddCarry : I32EnumAttrCase<"OpIAddCarry", 149>; +def SPIRV_OC_OpISubBorrow : I32EnumAttrCase<"OpISubBorrow", 150>; +def SPIRV_OC_OpUMulExtended : I32EnumAttrCase<"OpUMulExtended", 151>; +def SPIRV_OC_OpSMulExtended : I32EnumAttrCase<"OpSMulExtended", 152>; +def SPIRV_OC_OpIsNan : I32EnumAttrCase<"OpIsNan", 156>; +def SPIRV_OC_OpIsInf : I32EnumAttrCase<"OpIsInf", 157>; +def SPIRV_OC_OpOrdered : I32EnumAttrCase<"OpOrdered", 162>; +def SPIRV_OC_OpUnordered : I32EnumAttrCase<"OpUnordered", 163>; +def SPIRV_OC_OpLogicalEqual : I32EnumAttrCase<"OpLogicalEqual", 164>; +def SPIRV_OC_OpLogicalNotEqual : I32EnumAttrCase<"OpLogicalNotEqual", 165>; +def SPIRV_OC_OpLogicalOr : I32EnumAttrCase<"OpLogicalOr", 166>; +def SPIRV_OC_OpLogicalAnd : I32EnumAttrCase<"OpLogicalAnd", 167>; +def SPIRV_OC_OpLogicalNot : I32EnumAttrCase<"OpLogicalNot", 168>; +def SPIRV_OC_OpSelect : I32EnumAttrCase<"OpSelect", 169>; +def SPIRV_OC_OpIEqual : I32EnumAttrCase<"OpIEqual", 170>; +def SPIRV_OC_OpINotEqual : I32EnumAttrCase<"OpINotEqual", 171>; +def SPIRV_OC_OpUGreaterThan : I32EnumAttrCase<"OpUGreaterThan", 172>; +def SPIRV_OC_OpSGreaterThan : I32EnumAttrCase<"OpSGreaterThan", 173>; +def SPIRV_OC_OpUGreaterThanEqual : I32EnumAttrCase<"OpUGreaterThanEqual", 174>; +def SPIRV_OC_OpSGreaterThanEqual : 
I32EnumAttrCase<"OpSGreaterThanEqual", 175>; +def SPIRV_OC_OpULessThan : I32EnumAttrCase<"OpULessThan", 176>; +def SPIRV_OC_OpSLessThan : I32EnumAttrCase<"OpSLessThan", 177>; +def SPIRV_OC_OpULessThanEqual : I32EnumAttrCase<"OpULessThanEqual", 178>; +def SPIRV_OC_OpSLessThanEqual : I32EnumAttrCase<"OpSLessThanEqual", 179>; +def SPIRV_OC_OpFOrdEqual : I32EnumAttrCase<"OpFOrdEqual", 180>; +def SPIRV_OC_OpFUnordEqual : I32EnumAttrCase<"OpFUnordEqual", 181>; +def SPIRV_OC_OpFOrdNotEqual : I32EnumAttrCase<"OpFOrdNotEqual", 182>; +def SPIRV_OC_OpFUnordNotEqual : I32EnumAttrCase<"OpFUnordNotEqual", 183>; +def SPIRV_OC_OpFOrdLessThan : I32EnumAttrCase<"OpFOrdLessThan", 184>; +def SPIRV_OC_OpFUnordLessThan : I32EnumAttrCase<"OpFUnordLessThan", 185>; +def SPIRV_OC_OpFOrdGreaterThan : I32EnumAttrCase<"OpFOrdGreaterThan", 186>; +def SPIRV_OC_OpFUnordGreaterThan : I32EnumAttrCase<"OpFUnordGreaterThan", 187>; +def SPIRV_OC_OpFOrdLessThanEqual : I32EnumAttrCase<"OpFOrdLessThanEqual", 188>; +def SPIRV_OC_OpFUnordLessThanEqual : I32EnumAttrCase<"OpFUnordLessThanEqual", 189>; +def SPIRV_OC_OpFOrdGreaterThanEqual : I32EnumAttrCase<"OpFOrdGreaterThanEqual", 190>; +def SPIRV_OC_OpFUnordGreaterThanEqual : I32EnumAttrCase<"OpFUnordGreaterThanEqual", 191>; +def SPIRV_OC_OpShiftRightLogical : I32EnumAttrCase<"OpShiftRightLogical", 194>; +def SPIRV_OC_OpShiftRightArithmetic : I32EnumAttrCase<"OpShiftRightArithmetic", 195>; +def SPIRV_OC_OpShiftLeftLogical : I32EnumAttrCase<"OpShiftLeftLogical", 196>; +def SPIRV_OC_OpBitwiseOr : I32EnumAttrCase<"OpBitwiseOr", 197>; +def SPIRV_OC_OpBitwiseXor : I32EnumAttrCase<"OpBitwiseXor", 198>; +def SPIRV_OC_OpBitwiseAnd : I32EnumAttrCase<"OpBitwiseAnd", 199>; +def SPIRV_OC_OpNot : I32EnumAttrCase<"OpNot", 200>; +def SPIRV_OC_OpBitFieldInsert : I32EnumAttrCase<"OpBitFieldInsert", 201>; +def SPIRV_OC_OpBitFieldSExtract : I32EnumAttrCase<"OpBitFieldSExtract", 202>; +def SPIRV_OC_OpBitFieldUExtract : I32EnumAttrCase<"OpBitFieldUExtract", 203>; +def SPIRV_OC_OpBitReverse : I32EnumAttrCase<"OpBitReverse", 204>; +def SPIRV_OC_OpBitCount : I32EnumAttrCase<"OpBitCount", 205>; +def SPIRV_OC_OpEmitVertex : I32EnumAttrCase<"OpEmitVertex", 218>; +def SPIRV_OC_OpEndPrimitive : I32EnumAttrCase<"OpEndPrimitive", 219>; +def SPIRV_OC_OpControlBarrier : I32EnumAttrCase<"OpControlBarrier", 224>; +def SPIRV_OC_OpMemoryBarrier : I32EnumAttrCase<"OpMemoryBarrier", 225>; +def SPIRV_OC_OpAtomicExchange : I32EnumAttrCase<"OpAtomicExchange", 229>; +def SPIRV_OC_OpAtomicCompareExchange : I32EnumAttrCase<"OpAtomicCompareExchange", 230>; +def SPIRV_OC_OpAtomicCompareExchangeWeak : I32EnumAttrCase<"OpAtomicCompareExchangeWeak", 231>; +def SPIRV_OC_OpAtomicIIncrement : I32EnumAttrCase<"OpAtomicIIncrement", 232>; +def SPIRV_OC_OpAtomicIDecrement : I32EnumAttrCase<"OpAtomicIDecrement", 233>; +def SPIRV_OC_OpAtomicIAdd : I32EnumAttrCase<"OpAtomicIAdd", 234>; +def SPIRV_OC_OpAtomicISub : I32EnumAttrCase<"OpAtomicISub", 235>; +def SPIRV_OC_OpAtomicSMin : I32EnumAttrCase<"OpAtomicSMin", 236>; +def SPIRV_OC_OpAtomicUMin : I32EnumAttrCase<"OpAtomicUMin", 237>; +def SPIRV_OC_OpAtomicSMax : I32EnumAttrCase<"OpAtomicSMax", 238>; +def SPIRV_OC_OpAtomicUMax : I32EnumAttrCase<"OpAtomicUMax", 239>; +def SPIRV_OC_OpAtomicAnd : I32EnumAttrCase<"OpAtomicAnd", 240>; +def SPIRV_OC_OpAtomicOr : I32EnumAttrCase<"OpAtomicOr", 241>; +def SPIRV_OC_OpAtomicXor : I32EnumAttrCase<"OpAtomicXor", 242>; +def SPIRV_OC_OpPhi : I32EnumAttrCase<"OpPhi", 245>; +def SPIRV_OC_OpLoopMerge : I32EnumAttrCase<"OpLoopMerge", 246>; +def 
SPIRV_OC_OpSelectionMerge : I32EnumAttrCase<"OpSelectionMerge", 247>; +def SPIRV_OC_OpLabel : I32EnumAttrCase<"OpLabel", 248>; +def SPIRV_OC_OpBranch : I32EnumAttrCase<"OpBranch", 249>; +def SPIRV_OC_OpBranchConditional : I32EnumAttrCase<"OpBranchConditional", 250>; +def SPIRV_OC_OpReturn : I32EnumAttrCase<"OpReturn", 253>; +def SPIRV_OC_OpReturnValue : I32EnumAttrCase<"OpReturnValue", 254>; +def SPIRV_OC_OpUnreachable : I32EnumAttrCase<"OpUnreachable", 255>; +def SPIRV_OC_OpGroupBroadcast : I32EnumAttrCase<"OpGroupBroadcast", 263>; +def SPIRV_OC_OpGroupIAdd : I32EnumAttrCase<"OpGroupIAdd", 264>; +def SPIRV_OC_OpGroupFAdd : I32EnumAttrCase<"OpGroupFAdd", 265>; +def SPIRV_OC_OpGroupFMin : I32EnumAttrCase<"OpGroupFMin", 266>; +def SPIRV_OC_OpGroupUMin : I32EnumAttrCase<"OpGroupUMin", 267>; +def SPIRV_OC_OpGroupSMin : I32EnumAttrCase<"OpGroupSMin", 268>; +def SPIRV_OC_OpGroupFMax : I32EnumAttrCase<"OpGroupFMax", 269>; +def SPIRV_OC_OpGroupUMax : I32EnumAttrCase<"OpGroupUMax", 270>; +def SPIRV_OC_OpGroupSMax : I32EnumAttrCase<"OpGroupSMax", 271>; +def SPIRV_OC_OpNoLine : I32EnumAttrCase<"OpNoLine", 317>; +def SPIRV_OC_OpModuleProcessed : I32EnumAttrCase<"OpModuleProcessed", 330>; +def SPIRV_OC_OpGroupNonUniformElect : I32EnumAttrCase<"OpGroupNonUniformElect", 333>; +def SPIRV_OC_OpGroupNonUniformBroadcast : I32EnumAttrCase<"OpGroupNonUniformBroadcast", 337>; +def SPIRV_OC_OpGroupNonUniformBallot : I32EnumAttrCase<"OpGroupNonUniformBallot", 339>; +def SPIRV_OC_OpGroupNonUniformBallotBitCount : I32EnumAttrCase<"OpGroupNonUniformBallotBitCount", 342>; +def SPIRV_OC_OpGroupNonUniformBallotFindLSB : I32EnumAttrCase<"OpGroupNonUniformBallotFindLSB", 343>; +def SPIRV_OC_OpGroupNonUniformBallotFindMSB : I32EnumAttrCase<"OpGroupNonUniformBallotFindMSB", 344>; +def SPIRV_OC_OpGroupNonUniformShuffle : I32EnumAttrCase<"OpGroupNonUniformShuffle", 345>; +def SPIRV_OC_OpGroupNonUniformShuffleXor : I32EnumAttrCase<"OpGroupNonUniformShuffleXor", 346>; +def SPIRV_OC_OpGroupNonUniformShuffleUp : I32EnumAttrCase<"OpGroupNonUniformShuffleUp", 347>; +def SPIRV_OC_OpGroupNonUniformShuffleDown : I32EnumAttrCase<"OpGroupNonUniformShuffleDown", 348>; +def SPIRV_OC_OpGroupNonUniformIAdd : I32EnumAttrCase<"OpGroupNonUniformIAdd", 349>; +def SPIRV_OC_OpGroupNonUniformFAdd : I32EnumAttrCase<"OpGroupNonUniformFAdd", 350>; +def SPIRV_OC_OpGroupNonUniformIMul : I32EnumAttrCase<"OpGroupNonUniformIMul", 351>; +def SPIRV_OC_OpGroupNonUniformFMul : I32EnumAttrCase<"OpGroupNonUniformFMul", 352>; +def SPIRV_OC_OpGroupNonUniformSMin : I32EnumAttrCase<"OpGroupNonUniformSMin", 353>; +def SPIRV_OC_OpGroupNonUniformUMin : I32EnumAttrCase<"OpGroupNonUniformUMin", 354>; +def SPIRV_OC_OpGroupNonUniformFMin : I32EnumAttrCase<"OpGroupNonUniformFMin", 355>; +def SPIRV_OC_OpGroupNonUniformSMax : I32EnumAttrCase<"OpGroupNonUniformSMax", 356>; +def SPIRV_OC_OpGroupNonUniformUMax : I32EnumAttrCase<"OpGroupNonUniformUMax", 357>; +def SPIRV_OC_OpGroupNonUniformFMax : I32EnumAttrCase<"OpGroupNonUniformFMax", 358>; +def SPIRV_OC_OpGroupNonUniformBitwiseAnd : I32EnumAttrCase<"OpGroupNonUniformBitwiseAnd", 359>; +def SPIRV_OC_OpGroupNonUniformBitwiseOr : I32EnumAttrCase<"OpGroupNonUniformBitwiseOr", 360>; +def SPIRV_OC_OpGroupNonUniformBitwiseXor : I32EnumAttrCase<"OpGroupNonUniformBitwiseXor", 361>; +def SPIRV_OC_OpGroupNonUniformLogicalAnd : I32EnumAttrCase<"OpGroupNonUniformLogicalAnd", 362>; +def SPIRV_OC_OpGroupNonUniformLogicalOr : I32EnumAttrCase<"OpGroupNonUniformLogicalOr", 363>; +def SPIRV_OC_OpGroupNonUniformLogicalXor : 
I32EnumAttrCase<"OpGroupNonUniformLogicalXor", 364>; +def SPIRV_OC_OpSubgroupBallotKHR : I32EnumAttrCase<"OpSubgroupBallotKHR", 4421>; +def SPIRV_OC_OpSDot : I32EnumAttrCase<"OpSDot", 4450>; +def SPIRV_OC_OpUDot : I32EnumAttrCase<"OpUDot", 4451>; +def SPIRV_OC_OpSUDot : I32EnumAttrCase<"OpSUDot", 4452>; +def SPIRV_OC_OpSDotAccSat : I32EnumAttrCase<"OpSDotAccSat", 4453>; +def SPIRV_OC_OpUDotAccSat : I32EnumAttrCase<"OpUDotAccSat", 4454>; +def SPIRV_OC_OpSUDotAccSat : I32EnumAttrCase<"OpSUDotAccSat", 4455>; +def SPIRV_OC_OpTypeCooperativeMatrixKHR : I32EnumAttrCase<"OpTypeCooperativeMatrixKHR", 4456>; +def SPIRV_OC_OpCooperativeMatrixLoadKHR : I32EnumAttrCase<"OpCooperativeMatrixLoadKHR", 4457>; +def SPIRV_OC_OpCooperativeMatrixStoreKHR : I32EnumAttrCase<"OpCooperativeMatrixStoreKHR", 4458>; +def SPIRV_OC_OpCooperativeMatrixMulAddKHR : I32EnumAttrCase<"OpCooperativeMatrixMulAddKHR", 4459>; +def SPIRV_OC_OpCooperativeMatrixLengthKHR : I32EnumAttrCase<"OpCooperativeMatrixLengthKHR", 4460>; +def SPIRV_OC_OpSubgroupBlockReadINTEL : I32EnumAttrCase<"OpSubgroupBlockReadINTEL", 5575>; +def SPIRV_OC_OpSubgroupBlockWriteINTEL : I32EnumAttrCase<"OpSubgroupBlockWriteINTEL", 5576>; +def SPIRV_OC_OpAssumeTrueKHR : I32EnumAttrCase<"OpAssumeTrueKHR", 5630>; +def SPIRV_OC_OpAtomicFAddEXT : I32EnumAttrCase<"OpAtomicFAddEXT", 6035>; +def SPIRV_OC_OpConvertFToBF16INTEL : I32EnumAttrCase<"OpConvertFToBF16INTEL", 6116>; +def SPIRV_OC_OpConvertBF16ToFINTEL : I32EnumAttrCase<"OpConvertBF16ToFINTEL", 6117>; +def SPIRV_OC_OpControlBarrierArriveINTEL : I32EnumAttrCase<"OpControlBarrierArriveINTEL", 6142>; +def SPIRV_OC_OpControlBarrierWaitINTEL : I32EnumAttrCase<"OpControlBarrierWaitINTEL", 6143>; +def SPIRV_OC_OpGroupIMulKHR : I32EnumAttrCase<"OpGroupIMulKHR", 6401>; +def SPIRV_OC_OpGroupFMulKHR : I32EnumAttrCase<"OpGroupFMulKHR", 6402>; def SPIRV_OpcodeAttr : SPIRV_I32EnumAttr<"Opcode", "valid SPIR-V instructions", "opcode", [ @@ -4604,6 +4605,7 @@ def SPIRV_OpcodeAttr : SPIRV_OC_OpGroupFMax, SPIRV_OC_OpGroupUMax, SPIRV_OC_OpGroupSMax, SPIRV_OC_OpNoLine, SPIRV_OC_OpModuleProcessed, SPIRV_OC_OpGroupNonUniformElect, SPIRV_OC_OpGroupNonUniformBroadcast, SPIRV_OC_OpGroupNonUniformBallot, + SPIRV_OC_OpGroupNonUniformBallotBitCount, SPIRV_OC_OpGroupNonUniformBallotFindLSB, SPIRV_OC_OpGroupNonUniformBallotFindMSB, SPIRV_OC_OpGroupNonUniformShuffle, SPIRV_OC_OpGroupNonUniformShuffleXor, SPIRV_OC_OpGroupNonUniformShuffleUp, diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td index a1b866387e2ec..98e435c18d3d7 100644 --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td @@ -14,6 +14,15 @@ #ifndef MLIR_DIALECT_SPIRV_IR_NON_UNIFORM_OPS #define MLIR_DIALECT_SPIRV_IR_NON_UNIFORM_OPS +class SPIRV_AttrIs : PredOpTrait< + operand # " must be " # type # " of value " # value, + CPred<"::llvm::cast<::mlir::spirv::" # type # "Attr>(getProperties()." 
# operand # ").getValue() == ::mlir::spirv::" # type # "::" # value> + >; + +class SPIRV_ExecutionScopeAttrIs : SPIRV_AttrIs; + +// ----- + class SPIRV_GroupNonUniformArithmeticOp traits = []> : SPIRV_Op { @@ -1287,4 +1296,69 @@ def SPIRV_GroupNonUniformLogicalXorOp : // ----- +def SPIRV_GroupNonUniformBallotBitCountOp : SPIRV_Op<"GroupNonUniformBallotBitCount", [ + SPIRV_ExecutionScopeAttrIs<"execution_scope", "Subgroup">, +]> { + let summary = [{ + Result is the number of bits that are set to 1 in Value, considering + only the bits in Value required to represent all bits of the scope + restricted tangle. + }]; + + let description = [{ + Result Type must be a scalar of integer type, whose Signedness operand + is 0. + + Execution is the scope defining the scope restricted tangle affected by + this command. It must be Subgroup. + + The identity I for Operation is 0. + + Value must be a vector of four components of integer type scalar, whose + Width operand is 32 and whose Signedness operand is 0. + + Value is a set of bitfields where the first invocation is represented in + the lowest bit of the first vector component and the last (up to the + size of the scope) is the higher bit number of the last bitmask needed + to represent all bits of the invocations in the scope restricted tangle. + + An invocation will not execute a dynamic instance of this instruction + (X') until all invocations in its scope restricted tangle have executed + all dynamic instances that are program-ordered before X'. + + + + #### Example: + + ```mlir + %count = spirv.GroupNonUniformBallotBitCount %val : vector<4xi32> -> i32 + ``` + }]; + + let availability = [ + MinVersion, + MaxVersion, + Extension<[]>, + Capability<[SPIRV_C_GroupNonUniformBallot]> + ]; + + let arguments = (ins + SPIRV_ScopeAttr:$execution_scope, + SPIRV_GroupOperationAttr:$group_operation, + SPIRV_Vec4>:$value + ); + + let results = (outs + SPIRV_SignlessOrUnsignedInt:$result + ); + + let hasVerifier = 0; + + let assemblyFormat = [{ + $execution_scope $group_operation $value attr-dict `:` type($value) `->` type($result) + }]; +} + +// ----- + #endif // MLIR_DIALECT_SPIRV_IR_NON_UNIFORM_OPS diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td index 98bcbca3b02fa..840558a81493f 100644 --- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td +++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td @@ -1625,7 +1625,7 @@ def Tosa_ReshapeOp : Tosa_InferTensorTypeOp<"reshape"> { let arguments = (ins Tosa_Tensor:$input1, - DenseI64ArrayAttr:$new_shape + Tosa_Shape:$shape ); let results = (outs diff --git a/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h b/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h index 78a8828855437..4e2f1b9cb19a9 100644 --- a/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h +++ b/mlir/include/mlir/Dialect/Tosa/Utils/ConversionUtils.h @@ -145,7 +145,7 @@ TosaOp createOpAndInferShape(ImplicitLocOpBuilder &builder, Type resultTy, template TosaOp CreateOpAndInferShape(ImplicitLocOpBuilder &builder, Type resultTy, Args &&...args) { - if (TosaOp::template hasTrait()) { + if (TosaOp::template hasTrait<::mlir::OpTrait::SameOperandsAndResultRank>()) { // op requires same ranks for tensor operands if constexpr (sizeof...(Args) == 2) { auto argX = std::get<0>(std::tie(args...)); @@ -230,8 +230,11 @@ SmallVector applyTOSAPermutation(ArrayRef input, } // Computes shape value using tosa const_shape op. 
+Value getTosaConstShape(ImplicitLocOpBuilder &builder, + llvm::ArrayRef shape); Value getTosaConstShape(PatternRewriter &rewriter, Location loc, llvm::ArrayRef shape); + SmallVector convertFromMlirShape(ArrayRef shape); bool getConstShapeValue(Operation *op, diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp index 67218cee518d5..e4f055ea2f5c4 100644 --- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp +++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp @@ -1954,9 +1954,10 @@ struct TileConverter : public OpConversionPattern { nestedBuilder.create(op.getLoc(), *args.begin()); }); + auto shapeValue = getTosaConstShape( + rewriter, loc, mlir::tosa::convertFromMlirShape(resultTy.getShape())); rewriter.replaceOpWithNewOp( - op, resultTy, genericOp.getResult(0), - rewriter.getDenseI64ArrayAttr(resultTy.getShape())); + op, resultTy, genericOp.getResult(0), shapeValue); return success(); } }; diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp index 2a9b4d111bdfa..7f029d56e2582 100644 --- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp +++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp @@ -16,6 +16,7 @@ #include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/Dialect/Tensor/Utils/Utils.h" #include "mlir/Dialect/Tosa/IR/TosaOps.h" +#include "mlir/Dialect/Tosa/Utils/ConversionUtils.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Transforms/DialectConversion.h" @@ -235,7 +236,12 @@ class ReshapeConverter : public OpConversionPattern { return rewriter.notifyMatchFailure(reshape.getLoc(), "expected input type to be tensor"); } - auto newShape = reshape.getNewShape(); + + llvm::SmallVector newShape; + if (!tosa::getConstShapeValue(reshape.getShape().getDefiningOp(), + newShape)) { + return failure(); + } // Infer all intermediate types auto inputType = inferReshapeInputType(input, newShape); diff --git a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp index 8e22c879753a3..a9a65ac271b3c 100644 --- a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp +++ b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp @@ -180,7 +180,7 @@ struct TransposeIsReshape : public OpRewritePattern { rewriter.replaceOpWithNewOp( op, op.getType(), op.getInput1(), - rewriter.getDenseI64ArrayAttr(newShape)); + getTosaConstShape(rewriter, op.getLoc(), newShape)); return success(); } }; @@ -948,8 +948,12 @@ OpFoldResult ReshapeOp::fold(FoldAdaptor adaptor) { if (!getInput1().hasOneUse()) return {}; + llvm::SmallVector shapeVec; + if (!tosa::getConstShapeValue(getShape().getDefiningOp(), shapeVec)) + return {}; + return operand.reshape( - llvm::cast(operand.getType()).clone(getNewShape())); + llvm::cast(operand.getType()).clone(shapeVec)); } return {}; diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp index 031c279ff09e2..955021abdd67b 100644 --- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp +++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp @@ -1335,8 +1335,16 @@ LogicalResult tosa::ReshapeOp::inferReturnTypeComponents( SmallVectorImpl &inferredReturnShapes) { ShapeAdaptor inputShape(adaptor.getInput1().getType()); Type inputType = getElementTypeOrSelf(adaptor.getInput1().getType()); - llvm::SmallVector newShapeValue = - convertToMlirShape(adaptor.getNewShape()); + llvm::SmallVector newShapeValue; + if (!tosa::getConstShapeValue(adaptor.getShape().getDefiningOp(), + newShapeValue)) { + auto rank = 
cast(adaptor.getShape().getType()).getRank(); + SmallVector fallback(rank, ShapedType::kDynamic); + inferredReturnShapes.push_back(ShapedTypeComponents(fallback, inputType)); + return success(); + } else { + newShapeValue = convertToMlirShape(newShapeValue); + } // We cannot infer from the total number of elements so we must take the // shape attribute as exact. @@ -1372,13 +1380,19 @@ llvm::LogicalResult tosa::ReshapeOp::verify() { TensorType inputType = getInput1().getType(); RankedTensorType outputType = getType(); - if ((int64_t)getNewShape().size() != outputType.getRank()) + SmallVector shapeValues; + if (!tosa::getConstShapeValue(getShape().getDefiningOp(), shapeValues)) { + // skip following checks if shape is not constant + return mlir::success(); + } + + if ((int64_t)shapeValues.size() != outputType.getRank()) return emitOpError() << "new shape does not match result rank"; for (auto [newShapeDim, outputShapeDim] : - zip(getNewShape(), outputType.getShape())) { - if (newShapeDim != -1 && outputShapeDim != ShapedType::kDynamic && - newShapeDim != outputShapeDim) + zip(shapeValues, outputType.getShape())) { + if (newShapeDim != -1 && newShapeDim != ShapedType::kDynamic && + outputShapeDim != ShapedType::kDynamic && newShapeDim != outputShapeDim) return emitOpError() << "new shape is inconsistent with result shape"; if (newShapeDim != ShapedType::kDynamic && newShapeDim < -1) @@ -1397,10 +1411,10 @@ llvm::LogicalResult tosa::ReshapeOp::verify() { } int64_t newShapeElementsNum = std::accumulate( - getNewShape().begin(), getNewShape().end(), 1LL, + shapeValues.begin(), shapeValues.end(), 1LL, [](int64_t acc, int64_t dim) { return (dim > 0) ? acc * dim : acc; }); bool isStaticNewShape = - llvm::all_of(getNewShape(), [](int64_t s) { return s > 0; }); + llvm::all_of(shapeValues, [](int64_t s) { return s > 0; }); if ((isStaticNewShape && inputElementsNum != newShapeElementsNum) || (!isStaticNewShape && newShapeElementsNum > inputElementsNum)) { return emitOpError() << "cannot reshape " << inputElementsNum @@ -1408,7 +1422,7 @@ llvm::LogicalResult tosa::ReshapeOp::verify() { } } - int missingDims = llvm::count(getNewShape(), -1); + int missingDims = llvm::count(shapeValues, -1); if (missingDims > 1) return emitOpError() << "expected at most one target dimension to be -1"; diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp index 4eba89b59bbd7..617a59bc87c9f 100644 --- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp +++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp @@ -20,12 +20,6 @@ using namespace mlir::tosa; namespace { -SmallVector convertFromMlirShape(ArrayRef shape) { - return to_vector(llvm::map_range(shape, [](int64_t dim) { - return ShapedType::isDynamic(dim) ? 
-1 : dim; - })); -} - struct Conv2DIsFullyConnected : public OpRewritePattern { explicit Conv2DIsFullyConnected(MLIRContext *context) : OpRewritePattern(context) {} @@ -98,12 +92,13 @@ struct Conv2DIsFullyConnected : public OpRewritePattern { llvm::SmallVector revisedInputShape{combined, inputShape[3]}; auto revisedInputShapeType = RankedTensorType::get(revisedInputShape, inputType.getElementType()); - auto reshapedInput = rewriter - .create( - op.getLoc(), revisedInputShapeType, input, - rewriter.getDenseI64ArrayAttr( - convertFromMlirShape(revisedInputShape))) - .getResult(); + auto revisedInputShapeValue = getTosaConstShape( + rewriter, op.getLoc(), convertFromMlirShape(revisedInputShape)); + auto reshapedInput = + rewriter + .create(op.getLoc(), revisedInputShapeType, input, + revisedInputShapeValue) + .getResult(); // Reshape kernel to [OC,KH,KW,IC] -> [OC, IC]. llvm::SmallVector revisedWeightShape{weightShape[0], @@ -111,12 +106,13 @@ struct Conv2DIsFullyConnected : public OpRewritePattern { auto revisedWeightShapeType = RankedTensorType::get( revisedWeightShape, dyn_cast(weight.getType()).getElementType()); - auto reshapedWeight = rewriter - .create( - op.getLoc(), revisedWeightShapeType, weight, - rewriter.getDenseI64ArrayAttr( - convertFromMlirShape(revisedWeightShape))) - .getResult(); + auto revisedWeightShapeValue = getTosaConstShape( + rewriter, op.getLoc(), convertFromMlirShape(revisedWeightShape)); + auto reshapedWeight = + rewriter + .create(op.getLoc(), revisedWeightShapeType, + weight, revisedWeightShapeValue) + .getResult(); // Perform a fully connected network over the reshaped input and weight. llvm::SmallVector fullyConnectedShape{combined, weightShape[0]}; @@ -149,9 +145,10 @@ struct Conv2DIsFullyConnected : public OpRewritePattern { // Reshape output to [N, IH, IW, OC]. 
llvm::SmallVector outputShape{inputShape[0], inputShape[1], inputShape[2], weightShape[0]}; + auto outputShapeValue = getTosaConstShape( + rewriter, op.getLoc(), convertFromMlirShape(outputShape)); rewriter.replaceOpWithNewOp( - op, resultType, fullyConnectedValue, - rewriter.getDenseI64ArrayAttr(convertFromMlirShape(outputShape))); + op, resultType, fullyConnectedValue, outputShapeValue); return success(); } }; diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp index ee857f1998a54..b26397d0e3ed7 100644 --- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp +++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp @@ -55,10 +55,11 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern { inputType = RankedTensorType::get( revisedInputShape, dyn_cast(input.getType()).getElementType()); + auto revisedInputShapeValue = + getTosaConstShape(rewriter, op.getLoc(), revisedInputShape); input = rewriter - .create( - op.getLoc(), inputType, input, - rewriter.getDenseI64ArrayAttr(revisedInputShape)) + .create(op.getLoc(), inputType, input, + revisedInputShapeValue) .getResult(); Type inputETy = inputType.getElementType(); @@ -153,9 +154,10 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern { auto outputShapeType = RankedTensorType::get( outputShape, dyn_cast(input.getType()).getElementType()); + auto outputShapeValue = + getTosaConstShape(rewriter, op->getLoc(), outputShape); Value outputValue = rewriter.create( - op.getLoc(), outputShapeType, mulValue, - rewriter.getDenseI64ArrayAttr(outputShape)); + op.getLoc(), outputShapeType, mulValue, outputShapeValue); Value bias = op.getBias(); if (EqualizeRanks(rewriter, op.getLoc(), outputValue, bias).failed()) { diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp index b5b3e9d76c47e..26baddcf1dd15 100644 --- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp +++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp @@ -159,9 +159,11 @@ class TransposeConvStridedConverter outputChannels, weightHeight / stride[0], stride[0], weightWidth / stride[1], stride[1], inputChannels}; + + ImplicitLocOpBuilder builder(op->getLoc(), rewriter); weight = CreateOpAndInferShape( - rewriter, loc, UnrankedTensorType::get(weightETy), weight, - rewriter.getDenseI64ArrayAttr(weightReshapeDims0)); + builder, UnrankedTensorType::get(weightETy), weight, + getTosaConstShape(rewriter, loc, weightReshapeDims0)); // Transpose the factored-out stride to the output channels. Value transposeWeightVal = rewriter.create( @@ -173,12 +175,13 @@ class TransposeConvStridedConverter transposeWeightVal); // Collapse the strides and output channels into a single dimension. - llvm::SmallVector weightReshapeDims1 = { + llvm::SmallVector weightReshapeDims1 = { outputChannels * stride[0] * stride[1], weightHeight / stride[0], weightWidth / stride[1], inputChannels}; + weight = CreateOpAndInferShape( rewriter, loc, UnrankedTensorType::get(weightETy), weight, - rewriter.getDenseI64ArrayAttr(weightReshapeDims1)); + getTosaConstShape(rewriter, loc, weightReshapeDims1)); ShapedType restridedWeightTy = cast(weight.getType()); weight = CreateOpAndInferShape( @@ -257,9 +260,13 @@ class TransposeConvStridedConverter // Factor striding out of the convolution result. 
llvm::SmallVector convReshapeDims0 = { batch, convHeight, convWidth, stride[0], stride[1], outputChannels}; + + auto convReshapeDims0Value = + getTosaConstShape(rewriter, loc, convReshapeDims0); + conv2d = CreateOpAndInferShape( rewriter, loc, UnrankedTensorType::get(resultETy), conv2d, - rewriter.getDenseI64ArrayAttr(convReshapeDims0)); + convReshapeDims0Value); // Transpose the factored-out stride to the output channels. Value transposeConvVal = rewriter.create( @@ -273,9 +280,13 @@ class TransposeConvStridedConverter // Fuse striding behavior back into width / height. llvm::SmallVector convReshapeDims1 = { batch, convHeight * stride[0], convWidth * stride[1], outputChannels}; + + auto convReshapeDims1Value = + getTosaConstShape(rewriter, loc, convReshapeDims1); + conv2d = CreateOpAndInferShape( rewriter, loc, UnrankedTensorType::get(resultETy), conv2d, - rewriter.getDenseI64ArrayAttr(convReshapeDims1)); + convReshapeDims1Value); // Determine the amount to slice / pad from the result start. int64_t resultSliceTop = std::max(0, -pad[0]); diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp index 520f283a3ba88..281f0529a5c08 100644 --- a/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp +++ b/mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp @@ -402,13 +402,20 @@ std::optional TosaReduceTransposes::buildMappedToValue( return std::nullopt; // Do not insert a TransposeOp, instead we fold the reshape and its attribute. + llvm::SmallVector newShape; + if (!tosa::getConstShapeValue(reshapeOp.getShape().getDefiningOp(), + newShape)) { + // this mean shape is not constant + return std::nullopt; + } + ImplicitLocOpBuilder builder(reshapeOp.getLoc(), rewriter); auto foldedReshape = rewriter.create( reshapeOp.getLoc(), RankedTensorType::get(applyTOSAPermutation(shape, hoistedPerms), reshapeOutputType.getElementType()), reshapeOp.getInput1(), - rewriter.getDenseI64ArrayAttr( - applyTOSAPermutation(reshapeOp.getNewShape(), hoistedPerms))); + getTosaConstShape(builder, applyTOSAPermutation(llvm::ArrayRef(newShape), + hoistedPerms))); return foldedReshape->getResult(0); } diff --git a/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp b/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp index 62b0bc1857e39..8ab12d038849f 100644 --- a/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp +++ b/mlir/lib/Dialect/Tosa/Utils/ConversionUtils.cpp @@ -145,10 +145,10 @@ LogicalResult mlir::tosa::EqualizeRanks(ImplicitLocOpBuilder &builder, llvm::cast(lowerTensorValue.getType()); auto reshapeOutputType = RankedTensorType::get( ArrayRef(reshapeOutputShape), reshapeInputType.getElementType()); + auto reshapeOutputShapeValue = getTosaConstShape(builder, reshapeOutputShape); auto reshapeLower = builder.create( - reshapeOutputType, lowerTensorValue, - builder.getDenseI64ArrayAttr(reshapeOutputShape)); + reshapeOutputType, lowerTensorValue, reshapeOutputShapeValue); if (input1Rank > input2Rank) { input1 = higherTensorValue; @@ -161,15 +161,20 @@ LogicalResult mlir::tosa::EqualizeRanks(ImplicitLocOpBuilder &builder, return success(); } -Value mlir::tosa::getTosaConstShape(PatternRewriter &rewriter, Location loc, +Value mlir::tosa::getTosaConstShape(ImplicitLocOpBuilder &builder, llvm::ArrayRef shape) { - auto attr = rewriter.getIndexTensorAttr(shape); - auto type = mlir::tosa::shapeType::get(rewriter.getContext(), shape.size()); - mlir::Operation *mlir_op = - rewriter.create(loc, type, attr); + auto attr = 
builder.getIndexTensorAttr(convertFromMlirShape(shape)); + auto type = mlir::tosa::shapeType::get(builder.getContext(), shape.size()); + mlir::Operation *mlir_op = builder.create(type, attr); return mlir_op->getResult(0); } +Value mlir::tosa::getTosaConstShape(PatternRewriter &rewriter, Location loc, + llvm::ArrayRef shape) { + ImplicitLocOpBuilder builder(loc, rewriter); + return getTosaConstShape(builder, shape); +} + SmallVector mlir::tosa::convertFromMlirShape(ArrayRef shape) { return to_vector(llvm::map_range(shape, [](int64_t dim) { return ShapedType::isDynamic(dim) ? -1 : dim; diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp index 30ff2df7c38fc..b4a5461f4405d 100644 --- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp +++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp @@ -1991,15 +1991,23 @@ static Value foldScalarExtractFromFromElements(ExtractOp extractOp) { /// Fold an insert or extract operation into an poison value when a poison index /// is found at any dimension of the static position. -static ub::PoisonAttr -foldPoisonIndexInsertExtractOp(MLIRContext *context, - ArrayRef staticPos, int64_t poisonVal) { +static Attribute foldPoisonIndexInsertExtractOp(MLIRContext *context, + ArrayRef staticPos, + int64_t poisonVal) { if (!llvm::is_contained(staticPos, poisonVal)) - return ub::PoisonAttr(); + return {}; return ub::PoisonAttr::get(context); } +/// Fold a vector extract from is a poison source. +static Attribute foldPoisonSrcExtractOp(Attribute srcAttr) { + if (llvm::isa_and_nonnull(srcAttr)) + return srcAttr; + + return {}; +} + OpFoldResult ExtractOp::fold(FoldAdaptor adaptor) { // Fold "vector.extract %v[] : vector<2x2xf32> from vector<2x2xf32>" to %v. // Note: Do not fold "vector.extract %v[] : f32 from vector" (type @@ -2009,6 +2017,8 @@ OpFoldResult ExtractOp::fold(FoldAdaptor adaptor) { if (auto res = foldPoisonIndexInsertExtractOp( getContext(), adaptor.getStaticPosition(), kPoisonIndex)) return res; + if (auto res = foldPoisonSrcExtractOp(adaptor.getVector())) + return res; if (succeeded(foldExtractOpFromExtractChain(*this))) return getResult(); if (auto res = ExtractFromInsertTransposeChainState(*this).fold()) diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorBitCast.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorBitCast.cpp index d8c4939dc742a..89930a6bd35fa 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorBitCast.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorBitCast.cpp @@ -11,6 +11,7 @@ // //===----------------------------------------------------------------------===// +#include "mlir/Dialect/UB/IR/UBOps.h" #include "mlir/Dialect/Vector/IR/VectorOps.h" #include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h" #include "mlir/Dialect/Vector/Utils/VectorUtils.h" @@ -32,7 +33,7 @@ namespace { /// /// Would be unrolled to: /// -/// %result = arith.constant dense<0> : vector<1x2x3x8xi32> +/// %result = ub.poison : vector<1x2x3x8xi32> /// %0 = vector.extract %a[0, 0, 0] ─┐ /// : vector<4xi64> from vector<1x2x3x4xi64> | /// %1 = vector.bitcast %0 | - Repeated 6x for @@ -63,8 +64,7 @@ class UnrollBitCastOp final : public OpRewritePattern { VectorType::get(shape, resultType.getElementType(), scalableDims); Location loc = op.getLoc(); - Value result = rewriter.create( - loc, resultType, rewriter.getZeroAttr(resultType)); + Value result = rewriter.create(loc, resultType); for (auto position : *unrollIterator) { Value extract = rewriter.create(loc, op.getSource(), position); diff --git 
a/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp index 6c36bbaee8523..fec3c6c52e5e4 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp @@ -11,27 +11,16 @@ // //===----------------------------------------------------------------------===// -#include "mlir/Dialect/Affine/IR/AffineOps.h" -#include "mlir/Dialect/Arith/IR/Arith.h" -#include "mlir/Dialect/Arith/Utils/Utils.h" -#include "mlir/Dialect/Linalg/IR/Linalg.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" -#include "mlir/Dialect/SCF/IR/SCF.h" -#include "mlir/Dialect/Tensor/IR/Tensor.h" -#include "mlir/Dialect/Utils/IndexingUtils.h" -#include "mlir/Dialect/Utils/StructuredOpsUtils.h" +#include "mlir/Dialect/UB/IR/UBOps.h" #include "mlir/Dialect/Vector/IR/VectorOps.h" #include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h" #include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h" #include "mlir/Dialect/Vector/Utils/VectorUtils.h" -#include "mlir/IR/BuiltinAttributeInterfaces.h" #include "mlir/IR/BuiltinTypes.h" -#include "mlir/IR/ImplicitLocOpBuilder.h" #include "mlir/IR/Location.h" -#include "mlir/IR/Matchers.h" #include "mlir/IR/PatternMatch.h" #include "mlir/IR/TypeUtilities.h" -#include "mlir/Interfaces/VectorInterfaces.h" #define DEBUG_TYPE "vector-broadcast-lowering" @@ -86,8 +75,7 @@ class BroadcastOpLowering : public OpRewritePattern { VectorType resType = VectorType::Builder(dstType).dropDim(0); Value bcst = rewriter.create(loc, resType, op.getSource()); - Value result = rewriter.create( - loc, dstType, rewriter.getZeroAttr(dstType)); + Value result = rewriter.create(loc, dstType); for (int64_t d = 0, dim = dstType.getDimSize(0); d < dim; ++d) result = rewriter.create(loc, bcst, result, d); rewriter.replaceOp(op, result); @@ -127,8 +115,7 @@ class BroadcastOpLowering : public OpRewritePattern { VectorType resType = VectorType::get(dstType.getShape().drop_front(), eltType, dstType.getScalableDims().drop_front()); - Value result = rewriter.create( - loc, dstType, rewriter.getZeroAttr(dstType)); + Value result = rewriter.create(loc, dstType); if (m == 0) { // Stetch at start. Value ext = rewriter.create(loc, op.getSource(), 0); diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp index 239dc9aa1de6f..9c1e5fcee91de 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp @@ -11,8 +11,8 @@ // //===----------------------------------------------------------------------===// -#include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/Dialect/UB//IR/UBOps.h" #include "mlir/Dialect/Vector/IR/VectorOps.h" #include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h" #include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h" @@ -73,8 +73,7 @@ class ShapeCastOpNDDownCastRewritePattern SmallVector srcIdx(srcRank - 1, 0); SmallVector resIdx(resRank, 0); int64_t extractSize = sourceVectorType.getShape().back(); - Value result = rewriter.create( - loc, resultVectorType, rewriter.getZeroAttr(resultVectorType)); + Value result = rewriter.create(loc, resultVectorType); // Compute the indices of each 1-D vector element of the source extraction // and destination slice insertion and generate such instructions. 
@@ -129,8 +128,7 @@ class ShapeCastOpNDUpCastRewritePattern SmallVector srcIdx(srcRank, 0); SmallVector resIdx(resRank - 1, 0); int64_t extractSize = resultVectorType.getShape().back(); - Value result = rewriter.create( - loc, resultVectorType, rewriter.getZeroAttr(resultVectorType)); + Value result = rewriter.create(loc, resultVectorType); for (int64_t i = 0; i < numElts; ++i) { if (i != 0) { incIdx(srcIdx, sourceVectorType, /*step=*/extractSize); @@ -184,8 +182,7 @@ class ShapeCastOpRewritePattern : public OpRewritePattern { // within the source and result shape. SmallVector srcIdx(srcRank, 0); SmallVector resIdx(resRank, 0); - Value result = rewriter.create( - loc, resultVectorType, rewriter.getZeroAttr(resultVectorType)); + Value result = rewriter.create(loc, resultVectorType); for (int64_t i = 0; i < numElts; i++) { if (i != 0) { incIdx(srcIdx, sourceVectorType); @@ -291,9 +288,7 @@ class ScalableShapeCastOpRewritePattern auto extractionVectorType = VectorType::get( {minExtractionSize}, sourceVectorType.getElementType(), {true}); - Value result = rewriter.create( - loc, resultVectorType, rewriter.getZeroAttr(resultVectorType)); - + Value result = rewriter.create(loc, resultVectorType); SmallVector srcIdx(srcRank, 0); SmallVector resIdx(resRank, 0); diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp index 3c92b222e6bc8..fb4dee33bc5f5 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp @@ -11,26 +11,19 @@ // //===----------------------------------------------------------------------===// -#include "mlir/Dialect/Affine/IR/AffineOps.h" #include "mlir/Dialect/Arith/IR/Arith.h" -#include "mlir/Dialect/Arith/Utils/Utils.h" -#include "mlir/Dialect/Linalg/IR/Linalg.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" -#include "mlir/Dialect/SCF/IR/SCF.h" -#include "mlir/Dialect/Tensor/IR/Tensor.h" +#include "mlir/Dialect/UB/IR/UBOps.h" #include "mlir/Dialect/Utils/IndexingUtils.h" #include "mlir/Dialect/Utils/StructuredOpsUtils.h" #include "mlir/Dialect/Vector/IR/VectorOps.h" #include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h" #include "mlir/Dialect/Vector/Utils/VectorUtils.h" -#include "mlir/IR/BuiltinAttributeInterfaces.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/ImplicitLocOpBuilder.h" #include "mlir/IR/Location.h" -#include "mlir/IR/Matchers.h" #include "mlir/IR/PatternMatch.h" #include "mlir/IR/TypeUtilities.h" -#include "mlir/Interfaces/VectorInterfaces.h" #define DEBUG_TYPE "lower-vector-transpose" @@ -209,7 +202,7 @@ static Value transposeToShuffle16x16(OpBuilder &builder, Value source, int m, ImplicitLocOpBuilder b(source.getLoc(), builder); SmallVector vs; for (int64_t i = 0; i < m; ++i) - vs.push_back(b.create(source, i)); + vs.push_back(b.createOrFold(source, i)); // Interleave 32-bit lanes using // 8x _mm512_unpacklo_epi32 @@ -291,8 +284,7 @@ static Value transposeToShuffle16x16(OpBuilder &builder, Value source, int m, auto reshInputType = VectorType::get( {m, n}, cast(source.getType()).getElementType()); - Value res = - b.create(reshInputType, b.getZeroAttr(reshInputType)); + Value res = b.create(reshInputType); for (int64_t i = 0; i < m; ++i) res = b.create(vs[i], res, i); return res; @@ -368,8 +360,7 @@ class TransposeOpLowering : public OpRewritePattern { // of the leftmost transposed dimensions. 
We traverse every transpose // element using a linearized index that we delinearize to generate the // appropriate indices for the extract/insert operations. - Value result = rewriter.create( - loc, resType, rewriter.getZeroAttr(resType)); + Value result = rewriter.create(loc, resType); int64_t numTransposedElements = ShapedType::getNumElements(prunedInShape); for (int64_t linearIdx = 0; linearIdx < numTransposedElements; @@ -378,9 +369,9 @@ class TransposeOpLowering : public OpRewritePattern { SmallVector insertIdxs(extractIdxs); applyPermutationToVector(insertIdxs, prunedTransp); Value extractOp = - rewriter.create(loc, input, extractIdxs); - result = - rewriter.create(loc, extractOp, result, insertIdxs); + rewriter.createOrFold(loc, input, extractIdxs); + result = rewriter.createOrFold(loc, extractOp, result, + insertIdxs); } rewriter.replaceOp(op, result); diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp index 800c1d9fb1dbf..c1e3850f05c5e 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp @@ -172,7 +172,7 @@ struct UnrollTransferReadPattern readOp.getPermutationMapAttr(), readOp.getPadding(), readOp.getMask(), readOp.getInBoundsAttr()); - result = rewriter.create( + result = rewriter.createOrFold( loc, slicedRead, result, elementOffsets, strides); } rewriter.replaceOp(readOp, result); @@ -213,7 +213,7 @@ struct UnrollTransferWritePattern Value resultTensor; for (SmallVector elementOffsets : StaticTileOffsetRange(originalSize, *targetShape, loopOrder)) { - Value slicedVector = rewriter.create( + Value slicedVector = rewriter.createOrFold( loc, writeOp.getVector(), elementOffsets, *targetShape, strides); SmallVector indices = sliceTransferIndices(elementOffsets, originalIndices, @@ -289,8 +289,9 @@ struct UnrollContractionPattern SmallVector operandShape = applyPermutationMap( permutationMap, ArrayRef(*targetShape)); SmallVector operandStrides(operandOffets.size(), 1); - slicesOperands[index] = rewriter.create( - loc, operand, operandOffets, operandShape, operandStrides); + slicesOperands[index] = + rewriter.createOrFold( + loc, operand, operandOffets, operandShape, operandStrides); }; // Extract the new lhs operand. 
@@ -333,7 +334,7 @@ struct UnrollContractionPattern loc, dstVecType, rewriter.getZeroAttr(dstVecType)); for (const auto &it : accCache) { SmallVector dstStrides(it.first.size(), 1); - result = rewriter.create( + result = rewriter.createOrFold( loc, it.second, result, it.first, dstStrides); } rewriter.replaceOp(contractOp, result); @@ -371,8 +372,10 @@ struct UnrollMultiReductionPattern StaticTileOffsetRange(originalSize, *targetShape)) { SmallVector operands; SmallVector operandStrides(offsets.size(), 1); - Value slicedOperand = rewriter.create( - loc, reductionOp.getSource(), offsets, *targetShape, operandStrides); + Value slicedOperand = + rewriter.createOrFold( + loc, reductionOp.getSource(), offsets, *targetShape, + operandStrides); operands.push_back(slicedOperand); SmallVector dstShape; SmallVector destOffset; @@ -390,7 +393,7 @@ struct UnrollMultiReductionPattern if (accIt != accCache.end()) acc = accIt->second; else - acc = rewriter.create( + acc = rewriter.createOrFold( loc, reductionOp.getAcc(), destOffset, dstShape, accStrides); operands.push_back(acc); auto targetType = VectorType::get( @@ -406,7 +409,7 @@ struct UnrollMultiReductionPattern rewriter.getZeroAttr(reductionOp.getDestType())); for (const auto &it : accCache) { SmallVector dstStrides(it.first.size(), 1); - result = rewriter.create( + result = rewriter.createOrFold( loc, it.second, result, it.first, dstStrides); } rewriter.replaceOp(reductionOp, result); @@ -453,12 +456,12 @@ struct UnrollElementwisePattern : public RewritePattern { continue; } extractOperands.push_back( - rewriter.create( + rewriter.createOrFold( loc, operand.get(), offsets, *targetShape, strides)); } Operation *newOp = cloneOpWithOperandsAndTypes( rewriter, loc, op, extractOperands, newVecType); - result = rewriter.create( + result = rewriter.createOrFold( loc, newOp->getResult(0), result, offsets, strides); } rewriter.replaceOp(op, result); @@ -490,8 +493,9 @@ struct UnrollReductionPattern : public OpRewritePattern { for (SmallVector offsets : StaticTileOffsetRange(originalSize, *targetShape)) { SmallVector strides(offsets.size(), 1); - Value slicedOperand = rewriter.create( - loc, reductionOp.getVector(), offsets, *targetShape, strides); + Value slicedOperand = + rewriter.createOrFold( + loc, reductionOp.getVector(), offsets, *targetShape, strides); Operation *newOp = cloneOpWithOperandsAndTypes( rewriter, loc, reductionOp, slicedOperand, reductionOp.getType()); Value result = newOp->getResult(0); @@ -548,12 +552,13 @@ struct UnrollTransposePattern : public OpRewritePattern { permutedOffsets[indices.value()] = elementOffsets[indices.index()]; permutedShape[indices.value()] = (*targetShape)[indices.index()]; } - Value slicedOperand = rewriter.create( - loc, transposeOp.getVector(), permutedOffsets, permutedShape, - strides); - Value transposedSlice = - rewriter.create(loc, slicedOperand, permutation); - result = rewriter.create( + Value slicedOperand = + rewriter.createOrFold( + loc, transposeOp.getVector(), permutedOffsets, permutedShape, + strides); + Value transposedSlice = rewriter.createOrFold( + loc, slicedOperand, permutation); + result = rewriter.createOrFold( loc, transposedSlice, result, elementOffsets, strides); } rewriter.replaceOp(transposeOp, result); @@ -596,17 +601,19 @@ struct UnrollGatherPattern : public OpRewritePattern { // To get the unrolled gather, extract the same slice based on the // decomposed shape from each of the index, mask, and pass-through // vectors. 
- Value indexSubVec = rewriter.create( + Value indexSubVec = rewriter.createOrFold( loc, gatherOp.getIndexVec(), elementOffsets, *targetShape, strides); - Value maskSubVec = rewriter.create( + Value maskSubVec = rewriter.createOrFold( loc, gatherOp.getMask(), elementOffsets, *targetShape, strides); - Value passThruSubVec = rewriter.create( - loc, gatherOp.getPassThru(), elementOffsets, *targetShape, strides); + Value passThruSubVec = + rewriter.createOrFold( + loc, gatherOp.getPassThru(), elementOffsets, *targetShape, + strides); auto slicedGather = rewriter.create( loc, targetType, gatherOp.getBase(), gatherOp.getIndices(), indexSubVec, maskSubVec, passThruSubVec); - result = rewriter.create( + result = rewriter.createOrFold( loc, slicedGather, result, elementOffsets, strides); } rewriter.replaceOp(gatherOp, result); diff --git a/mlir/lib/Transforms/ViewOpGraph.cpp b/mlir/lib/Transforms/ViewOpGraph.cpp index fa0af7665ba4c..75ee3ed74db5e 100644 --- a/mlir/lib/Transforms/ViewOpGraph.cpp +++ b/mlir/lib/Transforms/ViewOpGraph.cpp @@ -14,6 +14,7 @@ #include "mlir/IR/Operation.h" #include "mlir/Pass/Pass.h" #include "mlir/Support/IndentedOstream.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/Support/Format.h" #include "llvm/Support/GraphWriter.h" #include @@ -29,7 +30,7 @@ using namespace mlir; static const StringRef kLineStyleControlFlow = "dashed"; static const StringRef kLineStyleDataFlow = "solid"; -static const StringRef kShapeNode = "ellipse"; +static const StringRef kShapeNode = "Mrecord"; static const StringRef kShapeNone = "plain"; /// Return the size limits for eliding large attributes. @@ -49,16 +50,25 @@ static std::string strFromOs(function_ref func) { return buf; } -/// Escape special characters such as '\n' and quotation marks. -static std::string escapeString(std::string str) { - return strFromOs([&](raw_ostream &os) { os.write_escaped(str); }); -} - /// Put quotation marks around a given string. static std::string quoteString(const std::string &str) { return "\"" + str + "\""; } +/// For Graphviz record nodes: +/// " Braces, vertical bars and angle brackets must be escaped with a backslash +/// character if you wish them to appear as a literal character " +std::string escapeLabelString(const std::string &str) { + std::string buf; + llvm::raw_string_ostream os(buf); + for (char c : str) { + if (llvm::is_contained({'{', '|', '<', '}', '>', '\n', '"'}, c)) + os << '\\'; + os << c; + } + return buf; +} + using AttributeMap = std::map; namespace { @@ -79,6 +89,12 @@ struct Node { std::optional clusterId; }; +struct DataFlowEdge { + Value value; + Node node; + std::string port; +}; + /// This pass generates a Graphviz dataflow visualization of an MLIR operation. /// Note: See https://www.graphviz.org/doc/info/lang.html for more information /// about the Graphviz DOT language. @@ -107,7 +123,7 @@ class PrintOpPass : public impl::ViewOpGraphBase { private: /// Generate a color mapping that will color every operation with the same /// name the same way. It'll interpolate the hue in the HSV color-space, - /// attempting to keep the contrast suitable for black text. + /// using muted colors that provide good contrast for black text. 
template void initColorMapping(T &irEntity) { backgroundColors.clear(); @@ -120,8 +136,10 @@ class PrintOpPass : public impl::ViewOpGraphBase { }); for (auto indexedOps : llvm::enumerate(ops)) { double hue = ((double)indexedOps.index()) / ops.size(); + // Use lower saturation (0.3) and higher value (0.95) for better + // readability backgroundColors[indexedOps.value()->getName()].second = - std::to_string(hue) + " 1.0 1.0"; + std::to_string(hue) + " 0.3 0.95"; } } @@ -129,8 +147,8 @@ class PrintOpPass : public impl::ViewOpGraphBase { /// emitted. void emitAllEdgeStmts() { if (printDataFlowEdges) { - for (const auto &[value, node, label] : dataFlowEdges) { - emitEdgeStmt(valueToNode[value], node, label, kLineStyleDataFlow); + for (const auto &e : dataFlowEdges) { + emitEdgeStmt(valueToNode[e.value], e.node, e.port, kLineStyleDataFlow); } } @@ -147,8 +165,7 @@ class PrintOpPass : public impl::ViewOpGraphBase { os.indent(); // Emit invisible anchor node from/to which arrows can be drawn. Node anchorNode = emitNodeStmt(" ", kShapeNone); - os << attrStmt("label", quoteString(escapeString(std::move(label)))) - << ";\n"; + os << attrStmt("label", quoteString(label)) << ";\n"; builder(); os.unindent(); os << "}\n"; @@ -176,7 +193,8 @@ class PrintOpPass : public impl::ViewOpGraphBase { // Always emit splat attributes. if (isa(attr)) { - attr.print(os); + os << escapeLabelString( + strFromOs([&](raw_ostream &os) { attr.print(os); })); return; } @@ -184,8 +202,8 @@ class PrintOpPass : public impl::ViewOpGraphBase { auto elements = dyn_cast(attr); if (elements && elements.getNumElements() > largeAttrLimit) { os << std::string(elements.getShapedType().getRank(), '[') << "..." - << std::string(elements.getShapedType().getRank(), ']') << " : " - << elements.getType(); + << std::string(elements.getShapedType().getRank(), ']') << " : "; + emitMlirType(os, elements.getType()); return; } @@ -199,19 +217,27 @@ class PrintOpPass : public impl::ViewOpGraphBase { std::string buf; llvm::raw_string_ostream ss(buf); attr.print(ss); - os << truncateString(buf); + os << escapeLabelString(truncateString(buf)); + } + + // Print a truncated and escaped MLIR type to `os`. + void emitMlirType(raw_ostream &os, Type type) { + std::string buf; + llvm::raw_string_ostream ss(buf); + type.print(ss); + os << escapeLabelString(truncateString(buf)); + } + + // Print a truncated and escaped MLIR operand to `os`. + void emitMlirOperand(raw_ostream &os, Value operand) { + operand.printAsOperand(os, OpPrintingFlags()); } /// Append an edge to the list of edges. /// Note: Edges are written to the output stream via `emitAllEdgeStmts`. - void emitEdgeStmt(Node n1, Node n2, std::string label, StringRef style) { + void emitEdgeStmt(Node n1, Node n2, std::string port, StringRef style) { AttributeMap attrs; attrs["style"] = style.str(); - // Do not label edges that start/end at a cluster boundary. Such edges are - // clipped at the boundary, but labels are not. This can lead to labels - // floating around without any edge next to them. - if (!n1.clusterId && !n2.clusterId) - attrs["label"] = quoteString(escapeString(std::move(label))); // Use `ltail` and `lhead` to draw edges between clusters. 
if (n1.clusterId) attrs["ltail"] = "cluster_" + std::to_string(*n1.clusterId); @@ -219,7 +245,15 @@ class PrintOpPass : public impl::ViewOpGraphBase { attrs["lhead"] = "cluster_" + std::to_string(*n2.clusterId); edges.push_back(strFromOs([&](raw_ostream &os) { - os << llvm::format("v%i -> v%i ", n1.id, n2.id); + os << "v" << n1.id; + if (!port.empty() && !n1.clusterId) + // Attach edge to south compass point of the result + os << ":res" << port << ":s"; + os << " -> "; + os << "v" << n2.id; + if (!port.empty() && !n2.clusterId) + // Attach edge to north compass point of the operand + os << ":arg" << port << ":n"; emitAttrList(os, attrs); })); } @@ -240,11 +274,11 @@ class PrintOpPass : public impl::ViewOpGraphBase { StringRef background = "") { int nodeId = ++counter; AttributeMap attrs; - attrs["label"] = quoteString(escapeString(std::move(label))); + attrs["label"] = quoteString(label); attrs["shape"] = shape.str(); if (!background.empty()) { attrs["style"] = "filled"; - attrs["fillcolor"] = ("\"" + background + "\"").str(); + attrs["fillcolor"] = quoteString(background.str()); } os << llvm::format("v%i ", nodeId); emitAttrList(os, attrs); @@ -252,8 +286,18 @@ class PrintOpPass : public impl::ViewOpGraphBase { return Node(nodeId); } - /// Generate a label for an operation. - std::string getLabel(Operation *op) { + std::string getValuePortName(Value operand) { + // Print value as an operand and omit the leading '%' character. + auto str = strFromOs([&](raw_ostream &os) { + operand.printAsOperand(os, OpPrintingFlags()); + }); + // Replace % and # with _ + std::replace(str.begin(), str.end(), '%', '_'); + std::replace(str.begin(), str.end(), '#', '_'); + return str; + } + + std::string getClusterLabel(Operation *op) { return strFromOs([&](raw_ostream &os) { // Print operation name and type. os << op->getName(); @@ -267,18 +311,73 @@ class PrintOpPass : public impl::ViewOpGraphBase { // Print attributes. if (printAttrs) { - os << "\n"; + os << "\\l"; + for (const NamedAttribute &attr : op->getAttrs()) { + os << escapeLabelString(attr.getName().getValue().str()) << ": "; + emitMlirAttr(os, attr.getValue()); + os << "\\l"; + } + } + }); + } + + /// Generate a label for an operation. + std::string getRecordLabel(Operation *op) { + return strFromOs([&](raw_ostream &os) { + os << "{"; + + // Print operation inputs. + if (op->getNumOperands() > 0) { + os << "{"; + auto operandToPort = [&](Value operand) { + os << " "; + emitMlirOperand(os, operand); + }; + interleave(op->getOperands(), os, operandToPort, "|"); + os << "}|"; + } + // Print operation name and type. + os << op->getName() << "\\l"; + + // Print attributes. + if (printAttrs && !op->getAttrs().empty()) { + // Extra line break to separate attributes from the operation name. + os << "\\l"; for (const NamedAttribute &attr : op->getAttrs()) { - os << '\n' << attr.getName().getValue() << ": "; + os << attr.getName().getValue() << ": "; emitMlirAttr(os, attr.getValue()); + os << "\\l"; } } + + if (op->getNumResults() > 0) { + os << "|{"; + auto resultToPort = [&](Value result) { + os << " "; + emitMlirOperand(os, result); + if (printResultTypes) { + os << " "; + emitMlirType(os, result.getType()); + } + }; + interleave(op->getResults(), os, resultToPort, "|"); + os << "}"; + } + + os << "}"; }); } /// Generate a label for a block argument. 
std::string getLabel(BlockArgument arg) { - return "arg" + std::to_string(arg.getArgNumber()); + return strFromOs([&](raw_ostream &os) { + os << " "; + arg.printAsOperand(os, OpPrintingFlags()); + if (printResultTypes) { + os << " "; + emitMlirType(os, arg.getType()); + } + }); } /// Process a block. Emit a cluster and one node per block argument and @@ -287,14 +386,12 @@ class PrintOpPass : public impl::ViewOpGraphBase { emitClusterStmt([&]() { for (BlockArgument &blockArg : block.getArguments()) valueToNode[blockArg] = emitNodeStmt(getLabel(blockArg)); - // Emit a node for each operation. std::optional prevNode; for (Operation &op : block) { Node nextNode = processOperation(&op); if (printControlFlowEdges && prevNode) - emitEdgeStmt(*prevNode, nextNode, /*label=*/"", - kLineStyleControlFlow); + emitEdgeStmt(*prevNode, nextNode, /*port=*/"", kLineStyleControlFlow); prevNode = nextNode; } }); @@ -311,18 +408,19 @@ class PrintOpPass : public impl::ViewOpGraphBase { for (Region ®ion : op->getRegions()) processRegion(region); }, - getLabel(op)); + getClusterLabel(op)); } else { - node = emitNodeStmt(getLabel(op), kShapeNode, + node = emitNodeStmt(getRecordLabel(op), kShapeNode, backgroundColors[op->getName()].second); } // Insert data flow edges originating from each operand. if (printDataFlowEdges) { unsigned numOperands = op->getNumOperands(); - for (unsigned i = 0; i < numOperands; i++) - dataFlowEdges.push_back({op->getOperand(i), node, - numOperands == 1 ? "" : std::to_string(i)}); + for (unsigned i = 0; i < numOperands; i++) { + auto operand = op->getOperand(i); + dataFlowEdges.push_back({operand, node, getValuePortName(operand)}); + } } for (Value result : op->getResults()) @@ -352,7 +450,7 @@ class PrintOpPass : public impl::ViewOpGraphBase { /// Mapping of SSA values to Graphviz nodes/clusters. DenseMap valueToNode; /// Output for data flow edges is delayed until the end to handle cycles - std::vector> dataFlowEdges; + std::vector dataFlowEdges; /// Counter for generating unique node/subgraph identifiers. 
int counter = 0; diff --git a/mlir/test/Conversion/ConvertToSPIRV/vector-unroll.mlir b/mlir/test/Conversion/ConvertToSPIRV/vector-unroll.mlir index 043f9422d8790..d68ba44ee8840 100644 --- a/mlir/test/Conversion/ConvertToSPIRV/vector-unroll.mlir +++ b/mlir/test/Conversion/ConvertToSPIRV/vector-unroll.mlir @@ -83,17 +83,17 @@ func.func @vaddi_reduction(%arg0 : vector<8xi32>, %arg1 : vector<8xi32>) -> (i32 // CHECK-LABEL: @transpose // CHECK-SAME: (%[[ARG0:.+]]: vector<3xi32>, %[[ARG1:.+]]: vector<3xi32>) func.func @transpose(%arg0 : vector<2x3xi32>) -> (vector<3x2xi32>) { - // CHECK: %[[CST:.*]] = arith.constant dense<0> : vector<2xi32> + // CHECK: %[[UB:.*]] = ub.poison : vector<2xi32> // CHECK: %[[EXTRACT0:.*]] = vector.extract %[[ARG0]][0] : i32 from vector<3xi32> - // CHECK: %[[INSERT0:.*]]= vector.insert %[[EXTRACT0]], %[[CST]] [0] : i32 into vector<2xi32> + // CHECK: %[[INSERT0:.*]]= vector.insert %[[EXTRACT0]], %[[UB]] [0] : i32 into vector<2xi32> // CHECK: %[[EXTRACT1:.*]] = vector.extract %[[ARG1]][0] : i32 from vector<3xi32> // CHECK: %[[INSERT1:.*]] = vector.insert %[[EXTRACT1]], %[[INSERT0]][1] : i32 into vector<2xi32> // CHECK: %[[EXTRACT2:.*]] = vector.extract %[[ARG0]][1] : i32 from vector<3xi32> - // CHECK: %[[INSERT2:.*]] = vector.insert %[[EXTRACT2]], %[[CST]] [0] : i32 into vector<2xi32> + // CHECK: %[[INSERT2:.*]] = vector.insert %[[EXTRACT2]], %[[UB]] [0] : i32 into vector<2xi32> // CHECK: %[[EXTRACT3:.*]] = vector.extract %[[ARG1]][1] : i32 from vector<3xi32> // CHECK: %[[INSERT3:.*]] = vector.insert %[[EXTRACT3]], %[[INSERT2]] [1] : i32 into vector<2xi32> // CHECK: %[[EXTRACT4:.*]] = vector.extract %[[ARG0]][2] : i32 from vector<3xi32> - // CHECK: %[[INSERT4:.*]] = vector.insert %[[EXTRACT4]], %[[CST]] [0] : i32 into vector<2xi32> + // CHECK: %[[INSERT4:.*]] = vector.insert %[[EXTRACT4]], %[[UB]] [0] : i32 into vector<2xi32> // CHECK: %[[EXTRACT5:.*]] = vector.extract %[[ARG1]][2] : i32 from vector<3xi32> // CHECK: %[[INSERT5:.*]] = vector.insert %[[EXTRACT5]], %[[INSERT4]] [1] : i32 into vector<2xi32> // CHECK: return %[[INSERT1]], %[[INSERT3]], %[[INSERT5]] : vector<2xi32>, vector<2xi32>, vector<2xi32> diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-invalid.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-invalid.mlir index 75b48f2b06d89..460e207d62de6 100644 --- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-invalid.mlir +++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-invalid.mlir @@ -24,7 +24,8 @@ func.func @unranked_add(%arg0 : tensor<10x10xf32> , %arg1 : tensor<10x10xf32>, % %reduce = tosa.reduce_max %arg0 {axis = 1 : i32} : (tensor<10x10xf32>) -> tensor<10x1xf32> %1 = tosa.add %reduce, %arg1 : (tensor<10x1xf32>, tensor<10x10xf32>) -> tensor<10x10xf32> %0 = tosa.add %1, %arg2 : (tensor<10x10xf32>, tensor<*xf32>) -> tensor<*xf32> - %2 = tosa.reshape %0 {new_shape = array} : (tensor<*xf32>) -> tensor<10x10xf32> + %s = tosa.const_shape {value = dense<[10, 10]> : tensor<2xindex>} : () -> !tosa.shape<2> + %2 = tosa.reshape %0, %s : (tensor<*xf32>, !tosa.shape<2>) -> tensor<10x10xf32> return %2 : tensor<10x10xf32> } diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir index 6e8501aaaf2af..3031434e6d4ba 100644 --- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir +++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir @@ -1387,7 +1387,8 @@ func.func @tile(%arg0 : tensor<2x3xi8>) -> () { // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], 
#[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<2x3xi8>) outs([[INIT]] : tensor<2x2x1x3xi8>) // CHECK: ^bb0(%[[ARG1:[0-9a-zA-Z_]+]]: i8 // CHECK: linalg.yield %[[ARG1]] : i8 - // CHECK: tosa.reshape [[GENERIC]] {new_shape = array} + // CHECK: [[CONST3:%.+]] = tosa.const_shape {value = dense<[4, 3]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK: tosa.reshape [[GENERIC]], [[CONST3]] %cst21 = tosa.const_shape { value = dense<[2, 1]> : tensor<2xindex> } : () -> !tosa.shape<2> %0 = tosa.tile %arg0, %cst21: (tensor<2x3xi8>, !tosa.shape<2>) -> tensor<4x3xi8> @@ -1395,7 +1396,8 @@ func.func @tile(%arg0 : tensor<2x3xi8>) -> () { // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<2x3xi8>) outs([[INIT]] : tensor<1x2x2x3xi8>) // CHECK: ^bb0(%[[ARG1:[0-9a-zA-Z_]+]]: i8 // CHECK: linalg.yield %[[ARG1]] : i8 - // CHECK: tosa.reshape [[GENERIC]] {new_shape = array} + // CHECK: [[CONST8:%.+]] = tosa.const_shape {value = dense<[2, 6]> : tensor<2xindex>} : () -> !tosa.shape<2> + // tosa.reshape [[GENERIC]], [[CONST8]] %cst12 = tosa.const_shape { value = dense<[1, 2]> : tensor<2xindex> } : () -> !tosa.shape<2> %1 = tosa.tile %arg0, %cst12: (tensor<2x3xi8>, !tosa.shape<2>) -> tensor<2x6xi8> @@ -1403,8 +1405,9 @@ func.func @tile(%arg0 : tensor<2x3xi8>) -> () { // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<2x3xi8>) outs([[INIT]] : tensor<5x2x7x3xi8>) // CHECK: ^bb0(%[[ARG1:[0-9a-zA-Z_]+]]: i8 // CHECK: linalg.yield %[[ARG1]] : i8 - // CHECK: tosa.reshape [[GENERIC]] {new_shape = array} %cst57 = tosa.const_shape { value = dense<[5, 7]> : tensor<2xindex> } : () -> !tosa.shape<2> + // CHECK: [[CONST13:%.+]] = tosa.const_shape {value = dense<[10, 21]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK: tosa.reshape [[GENERIC]], [[CONST13]] %2 = tosa.tile %arg0, %cst57: (tensor<2x3xi8>, !tosa.shape<2>) -> tensor<10x21xi8> return @@ -1424,7 +1427,8 @@ func.func @tile_dyn_input(%arg0 : tensor) -> () { // CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor) outs(%[[INIT]] : tensor<2x?x1x3xi8>) // CHECK: ^bb0(%[[ARG1:.+]]: i8, // CHECK: linalg.yield %[[ARG1]] : i8 - // CHECK: tosa.reshape %[[GENERIC]] {new_shape = array} + // CHECK: %[[CONST3:.+]] = tosa.const_shape {value = dense<[-1, 3]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK: tosa.reshape %[[GENERIC]], %[[CONST3]] %cst21 = tosa.const_shape { value = dense<[2, 1]> : tensor<2xindex> } : () -> !tosa.shape<2> %0 = tosa.tile %arg0, %cst21: (tensor, !tosa.shape<2>) -> tensor @@ -1445,7 +1449,8 @@ func.func @tile_dyn_multiples(%arg0 : tensor<2x3xi8>) -> () { // CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<2x3xi8>) outs(%[[INIT]] : tensor<2x2x?x3xi8>) // CHECK: ^bb0(%[[ARG1:.+]]: i8, // CHECK: linalg.yield %[[ARG1]] : i8 - // CHECK: tosa.reshape %[[GENERIC]] {new_shape = array} + // CHECK: %[[CONST2:.+]] = tosa.const_shape {value = dense<[2, -1]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK: tosa.reshape %[[GENERIC]], %[[CONST2]] %cst = tosa.const_shape { value = dense<[2, -1]> : tensor<2xindex> } 
: () -> !tosa.shape<2> %0 = tosa.tile %arg0, %cst: (tensor<2x3xi8>, !tosa.shape<2>) -> tensor<2x?xi8> diff --git a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir index e83e898644bc0..c2eaba4c563d0 100644 --- a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir +++ b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir @@ -6,7 +6,8 @@ // CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor // CHECK: return %[[ARG_0]] : tensor func.func @test_reshape_0d_same_s2s_explicit(%arg0: tensor) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor + %s = tosa.const_shape { value = dense<> : tensor<0xindex> } : () -> !tosa.shape<0> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<0>) -> tensor return %0 : tensor } @@ -18,7 +19,8 @@ func.func @test_reshape_0d_same_s2s_explicit(%arg0: tensor) -> tensor // CHECK: %[[VAL_1:.*]] = tensor.cast %[[VAL_0]] : tensor<1xf32> to tensor // CHECK: return %[[VAL_1]] : tensor func.func @test_reshape_0d_up_s2d_auto(%arg0: tensor) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor + %s = tosa.const_shape { value = dense<-1> : tensor<1xindex> } : () -> !tosa.shape<1> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<1>) -> tensor return %0 : tensor } @@ -30,7 +32,8 @@ func.func @test_reshape_0d_up_s2d_auto(%arg0: tensor) -> tensor { // CHECK: %[[VAL_1:.*]] = tensor.cast %[[VAL_0]] : tensor<1xf32> to tensor // CHECK: return %[[VAL_1]] : tensor func.func @test_reshape_0d_up_s2d_explicit(%arg0: tensor) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor + %s = tosa.const_shape { value = dense<1> : tensor<1xindex> } : () -> !tosa.shape<1> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<1>) -> tensor return %0 : tensor } @@ -41,7 +44,8 @@ func.func @test_reshape_0d_up_s2d_explicit(%arg0: tensor) -> tensor // CHECK: %[[VAL_0:.*]] = tensor.expand_shape %[[ARG_0]] [] output_shape [1] : tensor into tensor<1xf32> // CHECK: return %[[VAL_0]] : tensor<1xf32> func.func @test_reshape_0d_up_s2s_auto(%arg0: tensor) -> tensor<1xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor<1xf32> + %s = tosa.const_shape { value = dense<-1> : tensor<1xindex> } : () -> !tosa.shape<1> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<1>) -> tensor<1xf32> return %0 : tensor<1xf32> } @@ -52,7 +56,8 @@ func.func @test_reshape_0d_up_s2s_auto(%arg0: tensor) -> tensor<1xf32> { // CHECK: %[[VAL_0:.*]] = tensor.expand_shape %[[ARG_0]] [] output_shape [1] : tensor into tensor<1xf32> // CHECK: return %[[VAL_0]] : tensor<1xf32> func.func @test_reshape_0d_up_s2s_explicit(%arg0: tensor) -> tensor<1xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor<1xf32> + %s = tosa.const_shape { value = dense<1> : tensor<1xindex> } : () -> !tosa.shape<1> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<1>) -> tensor<1xf32> return %0 : tensor<1xf32> } @@ -64,7 +69,8 @@ func.func @test_reshape_0d_up_s2s_explicit(%arg0: tensor) -> tensor<1xf32> // CHECK: %[[VAL_1:.*]] = tensor.collapse_shape %[[VAL_0]] [] : tensor<1xf32> into tensor // CHECK: return %[[VAL_1]] : tensor func.func @test_reshape_1d_down_d2s_explicit(%arg0: tensor) -> tensor { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor) -> tensor + %s = tosa.const_shape { value = dense<> : tensor<0xindex> } : () -> !tosa.shape<0> + %0 = "tosa.reshape"(%arg0, %s) : (tensor, !tosa.shape<0>) -> tensor return %0 : tensor } @@ -75,7 +81,8 @@ 
@@ -75,7 +81,8 @@ func.func @test_reshape_1d_down_d2s_explicit(%arg0: tensor<?xf32>) -> tensor<f3
 // CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] [] : tensor<1xf32> into tensor<f32>
 // CHECK: return %[[VAL_0]] : tensor<f32>
 func.func @test_reshape_1d_down_s2s_explicit(%arg0: tensor<1xf32>) -> tensor<f32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64>} : (tensor<1xf32>) -> tensor<f32>
+  %s = tosa.const_shape { value = dense<> : tensor<0xindex> } : () -> !tosa.shape<0>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<1xf32>, !tosa.shape<0>) -> tensor<f32>
   return %0 : tensor<f32>
 }
@@ -90,7 +97,8 @@ func.func @test_reshape_1d_down_s2s_explicit(%arg0: tensor<1xf32>) -> tensor<f3
 // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1]] output_shape [2, %[[DIV]]] : tensor<?xf32> into tensor<2x?xf32>
 // CHECK: return %[[EXPANDED]] : tensor<2x?xf32>
 func.func @test_reshape_1d_up_d2d_auto(%arg0: tensor<?xf32>) -> tensor<2x?xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, -1>} : (tensor<?xf32>) -> tensor<2x?xf32>
+  %s = tosa.const_shape { value = dense<[2, -1]> : tensor<2xindex> } : () -> !tosa.shape<2>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<?xf32>, !tosa.shape<2>) -> tensor<2x?xf32>
   return %0 : tensor<2x?xf32>
 }
@@ -101,7 +109,8 @@ func.func @test_reshape_1d_up_d2d_auto(%arg0: tensor<?xf32>) -> tensor<2x?xf32>
 // CHECK: %[[VAL_0:.*]] = tensor.expand_shape %[[ARG_0]] {{\[\[}}0, 1]] output_shape [2, 3] : tensor<6xf32> into tensor<2x3xf32>
 // CHECK: return %[[VAL_0]] : tensor<2x3xf32>
 func.func @test_reshape_1d_up_s2s_explicit(%arg0: tensor<6xf32>) -> tensor<2x3xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3>} : (tensor<6xf32>) -> tensor<2x3xf32>
+  %s = tosa.const_shape { value = dense<[2, 3]> : tensor<2xindex> } : () -> !tosa.shape<2>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<6xf32>, !tosa.shape<2>) -> tensor<2x3xf32>
   return %0 : tensor<2x3xf32>
 }
@@ -112,7 +121,8 @@ func.func @test_reshape_1d_up_s2s_explicit(%arg0: tensor<6xf32>) -> tensor<2x3xf
 // CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1]] : tensor<2x?xf32> into tensor<?xf32>
 // CHECK: return %[[VAL_0]] : tensor<?xf32>
 func.func @test_reshape_2d_down_d2d_auto(%arg0: tensor<2x?xf32>) -> tensor<?xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: -1>} : (tensor<2x?xf32>) -> tensor<?xf32>
+  %s = tosa.const_shape { value = dense<-1> : tensor<1xindex> } : () -> !tosa.shape<1>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<2x?xf32>, !tosa.shape<1>) -> tensor<?xf32>
   return %0 : tensor<?xf32>
 }
@@ -123,7 +133,8 @@ func.func @test_reshape_2d_down_d2d_auto(%arg0: tensor<2x?xf32>) -> tensor<?xf
 // CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1]] : tensor<2x3xf32> into tensor<6xf32>
 // CHECK: return %[[VAL_0]] : tensor<6xf32>
 func.func @test_reshape_2d_down_s2s_explicit(%arg0: tensor<2x3xf32>) -> tensor<6xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 6>} : (tensor<2x3xf32>) -> tensor<6xf32>
+  %s = tosa.const_shape { value = dense<6> : tensor<1xindex> } : () -> !tosa.shape<1>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<2x3xf32>, !tosa.shape<1>) -> tensor<6xf32>
   return %0 : tensor<6xf32>
 }
@@ -139,7 +150,8 @@ func.func @test_reshape_2d_down_s2s_explicit(%arg0: tensor<2x3xf32>) -> tensor<6
 // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1]] output_shape [2, %[[DIV]]] : tensor<?xf32> into tensor<2x?xf32>
 // CHECK: return %[[EXPANDED]] : tensor<2x?xf32>
 func.func @test_reshape_2d_same_d2d_auto(%arg0: tensor<?x?xf32>) -> tensor<2x?xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, -1>} : (tensor<?x?xf32>) -> tensor<2x?xf32>
+  %s = tosa.const_shape { value = dense<[2, -1]> : tensor<2xindex> } : () -> !tosa.shape<2>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<?x?xf32>, !tosa.shape<2>) -> tensor<2x?xf32>
   return %0 : tensor<2x?xf32>
 }
@@ -152,10 +164,12 @@ func.func @test_reshape_2d_same_d2d_auto(%arg0: tensor<?x?xf32>) -> tensor<2x?xf
 // CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<4x2xf32> to tensor<?x?xf32>
 // CHECK: return %[[VAL_2]] : tensor<?x?xf32>
 func.func @test_reshape_2d_same_s2d_auto(%arg0: tensor<2x4xf32>) -> tensor<?x?xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: -1, 2>} : (tensor<2x4xf32>) -> tensor<?x?xf32>
+  %s = tosa.const_shape { value = dense<[-1, 2]> : tensor<2xindex> } : () -> !tosa.shape<2>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<2x4xf32>, !tosa.shape<2>) -> tensor<?x?xf32>
   return %0 : tensor<?x?xf32>
 }
+
 // -----
 // CHECK-LABEL: test_reshape_2d_same_s2d_explicit
@@ -165,7 +179,8 @@ func.func @test_reshape_2d_same_s2d_auto(%arg0: tensor<2x4xf32>) -> tensor<?x?xf
 // CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<4x2xf32> to tensor<?x?xf32>
 // CHECK: return %[[VAL_2]] : tensor<?x?xf32>
 func.func @test_reshape_2d_same_s2d_explicit(%arg0: tensor<2x4xf32>) -> tensor<?x?xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 4, 2>} : (tensor<2x4xf32>) -> tensor<?x?xf32>
+  %s = tosa.const_shape { value = dense<[4, 2]> : tensor<2xindex> } : () -> !tosa.shape<2>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<2x4xf32>, !tosa.shape<2>) -> tensor<?x?xf32>
   return %0 : tensor<?x?xf32>
 }
@@ -177,7 +192,8 @@ func.func @test_reshape_2d_same_s2d_explicit(%arg0: tensor<2x4xf32>) -> tensor<?
 // CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1]] output_shape [2, 3] : tensor<6xf32> into tensor<2x3xf32>
 // CHECK: return %[[VAL_1]] : tensor<2x3xf32>
 func.func @test_reshape_2d_same_s2s_explicit(%arg0: tensor<3x2xf32>) -> tensor<2x3xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3>} : (tensor<3x2xf32>) -> tensor<2x3xf32>
+  %s = tosa.const_shape { value = dense<[2, 3]> : tensor<2xindex> } : () -> !tosa.shape<2>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<3x2xf32>, !tosa.shape<2>) -> tensor<2x3xf32>
   return %0 : tensor<2x3xf32>
 }
@@ -194,7 +210,8 @@ func.func @test_reshape_2d_same_s2s_explicit(%arg0: tensor<3x2xf32>) -> tensor<2
 // CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<0x3x?xf32> to tensor<?x?x?xf32>
 // CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>
 func.func @test_reshape_3d_same_d2d_auto_empty(%arg0: tensor<3x2x?xf32>) -> tensor<?x?x?xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 0, 3, -1>} : (tensor<3x2x?xf32>) -> tensor<?x?x?xf32>
+  %s = tosa.const_shape { value = dense<[0, 3, -1]> : tensor<3xindex> } : () -> !tosa.shape<3>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<3x2x?xf32>, !tosa.shape<3>) -> tensor<?x?x?xf32>
   return %0 : tensor<?x?x?xf32>
 }
@@ -211,7 +228,8 @@ func.func @test_reshape_3d_same_d2d_auto_empty(%arg0: tensor<3x2x?xf32>) -> tens
 // CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<2x?x4xf32> to tensor<?x?x?xf32>
 // CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>
 func.func @test_reshape_3d_same_d2d_auto(%arg0: tensor<2x?x?xf32>) -> tensor<?x?x?xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, -1, 4>} : (tensor<2x?x?xf32>) -> tensor<?x?x?xf32>
+  %s = tosa.const_shape { value = dense<[2, -1, 4]> : tensor<3xindex> } : () -> !tosa.shape<3>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<2x?x?xf32>, !tosa.shape<3>) -> tensor<?x?x?xf32>
   return %0 : tensor<?x?x?xf32>
 }
@@ -227,7 +245,8 @@ func.func @test_reshape_3d_same_d2d_auto(%arg0: tensor<2x?x?xf32>) -> tensor into tensor<2x3x?xf32>
 // CHECK: return %[[VAL_1]] : tensor<2x3x?xf32>
 func.func @test_reshape_3d_same_d2d_auto_identity(%arg0: tensor<?x?x?xf32>) -> tensor<2x3x?xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3, -1>} : (tensor<?x?x?xf32>) -> tensor<2x3x?xf32>
+  %s = tosa.const_shape { value = dense<[2, 3, -1]> : tensor<3xindex> } : () -> !tosa.shape<3>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<?x?x?xf32>, !tosa.shape<3>) -> tensor<2x3x?xf32>
   return %0 : tensor<2x3x?xf32>
 }
@@ -244,7 +263,8 @@ func.func @test_reshape_3d_same_d2d_auto_identity(%arg0: tensor<?x?x?xf32>) -> t
 // CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<?x3x2xf32> to tensor<?x?x?xf32>
 // CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>
 func.func @test_reshape_3d_same_d2d_explicit_empty(%arg0: tensor<3x2x?xf32>) -> tensor<?x?x?xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 0, 3, 2>} : (tensor<3x2x?xf32>) -> tensor<?x?x?xf32>
+  %s = tosa.const_shape { value = dense<[0, 3, 2]> : tensor<3xindex> } : () -> !tosa.shape<3>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<3x2x?xf32>, !tosa.shape<3>) -> tensor<?x?x?xf32>
   return %0 : tensor<?x?x?xf32>
 }
@@ -261,7 +281,8 @@ func.func @test_reshape_3d_same_d2d_explicit_empty(%arg0: tensor<3x2x?xf32>) ->
 // CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<?x3x4xf32> to tensor<?x?x?xf32>
 // CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>
 func.func @test_reshape_3d_same_d2d_explicit(%arg0: tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3, 4>} : (tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
+  %s = tosa.const_shape { value = dense<[2, 3, 4]> : tensor<3xindex> } : () -> !tosa.shape<3>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<?x?x?xf32>, !tosa.shape<3>) -> tensor<?x?x?xf32>
   return %0 : tensor<?x?x?xf32>
 }
@@ -272,7 +293,8 @@ func.func @test_reshape_3d_same_d2d_explicit(%arg0: tensor<?x?x?xf32>) -> tensor
 // CHECK: %[[VAL_0:.*]] = tensor.cast %[[ARG_0]] : tensor<?x?x?xf32> to tensor<2x3x?xf32>
 // CHECK: return %[[VAL_0]] : tensor<2x3x?xf32>
 func.func @test_reshape_3d_same_d2d_explicit_identity(%arg0: tensor<?x?x?xf32>) -> tensor<2x3x?xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3, 4>} : (tensor<?x?x?xf32>) -> tensor<2x3x?xf32>
+  %s = tosa.const_shape { value = dense<[2, 3, 4]> : tensor<3xindex> } : () -> !tosa.shape<3>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<?x?x?xf32>, !tosa.shape<3>) -> tensor<2x3x?xf32>
   return %0 : tensor<2x3x?xf32>
 }
@@ -289,7 +311,8 @@ func.func @test_reshape_3d_same_d2d_explicit_identity(%arg0: tensor<?x?x?xf32>)
 // CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<2x?x4xf32> to tensor<2x3x4xf32>
 // CHECK: return %[[VAL_2]] : tensor<2x3x4xf32>
 func.func @test_reshape_3d_same_d2s_auto(%arg0: tensor<?x?x?xf32>) -> tensor<2x3x4xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, -1, 4>} : (tensor<?x?x?xf32>) -> tensor<2x3x4xf32>
+  %s = tosa.const_shape { value = dense<[2, -1, 4]> : tensor<3xindex> } : () -> !tosa.shape<3>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<?x?x?xf32>, !tosa.shape<3>) -> tensor<2x3x4xf32>
   return %0 : tensor<2x3x4xf32>
 }
@@ -306,7 +329,8 @@ func.func @test_reshape_3d_same_d2s_auto(%arg0: tensor<?x?x?xf32>) -> tensor<2x3
 // CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<?x3x4xf32> to tensor<2x3x4xf32>
 // CHECK: return %[[VAL_2]] : tensor<2x3x4xf32>
 func.func @test_reshape_3d_same_d2s_explicit(%arg0: tensor<?x?x?xf32>) -> tensor<2x3x4xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3, 4>} : (tensor<?x?x?xf32>) -> tensor<2x3x4xf32>
+  %s = tosa.const_shape { value = dense<[2, 3, 4]> : tensor<3xindex> } : () -> !tosa.shape<3>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<?x?x?xf32>, !tosa.shape<3>) -> tensor<2x3x4xf32>
   return %0 : tensor<2x3x4xf32>
 }
@@ -316,7 +340,8 @@ func.func @test_reshape_3d_same_d2s_explicit(%arg0: tensor<?x?x?xf32>) -> tensor
 // CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<2x3x4xf32>
 // CHECK: return %[[ARG_0]] : tensor<2x3x4xf32>
 func.func @test_reshape_3d_same_s2s_explicit_identity(%arg0: tensor<2x3x4xf32>) -> tensor<2x3x4xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3, 4>} : (tensor<2x3x4xf32>) -> tensor<2x3x4xf32>
+  %s = tosa.const_shape { value = dense<[2, 3, 4]> : tensor<3xindex> } : () -> !tosa.shape<3>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<2x3x4xf32>, !tosa.shape<3>) -> tensor<2x3x4xf32>
   return %0 : tensor<2x3x4xf32>
 }
@@ -333,7 +358,8 @@ func.func @test_reshape_3d_same_s2s_explicit_identity(%arg0: tensor<2x3x4xf32>)
 // CHECK: %[[CAST:.*]] = tensor.cast %[[EXPANDED]] : tensor<?x3x2x1xf32> to tensor<1x3x2x1xf32>
 // CHECK: return %[[CAST]] : tensor<1x3x2x1xf32>
 func.func @test_reshape_3d_up_d2s_explicit(%input: tensor<?x?x?xf32>) -> tensor<1x3x2x1xf32> {
-  %0 = tosa.reshape %input {new_shape = array<i64: 1, 3, 2, 1>} : (tensor<?x?x?xf32>) -> tensor<1x3x2x1xf32>
+  %s = tosa.const_shape { value = dense<[1, 3, 2, 1]> : tensor<4xindex> } : () -> !tosa.shape<4>
+  %0 = tosa.reshape %input, %s : (tensor<?x?x?xf32>, !tosa.shape<4>) -> tensor<1x3x2x1xf32>
   return %0 : tensor<1x3x2x1xf32>
 }
@@ -345,7 +371,8 @@ func.func @test_reshape_3d_up_d2s_explicit(%input: tensor<?x?x?xf32>) -> tensor<
 // CHECK: %[[VAL_1:.*]] = tensor.collapse_shape %[[VAL_0]] [] : tensor<1x1x1x1xf32> into tensor<f32>
 // CHECK: return %[[VAL_1]] : tensor<f32>
 func.func @test_reshape_4d_down_d2s_explicit(%arg0: tensor<?x?x?x?xf32>) -> tensor<f32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64>} : (tensor<?x?x?x?xf32>) -> tensor<f32>
+  %s = tosa.const_shape { value = dense<> : tensor<0xindex> } : () -> !tosa.shape<0>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<?x?x?x?xf32>, !tosa.shape<0>) -> tensor<f32>
   return %0 : tensor<f32>
 }
@@ -361,7 +388,8 @@ func.func @test_reshape_4d_down_d2s_explicit(%arg0: tensor<?x?x?x?xf32>) -> tens
 // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSED]] {{\[\[}}0, 1, 2]] output_shape [%[[VAL_0]], 2, 3] : tensor<?xf32> into tensor<?x2x3xf32>
 // CHECK: return %[[EXPANDED]] : tensor<?x2x3xf32>
 func.func @test_reshape_5d_down_d2d_auto(%arg0: tensor<?x?x?x2x3xf32>) -> tensor<?x2x3xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: -1, 2, 3>} : (tensor<?x?x?x2x3xf32>) -> tensor<?x2x3xf32>
+  %s = tosa.const_shape { value = dense<[-1, 2, 3]> : tensor<3xindex> } : () -> !tosa.shape<3>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<?x?x?x2x3xf32>, !tosa.shape<3>) -> tensor<?x2x3xf32>
   return %0 : tensor<?x2x3xf32>
 }
@@ -377,7 +405,8 @@ func.func @test_reshape_5d_down_d2d_auto(%arg0: tensor<?x?x?x2x3xf32>) -> tensor
 // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSED]] {{\[\[}}0, 1, 2]] output_shape [%[[VAL_0]], 5, 77] : tensor<?xf32> into tensor<?x5x77xf32>
 // CHECK: return %[[EXPANDED]] : tensor<?x5x77xf32>
 func.func @test_reshape_6d_down_d2d_auto(%arg0: tensor<1x2x?x5x7x11xf32>) -> tensor<?x5x77xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: -1, 5, 77>} : (tensor<1x2x?x5x7x11xf32>) -> tensor<?x5x77xf32>
+  %s = tosa.const_shape { value = dense<[-1, 5, 77]> : tensor<3xindex> } : () -> !tosa.shape<3>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<1x2x?x5x7x11xf32>, !tosa.shape<3>) -> tensor<?x5x77xf32>
   return %0 : tensor<?x5x77xf32>
 }
@@ -388,7 +417,8 @@ func.func @test_reshape_6d_down_d2d_auto(%arg0: tensor<1x2x?x5x7x11xf32>) -> ten
 // CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2], [3], [4, 5]] : tensor<1x2x3x5x7x11xf32> into tensor<6x5x77xf32>
 // CHECK: return %[[VAL_0]] : tensor<6x5x77xf32>
 func.func @test_reshape_6d_down_s2s_auto(%arg0: tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 6, 5, -1>} : (tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32>
+  %s = tosa.const_shape { value = dense<[6, 5, -1]> : tensor<3xindex> } : () -> !tosa.shape<3>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<1x2x3x5x7x11xf32>, !tosa.shape<3>) -> tensor<6x5x77xf32>
   return %0 : tensor<6x5x77xf32>
 }
@@ -400,10 +430,13 @@ func.func @test_reshape_6d_down_s2s_auto(%arg0: tensor<1x2x3x5x7x11xf32>) -> ten
 //
 // See https://github.com/llvm/llvm-project/pull/91521 for a full description.
+// -----
+
 // CHECK-LABEL: reshape_bug_fix
 // CHECK: tensor.expand_shape
 func.func @reshape_bug_fix(%arg0: tensor<?xf32>) -> tensor<1x1x1x?xf32> {
-  %0 = tosa.reshape %arg0 {new_shape = array<i64: 1, 1, 1, -1>} : (tensor<?xf32>) -> tensor<1x1x1x?xf32>
+  %1 = "tosa.const_shape"() {value = dense<[1, 1, 1, -1]> : tensor<4xindex>} : () -> !tosa.shape<4>
+  %0 = "tosa.reshape"(%arg0, %1) : (tensor<?xf32>, !tosa.shape<4>) -> tensor<1x1x1x?xf32>
   return %0 : tensor<1x1x1x?xf32>
 }
@@ -414,21 +447,22 @@ func.func @reshape_bug_fix(%arg0: tensor<?xf32>) -> tensor<1x1x1x?xf32> {
 // CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2], [3], [4, 5]] : tensor<1x2x3x5x7x11xf32> into tensor<6x5x77xf32>
 // CHECK: return %[[VAL_0]] : tensor<6x5x77xf32>
 func.func @test_reshape_6d_down_s2s_explicit(%arg0: tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32> {
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 6, 5, 77>} : (tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32>
+  %s = tosa.const_shape { value = dense<[6, 5, 77]> : tensor<3xindex> } : () -> !tosa.shape<3>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<1x2x3x5x7x11xf32>, !tosa.shape<3>) -> tensor<6x5x77xf32>
   return %0 : tensor<6x5x77xf32>
 }
 
 // -----
 
 // CHECK-LABEL: @test_reshape_samerank_unsigned
-// CHECK-SAME: (%[[ARG0:.*]]: tensor<3x2xui8>)
+// CHECK-SAME: (%[[VAL_0:.*]]: tensor<3x2xui8>)
 func.func @test_reshape_samerank_unsigned(%arg0: tensor<3x2xui8>) -> tensor<2x3xui8> {
-  // CHECK-NEXT: %[[CAST1:.*]] = builtin.unrealized_conversion_cast %[[ARG0]] : tensor<3x2xui8> to tensor<3x2xi8>
-  // CHECK-NEXT: %[[RESHAPE1:.*]] = tensor.collapse_shape %[[CAST1]] {{\[}}[0, 1]] : tensor<3x2xi8> into tensor<6xi8>
-  // CHECK-NEXT: %[[RESHAPE2:.*]] = tensor.expand_shape %[[RESHAPE1]] {{\[}}[0, 1]] output_shape {{\[}}2, 3] : tensor<6xi8> into tensor<2x3xi8>
-  // CHECK-NEXT: %[[CAST2:.*]] = builtin.unrealized_conversion_cast %[[RESHAPE2]] : tensor<2x3xi8> to tensor<2x3xui8>
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3>} : (tensor<3x2xui8>) -> tensor<2x3xui8>
-  // CHECK-NEXT: return %[[CAST2]]
+  // CHECK: %[[CAST1:.*]] = builtin.unrealized_conversion_cast %arg0 : tensor<3x2xui8> to tensor<3x2xi8>
+  // CHECK: %[[RESHAPE1:.*]] = tensor.collapse_shape %[[CAST1]] {{\[}}[0, 1]] : tensor<3x2xi8> into tensor<6xi8>
+  // CHECK: %[[RESHAPE2:.*]] = tensor.expand_shape %[[RESHAPE1]] {{\[}}[0, 1]] output_shape {{\[}}2, 3] : tensor<6xi8> into tensor<2x3xi8>
+  // CHECK: %[[CAST2:.*]] = builtin.unrealized_conversion_cast %[[RESHAPE2]] : tensor<2x3xi8> to tensor<2x3xui8>
+  %s = tosa.const_shape { value = dense<[2, 3]> : tensor<2xindex> } : () -> !tosa.shape<2>
+  %0 = "tosa.reshape"(%arg0, %s): (tensor<3x2xui8>, !tosa.shape<2>) -> tensor<2x3xui8>
   return %0 : tensor<2x3xui8>
 }
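Several of the reshape tests above rely on the -1 placeholder in the shape operand: at most one dimension may be -1, and it is inferred so that the element count is preserved. A small sketch of the rule, assuming a 6-element input (the function name is illustrative, not from the patch):

func.func @infer_example(%t: tensor<6xf32>) -> tensor<2x3xf32> {
  %shape = tosa.const_shape { value = dense<[2, -1]> : tensor<2xindex> } : () -> !tosa.shape<2>
  // 6 elements / 2 = 3, so the -1 dimension resolves to 3.
  %r = tosa.reshape %t, %shape : (tensor<6xf32>, !tosa.shape<2>) -> tensor<2x3xf32>
  return %r : tensor<2x3xf32>
}

The invalid.mlir hunks later in this patch verify the diagnostics for the failure cases: more than one -1, or an element count that does not divide evenly.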
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
index f17e8f02c0d80..36b37a137ac1e 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -185,7 +185,7 @@ func.func @broadcast_vec2d_from_vec0d(%arg0: vector<f32>) -> vector<3x2xf32> {
 // CHECK-LABEL: @broadcast_vec2d_from_vec0d(
 // CHECK-SAME: %[[A:.*]]: vector<f32>)
 // CHECK: %[[T0:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<f32> to vector<1xf32>
-// CHECK: %[[T1:.*]] = arith.constant dense<0.000000e+00> : vector<3x2xf32>
+// CHECK: %[[T1:.*]] = ub.poison : vector<3x2xf32>
 // CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
 // CHECK: %[[T4:.*]] = llvm.mlir.constant(0 : index) : i64
 // CHECK:
%[[T5:.*]] = llvm.extractelement %[[T0]][%[[T4]] : i64] : vector<1xf32> @@ -205,7 +205,7 @@ func.func @broadcast_vec2d_from_vec1d(%arg0: vector<2xf32>) -> vector<3x2xf32> { } // CHECK-LABEL: @broadcast_vec2d_from_vec1d( // CHECK-SAME: %[[A:.*]]: vector<2xf32>) -// CHECK: %[[T0:.*]] = arith.constant dense<0.000000e+00> : vector<3x2xf32> +// CHECK: %[[T0:.*]] = ub.poison : vector<3x2xf32> // CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>> // CHECK: %[[T2:.*]] = llvm.insertvalue %[[A]], %[[T1]][0] : !llvm.array<3 x vector<2xf32>> // CHECK: %[[T3:.*]] = llvm.insertvalue %[[A]], %[[T2]][1] : !llvm.array<3 x vector<2xf32>> @@ -221,7 +221,7 @@ func.func @broadcast_vec2d_from_vec1d_scalable(%arg0: vector<[2]xf32>) -> vector } // CHECK-LABEL: @broadcast_vec2d_from_vec1d_scalable( // CHECK-SAME: %[[A:.*]]: vector<[2]xf32>) -// CHECK: %[[T0:.*]] = arith.constant dense<0.000000e+00> : vector<3x[2]xf32> +// CHECK: %[[T0:.*]] = ub.poison : vector<3x[2]xf32> // CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<3x[2]xf32> to !llvm.array<3 x vector<[2]xf32>> // CHECK: %[[T2:.*]] = llvm.insertvalue %[[A]], %[[T1]][0] : !llvm.array<3 x vector<[2]xf32>> // CHECK: %[[T3:.*]] = llvm.insertvalue %[[A]], %[[T2]][1] : !llvm.array<3 x vector<[2]xf32>> @@ -238,7 +238,7 @@ func.func @broadcast_vec2d_from_index_vec1d(%arg0: vector<2xindex>) -> vector<3x // CHECK-LABEL: @broadcast_vec2d_from_index_vec1d( // CHECK-SAME: %[[A:.*]]: vector<2xindex>) // CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<2xindex> to vector<2xi64> -// CHECK: %[[T0:.*]] = arith.constant dense<0> : vector<3x2xindex> +// CHECK: %[[T0:.*]] = ub.poison : vector<3x2xindex> // CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<3x2xindex> to !llvm.array<3 x vector<2xi64>> // CHECK: %[[T3:.*]] = llvm.insertvalue %[[T1]], %[[T2]][0] : !llvm.array<3 x vector<2xi64>> @@ -254,7 +254,7 @@ func.func @broadcast_vec2d_from_index_vec1d_scalable(%arg0: vector<[2]xindex>) - // CHECK-LABEL: @broadcast_vec2d_from_index_vec1d_scalable( // CHECK-SAME: %[[A:.*]]: vector<[2]xindex>) // CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<[2]xindex> to vector<[2]xi64> -// CHECK: %[[T0:.*]] = arith.constant dense<0> : vector<3x[2]xindex> +// CHECK: %[[T0:.*]] = ub.poison : vector<3x[2]xindex> // CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<3x[2]xindex> to !llvm.array<3 x vector<[2]xi64>> // CHECK: %[[T3:.*]] = llvm.insertvalue %[[T1]], %[[T2]][0] : !llvm.array<3 x vector<[2]xi64>> @@ -269,9 +269,9 @@ func.func @broadcast_vec3d_from_vec1d(%arg0: vector<2xf32>) -> vector<4x3x2xf32> } // CHECK-LABEL: @broadcast_vec3d_from_vec1d( // CHECK-SAME: %[[A:.*]]: vector<2xf32>) -// CHECK-DAG: %[[T0:.*]] = arith.constant dense<0.000000e+00> : vector<3x2xf32> +// CHECK-DAG: %[[T0:.*]] = ub.poison : vector<3x2xf32> // CHECK-DAG: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>> -// CHECK-DAG: %[[T1:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x2xf32> +// CHECK-DAG: %[[T1:.*]] = ub.poison : vector<4x3x2xf32> // CHECK-DAG: %[[T6:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<4x3x2xf32> to !llvm.array<4 x array<3 x vector<2xf32>>> // CHECK: %[[T3:.*]] = llvm.insertvalue %[[A]], %[[T2]][0] : !llvm.array<3 x vector<2xf32>> @@ -294,9 +294,9 @@ func.func @broadcast_vec3d_from_vec1d_scalable(%arg0: vector<[2]xf32>) -> vector } // CHECK-LABEL: 
@broadcast_vec3d_from_vec1d_scalable( // CHECK-SAME: %[[A:.*]]: vector<[2]xf32>) -// CHECK-DAG: %[[T0:.*]] = arith.constant dense<0.000000e+00> : vector<3x[2]xf32> +// CHECK-DAG: %[[T0:.*]] = ub.poison : vector<3x[2]xf32> // CHECK-DAG: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<3x[2]xf32> to !llvm.array<3 x vector<[2]xf32>> -// CHECK-DAG: %[[T1:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x[2]xf32> +// CHECK-DAG: %[[T1:.*]] = ub.poison : vector<4x3x[2]xf32> // CHECK-DAG: %[[T6:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<4x3x[2]xf32> to !llvm.array<4 x array<3 x vector<[2]xf32>>> // CHECK: %[[T3:.*]] = llvm.insertvalue %[[A]], %[[T2]][0] : !llvm.array<3 x vector<[2]xf32>> @@ -320,7 +320,7 @@ func.func @broadcast_vec3d_from_vec2d(%arg0: vector<3x2xf32>) -> vector<4x3x2xf3 // CHECK-LABEL: @broadcast_vec3d_from_vec2d( // CHECK-SAME: %[[A:.*]]: vector<3x2xf32>) // CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>> -// CHECK: %[[T0:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x2xf32> +// CHECK: %[[T0:.*]] = ub.poison : vector<4x3x2xf32> // CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<4x3x2xf32> to !llvm.array<4 x array<3 x vector<2xf32>>> // CHECK: %[[T3:.*]] = llvm.insertvalue %[[T1]], %[[T2]][0] : !llvm.array<4 x array<3 x vector<2xf32>>> // CHECK: %[[T5:.*]] = llvm.insertvalue %[[T1]], %[[T3]][1] : !llvm.array<4 x array<3 x vector<2xf32>>> @@ -338,7 +338,7 @@ func.func @broadcast_vec3d_from_vec2d_scalable(%arg0: vector<3x[2]xf32>) -> vect // CHECK-LABEL: @broadcast_vec3d_from_vec2d_scalable( // CHECK-SAME: %[[A:.*]]: vector<3x[2]xf32>) // CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<3x[2]xf32> to !llvm.array<3 x vector<[2]xf32>> -// CHECK: %[[T0:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x[2]xf32> +// CHECK: %[[T0:.*]] = ub.poison : vector<4x3x[2]xf32> // CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<4x3x[2]xf32> to !llvm.array<4 x array<3 x vector<[2]xf32>>> // CHECK: %[[T3:.*]] = llvm.insertvalue %[[T1]], %[[T2]][0] : !llvm.array<4 x array<3 x vector<[2]xf32>>> // CHECK: %[[T5:.*]] = llvm.insertvalue %[[T1]], %[[T3]][1] : !llvm.array<4 x array<3 x vector<[2]xf32>>> @@ -385,7 +385,7 @@ func.func @broadcast_stretch_at_start(%arg0: vector<1x4xf32>) -> vector<3x4xf32> // CHECK-LABEL: @broadcast_stretch_at_start( // CHECK-SAME: %[[A:.*]]: vector<1x4xf32>) // CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<1x4xf32> to !llvm.array<1 x vector<4xf32>> -// CHECK: %[[T1:.*]] = arith.constant dense<0.000000e+00> : vector<3x4xf32> +// CHECK: %[[T1:.*]] = ub.poison : vector<3x4xf32> // CHECK: %[[T4:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<3x4xf32> to !llvm.array<3 x vector<4xf32>> // CHECK: %[[T3:.*]] = llvm.extractvalue %[[T2]][0] : !llvm.array<1 x vector<4xf32>> // CHECK: %[[T5:.*]] = llvm.insertvalue %[[T3]], %[[T4]][0] : !llvm.array<3 x vector<4xf32>> @@ -403,7 +403,7 @@ func.func @broadcast_stretch_at_start_scalable(%arg0: vector<1x[4]xf32>) -> vect // CHECK-LABEL: @broadcast_stretch_at_start_scalable( // CHECK-SAME: %[[A:.*]]: vector<1x[4]xf32>) // CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<1x[4]xf32> to !llvm.array<1 x vector<[4]xf32>> -// CHECK: %[[T1:.*]] = arith.constant dense<0.000000e+00> : vector<3x[4]xf32> +// CHECK: %[[T1:.*]] = ub.poison : vector<3x[4]xf32> // CHECK: %[[T4:.*]] = builtin.unrealized_conversion_cast 
%[[T1]] : vector<3x[4]xf32> to !llvm.array<3 x vector<[4]xf32>> // CHECK: %[[T3:.*]] = llvm.extractvalue %[[T2]][0] : !llvm.array<1 x vector<[4]xf32>> // CHECK: %[[T5:.*]] = llvm.insertvalue %[[T3]], %[[T4]][0] : !llvm.array<3 x vector<[4]xf32>> @@ -421,7 +421,7 @@ func.func @broadcast_stretch_at_end(%arg0: vector<4x1xf32>) -> vector<4x3xf32> { // CHECK-LABEL: @broadcast_stretch_at_end( // CHECK-SAME: %[[A:.*]]: vector<4x1xf32>) // CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<4x1xf32> to !llvm.array<4 x vector<1xf32>> -// CHECK: %[[T1:.*]] = arith.constant dense<0.000000e+00> : vector<4x3xf32> +// CHECK: %[[T1:.*]] = ub.poison : vector<4x3xf32> // CHECK: %[[T7:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<4x3xf32> to !llvm.array<4 x vector<3xf32>> // CHECK: %[[T3:.*]] = llvm.extractvalue %[[T2]][0] : !llvm.array<4 x vector<1xf32>> // CHECK: %[[T4:.*]] = llvm.mlir.constant(0 : i64) : i64 @@ -469,9 +469,9 @@ func.func @broadcast_stretch_in_middle(%arg0: vector<4x1x2xf32>) -> vector<4x3x2 // CHECK-LABEL: @broadcast_stretch_in_middle( // CHECK-SAME: %[[A:.*]]: vector<4x1x2xf32>) -> vector<4x3x2xf32> { // CHECK: %[[T3:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<4x1x2xf32> to !llvm.array<4 x array<1 x vector<2xf32>>> -// CHECK: %[[T1:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x2xf32> +// CHECK: %[[T1:.*]] = ub.poison : vector<4x3x2xf32> // CHECK: %[[T9:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<4x3x2xf32> to !llvm.array<4 x array<3 x vector<2xf32>>> -// CHECK: %[[T2:.*]] = arith.constant dense<0.000000e+00> : vector<3x2xf32> +// CHECK: %[[T2:.*]] = ub.poison : vector<3x2xf32> // CHECK: %[[T5:.*]] = builtin.unrealized_conversion_cast %[[T2]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>> // CHECK: %[[T4:.*]] = llvm.extractvalue %[[T3]][0, 0] : !llvm.array<4 x array<1 x vector<2xf32>>> // CHECK: %[[T6:.*]] = llvm.insertvalue %[[T4]], %[[T5]][0] : !llvm.array<3 x vector<2xf32>> @@ -505,9 +505,9 @@ func.func @broadcast_stretch_in_middle_scalable_v1(%arg0: vector<4x1x[2]xf32>) - // CHECK-LABEL: @broadcast_stretch_in_middle_scalable_v1( // CHECK-SAME: %[[A:.*]]: vector<4x1x[2]xf32>) -> vector<4x3x[2]xf32> { // CHECK: %[[T3:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<4x1x[2]xf32> to !llvm.array<4 x array<1 x vector<[2]xf32>>> -// CHECK: %[[T1:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x[2]xf32> +// CHECK: %[[T1:.*]] = ub.poison : vector<4x3x[2]xf32> // CHECK: %[[T9:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<4x3x[2]xf32> to !llvm.array<4 x array<3 x vector<[2]xf32>>> -// CHECK: %[[T2:.*]] = arith.constant dense<0.000000e+00> : vector<3x[2]xf32> +// CHECK: %[[T2:.*]] = ub.poison : vector<3x[2]xf32> // CHECK: %[[T5:.*]] = builtin.unrealized_conversion_cast %[[T2]] : vector<3x[2]xf32> to !llvm.array<3 x vector<[2]xf32>> // CHECK: %[[T4:.*]] = llvm.extractvalue %[[T3]][0, 0] : !llvm.array<4 x array<1 x vector<[2]xf32>>> // CHECK: %[[T6:.*]] = llvm.insertvalue %[[T4]], %[[T5]][0] : !llvm.array<3 x vector<[2]xf32>> diff --git a/mlir/test/Dialect/SPIRV/IR/group-ops.mlir b/mlir/test/Dialect/SPIRV/IR/group-ops.mlir index c879b901311f2..d9957ad804161 100644 --- a/mlir/test/Dialect/SPIRV/IR/group-ops.mlir +++ b/mlir/test/Dialect/SPIRV/IR/group-ops.mlir @@ -196,3 +196,63 @@ func.func @group_fmul(%value: f32) -> f32 { %0 = spirv.KHR.GroupFMul %value : f32 return %0: f32 } + +// ----- + +//===----------------------------------------------------------------------===// +// 
spirv.GroupNonUniformBallotBitCount
+//===----------------------------------------------------------------------===//
+
+func.func @group_non_uniform_ballot_bit_count(%value: vector<4xi32>) -> i32 {
+  // CHECK: {{%.*}} = spirv.GroupNonUniformBallotBitCount <Subgroup> <Reduce> {{%.*}} : vector<4xi32> -> i32
+  %0 = spirv.GroupNonUniformBallotBitCount <Subgroup> <Reduce> %value : vector<4xi32> -> i32
+  return %0: i32
+}
+
+// -----
+
+func.func @group_non_uniform_ballot_bit_count_wrong_scope(%value: vector<4xi32>) -> i32 {
+  // expected-error @+1 {{execution_scope must be Scope of value Subgroup}}
+  %0 = spirv.GroupNonUniformBallotBitCount <Workgroup> <Reduce> %value : vector<4xi32> -> i32
+  return %0: i32
+}
+
+// -----
+
+func.func @group_non_uniform_ballot_bit_count_wrong_value_len(%value: vector<3xi32>) -> i32 {
+  // expected-error @+1 {{operand #0 must be vector of 32-bit signless/unsigned integer values of length 4, but got 'vector<3xi32>'}}
+  %0 = spirv.GroupNonUniformBallotBitCount <Subgroup> <Reduce> %value : vector<3xi32> -> i32
+  return %0: i32
+}
+
+// -----
+
+func.func @group_non_uniform_ballot_bit_count_wrong_value_type(%value: vector<4xi8>) -> i32 {
+  // expected-error @+1 {{operand #0 must be vector of 32-bit signless/unsigned integer values of length 4, but got 'vector<4xi8>'}}
+  %0 = spirv.GroupNonUniformBallotBitCount <Subgroup> <Reduce> %value : vector<4xi8> -> i32
+  return %0: i32
+}
+
+// -----
+
+func.func @group_non_uniform_ballot_bit_count_value_sign(%value: vector<4xsi32>) -> i32 {
+  // expected-error @+1 {{operand #0 must be vector of 32-bit signless/unsigned integer values of length 4, but got 'vector<4xsi32>'}}
+  %0 = spirv.GroupNonUniformBallotBitCount <Subgroup> <Reduce> %value : vector<4xsi32> -> i32
+  return %0: i32
+}
+
+// -----
+
+func.func @group_non_uniform_ballot_bit_count_wrong_result_type(%value: vector<4xi32>) -> f32 {
+  // expected-error @+1 {{result #0 must be 8/16/32/64-bit signless/unsigned integer, but got 'f32'}}
+  %0 = spirv.GroupNonUniformBallotBitCount <Subgroup> <Reduce> %value : vector<4xi32> -> f32
+  return %0: f32
+}
+
+// -----
+
+func.func @group_non_uniform_ballot_bit_count_wrong_result_sign(%value: vector<4xi32>) -> si32 {
+  // expected-error @+1 {{result #0 must be 8/16/32/64-bit signless/unsigned integer, but got 'si32'}}
+  %0 = spirv.GroupNonUniformBallotBitCount <Subgroup> <Reduce> %value : vector<4xi32> -> si32
+  return %0: si32
+}
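The next file exercises the TOSA canonicalizer with the new shape operand. One fold it checks, in @reshape_canonicalize_double, collapses two stacked reshapes into a single reshape onto the final shape. A hedged sketch of the pattern (function name hypothetical, shapes taken from the test):

func.func @double_reshape(%arg0: tensor<?x10xf32>) -> tensor<?x5xf32> {
  %s0 = tosa.const_shape { value = dense<[5, -1]> : tensor<2xindex> } : () -> !tosa.shape<2>
  %0 = tosa.reshape %arg0, %s0 : (tensor<?x10xf32>, !tosa.shape<2>) -> tensor<5x?xf32>
  %s1 = tosa.const_shape { value = dense<[-1, 5]> : tensor<2xindex> } : () -> !tosa.shape<2>
  %1 = tosa.reshape %0, %s1 : (tensor<5x?xf32>, !tosa.shape<2>) -> tensor<?x5xf32>
  return %1 : tensor<?x5xf32>
}
// After canonicalization only the final shape survives:
//   %s = tosa.const_shape { value = dense<[-1, 5]> : tensor<2xindex> } : () -> !tosa.shape<2>
//   %1 = tosa.reshape %arg0, %s : (tensor<?x10xf32>, !tosa.shape<2>) -> tensor<?x5xf32>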
diff --git a/mlir/test/Dialect/Tosa/canonicalize.mlir b/mlir/test/Dialect/Tosa/canonicalize.mlir
index e0e1de6a94d10..582fd77cd7bc8 100644
--- a/mlir/test/Dialect/Tosa/canonicalize.mlir
+++ b/mlir/test/Dialect/Tosa/canonicalize.mlir
@@ -542,17 +542,20 @@ func.func @reduce_sum_nofold(%arg0: tensor) -> tensor {
 // CHECK-LABEL: @reshape_canonicalize
 func.func @reshape_canonicalize(%arg0: tensor<?x10xf32>) -> tensor<?x10xf32> {
   // CHECK: return %arg0
-  %0 = tosa.reshape %arg0 {new_shape = array<i64: -1, 10>}: (tensor<?x10xf32>) -> tensor<?x10xf32>
-  return %0 : tensor<?x10xf32>
+  %0 = "tosa.const_shape"() {value = dense<[-1, 10]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  %1 = tosa.reshape %arg0, %0 : (tensor<?x10xf32>, !tosa.shape<2>) -> tensor<?x10xf32>
+  return %1 : tensor<?x10xf32>
 }
 
 // -----
 
 // CHECK-LABEL: @reshape_canonicalize_dyn_nofold
 func.func @reshape_canonicalize_dyn_nofold(%arg0: tensor<?x?x10xf32>) -> tensor<?x2x10xf32> {
-  // CHECK: %[[VAR0:.+]] = tosa.reshape %arg0 {new_shape = array<i64: -1, 2, 10>} : (tensor<?x?x10xf32>) -> tensor<?x2x10xf32>
+  // CHECK: %[[SHAPE:.+]] = tosa.const_shape {value = dense<[-1, 2, 10]> : tensor<3xindex>} : () -> !tosa.shape<3>
+  // CHECK: %[[VAR0:.+]] = tosa.reshape %arg0, %[[SHAPE]] : (tensor<?x?x10xf32>, !tosa.shape<3>) -> tensor<?x2x10xf32>
   // CHECK: return %[[VAR0]] : tensor<?x2x10xf32>
-  %0 = tosa.reshape %arg0 {new_shape = array<i64: -1, 2, 10>} : (tensor<?x?x10xf32>) -> tensor<?x2x10xf32>
+  %s = "tosa.const_shape"() {value = dense<[-1, 2, 10]> : tensor<3xindex>} : () -> !tosa.shape<3>
+  %0 = tosa.reshape %arg0, %s : (tensor<?x?x10xf32>, !tosa.shape<3>) -> tensor<?x2x10xf32>
   return %0 : tensor<?x2x10xf32>
 }
@@ -560,10 +563,13 @@ func.func @reshape_canonicalize_dyn_nofold(%arg0: tensor<?x?x10xf32>) -> tensor<
 // CHECK-LABEL: @reshape_canonicalize_double
 func.func @reshape_canonicalize_double(%arg0: tensor<?x10xf32>) -> tensor<?x5xf32> {
-  // CHECK: %[[VAL_1:.*]] = tosa.reshape %arg0 {new_shape = array<i64: -1, 5>}
+  // CHECK: %[[VAL_0:.*]] = tosa.const_shape {value = dense<[-1, 5]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  // CHECK: %[[VAL_1:.*]] = tosa.reshape %arg0, %[[VAL_0]]
   // CHECK: return %[[VAL_1]]
-  %0 = tosa.reshape %arg0 {new_shape = array<i64: 5, -1>}: (tensor<?x10xf32>) -> tensor<5x?xf32>
-  %1 = tosa.reshape %0 {new_shape = array<i64: -1, 5>}: (tensor<5x?xf32>) -> tensor<?x5xf32>
+  %cst0 = "tosa.const_shape"() <{value = dense<[5, -1]> : tensor<2xindex>}> : () -> !tosa.shape<2>
+  %0 = tosa.reshape %arg0, %cst0 : (tensor<?x10xf32>, !tosa.shape<2>) -> tensor<5x?xf32>
+  %cst1 = "tosa.const_shape"() <{value = dense<[-1, 5]> : tensor<2xindex>}> : () -> !tosa.shape<2>
+  %1 = tosa.reshape %0, %cst1 : (tensor<5x?xf32>, !tosa.shape<2>) -> tensor<?x5xf32>
   return %1 : tensor<?x5xf32>
 }
@@ -574,8 +580,9 @@ func.func @reshape_canonicalize_double(%arg0: tensor<?x10xf32>) -> tensor<?x5xf3
 // CHECK: %[[VAR0:.+]] = "tosa.const"() <{value = dense<{{\[\[}}0, 1, 2, 3, 4]]> : tensor<1x5xi32>}
 // CHECK: return %[[VAR0]]
 %0 = "tosa.const"() {value = dense<[0, 1, 2, 3, 4]> : tensor<5xi32>} : () -> tensor<5xi32>
-%1 = tosa.reshape %0 {new_shape = array<i64: 1, 5>} : (tensor<5xi32>) -> tensor<1x5xi32>
-return %1 : tensor<1x5xi32>
+%1 = "tosa.const_shape"() {value = dense<[1, 5]> : tensor<2xindex>} : () -> !tosa.shape<2>
+%2 = tosa.reshape %0, %1 : (tensor<5xi32>, !tosa.shape<2>) -> tensor<1x5xi32>
+return %2 : tensor<1x5xi32>
 }
 
 // -----
@@ -584,7 +591,8 @@ func.func @reshape_canonicalize_const() -> tensor<1x5xi32> {
 func.func @reshape_canonicalize_const_dynamic() -> tensor<1x?xi32> {
   // CHECK: tosa.reshape
   %0 = "tosa.const"() {value = dense<[0, 1, 2, 3, 4]> : tensor<5xi32>} : () -> tensor<5xi32>
-  %1 = tosa.reshape %0 {new_shape = array<i64: 1, 5>} : (tensor<5xi32>) -> tensor<1x?xi32>
+  %2 = "tosa.const_shape"() {value = dense<[1, 5]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  %1 = tosa.reshape %0, %2 : (tensor<5xi32>, !tosa.shape<2>) -> tensor<1x?xi32>
   return %1 : tensor<1x?xi32>
 }
@@ -596,7 +604,8 @@ func.func @reshape_canonicalize_const_splat() -> (tensor<10xi32>, tensor<1x10xi3
 // CHECK-DAG: %[[VAR1:.+]] = "tosa.const"() <{value = dense<0> : tensor<1x10xi32>}
 // CHECK: return %[[VAR0]], %[[VAR1]]
 %0 = "tosa.const"() {value = dense<0> : tensor<10xi32>} : () -> tensor<10xi32>
-%1 = tosa.reshape %0 {new_shape = array<i64: 1, 10>} : (tensor<10xi32>) -> tensor<1x10xi32>
+%2 = "tosa.const_shape"() {value = dense<[1, 10]> : tensor<2xindex>} : () -> !tosa.shape<2>
+%1 = tosa.reshape %0, %2 : (tensor<10xi32>, !tosa.shape<2>) -> tensor<1x10xi32>
 return %0 , %1 : tensor<10xi32>, tensor<1x10xi32>
 }
@@ -606,7 +615,8 @@ func.func @reshape_canonicalize_const_splat() -> (tensor<10xi32>, tensor<1x10xi3
 func.func @reshape_canonicalize_const_sparse() -> (tensor<3xi32>, tensor<1x3xi32>) {
   // CHECK: tosa.reshape
   %0 = "tosa.const"() {value = dense<[1, 2, 3]> : tensor<3xi32>} : ()-> tensor<3xi32>
-  %1 = tosa.reshape %0 {new_shape = array<i64: 1, 3>} : (tensor<3xi32>) -> tensor<1x3xi32>
+  %2 = "tosa.const_shape"() {value = dense<[1, 3]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  %1 = tosa.reshape %0, %2 : (tensor<3xi32>, !tosa.shape<2>) -> tensor<1x3xi32>
   return %0 , %1 : tensor<3xi32>, tensor<1x3xi32>
 }
@@ -616,9 +626,10 @@ func.func @reshape_canonicalize_const_sparse() -> (tensor<3xi32>, tensor<1x3xi32
 func.func @reshape_canonicalize_quant_nofold() -> (tensor<1x3x!quant.uniform>) {
   // disabled folding for quantized element types
   // CHECK{LITERAL}: "tosa.const"() <{value = dense<[1, 2, 3]> : tensor<3xi8>}> : () -> tensor<3x!quant.uniform>
-  // CHECK{LITERAL}: tosa.reshape %0 {new_shape = array<i64: 1, 3>} : (tensor<3x!quant.uniform>) -> tensor<1x3x!quant.uniform>
+  // CHECK{LITERAL}: tosa.reshape %0, %1 : (tensor<3x!quant.uniform>, !tosa.shape<2>) -> tensor<1x3x!quant.uniform>
   %0 = "tosa.const"() {value = dense<[1, 2, 3]> : tensor<3xi8>} : ()-> tensor<3x!quant.uniform>
-  %1 = tosa.reshape %0 {new_shape = array<i64: 1, 3>} : (tensor<3x!quant.uniform>) -> tensor<1x3x!quant.uniform>
+  %2 = "tosa.const_shape"() {value = dense<[1, 3]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  %1 = tosa.reshape %0, %2 : (tensor<3x!quant.uniform>, !tosa.shape<2>) -> tensor<1x3x!quant.uniform>
   return %1 : tensor<1x3x!quant.uniform>
 }
@@ -626,8 +637,9 @@ func.func @reshape_canonicalize_quant_nofold() -> (tensor<1x3x!quant.uniform (tensor<2x1x3x!quant.uniform>) {
-  // CHECK: "tosa.const"() <{value = dense<0> : tensor<1x2x3xi8>}> : () -> tensor<1x2x3x!quant.uniform>
-  // CHECK: tosa.reshape %0 {new_shape = array<i64: 2, 1, 3>} : (tensor<1x2x3x!quant.uniform>) -> tensor<2x1x3x!quant.uniform>
+  // CHECK-DAG: tosa.const_shape {value = dense<[2, 1, 3]> : tensor<3xindex>} : () -> !tosa.shape<3>
+  // CHECK-DAG: "tosa.const"() <{value = dense<0> : tensor<1x2x3xi8>}> : () -> tensor<1x2x3x!quant.uniform>
+  // CHECK: tosa.reshape %0, %1 : (tensor<1x2x3x!quant.uniform>, !tosa.shape<3>) -> tensor<2x1x3x!quant.uniform>
   %perms = "tosa.const"() {value = dense<[1, 0, 2]> : tensor<3xi32>} : () -> tensor<3xi32>
   %0 = "tosa.const"() {value = dense<0> : tensor<1x2x3xi8>} : ()-> tensor<1x2x3x!quant.uniform>
   %1 = tosa.transpose %0, %perms : (tensor<1x2x3x!quant.uniform>, tensor<3xi32>) -> tensor<2x1x3x!quant.uniform>
@@ -691,7 +703,8 @@ func.func @transpose_no_op(%arg0: tensor<3x4x5x6xf32>) -> tensor<3x4x5x6xf32> {
 // CHECK-LABEL: @transpose_is_reshape
 func.func @transpose_is_reshape(%arg0: tensor<1x4x5x1xf32>) -> tensor<1x4x1x5xf32> {
-  // CHECK: tosa.reshape %arg0 {new_shape = array<i64: 1, 4, 1, 5>} : (tensor<1x4x5x1xf32>) -> tensor<1x4x1x5xf32>
+  // CHECK: %[[CONST0:.+]] = tosa.const_shape {value = dense<[1, 4, 1, 5]> : tensor<4xindex>} : () -> !tosa.shape<4>
+  // CHECK: tosa.reshape %arg0, %[[CONST0]]
   %perms = "tosa.const"() <{value = dense<[3, 1, 0, 2]> : tensor<4xi32>}> : () -> tensor<4xi32>
   %0 = tosa.transpose %arg0, %perms : (tensor<1x4x5x1xf32>, tensor<4xi32>) -> tensor<1x4x1x5xf32>
   return %0 : tensor<1x4x1x5xf32>
@@ -704,7 +717,8 @@ func.func @transpose_is_reshape(%arg0: tensor<1x4x5x1xf32>) -> tensor<1x4x1x5xf3
 func.func @single_bit_reshape() -> tensor<1xi1> {
   // CHECK: "tosa.const"() <{value = dense<true> : tensor<1xi1>}
   %0 = arith.constant dense<true> : tensor<1x1xi1>
-  %1 = tosa.reshape %0 {new_shape = array<i64: 1>} : (tensor<1x1xi1>) -> tensor<1xi1>
+  %2 = "tosa.const_shape"() <{value = dense<1> : tensor<1xindex>}> : () -> !tosa.shape<1>
+  %1 = tosa.reshape %0, %2 : (tensor<1x1xi1>, !tosa.shape<1>) -> tensor<1xi1>
   return %1 : tensor<1xi1>
 }
@@ -870,8 +884,11 @@ func.func nested @fold_tile_rank_zero() -> tensor {
 // check that segfault is fixed
 func.func @reshape_quant_nofold() -> tensor<1x1x1x1xi32> {
   %0 = "tosa.const"() {value = dense<127> : tensor} : () -> tensor>
-  %1 = tosa.reshape %0 {new_shape = array<i64: 1, 1, 1, 1>} : (tensor>) -> tensor<1x1x1x1x!quant.uniform>
-  %2 = tosa.rescale %1 {double_round = true, input_zp = -128 : i32, multiplier = array<i32: 1073741824>, output_zp = 0 : i32, per_channel = false, scale32 = true, shift = array<i8: 30>} : (tensor<1x1x1x1x!quant.uniform>) -> tensor<1x1x1x1xi32>
+  %cst0 = "tosa.const_shape"() {value = dense<[1, 1, 1, 1]> : tensor<4xindex>} : () -> !tosa.shape<4>
+  %1 = tosa.reshape %0, %cst0 : (tensor>, !tosa.shape<4>) -> tensor<1x1x1x1x!quant.uniform>
+  %multiplier = "tosa.const"() {value = dense<1073741824> : tensor<1xi32> } : () -> tensor<1xi32>
+  %shift = "tosa.const"() {value = dense<30> : tensor<1xi8> } : () -> tensor<1xi8>
+  %2 = tosa.rescale %1 {double_round = true, input_zp = -128 : i32, multiplier = array<i32: 1073741824>, output_zp = 0 : i32, per_channel = false, scale32 = true, shift = array<i8: 30>, input_unsigned = false, output_unsigned = false} : (tensor<1x1x1x1x!quant.uniform>) -> tensor<1x1x1x1xi32>
   return %2 : tensor<1x1x1x1xi32>
 }
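The next file checks constant folding. Reshaping a splat constant folds away completely, producing a splat constant of the result type with no reshape left behind. A sketch under the same values as @reshape_splat below (function name hypothetical):

func.func @splat_fold() -> tensor<6x5x4xi32> {
  %splat = "tosa.const"() {value = dense<42> : tensor<4x5x6xi32>} : () -> tensor<4x5x6xi32>
  %s = tosa.const_shape {value = dense<[6, 5, 4]> : tensor<3xindex>} : () -> !tosa.shape<3>
  // Folds to "tosa.const"() <{value = dense<42> : tensor<6x5x4xi32>}>; the reshape disappears.
  %r = tosa.reshape %splat, %s : (tensor<4x5x6xi32>, !tosa.shape<3>) -> tensor<6x5x4xi32>
  return %r : tensor<6x5x4xi32>
}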
diff --git a/mlir/test/Dialect/Tosa/constant-op-fold.mlir b/mlir/test/Dialect/Tosa/constant-op-fold.mlir
index 32677f06e2252..40469987d89d0 100644
--- a/mlir/test/Dialect/Tosa/constant-op-fold.mlir
+++ b/mlir/test/Dialect/Tosa/constant-op-fold.mlir
@@ -500,7 +500,8 @@ func.func @fold_eq_i32(%arg0 : tensor<10xi32>) -> (tensor<10xi1>) {
 func.func @reshape_splat() -> tensor<6x5x4xi32> {
   // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<42> : tensor<6x5x4xi32>}
   %splat = "tosa.const"() {value = dense<42> : tensor<4x5x6xi32>} : () -> tensor<4x5x6xi32>
-  %reshape = tosa.reshape %splat { new_shape = array<i64: 6, 5, 4> } : (tensor<4x5x6xi32>) -> tensor<6x5x4xi32>
+  %const = tosa.const_shape {value = dense<[6, 5, 4]> : tensor<3xindex>} : () -> !tosa.shape<3>
+  %reshape = tosa.reshape %splat, %const : (tensor<4x5x6xi32>, !tosa.shape<3>) -> tensor<6x5x4xi32>
   // CHECK: return %[[SPLAT]]
   return %reshape : tensor<6x5x4xi32>
 }
diff --git a/mlir/test/Dialect/Tosa/inlining.mlir b/mlir/test/Dialect/Tosa/inlining.mlir
index e892fdaa27750..2a3065e80d0ea 100644
--- a/mlir/test/Dialect/Tosa/inlining.mlir
+++ b/mlir/test/Dialect/Tosa/inlining.mlir
@@ -47,7 +47,8 @@ func.func @inlined_while_fn(%arg0: tensor<i32>, %arg1: tensor<i32>, %arg2: tenso
 }
 func.func private @while_body_50(%arg0: tensor<i32>, %arg1: tensor<i32>, %arg2: tensor<i32>, %arg3: tensor<10xi32>) -> (tensor<i32>, tensor<i32>, tensor<i32>, tensor<10xi32>) {
   %1 = "tosa.add"(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
-  %3 = "tosa.reshape"(%1) {new_shape = array<i64: 1>} : (tensor<i32>) -> tensor<1xi32>
+  %4 = "tosa.const_shape"() {value = dense<1> : tensor<1xindex>} : () -> !tosa.shape<1>
+  %3 = "tosa.reshape"(%1, %4) : (tensor<i32>, !tosa.shape<1>) -> tensor<1xi32>
   %2 = "tosa.add"(%arg3, %3) : (tensor<10xi32>, tensor<1xi32>) -> tensor<10xi32>
   return %1, %arg1, %arg2, %2: tensor<i32>, tensor<i32>, tensor<i32>, tensor<10xi32>
 }
diff --git a/mlir/test/Dialect/Tosa/invalid.mlir b/mlir/test/Dialect/Tosa/invalid.mlir
index 006c5bd52a9f6..2165e1f7ae3ba 100644
--- a/mlir/test/Dialect/Tosa/invalid.mlir
+++ b/mlir/test/Dialect/Tosa/invalid.mlir
@@ -316,7 +316,8 @@ func.func @test_transpose_element_type_mismatch(%arg0: tensor<2x3xi32>) -> tenso
 func.func @test_fully_connected_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<2x3xf32>) -> tensor<273x2xf32> {
   %0 = "tosa.const"() {value = dense<0.000000e+00> : tensor<2xf32>} : () -> tensor<2xf32>
-  %1 = tosa.reshape %arg0 {new_shape = array<i64: 273, 3>} : (tensor<13x21x3xf32>) -> tensor<273x3xf32>
+  %3 = tosa.const_shape {value = dense<[273, 3]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  %1 = tosa.reshape %arg0, %3 : (tensor<13x21x3xf32>, !tosa.shape<2>) -> tensor<273x3xf32>
   // expected-error@+1 {{'tosa.fully_connected' op weight of fully_connected is not constant}}
   %2 = tosa.fully_connected %1, %arg1, %0 : (tensor<273x3xf32>, tensor<2x3xf32>, tensor<2xf32>) -> tensor<273x2xf32>
   return %2 : tensor<273x2xf32>
@@ -326,7 +327,8 @@ func.func @test_fully_connected_non_const(%arg0: tensor<13x21x3xf32>, %arg1: ten
 func.func @test_fully_connected_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<2xf32>) -> tensor<273x2xf32> {
   %0 = "tosa.const"() {value = dense<[[-0.613216758, -0.63714242, -0.73500061], [0.180762768, 0.773053169, -0.933686495]]> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
-  %1 = tosa.reshape %arg0 {new_shape = array<i64: 273, 3>} : (tensor<13x21x3xf32>) -> tensor<273x3xf32>
+  %3 = tosa.const_shape {value = dense<[273, 3]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  %1 = tosa.reshape %arg0, %3 : (tensor<13x21x3xf32>, !tosa.shape<2>) -> tensor<273x3xf32>
   // expected-error@+1 {{'tosa.fully_connected' op bias of fully_connected is not constant}}
   %2 = tosa.fully_connected %1, %0, %arg1 : (tensor<273x3xf32>, tensor<2x3xf32>, tensor<2xf32>) -> tensor<273x2xf32>
   return %2 : tensor<273x2xf32>
@@ -426,81 +428,91 @@ func.func @test_reduce_min_invalid_output_rank(%arg0 : tensor) -> () {
 // -----
 
 func.func @test_reshape_type_mismatch(%arg0 : tensor<13x21x3xf32>) -> () {
+  %1 = tosa.const_shape {value = dense<[13, 21, 3, 1]> : tensor<4xindex>} : () -> !tosa.shape<4>
   // expected-error@+2 {{failed to infer returned types}}
   // expected-error@+1 {{'tosa.reshape' op inferred type(s) 'tensor<13x21x3x1xf32>' are incompatible with return type(s) of operation 'tensor<13x21x3x1xi32>'}}
-  %0 = tosa.reshape %arg0 {new_shape = array<i64: 13, 21, 3, 1>} : (tensor<13x21x3xf32>) -> tensor<13x21x3x1xi32>
+  %0 = tosa.reshape %arg0, %1 : (tensor<13x21x3xf32>, !tosa.shape<4>) -> tensor<13x21x3x1xi32>
   return
 }
 
 // -----
 
 func.func @test_reshape_static_zero_dim_input(%arg0 : tensor<13x0x3xf32>) -> () {
+  %s = tosa.const_shape {value = dense<[13, 21, 3]> : tensor<3xindex>} : () -> !tosa.shape<3>
   // expected-error@+1 {{'tosa.reshape' op operand #0 must be tosa-conformant tensor of number values, but got 'tensor<13x0x3xf32>'}}
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 13, 21, 3>} : (tensor<13x0x3xf32>) -> tensor<13x0x3xf32>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<13x0x3xf32>, !tosa.shape<3>) -> tensor<13x0x3xf32>
   return
 }
 
 // -----
 
 func.func @test_reshape_zero_dim_input(%arg0 : tensor<?x0x3xf32>) -> () {
+  %s = tosa.const_shape {value = dense<[13, 21, 3]> : tensor<3xindex>} : () -> !tosa.shape<3>
   // expected-error@+1 {{'tosa.reshape' op operand #0 must be tosa-conformant tensor of number values, but got 'tensor<?x0x3xf32>'}}
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 13, 21, 3>} : (tensor<?x0x3xf32>) -> tensor<13x0x3xf32>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<?x0x3xf32>, !tosa.shape<3>) -> tensor<13x0x3xf32>
   return
 }
 
 // -----
 
 func.func @test_reshape_rank_mismatch(%arg0 : tensor<?xf32>) -> () {
+  %s = tosa.const_shape {value = dense<[2, 4]> : tensor<2xindex>} : () -> !tosa.shape<2>
   // expected-error@+1 {{'tosa.reshape' op new shape does not match result rank}}
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 4>} : (tensor<?xf32>) -> tensor<?xf32>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<?xf32>, !tosa.shape<2>) -> tensor<?xf32>
   return
 }
 
 // -----
 
 func.func @test_reshape_inconsistent_result_type(%arg0 : tensor<?xf32>) -> () {
+  %s = tosa.const_shape {value = dense<[2, 4, -1]> : tensor<3xindex>} : () -> !tosa.shape<3>
   // expected-error@+1 {{'tosa.reshape' op new shape is inconsistent with result shape}}
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 4, -1>} : (tensor<?xf32>) -> tensor<?x?x5xf32>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<?xf32>, !tosa.shape<3>) -> tensor<?x?x5xf32>
   return
 }
 
 // -----
 func.func @test_reshape_invalid_size(%arg0 : tensor<2x4xf32>) -> () {
+  %s = tosa.const_shape {value = dense<[3, 5]> : tensor<2xindex>} : () -> !tosa.shape<2>
   // expected-error@+1 {{'tosa.reshape' op cannot reshape 8 elements into 15}}
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 3, 5>} : (tensor<2x4xf32>) -> tensor<3x5xf32>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<2x4xf32>, !tosa.shape<2>) -> tensor<3x5xf32>
   return
 }
 
 // -----
 
 func.func @test_reshape_invalid_newshape(%arg0 : tensor<1xf32>) -> () {
+  %s = tosa.const_shape {value = dense<[-1, 4]> : tensor<2xindex>} : () -> !tosa.shape<2>
   // expected-error@+1 {{'tosa.reshape' op cannot reshape 1 elements into 4}}
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: -1, 4>} : (tensor<1xf32>) -> tensor<?x4xf32>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<1xf32>, !tosa.shape<2>) -> tensor<?x4xf32>
   return
 }
 
 // -----
 
 func.func @test_reshape_invalid_newshape(%arg0 : tensor<8xf32>) -> () {
+  %s = tosa.const_shape {value = dense<[1, 4]> : tensor<2xindex>} : () -> !tosa.shape<2>
   // expected-error@+1 {{'tosa.reshape' op cannot reshape 8 elements into 4}}
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 4>} : (tensor<8xf32>) -> tensor<?x4xf32>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<8xf32>, !tosa.shape<2>) -> tensor<?x4xf32>
   return
 }
 
 // -----
 
 func.func @test_reshape_invalid_placeholders(%arg0 : tensor<?xf32>) -> () {
+  %s = tosa.const_shape {value = dense<[2, -1, -1]> : tensor<3xindex>} : () -> !tosa.shape<3>
   // expected-error@+1 {{'tosa.reshape' op expected at most one target dimension to be -1}}
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, -1, -1>} : (tensor<?xf32>) -> tensor<2x?x?xf32>
+  %0 = "tosa.reshape"(%arg0, %s) : (tensor<?xf32>, !tosa.shape<3>) -> tensor<2x?x?xf32>
   return
 }
 
 // -----
 
 func.func @test_reshape_invalid_tensor_dim(%arg0 : tensor<4x?xf32>) -> () {
+  %s = tosa.const_shape {value = dense<[-2, -1]> : tensor<2xindex>} : () -> !tosa.shape<2>
   // expected-error@+1 {{'tosa.reshape' op new shape has invalid tensor dimension size -2}}
-  %0 = "tosa.reshape" (%arg0) {new_shape = array<i64: -2, -1>} : (tensor<4x?xf32>) -> tensor<?x?xf32>
+  %0 = "tosa.reshape" (%arg0, %s) : (tensor<4x?xf32>, !tosa.shape<2>) -> tensor<?x?xf32>
   return
 }
@@ -514,6 +526,15 @@ func.func @test_reverse_axis_out_of_range(%arg0 : tensor<13x21x3xf32>) -> () {
 // -----
 
+func.func @test_reshape_zero_dim_input(%arg0 : tensor<?x0x3xf32>) -> () {
+  %1 = tosa.const_shape {value = dense<[13, 21, 3]> : tensor<3xindex>} : () -> !tosa.shape<3>
+  // expected-error@+1 {{'tosa.reshape' op operand #0 must be tosa-conformant tensor of number values, but got 'tensor<?x0x3xf32>'}}
+  %0 = "tosa.reshape"(%arg0, %1) : (tensor<?x0x3xf32>, !tosa.shape<3>) -> tensor<13x0x3xf32>
+  return
+}
+
+// -----
+
 func.func @test_const_attribute_type_mismatch() -> tensor<100x100xf32> {
   // expected-error@+1 {{'tosa.const' op failed to verify that all of {value, output} have same shape}}
   %0 = "tosa.const"() {value = dense<0.000000e+00> : tensor<1x1xf32>} : () -> tensor<100x100xf32>
diff --git a/mlir/test/Dialect/Tosa/level_check.mlir b/mlir/test/Dialect/Tosa/level_check.mlir
index 26bebdd898a0d..a7f76f2d0fa64 100644
--- a/mlir/test/Dialect/Tosa/level_check.mlir
+++ b/mlir/test/Dialect/Tosa/level_check.mlir
@@ -70,8 +70,9 @@ func.func @test_concat(%arg0: tensor<1x1x1x13x21x3x8xf32>, %arg1: tensor<1x1x1x1
 // -----
 
 func.func @test_reshape(%arg0: tensor<13x21x3xf32>) -> tensor<1x1x1x1x1x1x819xf32> {
+  %1 = tosa.const_shape {value = dense<[1, 1, 1, 1, 1, 1, 819]> : tensor<7xindex>} : () -> !tosa.shape<7>
   // expected-error@+1 {{'tosa.reshape' op failed level check: result rank(shape) <= MAX_RANK}}
-  %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 1, 1, 1, 1, 1, 819>} : (tensor<13x21x3xf32>) -> tensor<1x1x1x1x1x1x819xf32>
+  %0 = "tosa.reshape"(%arg0, %1) : (tensor<13x21x3xf32>, !tosa.shape<7>) -> tensor<1x1x1x1x1x1x819xf32>
   return %0 : tensor<1x1x1x1x1x1x819xf32>
 }
diff --git a/mlir/test/Dialect/Tosa/ops.mlir b/mlir/test/Dialect/Tosa/ops.mlir
index d00230d12aab1..baf09e089aa30 100644
--- a/mlir/test/Dialect/Tosa/ops.mlir
+++ b/mlir/test/Dialect/Tosa/ops.mlir
@@ -504,7 +504,8 @@ func.func @test_greater_equal(%arg0: tensor<13x1x3xf32>, %arg1: tensor<13x21x3xf
 // CHECK-LABEL: reduce_all
 func.func @test_reduce_all(%arg0: tensor<13x21x3xi1>) -> tensor<21x3xi1> {
   %0 = tosa.reduce_all %arg0 {axis = 0 : i32} : (tensor<13x21x3xi1>) -> tensor<1x21x3xi1>
-  %1 = tosa.reshape %0 {new_shape = array<i64: 21, 3>} : (tensor<1x21x3xi1>) -> tensor<21x3xi1>
+  %2 = tosa.const_shape {value = dense<[21, 3]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  %1 = tosa.reshape %0, %2 : (tensor<1x21x3xi1>, !tosa.shape<2>) -> tensor<21x3xi1>
   return %1 : tensor<21x3xi1>
 }
@@ -512,7 +513,8 @@ func.func @test_reduce_all(%arg0: tensor<13x21x3xi1>) -> tensor<21x3xi1> {
 // CHECK-LABEL: reduce_any
 func.func @test_reduce_any(%arg0: tensor<13x21x3xi1>) -> tensor<21x3xi1> {
   %0 = tosa.reduce_any %arg0 {axis = 0 : i32} : (tensor<13x21x3xi1>) -> tensor<1x21x3xi1>
-  %1 = tosa.reshape %0 {new_shape = array<i64: 21, 3>} : (tensor<1x21x3xi1>) -> tensor<21x3xi1>
+  %2 = tosa.const_shape {value = dense<[21, 3]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  %1 = tosa.reshape %0, %2 : (tensor<1x21x3xi1>, !tosa.shape<2>) -> tensor<21x3xi1>
   return %1 : tensor<21x3xi1>
 }
@@ -520,7 +522,8 @@ func.func @test_reduce_any(%arg0: tensor<13x21x3xi1>) -> tensor<21x3xi1> {
 // CHECK-LABEL: reduce_max
 func.func @test_reduce_max(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> {
   %0 = tosa.reduce_max %arg0 {axis = 0 : i32} : (tensor<13x21x3xf32>) -> tensor<1x21x3xf32>
-  %1 = tosa.reshape %0 {new_shape = array<i64: 21, 3>} : (tensor<1x21x3xf32>) -> tensor<21x3xf32>
+  %2 = tosa.const_shape {value = dense<[21, 3]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  %1 = tosa.reshape %0, %2 : (tensor<1x21x3xf32>, !tosa.shape<2>) -> tensor<21x3xf32>
   return %1 : tensor<21x3xf32>
 }
@@ -528,7 +531,8 @@ func.func @test_reduce_max(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> {
 // CHECK-LABEL: reduce_min
 func.func @test_reduce_min(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> {
   %0 = tosa.reduce_min %arg0 {axis = 0 : i32} : (tensor<13x21x3xf32>) -> tensor<1x21x3xf32>
-  %1 = tosa.reshape %0 {new_shape = array<i64: 21, 3>} : (tensor<1x21x3xf32>) -> tensor<21x3xf32>
+  %2 = tosa.const_shape {value = dense<[21, 3]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  %1 = tosa.reshape %0, %2 : (tensor<1x21x3xf32>, !tosa.shape<2>) -> tensor<21x3xf32>
   return %1 : tensor<21x3xf32>
 }
@@ -536,7 +540,8 @@ func.func @test_reduce_min(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> {
 // CHECK-LABEL: reduce_product
 func.func @test_reduce_product(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> {
   %0 = tosa.reduce_prod %arg0 {axis = 0 : i32} : (tensor<13x21x3xf32>) -> tensor<1x21x3xf32>
-  %1 = tosa.reshape %0 {new_shape = array<i64: 21, 3>} : (tensor<1x21x3xf32>) -> tensor<21x3xf32>
+  %2 = tosa.const_shape {value = dense<[21, 3]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  %1 = tosa.reshape %0, %2 : (tensor<1x21x3xf32>, !tosa.shape<2>) -> tensor<21x3xf32>
   return %1 : tensor<21x3xf32>
 }
@@ -544,7 +549,8 @@ func.func @test_reduce_product(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> {
 // CHECK-LABEL: reduce_sum
 func.func @test_reduce_sum(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> {
   %0 = tosa.reduce_sum %arg0 {axis = 0 : i32} : (tensor<13x21x3xf32>) -> tensor<1x21x3xf32>
-  %1 = tosa.reshape %0 {new_shape = array<i64: 21, 3>} : (tensor<1x21x3xf32>) -> tensor<21x3xf32>
+  %2 = tosa.const_shape {value = dense<[21, 3]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  %1 = tosa.reshape %0, %2 : (tensor<1x21x3xf32>, !tosa.shape<2>) -> tensor<21x3xf32>
   return %1 : tensor<21x3xf32>
 }
@@ -575,7 +581,8 @@ func.func @test_pad_explicit_value(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3
 // -----
 // CHECK-LABEL: reshape
 func.func @test_reshape(%arg0: tensor<13x21x3xf32>) -> tensor<1x819xf32> {
-  %0 = tosa.reshape %arg0 {new_shape = array<i64: 1, 819>} : (tensor<13x21x3xf32>) -> tensor<1x819xf32>
+  %1 = tosa.const_shape {value = dense<[1, 819]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  %0 = tosa.reshape %arg0, %1 : (tensor<13x21x3xf32>, !tosa.shape<2>) -> tensor<1x819xf32>
   return %0 : tensor<1x819xf32>
 }
@@ -724,7 +731,8 @@ func.func @test_while_loop(%arg0: tensor<10xi32>, %arg1: tensor<i32>) {
 ^bb0(%arg2: tensor<i32>, %arg3: tensor<i32>, %arg4: tensor<10xi32>):
   %2 = "tosa.const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>
   %3 = tosa.add %arg3, %2 : (tensor<i32>, tensor<i32>) -> tensor<i32>
-  %4 = tosa.reshape %2 {new_shape = array<i64: 1>} : (tensor<i32>) -> tensor<1xi32>
+  %7 = tosa.const_shape {value = dense<[1]> : tensor<1xindex>} : () -> !tosa.shape<1>
+  %4 = tosa.reshape %2, %7 : (tensor<i32>, !tosa.shape<1>) -> tensor<1xi32>
   %5 = tosa.add %arg4, %4 : (tensor<10xi32>, tensor<1xi32>) -> tensor<10xi32>
   %6 = tosa.add %arg2, %2 : (tensor<i32>, tensor<i32>) -> tensor<i32>
   tosa.yield %6, %3, %5 : tensor<i32>, tensor<i32>, tensor<10xi32>
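The next file covers the decomposition of a 1x1 convolution into a matrix product: the input collapses to [N*H*W, C], the kernel to [OC, C], a fully_connected does the work, and the result expands back to NHWC. A sketch with the shapes of the first test below (function name hypothetical):

func.func @conv1x1_as_fc(%input: tensor<4x10x10x2xf32>, %weights: tensor<3x1x1x2xf32>, %bias: tensor<3xf32>) -> tensor<4x10x10x3xf32> {
  %in_shape = tosa.const_shape {value = dense<[400, 2]> : tensor<2xindex>} : () -> !tosa.shape<2>
  %w_shape = tosa.const_shape {value = dense<[3, 2]> : tensor<2xindex>} : () -> !tosa.shape<2>
  %out_shape = tosa.const_shape {value = dense<[4, 10, 10, 3]> : tensor<4xindex>} : () -> !tosa.shape<4>
  // 4*10*10 = 400 rows of 2 input channels each.
  %in2d = tosa.reshape %input, %in_shape : (tensor<4x10x10x2xf32>, !tosa.shape<2>) -> tensor<400x2xf32>
  %w2d = tosa.reshape %weights, %w_shape : (tensor<3x1x1x2xf32>, !tosa.shape<2>) -> tensor<3x2xf32>
  %fc = tosa.fully_connected %in2d, %w2d, %bias : (tensor<400x2xf32>, tensor<3x2xf32>, tensor<3xf32>) -> tensor<400x3xf32>
  %out = tosa.reshape %fc, %out_shape : (tensor<400x3xf32>, !tosa.shape<4>) -> tensor<4x10x10x3xf32>
  return %out : tensor<4x10x10x3xf32>
}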
diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
index e4a2897908072..9aade2fe45eb6 100644
--- a/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
@@ -5,13 +5,16 @@
 // CHECK-LABEL: @conv2d_as_fully_connected
 func.func @conv2d_as_fully_connected(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<3x1x1x2xf32>, %arg2: tensor<3xf32>) -> tensor<4x10x10x3xf32> {
   // CHECK-NOT: tosa.conv2d
-  // CHECK: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array<i64: 400, 2>}
+  // CHECK-DAG: %[[CONST0:.*]] = tosa.const_shape {value = dense<[400, 2]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  // CHECK-DAG: %[[CONST1:.*]] = tosa.const_shape {value = dense<[3, 2]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  // CHECK-DAG: %[[CONST2:.*]] = tosa.const_shape {value = dense<[4, 10, 10, 3]> : tensor<4xindex>} : () -> !tosa.shape<4>
+  // CHECK: %[[VAR0:.*]] = tosa.reshape %arg0, %[[CONST0]]
   // CHECK-SAME: -> tensor<400x2xf32>
-  // CHECK: %[[VAR1:.*]] = tosa.reshape %arg1 {new_shape = array<i64: 3, 2>}
+  // CHECK: %[[VAR1:.*]] = tosa.reshape %arg1, %[[CONST1]]
   // CHECK-SAME: -> tensor<3x2xf32>
   // CHECK: %[[VAR2:.*]] = tosa.fully_connected %[[VAR0]], %[[VAR1]], %arg2
   // CHECK-SAME: -> tensor<400x3xf32>
-  // CHECK: %[[VAR3:.*]] = tosa.reshape %[[VAR2]] {new_shape = array<i64: 4, 10, 10, 3>}
+  // CHECK: %[[VAR3:.*]] = tosa.reshape %[[VAR2]], %[[CONST2]]
   // CHECK-SAME: -> tensor<4x10x10x3xf32>
   // CHECK: return %[[VAR3]]
   %0 = tosa.conv2d %arg0, %arg1, %arg2 {acc_type = f32, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : (tensor<4x10x10x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<4x10x10x3xf32>
@@ -23,14 +26,17 @@
 // CHECK-LABEL: @conv2d_as_fully_connected_quant
 func.func @conv2d_as_fully_connected_quant(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor<3x1x1x2xi8>, %arg2: tensor<3xi32>) -> tensor<4x10x10x3xi32> {
   // CHECK-NOT: tosa.conv2d
-  // CHECK: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array<i64: 400, 2>}
+  // CHECK-DAG: %[[CONST0:.*]] = tosa.const_shape {value = dense<[400, 2]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  // CHECK-DAG: %[[CONST1:.*]] = tosa.const_shape {value = dense<[3, 2]> : tensor<2xindex>} : () -> !tosa.shape<2>
+  // CHECK-DAG: %[[CONST2:.*]] = tosa.const_shape {value = dense<[4, 10, 10, 3]> : tensor<4xindex>} : () -> !tosa.shape<4>
+  // CHECK: %[[VAR0:.*]] = tosa.reshape %arg0, %[[CONST0]]
   // CHECK-SAME: -> tensor<400x2xi8>
-  // CHECK: %[[VAR1:.*]] = tosa.reshape %arg1 {new_shape = array<i64: 3, 2>}
+  // CHECK: %[[VAR1:.*]] = tosa.reshape %arg1, %[[CONST1]]
   // CHECK-SAME: -> tensor<3x2xi8>
   // CHECK: %[[VAR2:.*]] = tosa.fully_connected %[[VAR0]], %[[VAR1]], %arg2
   // CHECK-SAME: {input_zp = 42 : i32, weight_zp = 24 : i32}
   // CHECK-SAME: -> tensor<400x3xi32>
-  // CHECK: %[[VAR3:.*]] = tosa.reshape %[[VAR2]] {new_shape = array<i64: 4, 10, 10, 3>}
+  // CHECK: %[[VAR3:.*]] = tosa.reshape %[[VAR2]], %[[CONST2]]
   // CHECK-SAME: -> tensor<4x10x10x3xi32>
   // CHECK: return %[[VAR3]]
   %input_zp = "tosa.const"() {value = dense<42> : tensor<1xi8>} : () -> tensor<1xi8>
@@ -42,14 +48,14 @@
 // -----
 
 // CHECK-LABEL: func.func @conv_with_dynamic_dim(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x14x14x64xi8>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<384x1x1x64xi8>,
-// CHECK-SAME: %[[VAL_2:.*]]: tensor<384xi32>) -> tensor<?x14x14x384xi32> {
 func.func @conv_with_dynamic_dim(%arg0: tensor<?x14x14x64xi8>, %arg1: tensor<384x1x1x64xi8>, %arg2: tensor<384xi32>) -> tensor<?x14x14x384xi32> {
-// CHECK: %[[VAL_3:.*]] = tosa.reshape %[[VAL_0]] {new_shape = array<i64: -1, 64>} : (tensor<?x14x14x64xi8>) -> tensor<?x64xi8>
-// CHECK: %[[VAL_4:.*]] = tosa.reshape %[[VAL_1]] {new_shape = array<i64: 384, 64>} : (tensor<384x1x1x64xi8>) -> tensor<384x64xi8>
-// CHECK: %[[VAL_5:.*]] = tosa.fully_connected %[[VAL_3]], %[[VAL_4]], %[[VAL_2]] {input_zp = -6 : i32, weight_zp = 11 : i32} : (tensor<?x64xi8>, tensor<384x64xi8>, tensor<384xi32>) -> tensor<?x384xi32>
-// CHECK: %[[VAL_6:.*]] = tosa.reshape %[[VAL_5]] {new_shape = array<i64: -1, 14, 14, 384>} : (tensor<?x384xi32>) -> tensor<?x14x14x384xi32>
+// CHECK-DAG: %[[CONST0:.*]] = tosa.const_shape {value = dense<[-1, 64]> : tensor<2xindex>} : () -> !tosa.shape<2>
+// CHECK-DAG: %[[CONST1:.*]] = tosa.const_shape {value = dense<[384, 64]> : tensor<2xindex>} : () -> !tosa.shape<2>
+// CHECK-DAG: %[[CONST2:.*]] = tosa.const_shape {value = dense<[-1, 14, 14, 384]> : tensor<4xindex>} : () -> !tosa.shape<4>
+// CHECK: %[[VAL_3:.*]] = tosa.reshape %arg0, %[[CONST0]]
+// CHECK: %[[VAL_4:.*]] = tosa.reshape %arg1, %[[CONST1]] : (tensor<384x1x1x64xi8>, !tosa.shape<2>) -> tensor<384x64xi8>
+// CHECK: %[[VAL_5:.*]] = tosa.fully_connected %[[VAL_3]], %[[VAL_4]], %arg2 {input_zp = -6 : i32, weight_zp = 11 : i32} : (tensor<?x64xi8>, tensor<384x64xi8>, tensor<384xi32>) -> tensor<?x384xi32>
+// CHECK: %[[VAL_6:.*]] = tosa.reshape %[[VAL_5]], %[[CONST2]] : (tensor<?x384xi32>, !tosa.shape<4>) -> tensor<?x14x14x384xi32>
 // CHECK: return %[[VAL_6]] : tensor<?x14x14x384xi32>
 // CHECK: }
   %input_zp = "tosa.const"() {value = dense<-6> : tensor<1xi8>} : () -> tensor<1xi8>
@@ -62,15 +68,19 @@ func.func @conv_with_dynamic_dim(%arg0: tensor<?x14x14x64xi8>, %arg1: tensor<384
 // -----
 
 // CHECK-LABEL: @conv2d_as_fully_connected_padded
 func.func @conv2d_as_fully_connected_padded(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor<3x1x1x2xi8>, %arg2: tensor<3xi32>) -> tensor<4x12x12x3xi32> {
+  // CHECK-DAG: %[[FULLY_NEW_SHAPE:.+]] = tosa.const_shape {value = dense<[4, 12, 12, 3]> : tensor<4xindex>}
+  // CHECK-DAG: %[[INPUT_NEW_SHAPE:.+]] = tosa.const_shape {value = dense<[576, 2]> : tensor<2xindex>}
+  // CHECK-DAG: %[[FILTER_NEW_SHAPE:.+]] = tosa.const_shape {value = dense<[3, 2]> : tensor<2xindex>}
CHECK-DAG: %[[PAD_SHAPE:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 1, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8> // CHECK-DAG: %[[PAD_VAL:.+]] = "tosa.const"() <{value = dense<42> : tensor} // CHECK-DAG: %[[PAD:.+]] = tosa.pad %arg0, %[[PAD_SHAPE]], %[[PAD_VAL]] : (tensor<4x10x10x2xi8>, !tosa.shape<8>, tensor) -> tensor<4x12x12x2xi8> - // CHECK-DAG: %[[RESHAPE_INPUT:.+]] = tosa.reshape %[[PAD]] {new_shape = array} - // CHECK-DAG: %[[RESHAPE_FILTER:.+]] = tosa.reshape %arg1 {new_shape = array} + // CHECK-DAG: %[[RESHAPE_INPUT:.+]] = tosa.reshape %[[PAD]], %[[INPUT_NEW_SHAPE]] + // CHECK-DAG: %[[RESHAPE_FILTER:.+]] = tosa.reshape %arg1, %[[FILTER_NEW_SHAPE]] // CHECK-DAG: %[[FULLY:.+]] = tosa.fully_connected %[[RESHAPE_INPUT]], %[[RESHAPE_FILTER]], %arg2 {input_zp = 42 : i32, weight_zp = 24 : i32} - // CHECK: %[[RESHAPE:.+]] = tosa.reshape %[[FULLY]] {new_shape = array} + // CHECK: %[[RESHAPE:.+]] = tosa.reshape %[[FULLY]], %[[FULLY_NEW_SHAPE]] %input_zp = "tosa.const"() {value = dense<42> : tensor<1xi8>} : () -> tensor<1xi8> %weight_zp = "tosa.const"() {value = dense<24> : tensor<1xi8>} : () -> tensor<1xi8> %0 = tosa.conv2d %arg0, %arg1, %arg2, %input_zp, %weight_zp {acc_type = i32, pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xi8>, tensor<3x1x1x2xi8>, tensor<3xi32>, tensor<1xi8>, tensor<1xi8>) -> tensor<4x12x12x3xi32> return %0 : tensor<4x12x12x3xi32> } + diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir index ce29d1a498b4f..6562a7c2ab55c 100644 --- a/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir +++ b/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir @@ -5,15 +5,19 @@ // CHECK-LABEL: @depthwise_conv2d_as_mul func.func @depthwise_conv2d_as_mul(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<1x1x2x3xf32>, %arg2: tensor<6xf32>) -> tensor<4x10x10x6xf32> { // CHECK-NOT: tosa.depthwise_conv2d - // CHECK: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK-DAG: %[[CONST0:.+]] = tosa.const_shape {value = dense<[4, 10, 10, 2, 1]> : tensor<5xindex> + // CHECK-DAG: %[[CONST1:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 2, 3]> : tensor<5xindex> + // CHECK-DAG: %[[CONST2:.+]] = tosa.const_shape {value = dense<[4, 10, 10, 6]> : tensor<4xindex> + // CHECK-DAG: %[[CONST3:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 6]> : tensor<4xindex> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg0, %[[CONST0]] // CHECK-SAME: -> tensor<4x10x10x2x1xf32> - // CHECK: %[[VAR1:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.reshape %arg1, %[[CONST1]] // CHECK-SAME: -> tensor<1x1x1x2x3xf32> // CHECK: %[[VAR2:.*]] = tosa.mul %[[VAR0]], %[[VAR1]] // CHECK-SAME: -> tensor<4x10x10x2x3xf32> - // CHECK: %[[VAR3:.*]] = tosa.reshape %[[VAR2]] {new_shape = array} + // CHECK: %[[VAR3:.*]] = tosa.reshape %[[VAR2]], %[[CONST2]] // CHECK-SAME: -> tensor<4x10x10x6xf32> - // CHECK: %[[VAR4:.*]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[VAR4:.*]] = tosa.reshape %arg2, %[[CONST3]] // CHECK-SAME: -> tensor<1x1x1x6xf32> // CHECK: %[[VAR5:.*]] = tosa.add %[[VAR3]], %[[VAR4]] // CHECK-SAME: -> tensor<4x10x10x6xf32> @@ -26,17 +30,22 @@ func.func @depthwise_conv2d_as_mul(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<1 // CHECK-LABEL: @depthwise_conv2d_as_mul_q func.func @depthwise_conv2d_as_mul_q(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor<1x1x2x3xi8>, %arg2: tensor<6xi32>) -> tensor<4x10x10x6xi32> { + // CHECK-DAG: %[[CONST0:.+]] = tosa.const_shape {value = dense<[4, 
10, 10, 2, 1]> : tensor<5xindex> // CHECK-DAG: %[[iZp:.+]] = "tosa.const"() <{value = dense<7> : tensor<1x1x1x1x1xi32>} // CHECK-DAG: %[[wZp:.+]] = "tosa.const"() <{value = dense<11> : tensor<1x1x1x1xi32>} - // CHECK: %[[rIn:.+]] = tosa.reshape %arg0 {new_shape = array} + // CHECK-DAG: %[[CONST3:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 2, 3]> : tensor<5xindex> + // CHECK-DAG: %[[CONST4:.+]] = tosa.const_shape {value = dense<[4, 10, 10, 6]> : tensor<4xindex> + // CHECK-DAG: %[[CONST5:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 6]> : tensor<4xindex> + // CHECK-DAG: %[[SHIFT:.*]] = "tosa.const"() <{value = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8> + // CHECK: %[[rIn:.+]] = tosa.reshape %arg0, %[[CONST0]] // CHECK: %[[cIn:.+]] = tosa.cast %[[rIn]] : (tensor<4x10x10x2x1xi8>) -> tensor<4x10x10x2x1xi32> // CHECK: %[[cWe:.+]] = tosa.cast %arg1 : (tensor<1x1x2x3xi8>) -> tensor<1x1x2x3xi32> // CHECK: %[[sIn:.+]] = tosa.sub %[[cIn]], %[[iZp]] // CHECK: %[[sWe:.+]] = tosa.sub %[[cWe]], %[[wZp]] - // CHECK: %[[resWe:.+]] = tosa.reshape %[[sWe]] {new_shape = array} - // CHECK: %[[mul:.+]] = tosa.mul %[[sIn]], %[[resWe]] - // CHECK: %[[reO:.+]] = tosa.reshape %[[mul]] {new_shape = array} - // CHECK: %[[reArg2:.+]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[resWe:.+]] = tosa.reshape %[[sWe]], %[[CONST3]] + // CHECK: %[[mul:.+]] = tosa.mul %[[sIn]], %[[resWe]], %[[SHIFT]] + // CHECK: %[[reO:.+]] = tosa.reshape %[[mul]], %[[CONST4]] + // CHECK: %[[reArg2:.+]] = tosa.reshape %arg2, %[[CONST5]] // CHECK: %[[add:.+]] = tosa.add %[[reO]], %[[reArg2]] %input_zp = "tosa.const"() {value = dense<7> : tensor<1xi8>} : () -> tensor<1xi8> %weight_zp = "tosa.const"() {value = dense<11> : tensor<1xi8>} : () -> tensor<1xi8> @@ -48,14 +57,19 @@ func.func @depthwise_conv2d_as_mul_q(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor< // CHECK-LABEL: @depthwise_conv2d_as_mul_padded func.func @depthwise_conv2d_as_mul_padded(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<1x1x2x3xf32>, %arg2: tensor<6xf32>) -> tensor<4x12x12x6xf32> { - // CHECK-DAG: %[[pad:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 1, 1, 0, 0, 0, 0]> : tensor<10xindex>} : () -> !tosa.shape<10> + // CHECK-DAG: %[[CONST0:.+]] = tosa.const_shape {value = dense<[4, 10, 10, 2, 1]> : tensor<5xindex>} + // CHECK-DAG: %[[pad:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 1, 1, 0, 0, 0, 0]> : tensor<10xindex>} : () -> !tosa.shape<10> // CHECK-DAG: %[[zero:.+]] = "tosa.const"() <{value = dense<0.000000e+00> : tensor} - // CHECK: %[[reIn:.+]] = tosa.reshape %arg0 {new_shape = array} + // CHECK-DAG: %[[CONST3:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 2, 3]> : tensor<5xindex>} + // CHECK-DAG: %[[CONST4:.+]] = tosa.const_shape {value = dense<[4, 12, 12, 6]> : tensor<4xindex>} + // CHECK-DAG: %[[CONST5:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 6]> : tensor<4xindex>} + // CHECK-DAG: %[[SHIFT:.*]] = "tosa.const"() <{value = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8> + // CHECK: %[[reIn:.+]] = tosa.reshape %arg0, %[[CONST0]] // CHECK: %[[padded:.+]] = tosa.pad %[[reIn]], %[[pad]], %[[zero]] : (tensor<4x10x10x2x1xf32>, !tosa.shape<10>, tensor) -> tensor<4x12x12x2x1xf32> - // CHECK: %[[reArg1:.+]] = tosa.reshape %arg1 {new_shape = array} - // CHECK: %[[mul:.+]] = tosa.mul %[[padded]], %[[reArg1]] - // CHECK: %[[reOut:.+]] = tosa.reshape %[[mul]] {new_shape = array} - // CHECK: %[[reArg2:.+]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[reArg1:.+]] = tosa.reshape %arg1, %[[CONST3]] + // CHECK: %[[mul:.+]] = tosa.mul 
%[[padded]], %[[reArg1]], %[[SHIFT]] + // CHECK: %[[reOut:.+]] = tosa.reshape %[[mul]], %[[CONST4]] + // CHECK: %[[reArg2:.+]] = tosa.reshape %arg2, %[[CONST5]] // CHECK: %[[add:.+]] = tosa.add %[[reOut]], %[[reArg2]] %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {acc_type = f32, pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x12x12x6xf32> return %0 : tensor<4x12x12x6xf32> diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir index 82838cc7e1545..bd18b7ea0fdff 100644 --- a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir +++ b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir @@ -56,11 +56,15 @@ func.func @transpose_conv2d_strided(%arg0: tensor<2x17x15x3xf32>, %arg1: tensor< // CHECK-DAG: %[[PADV:.+]] = tosa.const_shape {value = dense<[0, 0, 0, 1, 0, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8> // CHECK-DAG: %[[TRANSV:.+]] = "tosa.const"() <{value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>} // CHECK-DAG: %[[PADW:.+]] = tosa.pad %arg1, %[[PADV]] - // CHECK-DAG: %[[RESW1:.+]] = tosa.reshape %[[PADW]] {new_shape = array} + // CHECK-DAG: %[[CONST1:.+]] = tosa.const_shape {value = dense<[5, 2, 2, 2, 3, 3]> : tensor<6xindex>} + // CHECK-DAG: %[[RESW1:.+]] = tosa.reshape %[[PADW]], %[[CONST1]] // CHECK-DAG: %[[TRANS:.+]] = tosa.transpose %[[RESW1]], %[[TRANSV]] - // CHECK-DAG: %[[RESW2:.+]] = tosa.reshape %[[TRANS]] {new_shape = array} + // CHECK-DAG: %[[CONST3:.+]] = tosa.const_shape {value = dense<[30, 2, 2, 3]> : tensor<4xindex>} + // CHECK-DAG: %[[RESW2:.+]] = tosa.reshape %[[TRANS]], %[[CONST3]] // CHECK-DAG: %[[REV1:.+]] = tosa.reverse %[[RESW2]] {axis = 1 : i32} // CHECK-DAG: %[[NEWWEIGHT:.+]] = tosa.reverse %[[REV1]] {axis = 2 : i32} + // CHECK-DAG: %[[SIZE:.*]] = tosa.const_shape {value = dense<[2, 35, 47, 5]> : tensor<4xindex>} : () -> !tosa.shape<4> + // CHECK-DAG: %[[START:.*]] = tosa.const_shape {value = dense<0> : tensor<4xindex>} : () -> !tosa.shape<4> // Pad out the input matrix to handle the transpose conv. // CHECK-DAG: %[[PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 1, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8> @@ -70,13 +74,14 @@ func.func @transpose_conv2d_strided(%arg0: tensor<2x17x15x3xf32>, %arg1: tensor< // Manipulate the final shape. 
// CHECK-DAG: %[[BIAS:.+]] = "tosa.const"() <{value = dense<0.000000e+00> : tensor<30xf32>} // CHECK-DAG: %[[CONV:.+]] = tosa.conv2d %[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]] {acc_type = f32, dilation = array, pad = array, stride = array} - // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = tosa.reshape %[[CONV]] {new_shape = array} + // CHECK-DAG: %[[CONST6:.+]] = tosa.const_shape {value = dense<[2, 18, 16, 2, 3, 5]> : tensor<6xindex>} + // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = tosa.reshape %[[CONV]], %[[CONST6]] // CHECK-DAG: %[[TRANS_OUT:.+]] = tosa.transpose %[[RESHAPE_OUT_1]], %[[TRANS2]] - // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = tosa.reshape %[[TRANS_OUT]] - // CHECK-DAG: %[[START:.*]] = tosa.const_shape {value = dense<0> : tensor<4xindex>} : () -> !tosa.shape<4> - // CHECK-DAG: %[[SIZE:.*]] = tosa.const_shape {value = dense<[2, 35, 47, 5]> : tensor<4xindex>} : () -> !tosa.shape<4> - // CHECK-DAG: %[[SLICE:.*]] = tosa.slice %[[RESHAPE_OUT_2]], %[[START]], %[[SIZE]] - // CHECK-DAG: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2 + // CHECK-DAG: %[[CONST8:.+]] = tosa.const_shape {value = dense<[2, 36, 48, 5]> : tensor<4xindex> + // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = tosa.reshape %[[TRANS_OUT]], %[[CONST8]] + // CHECK-DAG: %[[SLICE:.+]] = tosa.slice %[[RESHAPE_OUT_2]], %[[START]], %[[SIZE]] + // CHECK-DAG: %[[CONST9:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 5]> : tensor<4xindex>} + // CHECK-DAG: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2, %[[CONST9]] // CHECK: %[[ADD:.+]] = tosa.add %[[SLICE]], %[[RESHAPE_ARG2]] %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2{acc_type = f32, out_pad = array, out_shape = array, stride = array} : (tensor<2x17x15x3xf32>, tensor<5x3x5x3xf32>, tensor<5xf32>) -> tensor<2x35x47x5xf32> %1 = tensor.cast %0 : tensor<2x35x47x5xf32> to tensor<2x?x?x5xf32> @@ -92,11 +97,15 @@ func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1 // CHECK-DAG: %[[PADV:.+]] = tosa.const_shape {value = dense<[0, 0, 0, 1, 0, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8> // CHECK-DAG: %[[TRANSV:.+]] = "tosa.const"() <{value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>} // CHECK-DAG: %[[PADW:.+]] = tosa.pad %arg1, %[[PADV]] {input_zp = 42 : i32} - // CHECK-DAG: %[[RESW1:.+]] = tosa.reshape %[[PADW]] {new_shape = array} + // CHECK-DAG: %[[CONST1:.+]] = tosa.const_shape {value = dense<[5, 2, 2, 2, 3, 3]> : tensor<6xindex>} + // CHECK-DAG: %[[RESW1:.+]] = tosa.reshape %[[PADW]], %[[CONST1]] // CHECK-DAG: %[[TRANS:.+]] = tosa.transpose %[[RESW1]], %[[TRANSV]] - // CHECK-DAG: %[[RESW2:.+]] = tosa.reshape %[[TRANS]] {new_shape = array} + // CHECK-DAG: %[[CONST3:.+]] = tosa.const_shape {value = dense<[30, 2, 2, 3]> : tensor<4xindex>} + // CHECK-DAG: %[[RESW2:.+]] = tosa.reshape %[[TRANS]], %[[CONST3]] // CHECK-DAG: %[[REV1:.+]] = tosa.reverse %[[RESW2]] {axis = 1 : i32} // CHECK-DAG: %[[NEWWEIGHT:.+]] = tosa.reverse %[[REV1]] {axis = 2 : i32} + // CHECK-DAG: %[[SIZE:.*]] = tosa.const_shape {value = dense<[2, 35, 47, 5]> : tensor<4xindex>} : () -> !tosa.shape<4> + // CHECK-DAG: %[[START:.*]] = tosa.const_shape {value = dense<0> : tensor<4xindex>} : () -> !tosa.shape<4> // Pad out the input matrix to handle the transpose conv. 
// CHECK-DAG: %[[PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 1, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8> @@ -108,13 +117,14 @@ func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1 // CHECK-DAG: %[[INPUT_ZP:.+]] = "tosa.const"() <{value = dense<-22> : tensor<1xi8>} // CHECK-DAG: %[[WEIGHT_ZP:.+]] = "tosa.const"() <{value = dense<42> : tensor<1xi8>} // CHECK-DAG: %[[CONV:.+]] = tosa.conv2d %[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]], %[[INPUT_ZP]], %[[WEIGHT_ZP]] {acc_type = i32, dilation = array, pad = array, stride = array} - // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = tosa.reshape %[[CONV]] {new_shape = array} + // CHECK-DAG: %[[CONV_NEW_SHAPE:.*]] = tosa.const_shape {value = dense<[2, 18, 16, 2, 3, 5]> : tensor<6xindex>} + // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = tosa.reshape %[[CONV]], %[[CONV_NEW_SHAPE]] // CHECK-DAG: %[[TRANS_OUT:.+]] = tosa.transpose %[[RESHAPE_OUT_1]], %[[TRANS2]] - // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = tosa.reshape %[[TRANS_OUT]] - // CHECK-DAG: %[[START:.*]] = tosa.const_shape {value = dense<0> : tensor<4xindex>} - // CHECK-DAG: %[[SIZE:.*]] = tosa.const_shape {value = dense<[2, 35, 47, 5]> : tensor<4xindex>} - // CHECK-DAG: %[[SLICE:.*]] = tosa.slice %[[RESHAPE_OUT_2]], %[[START]], %[[SIZE]] - // CHECK-DAG: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2 + // CHECK-DAG: %[[TEANS_NEW_SHAPE:.+]] = tosa.const_shape {value = dense<[2, 36, 48, 5]> : tensor<4xindex>} + // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = tosa.reshape %[[TRANS_OUT]], %[[TEANS_NEW_SHAPE]] + // CHECK-DAG: %[[SLICE:.+]] = tosa.slice %[[RESHAPE_OUT_2]], %[[START]], %[[SIZE]] + // CHECK-DAG: %[[ARG2_NEW_SHAPE:.+]] = tosa.const_shape {value = dense<[1, 1, 1, 5]> : tensor<4xindex>} + // CHECK-DAG: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2, %[[ARG2_NEW_SHAPE]] // CHECK: %[[ADD:.+]] = tosa.add %[[SLICE]], %[[RESHAPE_ARG2]] %input_zp = "tosa.const"() {value = dense<-22> : tensor<1xi8>} : () -> tensor<1xi8> %weight_zp = "tosa.const"() {value = dense<42> : tensor<1xi8>} : () -> tensor<1xi8> @@ -126,25 +136,31 @@ func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1 // CHECK-LABEL: @transpose_conv2d_strided_overpad func.func @transpose_conv2d_strided_overpad(%arg0 : tensor<1x16x1x1xi8>, %arg1 : tensor<1x2x1x1xi8>, %arg2 : tensor<1xi32>) -> (tensor<1x19x2x1xi32>) { - // CHECK-DAG: %[[WEIGHT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 0, 0, 0, 1, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8> + // CHECK-DAG: %[[WEIGHT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 0, 0, 0, 1, 0, 0]> : tensor<8xindex>} + // CHECK-DAG: %[[CONST1:.+]] = tosa.const_shape {value = dense<[1, 2, 1, 1, 2, 1]> : tensor<6xindex>} // CHECK-DAG: %[[WEIGHT_PERMS:.+]] = "tosa.const"() <{value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>} - // CHECK-DAG: %[[INPUT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 0, 0, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8> + // CHECK-DAG: %[[CONST3:.+]] = tosa.const_shape {value = dense<[2, 2, 1, 1]> : tensor<4xindex>} + // CHECK-DAG: %[[INPUT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 1, 1, 0, 0, 0, 0]> : tensor<8xindex>} // CHECK-DAG: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0> : tensor<2xi32>} + // CHECK-DAG: %[[CONST6:.+]] = tosa.const_shape {value = dense<[1, 17, 1, 1, 2, 1]> : tensor<6xindex>} // CHECK-DAG: %[[RESULT_PERMS:.+]] = "tosa.const"() <{value = dense<[0, 1, 3, 2, 4, 5]> : tensor<6xi32>} - // CHECK-DAG: %[[RESULT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 2, 0, 0, 0, 0, 0]> : tensor<8xindex>} : 
() -> !tosa.shape<8> + // CHECK-DAG: %[[CONST8:.+]] = tosa.const_shape {value = dense<[1, 17, 2, 1]> : tensor<4xindex>} + // CHECK-DAG: %[[RESULT_PAD:.+]] = tosa.const_shape {value = dense<[0, 0, 2, 0, 0, 0, 0, 0]> : tensor<8xindex>} + // CHECK-DAG: %[[CONST10:.+]] = tosa.const_shape {value = dense<1> : tensor<4xindex>} + // CHECK-DAG: %[[INPUT_ZP:.*]] = "tosa.const"() <{value = dense<-103> : tensor<1xi8>}> + // CHECK-DAG: %[[WEIGHT_ZP:.*]] = "tosa.const"() <{value = dense<93> : tensor<1xi8>}> // CHECK: %[[PAD_WEIGHT:.+]] = tosa.pad %arg1, %[[WEIGHT_PAD]] {input_zp = 93 : i32} - // CHECK: %[[RESHAPE_WEIGHT_0:.+]] = tosa.reshape %[[PAD_WEIGHT]] {new_shape = array} + // CHECK: %[[RESHAPE_WEIGHT_0:.+]] = tosa.reshape %[[PAD_WEIGHT]], %[[CONST1]] // CHECK: %[[TRANSPOSE_WEIGHT:.+]] = tosa.transpose %[[RESHAPE_WEIGHT_0]], %[[WEIGHT_PERMS]] - // CHECK: %[[RESHAPE_WEIGHT_1:.+]] = tosa.reshape %[[TRANSPOSE_WEIGHT]] {new_shape = array} + // CHECK: %[[RESHAPE_WEIGHT_1:.+]] = tosa.reshape %[[TRANSPOSE_WEIGHT]], %[[CONST3]] // CHECK: %[[REVERSE:.+]] = tosa.reverse %[[RESHAPE_WEIGHT_1]] {axis = 1 : i32} // CHECK: %[[PAD_INPUT:.+]] = tosa.pad %arg0, %[[INPUT_PAD]] {input_zp = -103 : i32} - // CHECK: %[[CONV:.+]] = tosa.conv2d %[[PAD_INPUT]], %[[REVERSE]], %[[ZERO]] - // CHECK-SAME{literal}: dilation = [1, 1], pad = [0, 0, 0, 0], input_zp = -103 : i32, weight_zp = 93 : i32, stride = [1, 1]} - // CHECK: %[[RESHAPE_RESULT_0:.+]] = tosa.reshape %[[CONV]] {new_shape = array} + // CHECK: %[[CONV:.+]] = tosa.conv2d %[[PAD_INPUT]], %[[REVERSE]], %[[ZERO]], %[[INPUT_ZP]], %[[WEIGHT_ZP]] {acc_type = i32, dilation = array, pad = array, stride = array} + // CHECK: %[[RESHAPE_RESULT_0:.+]] = tosa.reshape %[[CONV]], %[[CONST6]] // CHECK: %[[TRANSPOSE_RESULT:.+]] = tosa.transpose %[[RESHAPE_RESULT_0]], %[[RESULT_PERMS]] - // CHECK: %[[RESHAPE_RESULT_1:.+]] = tosa.reshape %[[TRANSPOSE_RESULT]] {new_shape = array} + // CHECK: %[[RESHAPE_RESULT_1:.+]] = tosa.reshape %[[TRANSPOSE_RESULT]], %[[CONST8]] // CHECK: %[[PAD_RESULT:.+]] = tosa.pad %[[RESHAPE_RESULT_1]], %[[RESULT_PAD]] - // CHECK: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2, %[[CONST10]] // CHECK: %[[ADD:.+]] = tosa.add %[[PAD_RESULT]], %[[RESHAPE_ARG2]] %input_zp = "tosa.const"() {value = dense<-103> : tensor<1xi8>} : () -> tensor<1xi8> %weight_zp = "tosa.const"() {value = dense<93> : tensor<1xi8>} : () -> tensor<1xi8> diff --git a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir index 73eabab657f38..bdd403567a4ed 100644 --- a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir +++ b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir @@ -376,29 +376,42 @@ func.func @test_table_dynamic(%arg0 : tensor<4x?xi16>, %arg1 : tensor<513xi16>) // CHECK-LABEL: @test_static_reshape func.func @test_static_reshape(%arg0 : tensor<4x4xi32>) -> () { - // CHECK: tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor<16xi32> - %0 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor + // CHECK: %[[CONST3:.+]] = tosa.const_shape {value = dense<16> : tensor<1xindex>} : () -> !tosa.shape<1> + %3 = tosa.const_shape {value = dense<16> : tensor<1xindex>} : () -> !tosa.shape<1> + // CHECK: tosa.reshape %arg0, %[[CONST3]] : (tensor<4x4xi32>, !tosa.shape<1>) -> tensor<16xi32> + %0 = tosa.reshape %arg0, %3 : (tensor<4x4xi32>, !tosa.shape<1>) -> tensor<16xi32> - // CHECK: tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor<16xi32> - %1 = 
tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor + // CHECK: %[[CONST4:.+]] = tosa.const_shape {value = dense<-1> : tensor<1xindex>} : () -> !tosa.shape<1> + // CHECK: tosa.reshape %arg0, %[[CONST4]] : (tensor<4x4xi32>, !tosa.shape<1>) -> tensor<16xi32> + %4 = tosa.const_shape {value = dense<-1> : tensor<1xindex>} : () -> !tosa.shape<1> + %1 = tosa.reshape %arg0, %4 : (tensor<4x4xi32>, !tosa.shape<1>) -> tensor<16xi32> - // CHECK: tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor<2x8xi32> - %2 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor + // CHECK: %[[CONST5:.+]] = tosa.const_shape {value = dense<[2, -1]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK: tosa.reshape %arg0, %[[CONST5]] : (tensor<4x4xi32>, !tosa.shape<2>) -> tensor<2x8xi32> + %5 = tosa.const_shape {value = dense<[2, -1]> : tensor<2xindex>} : () -> !tosa.shape<2> + %2 = tosa.reshape %arg0, %5 : (tensor<4x4xi32>, !tosa.shape<2>) -> tensor<2x8xi32> return } + // ----- // CHECK-LABEL: @test_dynamic_reshape func.func @test_dynamic_reshape(%arg0 : tensor<4x?xi32>) -> () { - // CHECK: %0 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor<16xi32> - %0 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor - - // CHECK: %1 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor - %1 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor - - // CHECK: %2 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor<2x?xi32> - %2 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor + // CHECK: %0 = tosa.const_shape {value = dense<16> : tensor<1xindex>} : () -> !tosa.shape<1> + %0 = tosa.const_shape {value = dense<16> : tensor<1xindex>} : () -> !tosa.shape<1> + // CHECK: %1 = tosa.reshape %arg0, %0 : (tensor<4x?xi32>, !tosa.shape<1>) -> tensor<16xi32> + %1 = tosa.reshape %arg0, %0 : (tensor<4x?xi32>, !tosa.shape<1>) -> tensor + + // CHECK: %2 = tosa.const_shape {value = dense<-1> : tensor<1xindex>} : () -> !tosa.shape<1> + %2 = tosa.const_shape {value = dense<-1> : tensor<1xindex>} : () -> !tosa.shape<1> + // CHECK: %3 = tosa.reshape %arg0, %2 : (tensor<4x?xi32>, !tosa.shape<1>) -> tensor + %3 = tosa.reshape %arg0, %2 : (tensor<4x?xi32>, !tosa.shape<1>) -> tensor + + // CHECK: %4 = tosa.const_shape {value = dense<[2, -1]> : tensor<2xindex>} : () -> !tosa.shape<2> + %4 = tosa.const_shape {value = dense<[2, -1]> : tensor<2xindex>} : () -> !tosa.shape<2> + // CHECK: %5 = tosa.reshape %arg0, %4 : (tensor<4x?xi32>, !tosa.shape<2>) -> tensor<2x?xi32> + %5 = tosa.reshape %arg0, %4 : (tensor<4x?xi32>, !tosa.shape<2>) -> tensor return } diff --git a/mlir/test/Dialect/Tosa/tosa-reduce-transposes.mlir b/mlir/test/Dialect/Tosa/tosa-reduce-transposes.mlir index f274eb9c10a81..947335e45a9d9 100644 --- a/mlir/test/Dialect/Tosa/tosa-reduce-transposes.mlir +++ b/mlir/test/Dialect/Tosa/tosa-reduce-transposes.mlir @@ -141,12 +141,14 @@ func.func @test_mulop_conversion(%arg0: tensor<1x2x3x4xi32>, %arg1: tensor<1x2x3 // COM: this case is a reshape we don't convert, since can't fold the transpose into it. // COM: a transform actually occurs underneath the hood, but it results in identical IR. 
// CHECK-LABEL: @test_basic_non_broadcasting_reshape -// CHECK: "tosa.const"() <{value = dense<[0, 2, 1]> : tensor<3xi32>}> : () -> tensor<3xi32> -// CHECK: tosa.reshape %arg0 {new_shape = array} : (tensor<2x3xi32>) -> tensor<1x3x2xi32> -// CHECK: tosa.transpose %1, %0 : (tensor<1x3x2xi32>, tensor<3xi32>) -> tensor<1x2x3xi32> +// CHECK-DAG: %[[VAL_1:.*]] = tosa.const_shape {value = dense<[1, 3, 2]> : tensor<3xindex>} +// CHECK-DAG: %[[VAL_2:.*]] = "tosa.const"() <{value = dense<[0, 2, 1]> : tensor<3xi32>}> +// CHECK: %[[VAL_3:.*]] = tosa.reshape %arg0, %[[VAL_1]] : (tensor<2x3xi32>, !tosa.shape<3>) -> tensor<1x3x2xi32> +// CHECK: %[[VAL_4:.*]] = tosa.transpose %[[VAL_3]], %[[VAL_2]] : (tensor<1x3x2xi32>, tensor<3xi32>) -> tensor<1x2x3xi32> func.func @test_basic_non_broadcasting_reshape(%arg0: tensor<2x3xi32>) -> tensor<1x2x3xi32> { + %shape = tosa.const_shape {value = dense<[1, 3, 2]> : tensor<3xindex>} : () -> !tosa.shape<3> %perms = "tosa.const"() {value = dense<[0, 2, 1]> : tensor<3xi32>} : () -> tensor<3xi32> - %1 = tosa.reshape %arg0 {new_shape = array} : (tensor<2x3xi32>) -> tensor<1x3x2xi32> + %1 = tosa.reshape %arg0, %shape : (tensor<2x3xi32>, !tosa.shape<3>) -> tensor<1x3x2xi32> %2 = tosa.transpose %1, %perms : (tensor<1x3x2xi32>, tensor<3xi32>) -> tensor<1x2x3xi32> return %2 : tensor<1x2x3xi32> } @@ -154,11 +156,13 @@ func.func @test_basic_non_broadcasting_reshape(%arg0: tensor<2x3xi32>) -> tensor // ----- // CHECK-LABEL: @test_dynamic_broadcasting_reshape -// CHECK: %[[RES:.*]] = tosa.reshape %arg0 {new_shape = array} : (tensor) -> tensor<1x1x?xi32> +// CHECK-DAG: %[[SHAPE:.*]] = tosa.const_shape {value = dense<[1, 1, -1]> : tensor<3xindex>} +// CHECK: %[[RES:.*]] = tosa.reshape %arg0, %[[SHAPE]] : (tensor, !tosa.shape<3>) -> tensor<1x1x?xi32> // CHECK: return %[[RES]] func.func @test_dynamic_broadcasting_reshape(%arg0: tensor) -> tensor<1x1x?xi32> { + %shape = tosa.const_shape {value = dense<[1, -1, 1]> : tensor<3xindex>} : () -> !tosa.shape<3> %perms = "tosa.const"() {value = dense<[0, 2, 1]> : tensor<3xi32>} : () -> tensor<3xi32> - %1 = tosa.reshape %arg0 {new_shape = array} : (tensor) -> tensor<1x?x1xi32> + %1 = tosa.reshape %arg0, %shape : (tensor, !tosa.shape<3>) -> tensor<1x?x1xi32> %2 = tosa.transpose %1, %perms : (tensor<1x?x1xi32>, tensor<3xi32>) -> tensor<1x1x?xi32> return %2 : tensor<1x1x?xi32> } @@ -167,12 +171,14 @@ func.func @test_dynamic_broadcasting_reshape(%arg0: tensor) -> tensor<1x1 // CHECK-LABEL: @test_reshape_for_broadcast // CHECK-DAG: %[[RESHAPE_INPUT:.*]] = "tosa.const"() <{value = dense<[1, 2, 3, 4]> -// CHECK-DAG: %[[RESHAPE:.*]] = tosa.reshape %[[RESHAPE_INPUT]] {new_shape = array} -// CHECK-DAG: %[[ADD:.*]] = tosa.add %arg0, %[[RESHAPE]] +// CHECK-DAG: %[[SHAPE:.*]] = tosa.const_shape {value = dense<[4, 1, 1]> : tensor<3xindex>} +// CHECK: %[[RESHAPE:.*]] = tosa.reshape %[[RESHAPE_INPUT]], %[[SHAPE]] : (tensor<4xi32>, !tosa.shape<3>) -> tensor<4x1x1xi32> +// CHECK: %[[ADD:.*]] = tosa.add %arg0, %[[RESHAPE]] // CHECK: return %[[ADD]] func.func @test_reshape_for_broadcast(%arg0: tensor<4x3x2xi32>) -> tensor<4x3x2xi32> { %0 = "tosa.const"() {value = dense<[1,2,3,4]> : tensor<4xi32>} : () -> tensor<4xi32> - %reshape = tosa.reshape %0 {new_shape = array} : (tensor<4xi32>) -> tensor<1x1x4xi32> + %1 = tosa.const_shape {value = dense<[1, 1, 4]> : tensor<3xindex>} : () -> !tosa.shape<3> + %reshape = tosa.reshape %0, %1 : (tensor<4xi32>, !tosa.shape<3>) -> tensor<1x1x4xi32> %perms0 = "tosa.const"() {value = dense<[2, 1, 0]> : tensor<3xi32>} : () -> 
tensor<3xi32> %transpose0 = tosa.transpose %arg0, %perms0 : (tensor<4x3x2xi32>, tensor<3xi32>) -> tensor<2x3x4xi32> %add = tosa.add %transpose0, %reshape : (tensor<2x3x4xi32>, tensor<1x1x4xi32>) -> tensor<2x3x4xi32> @@ -187,25 +193,28 @@ func.func @test_reshape_for_broadcast(%arg0: tensor<4x3x2xi32>) -> tensor<4x3x2x // CHECK-LABEL: @test_resnet18_common_case // COM: note that %74 is now represented by %arg2 -// CHECK-DAG: %[[VAL_2:.*]] = "tosa.const"() <{value = dense_resource : tensor<64xf32>}> : () -> tensor<64xf32> -// CHECK-DAG: %[[VAL_3:.*]] = "tosa.const"() <{value = dense_resource : tensor<64xf32>}> : () -> tensor<64xf32> -// CHECK-DAG: %[[VAL_4:.*]] = "tosa.const"() <{value = dense<9.99999974E-6> : tensor<1xf32>}> : () -> tensor<1xf32> -// CHECK-DAG: %[[VAL_5:.*]] = "tosa.const"() <{value = dense<5.000000e-01> : tensor<1xf32>}> : () -> tensor<1xf32> -// CHECK-DAG: %[[VAL_6:.*]] = tosa.add %arg1, %[[VAL_4]] : (tensor<64xf32>, tensor<1xf32>) -> tensor<64xf32> -// CHECK-DAG: %[[VAL_7:.*]] = tosa.pow %[[VAL_6]], %[[VAL_5]] : (tensor<64xf32>, tensor<1xf32>) -> tensor<64xf32> -// CHECK-DAG: %[[VAL_8:.*]] = tosa.reciprocal %[[VAL_7]] : (tensor<64xf32>) -> tensor<64xf32> -// CHECK-DAG: %[[VAL_9:.*]] = tosa.reshape %arg0 {new_shape = array} : (tensor<64xf32>) -> tensor<1x1x1x64xf32> -// CHECK-DAG: %[[VAL_10:.*]] = tosa.sub %arg2, %[[VAL_9]] : (tensor<1x112x112x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> -// CHECK-DAG: %[[VAL_11:.*]] = tosa.reshape %[[VAL_8]] {new_shape = array} : (tensor<64xf32>) -> tensor<1x1x1x64xf32> -// CHECK-DAG: %[[VAL_12:.*]] = tosa.mul %[[VAL_10]], %[[VAL_11]] : (tensor<1x112x112x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> -// CHECK-DAG: %[[VAL_13:.*]] = tosa.reshape %[[VAL_3]] {new_shape = array} : (tensor<64xf32>) -> tensor<1x1x1x64xf32> -// CHECK-DAG: %[[VAL_14:.*]] = tosa.mul %[[VAL_12]], %[[VAL_13]] : (tensor<1x112x112x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> -// CHECK-DAG: %[[VAL_15:.*]] = tosa.reshape %[[VAL_2]] {new_shape = array} : (tensor<64xf32>) -> tensor<1x1x1x64xf32> -// CHECK-DAG: %[[VAL_16:.*]] = tosa.add %[[VAL_14]], %[[VAL_15]] : (tensor<1x112x112x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> -// CHECK-DAG: %[[VAL_17:.*]] = tosa.clamp %[[VAL_16]] {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x112x112x64xf32>) -> tensor<1x112x112x64xf32> -// CHECK: return %[[VAL_17]] : tensor<1x112x112x64xf32> - +// CHECK-DAG: %[[VAL_3:.*]] = "tosa.const"() <{value = dense_resource : tensor<64xf32>}> : () -> tensor<64xf32> +// CHECK-DAG: %[[VAL_4:.*]] = "tosa.const"() <{value = dense_resource : tensor<64xf32>}> : () -> tensor<64xf32> +// CHECK-DAG: %[[VAL_5:.*]] = "tosa.const"() <{value = dense<9.99999974E-6> : tensor<1xf32>}> : () -> tensor<1xf32> +// CHECK-DAG: %[[VAL_6:.*]] = "tosa.const"() <{value = dense<5.000000e-01> : tensor<1xf32>}> : () -> tensor<1xf32> +// CHECK-DAG: %[[VAL_7:.*]] = tosa.add %arg1, %[[VAL_5]] : (tensor<64xf32>, tensor<1xf32>) -> tensor<64xf32> +// CHECK-DAG: %[[VAL_8:.*]] = tosa.pow %[[VAL_7]], %[[VAL_6]] : (tensor<64xf32>, tensor<1xf32>) -> tensor<64xf32> +// CHECK-DAG: %[[VAL_9:.*]] = tosa.reciprocal %[[VAL_8]] : (tensor<64xf32>) -> tensor<64xf32> +// CHECK-DAG: %[[VAL_10:.*]] = tosa.const_shape {value = dense<[1, 1, 1, 64]> : tensor<4xindex>} : () -> !tosa.shape<4> +// CHECK-DAG: %[[VAL_11:.*]] = tosa.reshape %arg0, %[[VAL_10]] : (tensor<64xf32>, !tosa.shape<4>) -> tensor<1x1x1x64xf32> +// CHECK-DAG: 
%[[VAL_12:.*]] = tosa.sub %arg2, %[[VAL_11]] : (tensor<1x112x112x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> +// CHECK-DAG: %[[VAL_13:.*]] = tosa.const_shape {value = dense<[1, 1, 1, 64]> : tensor<4xindex>} : () -> !tosa.shape<4> +// CHECK-DAG: %[[VAL_14:.*]] = tosa.reshape %[[VAL_9]], %[[VAL_13]] : (tensor<64xf32>, !tosa.shape<4>) -> tensor<1x1x1x64xf32> +// CHECK-DAG: %[[VAL_15:.*]] = tosa.mul %[[VAL_12]], %[[VAL_14]] : (tensor<1x112x112x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> +// CHECK-DAG: %[[VAL_16:.*]] = tosa.const_shape {value = dense<[1, 1, 1, 64]> : tensor<4xindex>} : () -> !tosa.shape<4> +// CHECK-DAG: %[[VAL_17:.*]] = tosa.reshape %[[VAL_4]], %[[VAL_16]] : (tensor<64xf32>, !tosa.shape<4>) -> tensor<1x1x1x64xf32> +// CHECK-DAG: %[[VAL_18:.*]] = tosa.mul %[[VAL_15]], %[[VAL_17]] : (tensor<1x112x112x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> +// CHECK-DAG: %[[VAL_19:.*]] = tosa.const_shape {value = dense<[1, 1, 1, 64]> : tensor<4xindex>} : () -> !tosa.shape<4> +// CHECK-DAG: %[[VAL_20:.*]] = tosa.reshape %[[VAL_3]], %[[VAL_19]] : (tensor<64xf32>, !tosa.shape<4>) -> tensor<1x1x1x64xf32> +// CHECK-DAG: %[[VAL_21:.*]] = tosa.add %[[VAL_18]], %[[VAL_20]] : (tensor<1x112x112x64xf32>, tensor<1x1x1x64xf32>) -> tensor<1x112x112x64xf32> +// CHECK-DAG: %[[VAL_22:.*]] = tosa.clamp %[[VAL_21]] {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x112x112x64xf32>) -> tensor<1x112x112x64xf32> func.func @test_resnet18_common_case(%arg0: tensor<64xf32>, %arg1: tensor<64xf32>, %74: tensor<1x112x112x64xf32>) -> tensor<1x112x112x64xf32> { + %58 = tosa.const_shape {value = dense<[1, 64, 1, 1]> : tensor<4xindex>} : () -> !tosa.shape<4> %59 = "tosa.const"() <{value = dense_resource : tensor<64xf32>}> : () -> tensor<64xf32> %60 = "tosa.const"() <{value = dense_resource : tensor<64xf32>}> : () -> tensor<64xf32> %63 = "tosa.const"() <{value = dense<[0, 2, 3, 1]> : tensor<4xi32>}> : () -> tensor<4xi32> @@ -216,13 +225,13 @@ func.func @test_resnet18_common_case(%arg0: tensor<64xf32>, %arg1: tensor<64xf32 %76 = tosa.add %arg1, %69 : (tensor<64xf32>, tensor<1xf32>) -> tensor<64xf32> %77 = tosa.pow %76, %70 : (tensor<64xf32>, tensor<1xf32>) -> tensor<64xf32> %78 = tosa.reciprocal %77 : (tensor<64xf32>) -> tensor<64xf32> - %79 = tosa.reshape %arg0 {new_shape = array} : (tensor<64xf32>) -> tensor<1x64x1x1xf32> + %79 = tosa.reshape %arg0, %58 : (tensor<64xf32>, !tosa.shape<4>) -> tensor<1x64x1x1xf32> %80 = tosa.sub %75, %79 : (tensor<1x64x112x112xf32>, tensor<1x64x1x1xf32>) -> tensor<1x64x112x112xf32> - %81 = tosa.reshape %78 {new_shape = array} : (tensor<64xf32>) -> tensor<1x64x1x1xf32> + %81 = tosa.reshape %78, %58 : (tensor<64xf32>, !tosa.shape<4>) -> tensor<1x64x1x1xf32> %82 = tosa.mul %80, %81 : (tensor<1x64x112x112xf32>, tensor<1x64x1x1xf32>) -> tensor<1x64x112x112xf32> - %83 = tosa.reshape %60 {new_shape = array} : (tensor<64xf32>) -> tensor<1x64x1x1xf32> + %83 = tosa.reshape %60, %58 : (tensor<64xf32>, !tosa.shape<4>) -> tensor<1x64x1x1xf32> %84 = tosa.mul %82, %83 : (tensor<1x64x112x112xf32>, tensor<1x64x1x1xf32>) -> tensor<1x64x112x112xf32> - %85 = tosa.reshape %59 {new_shape = array} : (tensor<64xf32>) -> tensor<1x64x1x1xf32> + %85 = tosa.reshape %59, %58 : (tensor<64xf32>, !tosa.shape<4>) -> tensor<1x64x1x1xf32> %86 = tosa.add %84, %85 : (tensor<1x64x112x112xf32>, tensor<1x64x1x1xf32>) -> tensor<1x64x112x112xf32> %87 = tosa.clamp %86 {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : 
i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x64x112x112xf32>) -> tensor<1x64x112x112xf32> %88 = tosa.transpose %87, %63 : (tensor<1x64x112x112xf32>, tensor<4xi32>) -> tensor<1x112x112x64xf32> @@ -285,7 +294,8 @@ func.func @test_no_transform_if_outside_fan_in_cone(%arg0: tensor<3x3x3x3xi32>) // CHECK: return %[[RESHAPE]], %[[CLAMP]] func.func @test_two_different_downstream_converge_to_reshape_same_perms(%arg0: tensor<64xf32>) -> (tensor<1x1x64xf32>, tensor<1x1x64xf32>) { %0 = "tosa.const"() <{value = dense<[0, 2, 1]> : tensor<3xi32>}> : () -> tensor<3xi32> - %1 = tosa.reshape %arg0 {new_shape = array} : (tensor<64xf32>) -> tensor<1x64x1xf32> + %shape = tosa.const_shape {value = dense<[1, 64, 1]> : tensor<3xindex>} : () -> !tosa.shape<3> + %1 = tosa.reshape %arg0, %shape : (tensor<64xf32>, !tosa.shape<3>) -> tensor<1x64x1xf32> %2 = tosa.clamp %1 {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x64x1xf32>) -> tensor<1x64x1xf32> %3 = tosa.transpose %1, %0 : (tensor<1x64x1xf32>, tensor<3xi32>) -> tensor<1x1x64xf32> %4 = tosa.transpose %2, %0 : (tensor<1x64x1xf32>, tensor<3xi32>) -> tensor<1x1x64xf32> @@ -305,7 +315,8 @@ func.func @test_two_different_downstream_converge_to_reshape_same_perms(%arg0: t func.func @test_two_different_downstream_converge_to_reshape_different_perms(%arg0: tensor<64xf32>) -> (tensor<1x1x64xf32>, tensor<64x1x1xf32>) { %0 = "tosa.const"() <{value = dense<[1, 2, 0]> : tensor<3xi32>}> : () -> tensor<3xi32> %1 = "tosa.const"() <{value = dense<[0, 2, 1]> : tensor<3xi32>}> : () -> tensor<3xi32> - %2 = tosa.reshape %arg0 {new_shape = array} : (tensor<64xf32>) -> tensor<1x64x1xf32> + %shape = tosa.const_shape {value = dense<[1, 64, 1]> : tensor<3xindex>} : () -> !tosa.shape<3> + %2 = tosa.reshape %arg0, %shape : (tensor<64xf32>, !tosa.shape<3>) -> tensor<1x64x1xf32> %3 = tosa.clamp %2 {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x64x1xf32>) -> tensor<1x64x1xf32> %4 = tosa.transpose %2, %1 : (tensor<1x64x1xf32>, tensor<3xi32>) -> tensor<1x1x64xf32> %5 = tosa.transpose %3, %0 : (tensor<1x64x1xf32>, tensor<3xi32>) -> tensor<64x1x1xf32> diff --git a/mlir/test/Dialect/Vector/canonicalize.mlir b/mlir/test/Dialect/Vector/canonicalize.mlir index 61e858f5f226a..a74e562ad2f68 100644 --- a/mlir/test/Dialect/Vector/canonicalize.mlir +++ b/mlir/test/Dialect/Vector/canonicalize.mlir @@ -132,10 +132,35 @@ func.func @extract_from_create_mask_dynamic_position(%dim0: index, %index: index // ----- +// CHECK-LABEL: @extract_scalar_poison +func.func @extract_scalar_poison() -> f32 { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : f32 + // CHECK-NOT: vector.extract + // CHECK-NEXT: return %[[UB]] : f32 + %0 = ub.poison : vector<4x8xf32> + %1 = vector.extract %0[2, 4] : f32 from vector<4x8xf32> + return %1 : f32 +} + +// ----- + +// CHECK-LABEL: @extract_vector_poison +func.func @extract_vector_poison() -> vector<8xf32> { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : vector<8xf32> + // CHECK-NOT: vector.extract + // CHECK-NEXT: return %[[UB]] : vector<8xf32> + %0 = ub.poison : vector<4x8xf32> + %1 = vector.extract %0[2] : vector<8xf32> from vector<4x8xf32> + return %1 : vector<8xf32> +} + +// ----- + // CHECK-LABEL: @extract_scalar_poison_idx func.func @extract_scalar_poison_idx(%a: vector<4x5xf32>) -> f32 { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : f32 // CHECK-NOT: vector.extract - // CHECK-NEXT: ub.poison : f32 + // CHECK-NEXT: return %[[UB]] : 
f32 %0 = vector.extract %a[-1, 0] : f32 from vector<4x5xf32> return %0 : f32 } @@ -144,8 +169,9 @@ func.func @extract_scalar_poison_idx(%a: vector<4x5xf32>) -> f32 { // CHECK-LABEL: @extract_vector_poison_idx func.func @extract_vector_poison_idx(%a: vector<4x5xf32>) -> vector<5xf32> { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : vector<5xf32> // CHECK-NOT: vector.extract - // CHECK-NEXT: ub.poison : vector<5xf32> + // CHECK-NEXT: return %[[UB]] : vector<5xf32> %0 = vector.extract %a[-1] : vector<5xf32> from vector<4x5xf32> return %0 : vector<5xf32> } @@ -155,8 +181,9 @@ func.func @extract_vector_poison_idx(%a: vector<4x5xf32>) -> vector<5xf32> { // CHECK-LABEL: @extract_multiple_poison_idx func.func @extract_multiple_poison_idx(%a: vector<4x5x8xf32>) -> vector<8xf32> { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : vector<8xf32> // CHECK-NOT: vector.extract - // CHECK-NEXT: ub.poison : vector<8xf32> + // CHECK-NEXT: return %[[UB]] : vector<8xf32> %0 = vector.extract %a[-1, -1] : vector<8xf32> from vector<4x5x8xf32> return %0 : vector<8xf32> } @@ -2886,13 +2913,47 @@ func.func @vector_insert_const_regression(%arg0: i8) -> vector<4xi8> { return %1 : vector<4xi8> } +// ----- + +// Insert a poison value shouldn't be folded as the resulting vector is not +// fully poison. + +// CHECK-LABEL: @insert_scalar_poison +func.func @insert_scalar_poison(%a: vector<4x8xf32>) + -> vector<4x8xf32> { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : f32 + // CHECK-NEXT: %[[RES:.*]] = vector.insert %[[UB]] + // CHECK-NEXT: return %[[RES]] : vector<4x8xf32> + %0 = ub.poison : f32 + %1 = vector.insert %0, %a[2, 3] : f32 into vector<4x8xf32> + return %1 : vector<4x8xf32> +} + +// ----- + +// Insert a poison value shouldn't be folded as the resulting vector is not +// fully poison. + +// CHECK-LABEL: @insert_vector_poison +func.func @insert_vector_poison(%a: vector<4x8xf32>) + -> vector<4x8xf32> { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : vector<8xf32> + // CHECK-NEXT: %[[RES:.*]] = vector.insert %[[UB]] + // CHECK-NEXT: return %[[RES]] : vector<4x8xf32> + %0 = ub.poison : vector<8xf32> + %1 = vector.insert %0, %a[2] : vector<8xf32> into vector<4x8xf32> + return %1 : vector<4x8xf32> +} + + // ----- // CHECK-LABEL: @insert_scalar_poison_idx func.func @insert_scalar_poison_idx(%a: vector<4x5xf32>, %b: f32) -> vector<4x5xf32> { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : vector<4x5xf32> // CHECK-NOT: vector.insert - // CHECK-NEXT: ub.poison : vector<4x5xf32> + // CHECK-NEXT: return %[[UB]] : vector<4x5xf32> %0 = vector.insert %b, %a[-1, 0] : f32 into vector<4x5xf32> return %0 : vector<4x5xf32> } @@ -2902,8 +2963,9 @@ func.func @insert_scalar_poison_idx(%a: vector<4x5xf32>, %b: f32) // CHECK-LABEL: @insert_vector_poison_idx func.func @insert_vector_poison_idx(%a: vector<4x5xf32>, %b: vector<5xf32>) -> vector<4x5xf32> { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : vector<4x5xf32> // CHECK-NOT: vector.insert - // CHECK-NEXT: ub.poison : vector<4x5xf32> + // CHECK-NEXT: return %[[UB]] : vector<4x5xf32> %0 = vector.insert %b, %a[-1] : vector<5xf32> into vector<4x5xf32> return %0 : vector<4x5xf32> } @@ -2913,8 +2975,9 @@ func.func @insert_vector_poison_idx(%a: vector<4x5xf32>, %b: vector<5xf32>) // CHECK-LABEL: @insert_multiple_poison_idx func.func @insert_multiple_poison_idx(%a: vector<4x5x8xf32>, %b: vector<8xf32>) -> vector<4x5x8xf32> { + // CHECK-NEXT: %[[UB:.*]] = ub.poison : vector<4x5x8xf32> // CHECK-NOT: vector.insert - // CHECK-NEXT: ub.poison : vector<4x5x8xf32> + // CHECK-NEXT: return %[[UB]] : vector<4x5x8xf32> %0 = vector.insert 
%b, %a[-1, -1] : vector<8xf32> into vector<4x5x8xf32> return %0 : vector<4x5x8xf32> } diff --git a/mlir/test/Dialect/Vector/vector-bitcast-lowering-transforms.mlir b/mlir/test/Dialect/Vector/vector-bitcast-lowering-transforms.mlir index 346291019451c..29e7007666e87 100644 --- a/mlir/test/Dialect/Vector/vector-bitcast-lowering-transforms.mlir +++ b/mlir/test/Dialect/Vector/vector-bitcast-lowering-transforms.mlir @@ -24,7 +24,7 @@ func.func @vector_bitcast_2d(%arg0: vector<2x4xi32>) -> vector<2x2xi64> { } // CHECK-LABEL: func.func @vector_bitcast_2d // CHECK-SAME: %[[IN:[a-zA-Z0-9]+]] -// CHECK: %[[INIT:.+]] = arith.constant {{.+}} : vector<2x2xi64> +// CHECK: %[[INIT:.+]] = ub.poison : vector<2x2xi64> // CHECK: %[[V1:.+]] = vector.extract %[[IN]][0] : vector<4xi32> from vector<2x4xi32> // CHECK: %[[B1:.+]] = vector.bitcast %[[V1]] : vector<4xi32> to vector<2xi64> // CHECK: %[[R1:.+]] = vector.insert %[[B1]], %[[INIT]] [0] @@ -39,7 +39,7 @@ func.func @vector_bitcast_4d_with_scalable_dim(%arg0: vector<1x2x[3]x4xi64>) -> } // CHECK-LABEL: func.func @vector_bitcast_4d_with_scalable_dim // CHECK-SAME: %[[IN:[a-zA-Z0-9]+]] -// CHECK: %[[INIT:.+]] = arith.constant dense<0> : vector<1x2x[3]x8xi32> +// CHECK: %[[INIT:.+]] = ub.poison : vector<1x2x[3]x8xi32> // CHECK: %[[V1:.+]] = vector.extract %[[IN]][0, 0] : vector<[3]x4xi64> from vector<1x2x[3]x4xi64> // CHECK: %[[B1:.+]] = vector.bitcast %[[V1]] : vector<[3]x4xi64> to vector<[3]x8xi32> // CHECK: %[[R1:.+]] = vector.insert %[[B1]], %[[INIT]] [0, 0] : vector<[3]x8xi32> into vector<1x2x[3]x8xi32> @@ -54,7 +54,7 @@ func.func @vector_bitcast_2d_trailing_scalable_dim(%arg0: vector<2x[2]xi64>) -> } // CHECK-LABEL: func.func @vector_bitcast_2d_trailing_scalable_dim // CHECK-SAME: %[[IN:[a-zA-Z0-9]+]] -// CHECK: %[[INIT:.+]] = arith.constant dense<0> : vector<2x[4]xi32> +// CHECK: %[[INIT:.+]] = ub.poison : vector<2x[4]xi32> // CHECK: %[[V1:.+]] = vector.extract %[[IN]][0] : vector<[2]xi64> from vector<2x[2]xi64> // CHECK: %[[B1:.+]] = vector.bitcast %[[V1]] : vector<[2]xi64> to vector<[4]xi32> // CHECK: %[[R1:.+]] = vector.insert %[[B1]], %[[INIT]] [0] : vector<[4]xi32> into vector<2x[4]xi32> diff --git a/mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir b/mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir index 4a5ea439134cf..8e167a520260f 100644 --- a/mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir +++ b/mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir @@ -41,8 +41,8 @@ func.func @broadcast_vec1d_from_vec1d(%arg0: vector<2xf32>) -> vector<2xf32> { // CHECK-LABEL: func @broadcast_vec2d_from_vec1d // CHECK-SAME: %[[A:.*0]]: vector<2xf32> -// CHECK: %[[C0:.*]] = arith.constant dense<0.000000e+00> : vector<3x2xf32> -// CHECK: %[[T0:.*]] = vector.insert %[[A]], %[[C0]] [0] : vector<2xf32> into vector<3x2xf32> +// CHECK: %[[U0:.*]] = ub.poison : vector<3x2xf32> +// CHECK: %[[T0:.*]] = vector.insert %[[A]], %[[U0]] [0] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T1:.*]] = vector.insert %[[A]], %[[T0]] [1] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T2:.*]] = vector.insert %[[A]], %[[T1]] [2] : vector<2xf32> into vector<3x2xf32> // CHECK: return %[[T2]] : vector<3x2xf32> @@ -54,12 +54,12 @@ func.func @broadcast_vec2d_from_vec1d(%arg0: vector<2xf32>) -> vector<3x2xf32> { // CHECK-LABEL: func @broadcast_vec3d_from_vec1d // CHECK-SAME: %[[A:.*0]]: vector<2xf32> -// CHECK-DAG: %[[C0:.*]] = arith.constant dense<0.000000e+00> : vector<3x2xf32> -// CHECK-DAG: %[[C1:.*]] = 
arith.constant dense<0.000000e+00> : vector<4x3x2xf32> -// CHECK: %[[T0:.*]] = vector.insert %[[A]], %[[C0]] [0] : vector<2xf32> into vector<3x2xf32> +// CHECK-DAG: %[[U0:.*]] = ub.poison : vector<3x2xf32> +// CHECK-DAG: %[[U1:.*]] = ub.poison : vector<4x3x2xf32> +// CHECK: %[[T0:.*]] = vector.insert %[[A]], %[[U0]] [0] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T1:.*]] = vector.insert %[[A]], %[[T0]] [1] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T2:.*]] = vector.insert %[[A]], %[[T1]] [2] : vector<2xf32> into vector<3x2xf32> -// CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[C1]] [0] : vector<3x2xf32> into vector<4x3x2xf32> +// CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[U1]] [0] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T4:.*]] = vector.insert %[[T2]], %[[T3]] [1] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T5:.*]] = vector.insert %[[T2]], %[[T4]] [2] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T6:.*]] = vector.insert %[[T2]], %[[T5]] [3] : vector<3x2xf32> into vector<4x3x2xf32> @@ -72,8 +72,8 @@ func.func @broadcast_vec3d_from_vec1d(%arg0: vector<2xf32>) -> vector<4x3x2xf32> // CHECK-LABEL: func @broadcast_vec3d_from_vec2d // CHECK-SAME: %[[A:.*0]]: vector<3x2xf32> -// CHECK: %[[C0:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x2xf32> -// CHECK: %[[T0:.*]] = vector.insert %[[A]], %[[C0]] [0] : vector<3x2xf32> into vector<4x3x2xf32> +// CHECK: %[[U0:.*]] = ub.poison : vector<4x3x2xf32> +// CHECK: %[[T0:.*]] = vector.insert %[[A]], %[[U0]] [0] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T1:.*]] = vector.insert %[[A]], %[[T0]] [1] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T2:.*]] = vector.insert %[[A]], %[[T1]] [2] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T3:.*]] = vector.insert %[[A]], %[[T2]] [3] : vector<3x2xf32> into vector<4x3x2xf32> @@ -97,9 +97,9 @@ func.func @broadcast_stretch(%arg0: vector<1xf32>) -> vector<4xf32> { // CHECK-LABEL: func @broadcast_stretch_at_start // CHECK-SAME: %[[A:.*0]]: vector<1x4xf32> -// CHECK: %[[C0:.*]] = arith.constant dense<0.000000e+00> : vector<3x4xf32> +// CHECK: %[[U0:.*]] = ub.poison : vector<3x4xf32> // CHECK: %[[T0:.*]] = vector.extract %[[A]][0] : vector<4xf32> from vector<1x4xf32> -// CHECK: %[[T1:.*]] = vector.insert %[[T0]], %[[C0]] [0] : vector<4xf32> into vector<3x4xf32> +// CHECK: %[[T1:.*]] = vector.insert %[[T0]], %[[U0]] [0] : vector<4xf32> into vector<3x4xf32> // CHECK: %[[T2:.*]] = vector.insert %[[T0]], %[[T1]] [1] : vector<4xf32> into vector<3x4xf32> // CHECK: %[[T3:.*]] = vector.insert %[[T0]], %[[T2]] [2] : vector<4xf32> into vector<3x4xf32> // CHECK: return %[[T3]] : vector<3x4xf32> @@ -111,10 +111,10 @@ func.func @broadcast_stretch_at_start(%arg0: vector<1x4xf32>) -> vector<3x4xf32> // CHECK-LABEL: func @broadcast_stretch_at_end // CHECK-SAME: %[[A:.*0]]: vector<4x1xf32> -// CHECK: %[[C0:.*]] = arith.constant dense<0.000000e+00> : vector<4x3xf32> +// CHECK: %[[U0:.*]] = ub.poison : vector<4x3xf32> // CHECK: %[[T0:.*]] = vector.extract %[[A]][0, 0] : f32 from vector<4x1xf32> // CHECK: %[[T2:.*]] = vector.splat %[[T0]] : vector<3xf32> -// CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[C0]] [0] : vector<3xf32> into vector<4x3xf32> +// CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[U0]] [0] : vector<3xf32> into vector<4x3xf32> // CHECK: %[[T4:.*]] = vector.extract %[[A]][1, 0] : f32 from vector<4x1xf32> // CHECK: %[[T6:.*]] = vector.splat %[[T4]] : vector<3xf32> // CHECK: %[[T7:.*]] = vector.insert %[[T6]], %[[T3]] [1] : vector<3xf32> into 
vector<4x3xf32> @@ -133,25 +133,25 @@ func.func @broadcast_stretch_at_end(%arg0: vector<4x1xf32>) -> vector<4x3xf32> { // CHECK-LABEL: func @broadcast_stretch_in_middle // CHECK-SAME: %[[A:.*0]]: vector<4x1x2xf32> -// CHECK: %[[C0:.*]] = arith.constant dense<0.000000e+00> : vector<4x3x2xf32> -// CHECK: %[[C1:.*]] = arith.constant dense<0.000000e+00> : vector<3x2xf32> +// CHECK: %[[U0:.*]] = ub.poison : vector<4x3x2xf32> +// CHECK: %[[U1:.*]] = ub.poison : vector<3x2xf32> // CHECK: %[[T0:.*]] = vector.extract %[[A]][0, 0] : vector<2xf32> from vector<4x1x2xf32> -// CHECK: %[[T2:.*]] = vector.insert %[[T0]], %[[C1]] [0] : vector<2xf32> into vector<3x2xf32> +// CHECK: %[[T2:.*]] = vector.insert %[[T0]], %[[U1]] [0] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T3:.*]] = vector.insert %[[T0]], %[[T2]] [1] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T4:.*]] = vector.insert %[[T0]], %[[T3]] [2] : vector<2xf32> into vector<3x2xf32> -// CHECK: %[[T5:.*]] = vector.insert %[[T4]], %[[C0]] [0] : vector<3x2xf32> into vector<4x3x2xf32> +// CHECK: %[[T5:.*]] = vector.insert %[[T4]], %[[U0]] [0] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T6:.*]] = vector.extract %[[A]][1, 0] : vector<2xf32> from vector<4x1x2xf32> -// CHECK: %[[T8:.*]] = vector.insert %[[T6]], %[[C1]] [0] : vector<2xf32> into vector<3x2xf32> +// CHECK: %[[T8:.*]] = vector.insert %[[T6]], %[[U1]] [0] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T9:.*]] = vector.insert %[[T6]], %[[T8]] [1] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T10:.*]] = vector.insert %[[T6]], %[[T9]] [2] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T11:.*]] = vector.insert %[[T10]], %[[T5]] [1] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T12:.*]] = vector.extract %[[A]][2, 0] : vector<2xf32> from vector<4x1x2xf32> -// CHECK: %[[T14:.*]] = vector.insert %[[T12]], %[[C1]] [0] : vector<2xf32> into vector<3x2xf32> +// CHECK: %[[T14:.*]] = vector.insert %[[T12]], %[[U1]] [0] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T15:.*]] = vector.insert %[[T12]], %[[T14]] [1] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T16:.*]] = vector.insert %[[T12]], %[[T15]] [2] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T17:.*]] = vector.insert %[[T16]], %[[T11]] [2] : vector<3x2xf32> into vector<4x3x2xf32> // CHECK: %[[T18:.*]] = vector.extract %[[A]][3, 0] : vector<2xf32> from vector<4x1x2xf32> -// CHECK: %[[T20:.*]] = vector.insert %[[T18]], %[[C1]] [0] : vector<2xf32> into vector<3x2xf32> +// CHECK: %[[T20:.*]] = vector.insert %[[T18]], %[[U1]] [0] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T21:.*]] = vector.insert %[[T18]], %[[T20]] [1] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T22:.*]] = vector.insert %[[T18]], %[[T21]] [2] : vector<2xf32> into vector<3x2xf32> // CHECK: %[[T23:.*]] = vector.insert %[[T22]], %[[T17]] [3] : vector<3x2xf32> into vector<4x3x2xf32> @@ -164,8 +164,8 @@ func.func @broadcast_stretch_in_middle(%arg0: vector<4x1x2xf32>) -> vector<4x3x2 // CHECK-LABEL: func.func @broadcast_scalable_duplication // CHECK-SAME: %[[ARG0:.*]]: vector<[32]xf32>) -// CHECK: %[[CST:.*]] = arith.constant dense<0.000000e+00> : vector<1x[32]xf32> -// CHECK: %[[RES:.*]] = vector.insert %[[ARG0]], %[[CST]] [0] : vector<[32]xf32> into vector<1x[32]xf32> +// CHECK: %[[INIT:.*]] = ub.poison : vector<1x[32]xf32> +// CHECK: %[[RES:.*]] = vector.insert %[[ARG0]], %[[INIT]] [0] : vector<[32]xf32> into vector<1x[32]xf32> // CHECK: return %[[RES]] : vector<1x[32]xf32> func.func @broadcast_scalable_duplication(%arg0: 
vector<[32]xf32>) -> vector<1x[32]xf32> { diff --git a/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir b/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir index 4867a416e5d14..08ac2ac5bb7d5 100644 --- a/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir +++ b/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir @@ -14,15 +14,15 @@ // CHECK-SAME: %[[A:[a-zA-Z0-9]*]]: vector<2x4xf32>, // CHECK-SAME: %[[B:[a-zA-Z0-9]*]]: vector<4x3xf32>, // CHECK-SAME: %[[C:[a-zA-Z0-9]*]]: vector<2x3xf32> -// CHECK-DAG: %[[vcst:.*]] = arith.constant dense<0.000000e+00> : vector<8xf32> -// CHECK-DAG: %[[vcst_0:.*]] = arith.constant dense<0.000000e+00> : vector<12xf32> -// CHECK-DAG: %[[vcst_1:.*]] = arith.constant dense<0.000000e+00> : vector<2x3xf32> +// CHECK-DAG: %[[ub:.*]] = ub.poison : vector<8xf32> +// CHECK-DAG: %[[ub_0:.*]] = ub.poison : vector<12xf32> +// CHECK-DAG: %[[ub_1:.*]] = ub.poison : vector<2x3xf32> // CHECK: %[[a0:.*]] = vector.extract %[[A]][0] : vector<4xf32> from vector<2x4xf32> -// CHECK: %[[a1:.*]] = vector.insert_strided_slice %[[a0]], %[[vcst]] {offsets = [0], strides = [1]} : vector<4xf32> into vector<8xf32> +// CHECK: %[[a1:.*]] = vector.insert_strided_slice %[[a0]], %[[ub]] {offsets = [0], strides = [1]} : vector<4xf32> into vector<8xf32> // CHECK: %[[a2:.*]] = vector.extract %[[A]][1] : vector<4xf32> from vector<2x4xf32> // CHECK: %[[a3:.*]] = vector.insert_strided_slice %[[a2]], %[[a1]] {offsets = [4], strides = [1]} : vector<4xf32> into vector<8xf32> // CHECK: %[[b0:.*]] = vector.extract %[[B]][0] : vector<3xf32> from vector<4x3xf32> -// CHECK: %[[b1:.*]] = vector.insert_strided_slice %[[b0]], %[[vcst_0]] {offsets = [0], strides = [1]} : vector<3xf32> into vector<12xf32> +// CHECK: %[[b1:.*]] = vector.insert_strided_slice %[[b0]], %[[ub_0]] {offsets = [0], strides = [1]} : vector<3xf32> into vector<12xf32> // CHECK: %[[b2:.*]] = vector.extract %[[B]][1] : vector<3xf32> from vector<4x3xf32> // CHECK: %[[b3:.*]] = vector.insert_strided_slice %[[b2]], %[[b1]] {offsets = [3], strides = [1]} : vector<3xf32> into vector<12xf32> // CHECK: %[[b4:.*]] = vector.extract %[[B]][2] : vector<3xf32> from vector<4x3xf32> @@ -31,7 +31,7 @@ // CHECK: %[[b7:.*]] = vector.insert_strided_slice %[[b6]], %[[b5]] {offsets = [9], strides = [1]} : vector<3xf32> into vector<12xf32> // CHECK: %[[mm1:.*]] = vector.matrix_multiply %[[a3]], %[[b7]] {lhs_columns = 4 : i32, lhs_rows = 2 : i32, rhs_columns = 3 : i32} : (vector<8xf32>, vector<12xf32>) -> vector<6xf32> // CHECK: %[[mm2:.*]] = vector.extract_strided_slice %[[mm1]] {offsets = [0], sizes = [3], strides = [1]} : vector<6xf32> to vector<3xf32> -// CHECK: %[[mm3:.*]] = vector.insert %[[mm2]], %[[vcst_1]] [0] : vector<3xf32> into vector<2x3xf32> +// CHECK: %[[mm3:.*]] = vector.insert %[[mm2]], %[[ub_1]] [0] : vector<3xf32> into vector<2x3xf32> // CHECK: %[[mm4:.*]] = vector.extract_strided_slice %[[mm1]] {offsets = [3], sizes = [3], strides = [1]} : vector<6xf32> to vector<3xf32> // CHECK: %[[mm5:.*]] = vector.insert %[[mm4]], %[[mm3]] [1] : vector<3xf32> into vector<2x3xf32> // CHECK: %[[mm6:.*]] = arith.addf %[[C]], %[[mm5]] : vector<2x3xf32> diff --git a/mlir/test/Dialect/Vector/vector-shape-cast-lowering-scalable-vectors.mlir b/mlir/test/Dialect/Vector/vector-shape-cast-lowering-scalable-vectors.mlir index fde6ce9102446..f4becad3c79c1 100644 --- a/mlir/test/Dialect/Vector/vector-shape-cast-lowering-scalable-vectors.mlir +++ 
b/mlir/test/Dialect/Vector/vector-shape-cast-lowering-scalable-vectors.mlir @@ -7,9 +7,9 @@ // CHECK-SAME: %[[arg0:.*]]: vector<2x1x[4]xi32> func.func @i32_3d_to_1d_last_dim_scalable(%arg0: vector<2x1x[4]xi32>) -> vector<[8]xi32> { - // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<0> : vector<[8]xi32> + // CHECK-NEXT: %[[ub:.*]] = ub.poison : vector<[8]xi32> // CHECK-NEXT: %[[subvec0:.*]] = vector.extract %[[arg0]][0, 0] : vector<[4]xi32> from vector<2x1x[4]xi32> - // CHECK-NEXT: %[[res0:.*]] = vector.scalable.insert %[[subvec0]], %[[cst]][0] : vector<[4]xi32> into vector<[8]xi32> + // CHECK-NEXT: %[[res0:.*]] = vector.scalable.insert %[[subvec0]], %[[ub]][0] : vector<[4]xi32> into vector<[8]xi32> // CHECK-NEXT: %[[subvec1:.*]] = vector.extract %[[arg0]][1, 0] : vector<[4]xi32> from vector<2x1x[4]xi32> // CHECK-NEXT: %[[res1:.*]] = vector.scalable.insert %[[subvec1]], %[[res0]][4] : vector<[4]xi32> into vector<[8]xi32> %flat = vector.shape_cast %arg0 : vector<2x1x[4]xi32> to vector<[8]xi32> @@ -22,9 +22,9 @@ func.func @i32_3d_to_1d_last_dim_scalable(%arg0: vector<2x1x[4]xi32>) -> vector< // CHECK-LABEL: i32_1d_to_3d_last_dim_scalable // CHECK-SAME: %[[arg0:.*]]: vector<[8]xi32> func.func @i32_1d_to_3d_last_dim_scalable(%arg0: vector<[8]xi32>) -> vector<2x1x[4]xi32> { - // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<0> : vector<2x1x[4]xi32> + // CHECK-NEXT: %[[ub:.*]] = ub.poison : vector<2x1x[4]xi32> // CHECK-NEXT: %[[subvec0:.*]] = vector.scalable.extract %[[arg0]][0] : vector<[4]xi32> from vector<[8]xi32> - // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[cst]] [0, 0] : vector<[4]xi32> into vector<2x1x[4]xi32> + // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[ub]] [0, 0] : vector<[4]xi32> into vector<2x1x[4]xi32> // CHECK-NEXT: %[[subvec1:.*]] = vector.scalable.extract %[[arg0]][4] : vector<[4]xi32> from vector<[8]xi32> // CHECK-NEXT: %[[res1:.*]] = vector.insert %[[subvec1]], %[[res0]] [1, 0] : vector<[4]xi32> into vector<2x1x[4]xi32> %unflat = vector.shape_cast %arg0 : vector<[8]xi32> to vector<2x1x[4]xi32> @@ -37,9 +37,9 @@ func.func @i32_1d_to_3d_last_dim_scalable(%arg0: vector<[8]xi32>) -> vector<2x1x // CHECK-LABEL: i8_2d_to_1d_last_dim_scalable // CHECK-SAME: %[[arg0:.*]]: vector<4x[8]xi8> func.func @i8_2d_to_1d_last_dim_scalable(%arg0: vector<4x[8]xi8>) -> vector<[32]xi8> { - // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<0> : vector<[32]xi8> + // CHECK-NEXT: %[[ub:.*]] = ub.poison : vector<[32]xi8> // CHECK-NEXT: %[[subvec0:.*]] = vector.extract %[[arg0]][0] : vector<[8]xi8> from vector<4x[8]xi8> - // CHECK-NEXT: %[[res0:.*]] = vector.scalable.insert %[[subvec0]], %[[cst]][0] : vector<[8]xi8> into vector<[32]xi8> + // CHECK-NEXT: %[[res0:.*]] = vector.scalable.insert %[[subvec0]], %[[ub]][0] : vector<[8]xi8> into vector<[32]xi8> // CHECK-NEXT: %[[subvec1:.*]] = vector.extract %[[arg0]][1] : vector<[8]xi8> from vector<4x[8]xi8> // CHECK-NEXT: %[[res1:.*]] = vector.scalable.insert %[[subvec1]], %[[res0]][8] : vector<[8]xi8> into vector<[32]xi8> // CHECK-NEXT: %[[subvec2:.*]] = vector.extract %[[arg0]][2] : vector<[8]xi8> from vector<4x[8]xi8> @@ -56,9 +56,9 @@ func.func @i8_2d_to_1d_last_dim_scalable(%arg0: vector<4x[8]xi8>) -> vector<[32] // CHECK-LABEL: i8_1d_to_2d_last_dim_scalable // CHECK-SAME: %[[arg0:.*]]: vector<[32]xi8> func.func @i8_1d_to_2d_last_dim_scalable(%arg0: vector<[32]xi8>) -> vector<4x[8]xi8> { - // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<0> : vector<4x[8]xi8> + // CHECK-NEXT: %[[ub:.*]] = ub.poison : vector<4x[8]xi8> // 
CHECK-NEXT: %[[subvec0:.*]] = vector.scalable.extract %[[arg0]][0] : vector<[8]xi8> from vector<[32]xi8> - // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[cst]] [0] : vector<[8]xi8> into vector<4x[8]xi8> + // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[ub]] [0] : vector<[8]xi8> into vector<4x[8]xi8> // CHECK-NEXT: %[[subvec1:.*]] = vector.scalable.extract %[[arg0]][8] : vector<[8]xi8> from vector<[32]xi8> // CHECK-NEXT: %[[res1:.*]] = vector.insert %[[subvec1]], %[[res0]] [1] : vector<[8]xi8> into vector<4x[8]xi8> // CHECK-NEXT: %[[subvec2:.*]] = vector.scalable.extract %[[arg0]][16] : vector<[8]xi8> from vector<[32]xi8> @@ -75,9 +75,9 @@ func.func @i8_1d_to_2d_last_dim_scalable(%arg0: vector<[32]xi8>) -> vector<4x[8] // CHECK-LABEL: f32_permute_leading_non_scalable_dims // CHECK-SAME: %[[arg0:.*]]: vector<2x3x[4]xf32> func.func @f32_permute_leading_non_scalable_dims(%arg0: vector<2x3x[4]xf32>) -> vector<3x2x[4]xf32> { - // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<0.000000e+00> : vector<3x2x[4]xf32> + // CHECK-NEXT: %[[ub:.*]] = ub.poison : vector<3x2x[4]xf32> // CHECK-NEXT: %[[subvec0:.*]] = vector.extract %[[arg0]][0, 0] : vector<[4]xf32> from vector<2x3x[4]xf32> - // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[cst]] [0, 0] : vector<[4]xf32> into vector<3x2x[4]xf32> + // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[ub]] [0, 0] : vector<[4]xf32> into vector<3x2x[4]xf32> // CHECK-NEXT: %[[subvec1:.*]] = vector.extract %[[arg0]][0, 1] : vector<[4]xf32> from vector<2x3x[4]xf32> // CHECK-NEXT: %[[res1:.*]] = vector.insert %[[subvec1]], %[[res0]] [0, 1] : vector<[4]xf32> into vector<3x2x[4]xf32> // CHECK-NEXT: %[[subvec2:.*]] = vector.extract %[[arg0]][0, 2] : vector<[4]xf32> from vector<2x3x[4]xf32> @@ -99,9 +99,9 @@ func.func @f32_permute_leading_non_scalable_dims(%arg0: vector<2x3x[4]xf32>) -> // CHECK-SAME: %[[arg0:.*]]: vector<2x2x[2]xf64> func.func @f64_flatten_leading_non_scalable_dims(%arg0: vector<2x2x[2]xf64>) -> vector<4x[2]xf64> { - // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<0.000000e+00> : vector<4x[2]xf64> + // CHECK-NEXT: %[[ub:.*]] = ub.poison : vector<4x[2]xf64> // CHECK-NEXT: %[[subvec0:.*]] = vector.extract %[[arg0]][0, 0] : vector<[2]xf64> from vector<2x2x[2]xf64> - // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[cst]] [0] : vector<[2]xf64> into vector<4x[2]xf64> + // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[ub]] [0] : vector<[2]xf64> into vector<4x[2]xf64> // CHECK-NEXT: %[[subvec1:.*]] = vector.extract %[[arg0]][0, 1] : vector<[2]xf64> from vector<2x2x[2]xf64> // CHECK-NEXT: %[[res1:.*]] = vector.insert %[[subvec1]], %[[res0]] [1] : vector<[2]xf64> into vector<4x[2]xf64> // CHECK-NEXT: %[[subvec2:.*]] = vector.extract %[[arg0]][1, 0] : vector<[2]xf64> from vector<2x2x[2]xf64> @@ -109,7 +109,7 @@ func.func @f64_flatten_leading_non_scalable_dims(%arg0: vector<2x2x[2]xf64>) -> // CHECK-NEXT: %[[subvec3:.*]] = vector.extract %[[arg0]][1, 1] : vector<[2]xf64> from vector<2x2x[2]xf64> // CHECK-NEXT: %[[res3:.*]] = vector.insert %[[subvec3]], %[[res2]] [3] : vector<[2]xf64> into vector<4x[2]xf64> %res = vector.shape_cast %arg0: vector<2x2x[2]xf64> to vector<4x[2]xf64> - // CHECK-NEXT: return %7 : vector<4x[2]xf64> + // CHECK-NEXT: return %[[res3]] : vector<4x[2]xf64> return %res : vector<4x[2]xf64> } @@ -119,10 +119,10 @@ func.func @f64_flatten_leading_non_scalable_dims(%arg0: vector<2x2x[2]xf64>) -> // CHECK-SAME: %[[arg0:.*]]: vector<3x[4]xf32> func.func
@f32_reduce_trailing_scalable_dim(%arg0: vector<3x[4]xf32>) -> vector<6x[2]xf32> { - // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<0.000000e+00> : vector<6x[2]xf32> + // CHECK-NEXT: %[[ub:.*]] = ub.poison : vector<6x[2]xf32> // CHECK-NEXT: %[[srcvec0:.*]] = vector.extract %[[arg0]][0] : vector<[4]xf32> from vector<3x[4]xf32> // CHECK-NEXT: %[[subvec0:.*]] = vector.scalable.extract %[[srcvec0]][0] : vector<[2]xf32> from vector<[4]xf32> - // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[cst]] [0] : vector<[2]xf32> into vector<6x[2]xf32> + // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[subvec0]], %[[ub]] [0] : vector<[2]xf32> into vector<6x[2]xf32> // CHECK-NEXT: %[[subvec1:.*]] = vector.scalable.extract %[[srcvec0]][2] : vector<[2]xf32> from vector<[4]xf32> // CHECK-NEXT: %[[res1:.*]] = vector.insert %[[subvec1]], %[[res0]] [1] : vector<[2]xf32> into vector<6x[2]xf32> // CHECK-NEXT: %[[srcvec1:.*]] = vector.extract %[[arg0]][1] : vector<[4]xf32> from vector<3x[4]xf32> @@ -146,16 +146,15 @@ func.func @f32_reduce_trailing_scalable_dim(%arg0: vector<3x[4]xf32>) -> vector< // CHECK-SAME: %[[arg0:.*]]: vector<4x[2]xf32> func.func @f32_increase_trailing_scalable_dim(%arg0: vector<4x[2]xf32>) -> vector<2x[4]xf32> { - // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<0.000000e+00> : vector<2x[4]xf32> + // CHECK-DAG: %[[ub0:.*]] = ub.poison : vector<2x[4]xf32> + // CHECK-DAG: %[[ub1:.*]] = ub.poison : vector<[4]xf32> // CHECK-NEXT: %[[subvec0:.*]] = vector.extract %[[arg0]][0] : vector<[2]xf32> from vector<4x[2]xf32> - // CHECK-NEXT: %[[resvec0:.*]] = vector.extract %[[cst]][0] : vector<[4]xf32> from vector<2x[4]xf32> - // CHECK-NEXT: %[[resvec1:.*]] = vector.scalable.insert %[[subvec0]], %[[resvec0]][0] : vector<[2]xf32> into vector<[4]xf32> + // CHECK-NEXT: %[[resvec1:.*]] = vector.scalable.insert %[[subvec0]], %[[ub1]][0] : vector<[2]xf32> into vector<[4]xf32> // CHECK-NEXT: %[[subvec1:.*]] = vector.extract %[[arg0]][1] : vector<[2]xf32> from vector<4x[2]xf32> // CHECK-NEXT: %[[resvec2:.*]] = vector.scalable.insert %[[subvec1]], %[[resvec1]][2] : vector<[2]xf32> into vector<[4]xf32> - // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[resvec2]], %[[cst]] [0] : vector<[4]xf32> into vector<2x[4]xf32> + // CHECK-NEXT: %[[res0:.*]] = vector.insert %[[resvec2]], %[[ub0]] [0] : vector<[4]xf32> into vector<2x[4]xf32> // CHECK-NEXT: %[[subvec3:.*]] = vector.extract %[[arg0]][2] : vector<[2]xf32> from vector<4x[2]xf32> - // CHECK-NEXT: %[[resvec3:.*]] = vector.extract %[[cst]][1] : vector<[4]xf32> from vector<2x[4]xf32> - // CHECK-NEXT: %[[resvec4:.*]] = vector.scalable.insert %[[subvec3]], %[[resvec3]][0] : vector<[2]xf32> into vector<[4]xf32> + // CHECK-NEXT: %[[resvec4:.*]] = vector.scalable.insert %[[subvec3]], %[[ub1]][0] : vector<[2]xf32> into vector<[4]xf32> // CHECK-NEXT: %[[subvec4:.*]] = vector.extract %[[arg0]][3] : vector<[2]xf32> from vector<4x[2]xf32> // CHECK-NEXT: %[[resvec5:.*]] = vector.scalable.insert %[[subvec4]], %[[resvec4]][2] : vector<[2]xf32> into vector<[4]xf32> // CHECK-NEXT: %[[res1:.*]] = vector.insert %[[resvec5]], %[[res0]] [1] : vector<[4]xf32> into vector<2x[4]xf32> diff --git a/mlir/test/Dialect/Vector/vector-shape-cast-lowering-transforms.mlir b/mlir/test/Dialect/Vector/vector-shape-cast-lowering-transforms.mlir index b4c52d5533116..ab30acf68b30b 100644 --- a/mlir/test/Dialect/Vector/vector-shape-cast-lowering-transforms.mlir +++ b/mlir/test/Dialect/Vector/vector-shape-cast-lowering-transforms.mlir @@ -22,11 +22,11 @@ func.func @cancel_shape_cast(%arg0: 
vector<16xf32>) -> vector<16xf32> { // llvm.matrix operations // CHECK-LABEL: func @shape_casts func.func @shape_casts(%a: vector<2x2xf32>) -> (vector<4xf32>, vector<2x2xf32>) { - // CHECK-DAG: %[[cst22:.*]] = arith.constant dense<0.000000e+00> : vector<2x2xf32> - // CHECK-DAG: %[[cst:.*]] = arith.constant dense<0.000000e+00> : vector<4xf32> + // CHECK-DAG: %[[ub22:.*]] = ub.poison : vector<2x2xf32> + // CHECK-DAG: %[[ub:.*]] = ub.poison : vector<4xf32> // CHECK: %[[ex0:.*]] = vector.extract %{{.*}}[0] : vector<2xf32> from vector<2x2xf32> // - // CHECK: %[[in0:.*]] = vector.insert_strided_slice %[[ex0]], %[[cst]] + // CHECK: %[[in0:.*]] = vector.insert_strided_slice %[[ex0]], %[[ub]] // CHECK-SAME: {offsets = [0], strides = [1]} : vector<2xf32> into vector<4xf32> // // CHECK: %[[ex1:.*]] = vector.extract %{{.*}}[1] : vector<2xf32> from vector<2x2xf32> @@ -42,7 +42,7 @@ func.func @shape_casts(%a: vector<2x2xf32>) -> (vector<4xf32>, vector<2x2xf32>) // CHECK-SAME: {offsets = [0], sizes = [2], strides = [1]} : // CHECK-SAME: vector<4xf32> to vector<2xf32> // - // CHECK: %[[res0:.*]] = vector.insert %[[ss0]], %[[cst22]] [0] : + // CHECK: %[[res0:.*]] = vector.insert %[[ss0]], %[[ub22]] [0] : // CHECK-SAME: vector<2xf32> into vector<2x2xf32> // // CHECK: %[[s2:.*]] = vector.extract_strided_slice %[[add]] @@ -59,9 +59,9 @@ func.func @shape_casts(%a: vector<2x2xf32>) -> (vector<4xf32>, vector<2x2xf32>) // CHECK-LABEL: func @shape_cast_2d2d // CHECK-SAME: %[[A:.*]]: vector<3x2xf32> -// CHECK: %[[C:.*]] = arith.constant dense<0.000000e+00> : vector<2x3xf32> +// CHECK: %[[UB:.*]] = ub.poison : vector<2x3xf32> // CHECK: %[[T0:.*]] = vector.extract %[[A]][0, 0] : f32 from vector<3x2xf32> -// CHECK: %[[T1:.*]] = vector.insert %[[T0]], %[[C]] [0, 0] : f32 into vector<2x3xf32> +// CHECK: %[[T1:.*]] = vector.insert %[[T0]], %[[UB]] [0, 0] : f32 into vector<2x3xf32> // CHECK: %[[T2:.*]] = vector.extract %[[A]][0, 1] : f32 from vector<3x2xf32> // CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[T1]] [0, 1] : f32 into vector<2x3xf32> // CHECK: %[[T4:.*]] = vector.extract %[[A]][1, 0] : f32 from vector<3x2xf32> @@ -81,9 +81,9 @@ func.func @shape_cast_2d2d(%arg0 : vector<3x2xf32>) -> vector<2x3xf32> { // CHECK-LABEL: func @shape_cast_3d1d // CHECK-SAME: %[[A:.*]]: vector<1x3x2xf32> -// CHECK: %[[C:.*]] = arith.constant dense<0.000000e+00> : vector<6xf32> +// CHECK: %[[UB:.*]] = ub.poison : vector<6xf32> // CHECK: %[[T0:.*]] = vector.extract %[[A]][0, 0] : vector<2xf32> from vector<1x3x2xf32> -// CHECK: %[[T1:.*]] = vector.insert_strided_slice %[[T0]], %[[C]] +// CHECK: %[[T1:.*]] = vector.insert_strided_slice %[[T0]], %[[UB]] // CHECK-SAME: {offsets = [0], strides = [1]} : vector<2xf32> into vector<6xf32> // CHECK: %[[T2:.*]] = vector.extract %[[A]][0, 1] : vector<2xf32> from vector<1x3x2xf32> // CHECK: %[[T3:.*]] = vector.insert_strided_slice %[[T2]], %[[T1]] @@ -100,10 +100,10 @@ func.func @shape_cast_3d1d(%arg0 : vector<1x3x2xf32>) -> vector<6xf32> { // CHECK-LABEL: func @shape_cast_1d3d // CHECK-SAME: %[[A:.*]]: vector<6xf32> -// CHECK: %[[C:.*]] = arith.constant dense<0.000000e+00> : vector<2x1x3xf32> +// CHECK: %[[UB:.*]] = ub.poison : vector<2x1x3xf32> // CHECK: %[[T0:.*]] = vector.extract_strided_slice %[[A]] // CHECK-SAME: {offsets = [0], sizes = [3], strides = [1]} : vector<6xf32> to vector<3xf32> -// CHECK: %[[T1:.*]] = vector.insert %[[T0]], %[[C]] [0, 0] : vector<3xf32> into vector<2x1x3xf32> +// CHECK: %[[T1:.*]] = vector.insert %[[T0]], %[[UB]] [0, 0] : vector<3xf32> into vector<2x1x3xf32> // 
CHECK: %[[T2:.*]] = vector.extract_strided_slice %[[A]] // CHECK: {offsets = [3], sizes = [3], strides = [1]} : vector<6xf32> to vector<3xf32> // CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[T1]] [1, 0] : vector<3xf32> into vector<2x1x3xf32> @@ -115,11 +115,11 @@ func.func @shape_cast_1d3d(%arg0 : vector<6xf32>) -> vector<2x1x3xf32> { } // CHECK-LABEL: func.func @shape_cast_0d1d( -// CHECK-SAME: %[[VAL_0:.*]]: vector<f32>) -> vector<1xf32> { -// CHECK: %[[VAL_1:.*]] = arith.constant dense<0.000000e+00> : vector<1xf32> -// CHECK: %[[VAL_2:.*]] = vector.extractelement %[[VAL_0]][] : vector<f32> -// CHECK: %[[VAL_3:.*]] = vector.insert %[[VAL_2]], %[[VAL_1]] [0] : f32 into vector<1xf32> -// CHECK: return %[[VAL_3]] : vector<1xf32> +// CHECK-SAME: %[[ARG0:.*]]: vector<f32>) -> vector<1xf32> { +// CHECK: %[[UB:.*]] = ub.poison : vector<1xf32> +// CHECK: %[[EXTRACT0:.*]] = vector.extractelement %[[ARG0]][] : vector<f32> +// CHECK: %[[RES:.*]] = vector.insert %[[EXTRACT0]], %[[UB]] [0] : f32 into vector<1xf32> +// CHECK: return %[[RES]] : vector<1xf32> // CHECK: } func.func @shape_cast_0d1d(%arg0 : vector<f32>) -> vector<1xf32> { @@ -128,11 +128,11 @@ func.func @shape_cast_0d1d(%arg0 : vector<f32>) -> vector<1xf32> { } // CHECK-LABEL: func.func @shape_cast_1d0d( -// CHECK-SAME: %[[VAL_0:.*]]: vector<1xf32>) -> vector<f32> { -// CHECK: %[[VAL_1:.*]] = arith.constant dense<0.000000e+00> : vector<f32> -// CHECK: %[[VAL_2:.*]] = vector.extract %[[VAL_0]][0] : f32 from vector<1xf32> -// CHECK: %[[VAL_3:.*]] = vector.insertelement %[[VAL_2]], %[[VAL_1]][] : vector<f32> -// CHECK: return %[[VAL_3]] : vector<f32> +// CHECK-SAME: %[[ARG0:.*]]: vector<1xf32>) -> vector<f32> { +// CHECK: %[[UB:.*]] = ub.poison : vector<f32> +// CHECK: %[[EXTRACT0:.*]] = vector.extract %[[ARG0]][0] : f32 from vector<1xf32> +// CHECK: %[[RES:.*]] = vector.insertelement %[[EXTRACT0]], %[[UB]][] : vector<f32> +// CHECK: return %[[RES]] : vector<f32> // CHECK: } func.func @shape_cast_1d0d(%arg0 : vector<1xf32>) -> vector<f32> { diff --git a/mlir/test/Dialect/Vector/vector-transpose-lowering.mlir b/mlir/test/Dialect/Vector/vector-transpose-lowering.mlir index 219a72df52a19..83395504e8c74 100644 --- a/mlir/test/Dialect/Vector/vector-transpose-lowering.mlir +++ b/mlir/test/Dialect/Vector/vector-transpose-lowering.mlir @@ -2,9 +2,9 @@ // CHECK-LABEL: func @transpose23 // CHECK-SAME: %[[A:.*]]: vector<2x3xf32> -// CHECK: %[[Z:.*]] = arith.constant dense<0.000000e+00> : vector<3x2xf32> +// CHECK: %[[UB:.*]] = ub.poison : vector<3x2xf32> // CHECK: %[[T0:.*]] = vector.extract %[[A]][0, 0] : f32 from vector<2x3xf32> -// CHECK: %[[T1:.*]] = vector.insert %[[T0]], %[[Z]] [0, 0] : f32 into vector<3x2xf32> +// CHECK: %[[T1:.*]] = vector.insert %[[T0]], %[[UB]] [0, 0] : f32 into vector<3x2xf32> // CHECK: %[[T2:.*]] = vector.extract %[[A]][0, 1] : f32 from vector<2x3xf32> // CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[T1]] [1, 0] : f32 into vector<3x2xf32> // CHECK: %[[T4:.*]] = vector.extract %[[A]][0, 2] : f32 from vector<2x3xf32> diff --git a/mlir/test/Target/SPIRV/group-ops.mlir b/mlir/test/Target/SPIRV/group-ops.mlir index 32da4d9c26bd1..2ba7f23258e7f 100644 --- a/mlir/test/Target/SPIRV/group-ops.mlir +++ b/mlir/test/Target/SPIRV/group-ops.mlir @@ -103,5 +103,15 @@ spirv.module Logical GLSL450 requires #spirv.vce { %0 = spirv.KHR.GroupFMul %value : f32 spirv.ReturnValue %0: f32 } +} + +// ----- +spirv.module Logical GLSL450 requires #spirv.vce<v1.3, [GroupNonUniformBallot], []> { + // CHECK-LABEL: @group_non_uniform_ballot_bit_count + spirv.func @group_non_uniform_ballot_bit_count(%value: vector<4xi32>) -> i32 "None" { + // CHECK:
spirv.GroupNonUniformBallotBitCount <Subgroup> <Reduce> {{%.*}} : vector<4xi32> -> i32 + %0 = spirv.GroupNonUniformBallotBitCount <Subgroup> <Reduce> %value : vector<4xi32> -> i32 + spirv.ReturnValue %0 : i32 + } } diff --git a/mlir/test/Transforms/print-op-graph-back-edges.mlir b/mlir/test/Transforms/print-op-graph-back-edges.mlir index ed922dd7cb13b..7950125e2f735 100644 --- a/mlir/test/Transforms/print-op-graph-back-edges.mlir +++ b/mlir/test/Transforms/print-op-graph-back-edges.mlir @@ -1,21 +1,21 @@ // RUN: mlir-opt -view-op-graph %s -o %t 2>&1 | FileCheck -check-prefix=DFG %s // DFG-LABEL: digraph G { -// DFG: compound = true; -// DFG: subgraph cluster_1 { -// DFG: v2 [label = " ", shape = plain]; -// DFG: label = "builtin.module : ()\n"; -// DFG: subgraph cluster_3 { -// DFG: v4 [label = " ", shape = plain]; -// DFG: label = ""; -// DFG: v5 [fillcolor = "0.000000 1.0 1.0", label = "arith.addi : (index)\n\noverflowFlags: #arith.overflow<none>\n", shape = ellipse, style = filled]; -// DFG: v6 [fillcolor = "0.333333 1.0 1.0", label = "arith.constant : (index)\n\nvalue: 0 : index\n", shape = ellipse, style = filled]; -// DFG: v7 [fillcolor = "0.333333 1.0 1.0", label = "arith.constant : (index)\n\nvalue: 1 : index\n", shape = ellipse, style = filled]; -// DFG: } -// DFG: } -// DFG: v6 -> v5 [label = "0", style = solid]; -// DFG: v7 -> v5 [label = "1", style = solid]; -// DFG: } +// DFG-NEXT: compound = true; +// DFG-NEXT: subgraph cluster_1 { +// DFG-NEXT: v2 [label = " ", shape = plain]; +// DFG-NEXT: label = "builtin.module : ()\l"; +// DFG-NEXT: subgraph cluster_3 { +// DFG-NEXT: v4 [label = " ", shape = plain]; +// DFG-NEXT: label = ""; +// DFG-NEXT: v5 [fillcolor = "0.000000 0.3 0.95", label = "{{\{\{}}<arg_c0> %c0|<arg_c1> %c1}|arith.addi\l\loverflowFlags: #arith.overflow\<none\>\l|{<res_0> %0 index}}", shape = Mrecord, style = filled]; +// DFG-NEXT: v6 [fillcolor = "0.333333 0.3 0.95", label = "{arith.constant\l\lvalue: 0 : index\l|{<res_c0> %c0 index}}", shape = Mrecord, style = filled]; +// DFG-NEXT: v7 [fillcolor = "0.333333 0.3 0.95", label = "{arith.constant\l\lvalue: 1 : index\l|{<res_c1> %c1 index}}", shape = Mrecord, style = filled]; +// DFG-NEXT: } +// DFG-NEXT: } +// DFG-NEXT: v6:res_c0:s -> v5:arg_c0:n[style = solid]; +// DFG-NEXT: v7:res_c1:s -> v5:arg_c1:n[style = solid]; +// DFG-NEXT: } module { %add = arith.addi %c0, %c1 : index diff --git a/mlir/test/Transforms/print-op-graph-cycles.mlir b/mlir/test/Transforms/print-op-graph-cycles.mlir index 7e4eb5616a28b..ba989544419f3 100644 --- a/mlir/test/Transforms/print-op-graph-cycles.mlir +++ b/mlir/test/Transforms/print-op-graph-cycles.mlir @@ -1,45 +1,45 @@ // RUN: mlir-opt -view-op-graph -allow-unregistered-dialect %s -o %t 2>&1 | FileCheck -check-prefix=DFG %s // DFG-LABEL: digraph G { -// DFG: compound = true; -// DFG: subgraph cluster_1 { -// DFG: v2 [label = " ", shape = plain]; -// DFG: label = "builtin.module : ()\n"; -// DFG: subgraph cluster_3 { -// DFG: v4 [label = " ", shape = plain]; -// DFG: label = ""; -// DFG: subgraph cluster_5 { -// DFG: v6 [label = " ", shape = plain]; -// DFG: label = "test.graph_region : ()\n"; -// DFG: subgraph cluster_7 { -// DFG: v8 [label = " ", shape = plain]; -// DFG: label = ""; -// DFG: v9 [fillcolor = "0.000000 1.0 1.0", label = "op1 : (i32)\n", shape = ellipse, style = filled]; -// DFG: subgraph cluster_10 { -// DFG: v11 [label = " ", shape = plain]; -// DFG: label = "test.ssacfg_region : (i32)\n"; -// DFG: subgraph cluster_12 { -// DFG: v13 [label = " ", shape = plain]; -// DFG: label = ""; -// DFG: v14 [fillcolor = "0.166667 1.0 1.0", label = "op2 : (i32)\n", shape = ellipse, style = filled]; -// DFG: } -// DFG: } -// DFG: v15 [fillcolor = "0.166667 1.0 1.0", label = "op2 : (i32)\n", shape = ellipse, style = filled]; -// DFG: v16 [fillcolor = "0.500000 1.0 1.0", label = "op3 : (i32)\n", shape = ellipse, style = filled]; -// DFG: } -// DFG: } -// DFG: } -// DFG: } -// DFG: v9 -> v9 [label = "0", style = solid];
-// DFG: v15 -> v9 [label = "1", style = solid]; -// DFG: v9 -> v14 [label = "0", style = solid]; -// DFG: v11 -> v14 [ltail = cluster_10, style = solid]; -// DFG: v15 -> v14 [label = "2", style = solid]; -// DFG: v16 -> v14 [label = "3", style = solid]; -// DFG: v9 -> v15 [label = "0", style = solid]; -// DFG: v16 -> v15 [label = "1", style = solid]; -// DFG: v9 -> v16 [label = "", style = solid]; -// DFG: } +// DFG-NEXT: compound = true; +// DFG-NEXT: subgraph cluster_1 { +// DFG-NEXT: v2 [label = " ", shape = plain]; +// DFG-NEXT: label = "builtin.module : ()\l"; +// DFG-NEXT: subgraph cluster_3 { +// DFG-NEXT: v4 [label = " ", shape = plain]; +// DFG-NEXT: label = ""; +// DFG-NEXT: subgraph cluster_5 { +// DFG-NEXT: v6 [label = " ", shape = plain]; +// DFG-NEXT: label = "test.graph_region : ()\l"; +// DFG-NEXT: subgraph cluster_7 { +// DFG-NEXT: v8 [label = " ", shape = plain]; +// DFG-NEXT: label = ""; +// DFG-NEXT: v9 [fillcolor = "0.000000 0.3 0.95", label = "{{\{\{}}<arg_0> %0|<arg_2> %2}|op1\l|{<res_0> %0 i32}}", shape = Mrecord, style = filled]; +// DFG-NEXT: subgraph cluster_10 { +// DFG-NEXT: v11 [label = " ", shape = plain]; +// DFG-NEXT: label = "test.ssacfg_region : (i32)\l"; +// DFG-NEXT: subgraph cluster_12 { +// DFG-NEXT: v13 [label = " ", shape = plain]; +// DFG-NEXT: label = ""; +// DFG-NEXT: v14 [fillcolor = "0.166667 0.3 0.95", label = "{{\{\{}}<arg_0> %0|<arg_1> %1|<arg_2> %2|<arg_3> %3}|op2\l|{<res_4> %4 i32}}", shape = Mrecord, style = filled]; +// DFG-NEXT: } +// DFG-NEXT: } +// DFG-NEXT: v15 [fillcolor = "0.166667 0.3 0.95", label = "{{\{\{}}<arg_0> %0|<arg_3> %3}|op2\l|{<res_2> %2 i32}}", shape = Mrecord, style = filled]; +// DFG-NEXT: v16 [fillcolor = "0.500000 0.3 0.95", label = "{{\{\{}}<arg_0> %0}|op3\l|{<res_3> %3 i32}}", shape = Mrecord, style = filled]; +// DFG-NEXT: } +// DFG-NEXT: } +// DFG-NEXT: } +// DFG-NEXT: } +// DFG-NEXT: v9:res_0:s -> v9:arg_0:n[style = solid]; +// DFG-NEXT: v15:res_2:s -> v9:arg_2:n[style = solid]; +// DFG-NEXT: v9:res_0:s -> v14:arg_0:n[style = solid]; +// DFG-NEXT: v11 -> v14:arg_1:n[ltail = cluster_10, style = solid]; +// DFG-NEXT: v15:res_2:s -> v14:arg_2:n[style = solid]; +// DFG-NEXT: v16:res_3:s -> v14:arg_3:n[style = solid]; +// DFG-NEXT: v9:res_0:s -> v15:arg_0:n[style = solid]; +// DFG-NEXT: v16:res_3:s -> v15:arg_3:n[style = solid]; +// DFG-NEXT: v9:res_0:s -> v16:arg_0:n[style = solid]; +// DFG-NEXT: } "test.graph_region"() ({ // A Graph region %1 = "op1"(%1, %3) : (i32, i32) -> (i32) // OK: %1, %3 allowed here diff --git a/mlir/test/Transforms/print-op-graph.mlir b/mlir/test/Transforms/print-op-graph.mlir index df03194a663d9..440b037d78092 100644 --- a/mlir/test/Transforms/print-op-graph.mlir +++ b/mlir/test/Transforms/print-op-graph.mlir @@ -6,49 +6,49 @@ // DFG: subgraph {{.*}} // DFG: label = "func.func{{.*}}merge_blocks // DFG: subgraph {{.*}} { -// DFG: v[[ARG0:.*]] [label = "arg0" +// DFG: v[[ARG0:.*]] [label = "<res_arg0> %arg0 i32" // DFG: v[[CONST10:.*]] [{{.*}}label ={{.*}}10 : i32 // DFG: subgraph [[CLUSTER_MERGE_BLOCKS:.*]] { // DFG: v[[ANCHOR:.*]] [label = " ", shape = plain] // DFG: label = "test.merge_blocks // DFG: subgraph {{.*}} { -// DFG: v[[TEST_BR:.*]] [{{.*}}label = "test.br +// DFG: v[[TEST_BR:.*]] [{{.*}}label = "{{.*}}test.br // DFG: } // DFG: subgraph {{.*}} { // DFG: } // DFG: } -// DFG: v[[TEST_RET:.*]] [{{.*}}label = "test.return -// DFG: v[[ARG0]] -> v[[TEST_BR]] -// DFG: v[[CONST10]] -> v[[TEST_BR]] -// DFG: v[[ANCHOR]] -> v[[TEST_RET]] [ltail = [[CLUSTER_MERGE_BLOCKS]], style = solid]; -// DFG: v[[ANCHOR]] -> v[[TEST_RET]] [ltail = [[CLUSTER_MERGE_BLOCKS]], style = solid];
+// DFG: v[[TEST_RET:.*]] [{{.*}}label = "{{.*}}test.return +// DFG: v[[ARG0]]:res_arg0:s -> v[[TEST_BR]]:arg_arg0:n +// DFG: v[[CONST10]]:res_c10_i32:s -> v[[TEST_BR]] +// DFG: v[[ANCHOR]] -> v[[TEST_RET]]:arg_1_0:n[ltail = [[CLUSTER_MERGE_BLOCKS]], style = solid]; +// DFG: v[[ANCHOR]] -> v[[TEST_RET]]:arg_1_1:n[ltail = [[CLUSTER_MERGE_BLOCKS]], style = solid]; // CFG-LABEL: digraph G { // CFG: subgraph {{.*}} { // CFG: subgraph {{.*}} // CFG: label = "func.func{{.*}}merge_blocks // CFG: subgraph {{.*}} { -// CFG: v[[C1:.*]] [{{.*}}label = "arith.constant -// CFG: v[[C2:.*]] [{{.*}}label = "arith.constant -// CFG: v[[C3:.*]] [{{.*}}label = "arith.constant -// CFG: v[[C4:.*]] [{{.*}}label = "arith.constant -// CFG: v[[TEST_FUNC:.*]] [{{.*}}label = "test.func +// CFG: v[[C1:.*]] [{{.*}}label = "{arith.constant +// CFG: v[[C2:.*]] [{{.*}}label = "{arith.constant +// CFG: v[[C3:.*]] [{{.*}}label = "{arith.constant +// CFG: v[[C4:.*]] [{{.*}}label = "{arith.constant +// CFG: v[[TEST_FUNC:.*]] [{{.*}}label = "{test.func // CFG: subgraph [[CLUSTER_MERGE_BLOCKS:.*]] { // CFG: v[[ANCHOR:.*]] [label = " ", shape = plain] // CFG: label = "test.merge_blocks // CFG: subgraph {{.*}} { -// CFG: v[[TEST_BR:.*]] [{{.*}}label = "test.br +// CFG: v[[TEST_BR:.*]] [{{.*}}label = "{{.*}}test.br // CFG: } // CFG: subgraph {{.*}} { // CFG: } // CFG: } -// CFG: v[[TEST_RET:.*]] [{{.*}}label = "test.return +// CFG: v[[TEST_RET:.*]] [{{.*}}label = "{{.*}}test.return // CFG: v[[C1]] -> v[[C2]] // CFG: v[[C2]] -> v[[C3]] // CFG: v[[C3]] -> v[[C4]] // CFG: v[[C4]] -> v[[TEST_FUNC]] -// CFG: v[[TEST_FUNC]] -> v[[ANCHOR]] [lhead = [[CLUSTER_MERGE_BLOCKS]], style = dashed]; -// CFG: v[[ANCHOR]] -> v[[TEST_RET]] [ltail = [[CLUSTER_MERGE_BLOCKS]], style = dashed]; +// CFG: v[[TEST_FUNC]] -> v[[ANCHOR]][lhead = [[CLUSTER_MERGE_BLOCKS]], style = dashed]; +// CFG: v[[ANCHOR]] -> v[[TEST_RET]][ltail = [[CLUSTER_MERGE_BLOCKS]], style = dashed]; func.func @merge_blocks(%arg0: i32, %arg1 : i32) -> () { %0 = arith.constant dense<[[0, 1], [2, 3]]> : tensor<2x2xi32> diff --git a/utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel b/utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel index 561f2b8f408f0..61f4700b057ab 100644 --- a/utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel @@ -224,6 +224,7 @@ cc_test( # Skip a test that relies on reading files in a way that doesn't easily # work with Bazel. - "--gtest_filter=-NativeSymbolReuseTest.*", + # TODO: this test is failing on some configs, investigate and re-enable it. + "--gtest_filter=-NativeSymbolReuseTest.*:DebugLineBasicFixture.LookupAddressRangeWithStmtSequenceOffset", ], features = ["-layering_check"], # #include "../lib/CodeGen/AsmPrinter/DwarfStringPool.h" deps = [ diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel index 5f7aaf7f8f31b..e07891f004850 100644 --- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel @@ -5224,6 +5224,7 @@ cc_library( ":Support", ":TensorDialect", ":TransformUtils", + ":UBDialect", ":VectorDialect", ":VectorEnumsIncGen", ":VectorInterfaces",
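The vector test updates above all reflect a single change to the lowering: the unrolled shape_cast/transpose expansions now seed their extract/insert chains with ub.poison instead of a zero arith.constant, since every element of the seed is overwritten before the result is used. A minimal sketch of the lowered form (illustrative SSA names, mirroring the @shape_casts checks above):

  %seed = ub.poison : vector<4xf32>
  %e0 = vector.extract %src[0] : vector<2xf32> from vector<2x2xf32>
  %i0 = vector.insert_strided_slice %e0, %seed {offsets = [0], strides = [1]} : vector<2xf32> into vector<4xf32>
  %e1 = vector.extract %src[1] : vector<2xf32> from vector<2x2xf32>
  %i1 = vector.insert_strided_slice %e1, %i0 {offsets = [2], strides = [1]} : vector<2xf32> into vector<4xf32>

Because no poison lane survives into %i1, a backend is free to materialize the chain without first zeroing the destination register; this is also why the lowering picks up the :UBDialect dependency added in the Bazel overlay above.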